repo_name (stringlengths 7-71) | file_path (stringlengths 5-118) | context (list) | import_statement (stringlengths 45-12.5k) | token_num (int64 641-99.4k) | cropped_code (stringlengths 44-17k) | all_code (stringlengths 43-754k) | next_line (stringlengths 2-330) | gold_snippet_index (int64 0-68) | created_at (stringlengths 25-25) | level (stringclasses 9 values) |
---|---|---|---|---|---|---|---|---|---|---|
Harvard-Ophthalmology-AI-Lab/FairSeg | SAMed/segment_anything/modeling/image_encoder.py | [
{
"identifier": "LayerNorm2d",
"path": "SAMed/segment_anything/modeling/common.py",
"snippet": "class LayerNorm2d(nn.Module):\n def __init__(self, num_channels: int, eps: float = 1e-6) -> None:\n super().__init__()\n self.weight = nn.Parameter(torch.ones(num_channels))\n self.bias = nn.Parameter(torch.zeros(num_channels))\n self.eps = eps\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n u = x.mean(1, keepdim=True)\n s = (x - u).pow(2).mean(1, keepdim=True)\n x = (x - u) / torch.sqrt(s + self.eps)\n x = self.weight[:, None, None] * x + self.bias[:, None, None]\n return x"
},
{
"identifier": "MLPBlock",
"path": "SAMed/segment_anything/modeling/common.py",
"snippet": "class MLPBlock(nn.Module):\n def __init__(\n self,\n embedding_dim: int,\n mlp_dim: int,\n act: Type[nn.Module] = nn.GELU,\n ) -> None:\n super().__init__()\n self.lin1 = nn.Linear(embedding_dim, mlp_dim)\n self.lin2 = nn.Linear(mlp_dim, embedding_dim)\n self.act = act()\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n return self.lin2(self.act(self.lin1(x)))"
}
] | import torch
import torch.nn as nn
import torch.nn.functional as F
from icecream import ic
from typing import Optional, Tuple, Type
from .common import LayerNorm2d, MLPBlock | 1,147 | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# This class and its supporting functions below lightly adapted from the ViTDet backbone available at: https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/backbone/vit.py # noqa
class ImageEncoderViT(nn.Module):
def __init__(
self,
img_size: int = 1024,
patch_size: int = 16,
in_chans: int = 3,
embed_dim: int = 768,
depth: int = 12,
num_heads: int = 12,
mlp_ratio: float = 4.0,
out_chans: int = 256,
qkv_bias: bool = True,
norm_layer: Type[nn.Module] = nn.LayerNorm,
act_layer: Type[nn.Module] = nn.GELU,
use_abs_pos: bool = True,
use_rel_pos: bool = False,
rel_pos_zero_init: bool = True,
window_size: int = 0,
global_attn_indexes: Tuple[int, ...] = (),
) -> None:
"""
Args:
img_size (int): Input image size.
patch_size (int): Patch size.
in_chans (int): Number of input image channels.
embed_dim (int): Patch embedding dimension.
depth (int): Depth of ViT.
num_heads (int): Number of attention heads in each ViT block.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool): If True, add a learnable bias to query, key, value.
norm_layer (nn.Module): Normalization layer.
act_layer (nn.Module): Activation layer.
use_abs_pos (bool): If True, use absolute positional embeddings.
use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
window_size (int): Window size for window attention blocks.
global_attn_indexes (list): Indexes for blocks using global attention.
"""
super().__init__()
self.img_size = img_size
self.patch_embed = PatchEmbed(
kernel_size=(patch_size, patch_size),
stride=(patch_size, patch_size),
in_chans=in_chans,
embed_dim=embed_dim,
)
self.pos_embed: Optional[nn.Parameter] = None
if use_abs_pos:
# Initialize absolute positional embedding with pretrain image size.
self.pos_embed = nn.Parameter(
torch.zeros(1, img_size // patch_size, img_size // patch_size, embed_dim)
)
self.blocks = nn.ModuleList()
for i in range(depth):
block = Block(
dim=embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
norm_layer=norm_layer,
act_layer=act_layer,
use_rel_pos=use_rel_pos,
rel_pos_zero_init=rel_pos_zero_init,
window_size=window_size if i not in global_attn_indexes else 0,
input_size=(img_size // patch_size, img_size // patch_size),
)
self.blocks.append(block)
self.neck = nn.Sequential(
nn.Conv2d(
embed_dim,
out_chans,
kernel_size=1,
bias=False,
),
| # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# This class and its supporting functions below lightly adapted from the ViTDet backbone available at: https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/backbone/vit.py # noqa
class ImageEncoderViT(nn.Module):
def __init__(
self,
img_size: int = 1024,
patch_size: int = 16,
in_chans: int = 3,
embed_dim: int = 768,
depth: int = 12,
num_heads: int = 12,
mlp_ratio: float = 4.0,
out_chans: int = 256,
qkv_bias: bool = True,
norm_layer: Type[nn.Module] = nn.LayerNorm,
act_layer: Type[nn.Module] = nn.GELU,
use_abs_pos: bool = True,
use_rel_pos: bool = False,
rel_pos_zero_init: bool = True,
window_size: int = 0,
global_attn_indexes: Tuple[int, ...] = (),
) -> None:
"""
Args:
img_size (int): Input image size.
patch_size (int): Patch size.
in_chans (int): Number of input image channels.
embed_dim (int): Patch embedding dimension.
depth (int): Depth of ViT.
num_heads (int): Number of attention heads in each ViT block.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool): If True, add a learnable bias to query, key, value.
norm_layer (nn.Module): Normalization layer.
act_layer (nn.Module): Activation layer.
use_abs_pos (bool): If True, use absolute positional embeddings.
use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
window_size (int): Window size for window attention blocks.
global_attn_indexes (list): Indexes for blocks using global attention.
"""
super().__init__()
self.img_size = img_size
self.patch_embed = PatchEmbed(
kernel_size=(patch_size, patch_size),
stride=(patch_size, patch_size),
in_chans=in_chans,
embed_dim=embed_dim,
)
self.pos_embed: Optional[nn.Parameter] = None
if use_abs_pos:
# Initialize absolute positional embedding with pretrain image size.
self.pos_embed = nn.Parameter(
torch.zeros(1, img_size // patch_size, img_size // patch_size, embed_dim)
)
self.blocks = nn.ModuleList()
for i in range(depth):
block = Block(
dim=embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
norm_layer=norm_layer,
act_layer=act_layer,
use_rel_pos=use_rel_pos,
rel_pos_zero_init=rel_pos_zero_init,
window_size=window_size if i not in global_attn_indexes else 0,
input_size=(img_size // patch_size, img_size // patch_size),
)
self.blocks.append(block)
self.neck = nn.Sequential(
nn.Conv2d(
embed_dim,
out_chans,
kernel_size=1,
bias=False,
), | LayerNorm2d(out_chans), | 0 | 2023-11-03 17:05:40+00:00 | 2k |
anand2312/quill-server | quill_server/realtime/events.py | [
{
"identifier": "User",
"path": "quill_server/db/models.py",
"snippet": "class User(Base):\n __tablename__ = \"user\"\n\n id: Mapped[UUID] = mapped_column(pg_UUID(as_uuid=True), primary_key=True, default=uuid4) # noqa: A003\n username: Mapped[str] = mapped_column(unique=True)\n password: Mapped[str]\n created_at: Mapped[datetime] = mapped_column(DateTime, default=func.now())\n\n def __repr__(self) -> str:\n return f\"<User(id={self.id} username={self.username})>\""
},
{
"identifier": "GameMember",
"path": "quill_server/realtime/room.py",
"snippet": "class GameMember(BaseModel):\n \"\"\"Represents a user currently playing in a Quill room.\"\"\"\n\n user_id: str\n username: str"
},
{
"identifier": "Room",
"path": "quill_server/realtime/room.py",
"snippet": "class Room(BaseModel):\n \"\"\"Represents a Quill game room.\"\"\"\n\n room_id: str\n owner: GameMember\n users: list[GameMember]\n status: GameStatus\n\n @classmethod\n def new(cls: type[\"Room\"], owner: User) -> \"Room\":\n return cls(\n room_id=str(uuid4()),\n owner=_db_user_to_game_member(owner),\n users=[],\n status=GameStatus.LOBBY,\n )\n\n async def start(self) -> None:\n \"\"\"Start the game in this room.\"\"\"\n self.status = GameStatus.ONGOING\n logger.info(f\"Setting room:{self.room_id}:status = ONGOING\")\n await cache.client.set(f\"room:{self.room_id}:status\", str(self.status))\n\n async def end(self) -> None:\n \"\"\"End the game in this room.\"\"\"\n self.status = GameStatus.ENDED\n logger.info(f\"Setting room:{self.room_id}:status = ENDED\")\n await cache.client.set(f\"room:{self.room_id}:status\", str(self.status))\n\n async def join(self, user: User) -> None:\n \"\"\"Add a user to this room.\"\"\"\n # reject connection if the user is already in the room...\n if any([u.user_id == str(user.id) for u in self.users]):\n raise ValueError(\"User is already in this room\")\n # or if the game isn't in the lobby state anymore...\n elif self.status != GameStatus.LOBBY:\n raise ValueError(\"Room is no longer accepting members\")\n # or if the room already has 8 members\n elif len(self.users) == 8:\n raise ValueError(\"Maximum room capacity reached\")\n data = _db_user_to_game_member(user)\n self.users.append(data)\n logger.info(f\"Adding {data.username} to room:{self.room_id}\")\n await typing.cast(\n typing.Awaitable[int],\n cache.client.rpush(f\"room:{self.room_id}:users\", data.model_dump_json()),\n )\n\n async def leave(self, user: User) -> None:\n \"\"\"Remove a user from this room.\"\"\"\n data = _db_user_to_game_member(user)\n self.users.remove(data)\n logger.info(f\"Removing {data.username} from room:{self.room_id}\")\n res = await typing.cast(\n typing.Awaitable[int],\n cache.client.lrem(f\"room:{self.room_id}:users\", 1, data.model_dump_json()),\n )\n if res != 1:\n logger.warning(\n f\"Attempted removing {data.username} from room:{self.room_id} \"\n f\"but Redis gave a response != 1 ({res=})\"\n )\n\n async def to_redis(self) -> None:\n \"\"\"Writes the room to Redis.\"\"\"\n # all the dictionaries are being dumped to redis as JSON strings\n # room:id:users will be a list of JSON strings\n key = f\"room:{self.room_id}\"\n owner = self.owner.model_dump_json()\n users = [i.model_dump_json() for i in self.users]\n status = str(self.status)\n logger.info(f\"Writing {key} to Redis\")\n async with cache.client.pipeline(transaction=True) as pipe:\n pipe.set(f\"{key}:owner\", owner)\n pipe.set(f\"{key}:status\", str(status))\n if len(users) > 0:\n pipe.rpush(f\"{key}:users\", *users)\n await pipe.execute()\n logger.info(f\"Saved {key} to Redis\")\n\n @classmethod\n async def from_redis(cls: type[\"Room\"], room_id: str) -> typing.Optional[\"Room\"]:\n key = f\"room:{room_id}\"\n logger.info(f\"Fetching {key} from Redis\")\n status = await cache.client.get(f\"{key}:status\")\n if not status:\n logger.warning(f\"{key} does not exist in cache\")\n return\n owner_res = await cache.client.get(f\"{key}:owner\")\n owner = loads(owner_res)\n # redis-py has incorrect return types set, so we need to cast here\n # https://github.com/redis/redis-py/issues/2933\n users_res = await typing.cast(\n typing.Awaitable[list[bytes]], cache.client.lrange(f\"{key}:users\", 0, -1)\n )\n users = [loads(i) for i in users_res]\n return cls(room_id=room_id, owner=owner, users=users, 
status=status.decode())"
},
{
"identifier": "ChatMessage",
"path": "quill_server/realtime/room.py",
"snippet": "class ChatMessage(BaseModel):\n \"\"\"Represents a message sent by a Quill player.\"\"\"\n\n username: str\n message: str\n has_guessed: bool"
},
{
"identifier": "_db_user_to_game_member",
"path": "quill_server/realtime/room.py",
"snippet": "def _db_user_to_game_member(user: User) -> GameMember:\n return GameMember(user_id=str(user.id), username=user.username)"
},
{
"identifier": "MessageResponse",
"path": "quill_server/schema.py",
"snippet": "class MessageResponse(BaseModel):\n message: str"
}
] | from enum import StrEnum, auto
from functools import partial
from typing import Any, Generic, TypeVar
from collections.abc import Awaitable
from loguru import logger
from pydantic import BaseModel
from redis.asyncio import Redis
from quill_server.db.models import User
from quill_server.realtime.room import GameMember, Room, ChatMessage, _db_user_to_game_member
from quill_server.schema import MessageResponse
import typing | 1,548 |
DataT = TypeVar("DataT", bound=BaseModel)
# the excalidraw element event contains many fields
# https://github.com/excalidraw/excalidraw/blob/master/src/element/types.ts#L27-L141
ExcalidrawElement = dict[str, Any]
class Drawing(BaseModel):
|
DataT = TypeVar("DataT", bound=BaseModel)
# the excalidraw element event contains many fields
# https://github.com/excalidraw/excalidraw/blob/master/src/element/types.ts#L27-L141
ExcalidrawElement = dict[str, Any]
class Drawing(BaseModel): | user: GameMember | 1 | 2023-11-03 12:43:18+00:00 | 2k |
OPTML-Group/DeepZero | algorithm/prune/main.py | [
{
"identifier": "zoo_grasp_importance_score",
"path": "algorithm/prune/importance_scores.py",
"snippet": "def zoo_grasp_importance_score(\n model,\n dataloader,\n samples_per_class,\n class_num,\n zoo_rs_size,\n zoo_step_size,\n loss_func = torch.nn.CrossEntropyLoss()\n ):\n\n score_dict = {}\n device = next(model.parameters()).device\n x, y = fetch_data(dataloader, class_num, samples_per_class)\n x, y = x.to(device), y.to(device)\n\n params = extract_conv2d_and_linear_weights(model)\n \n f_theta = partial(f, network=model, x=x, y=y, loss_func=loss_func)\n\n g0 = rge(f_theta, params, zoo_rs_size, zoo_step_size)\n modified_params = {}\n for key, param in params.items():\n modified_params[key] = param.data + g0[key].data * zoo_step_size\n g1 = rge(f_theta, modified_params, zoo_rs_size, zoo_step_size)\n Hg = {}\n for key, param in params.items():\n Hg[key] = (g1[key].data - g0[key].data) / zoo_step_size\n\n for name, m in model.named_modules():\n if isinstance(m, (torch.nn.Conv2d, torch.nn.Linear)):\n if hasattr(m, \"weight_orig\"):\n score_dict[(m, 'weight')] = -m.weight_orig.clone().detach() * Hg[f'{name}.weight_orig']\n else:\n score_dict[(m, 'weight')] = -m.weight.clone().detach() * Hg[f'{name}.weight']\n\n return score_dict"
},
{
"identifier": "grasp_importance_score",
"path": "algorithm/prune/importance_scores.py",
"snippet": "def grasp_importance_score(\n model,\n dataloader,\n samples_per_class,\n class_num,\n loss_func = torch.nn.CrossEntropyLoss()\n ):\n\n temperature = 200\n score_dict = {}\n model.zero_grad()\n device = next(model.parameters()).device\n x, y = fetch_data(dataloader, class_num, samples_per_class)\n x, y = x.to(device), y.to(device)\n loss = loss_func(model(x) / temperature, y)\n gs = grad(loss, model.parameters(), create_graph=True)\n model.zero_grad()\n t = sum([(g*g.data).sum() for g in gs])\n t.backward()\n\n for m in model.modules():\n if isinstance(m, (torch.nn.Conv2d, torch.nn.Linear)):\n if hasattr(m, \"weight_orig\"):\n score_dict[(m, 'weight')] = (m.weight_orig.grad.clone().detach() * m.weight.clone().detach()).abs()\n else:\n score_dict[(m, 'weight')] = (m.weight.grad.clone().detach() * m.weight.clone().detach()).abs()\n model.zero_grad()\n for g in gs:\n del g.grad\n return score_dict"
},
{
"identifier": "random_importance_score",
"path": "algorithm/prune/importance_scores.py",
"snippet": "def random_importance_score(\n model\n ):\n score_dict = {}\n for m in model.modules():\n if isinstance(m, (torch.nn.Conv2d, torch.nn.Linear)):\n score_dict[(m, 'weight')] = torch.randn_like(m.weight)\n return score_dict"
}
] | import torch
from torch.nn.utils import prune
from copy import deepcopy
from .importance_scores import zoo_grasp_importance_score, grasp_importance_score, random_importance_score | 963 |
__all__ = ['global_prune', 'check_sparsity', 'check_grad_sparsity', 'custom_prune', 'extract_mask', 'remove_prune', 'layer_sparsity']
def global_prune(model, ratio, method, class_num=None, dataloader=None, sample_per_classes=25, zoo_sample_size=None, zoo_step_size=None, layer_wise_sparsity=None):
if method == 'grasp':
score_dict = grasp_importance_score(model, dataloader, sample_per_classes, class_num)
prune.global_unstructured(
parameters=score_dict.keys(),
pruning_method=prune.L1Unstructured,
amount=ratio,
importance_scores=score_dict,
)
elif method == 'zo_grasp':
|
__all__ = ['global_prune', 'check_sparsity', 'check_grad_sparsity', 'custom_prune', 'extract_mask', 'remove_prune', 'layer_sparsity']
def global_prune(model, ratio, method, class_num=None, dataloader=None, sample_per_classes=25, zoo_sample_size=None, zoo_step_size=None, layer_wise_sparsity=None):
if method == 'grasp':
score_dict = grasp_importance_score(model, dataloader, sample_per_classes, class_num)
prune.global_unstructured(
parameters=score_dict.keys(),
pruning_method=prune.L1Unstructured,
amount=ratio,
importance_scores=score_dict,
)
elif method == 'zo_grasp': | score_dict = zoo_grasp_importance_score(model, dataloader, sample_per_classes, class_num, zoo_sample_size, zoo_step_size) | 0 | 2023-11-01 14:47:38+00:00 | 2k |
S3raphimCS/Hackathon_telehack | backend/SPO_KROT/metrics/admin.py | [
{
"identifier": "ExcelFile",
"path": "backend/SPO_KROT/metrics/models.py",
"snippet": "class ExcelFile(models.Model):\n file = models.FileField(\n upload_to='metrics',\n unique=True,\n blank=True, null=True,\n validators=[FileExtensionValidator(['xlsx', 'xls', 'xlsm'])],\n )\n\n @property\n def filename(self):\n return self.file.name.split('/')[-1:][0]"
},
{
"identifier": "Measurements",
"path": "backend/SPO_KROT/metrics/models.py",
"snippet": "class Measurements(models.Model):\n \"\"\"Модель записи измерений из отчета, которые будут изменяться.\"\"\"\n class Meta:\n verbose_name = \"Измерение\"\n verbose_name_plural = \"Измерения\"\n\n operator = models.ForeignKey(\n Operator,\n on_delete=models.CASCADE,\n )\n report = models.ForeignKey(\n Report,\n on_delete=models.CASCADE,\n )\n voice_service_non_accessibility = models.FloatField(\n _(\"Доля неуспешных попыток установления голосового соединения\"),\n validators=PERCENTAGE_VALIDATOR\n )\n voice_service_cut_off = models.FloatField(\n _(\"Доля обрывов голосовых соединений\"),\n validators=PERCENTAGE_VALIDATOR\n )\n speech_quality_on_call = models.FloatField(\n _(\"Средняя разборчивость речи на соединение\"),\n )\n negative_mos_samples_ratio = models.FloatField(\n _(\"Доля голосовых соединений с низкой разборчивостью речи\"),\n validators=PERCENTAGE_VALIDATOR\n )\n undelivered_messages = models.FloatField(\n _(\"Доля недоставленных SMS сообщений\"),\n validators=PERCENTAGE_VALIDATOR\n )\n avg_sms_delivery_time = models.FloatField(\n _(\"Среднее время доставки SMS сообщений\"),\n )\n http_failure_session = models.FloatField(\n _(\"Доля неуспешных сессий по протоколу HTTP\"),\n validators=PERCENTAGE_VALIDATOR\n )\n http_ul_mean_userdata_rate = models.FloatField(\n _(\"Среднее значение скорости передачи данных от абонента\"),\n )\n http_dl_mean_userdata_rate = models.FloatField(\n _(\"Среднее значение скорости передачи данных к абоненту\"),\n )\n http_session_time = models.FloatField(\n _(\"Продолжительность успешной сессии\"),\n )\n number_of_test_voice_connections = models.IntegerField(\n _(\"Общее количество тестовых голосовых соединений \"),\n )\n number_of_voice_sequences = models.IntegerField(\n _(\"Общее количество голосовых последовательностей в оцениваемых соединениях\"),\n )\n voice_connections_with_low_intelligibility = models.IntegerField(\n _(\"Количество голосовых соединений с низкой разборчивостью\"),\n )\n number_of_sms_messages = models.IntegerField(\n _(\"Общее количество отправленных SMS - сообщений\"),\n )\n number_of_connections_attempts_http = models.IntegerField(\n _(\"Общее количество попыток соединений с сервером передачи данных HTTP\"),\n )\n number_of_test_sessions_http = models.IntegerField(\n _(\"Общее количество тестовых сессий по протоколу HTTP\"),\n )\n\n def __str__(self):\n return f\"Метрика {self.operator} из отчета {self.report}\""
},
{
"identifier": "Operator",
"path": "backend/SPO_KROT/metrics/models.py",
"snippet": "class Operator(models.Model):\n \"\"\"Модель операторов связи для возможности добавления новых.\"\"\"\n class Meta:\n verbose_name = \"Оператор\"\n verbose_name_plural = \"Операторы\"\n\n name = models.CharField(\n _(\"Название оператора\"),\n max_length=50,\n blank=False, null=False,\n unique=True,\n )\n\n def __str__(self) -> models.CharField:\n return self.name"
},
{
"identifier": "Report",
"path": "backend/SPO_KROT/metrics/models.py",
"snippet": "class Report(models.Model):\n \"\"\"Модель отчетов для потенциального хранения информации об отчетах в БД.\"\"\"\n class Meta:\n verbose_name = \"Отчет\"\n verbose_name_plural = \"Отчеты\"\n\n title = models.CharField(\n _(\"Название отчета\"),\n max_length=200,\n blank=False, null=False,\n )\n region = models.CharField(\n _(\"Регион\"),\n max_length=50,\n blank=True, null=True,\n )\n city = models.CharField(\n _(\"Город\"),\n max_length=100,\n blank=True, null=True\n )\n start_date = models.DateField(\n _(\"Дата начала измерений\"),\n blank=True, null=True,\n )\n end_date = models.DateField(\n _(\"Дата конца измерений\"),\n blank=True, null=True,\n )\n publisher = models.ForeignKey(\n get_user_model(),\n on_delete=models.SET_NULL,\n null=True,\n )\n published = models.DateTimeField(\n auto_now_add=True,\n )\n\n def __str__(self):\n return f\"с {self.start_date} по {self.end_date} Отчет: {self.title}\""
}
] | from django.contrib import admin
from .models import ExcelFile, Measurements, Operator, Report | 1,372 |
@admin.register(Operator)
class OperatorAdmin(admin.ModelAdmin):
list_display = ('name',)
list_per_page = 15
search_fields = ("name",)
readonly_fields = ('id',)
|
@admin.register(Operator)
class OperatorAdmin(admin.ModelAdmin):
list_display = ('name',)
list_per_page = 15
search_fields = ("name",)
readonly_fields = ('id',)
| @admin.register(Report) | 3 | 2023-11-09 12:55:04+00:00 | 2k |
lz1oceani/LLM-As-Hierarchical-Policy | hlm/utils/metric_utils.py | [
{
"identifier": "normalize_answer",
"path": "hlm/utils/math_answer_utils.py",
"snippet": "def normalize_answer(text, answer_type=\"text\"):\n ret = normalize_answer_core(text, answer_type)\n try:\n str(ret)\n except:\n ret = None\n return \"No answer!\" if ret is None else ret"
},
{
"identifier": "is_set",
"path": "hlm/utils/math_answer_utils.py",
"snippet": "def is_set(item):\n type_str = str(type(item)).lower()\n return \"sympy\" in type_str and \"set\" in type_str"
},
{
"identifier": "is_sympy",
"path": "hlm/utils/math_answer_utils.py",
"snippet": "def is_sympy(item):\n return \"sympy\" in str(type(item)).lower()"
},
{
"identifier": "is_constant",
"path": "hlm/utils/math_answer_utils.py",
"snippet": "def is_constant(item):\n if isinstance(item, Number):\n return True\n elif hasattr(item, \"is_constant\") and item.is_constant():\n return True\n else:\n return False"
},
{
"identifier": "to_set",
"path": "hlm/utils/math_answer_utils.py",
"snippet": "def to_set(point): # (x, y) can be a point or a open interval\n if is_point(point, dim=2):\n return Interval.open(point[0], point[1])\n elif isinstance(point, Number):\n return FiniteSet(point)\n elif isinstance(point, (list, tuple)):\n return FiniteSet(*point)\n else:\n return point"
},
{
"identifier": "is_relation",
"path": "hlm/utils/math_answer_utils.py",
"snippet": "def is_relation(item):\n type_str = str(type(item)).lower()\n return \"sympy\" in type_str and \"relation\" in type_str"
}
] | import os, warnings
import numpy as np, re, time, signal, sympy, scipy
from sympy.utilities.exceptions import SymPyDeprecationWarning
from collections import defaultdict
from numbers import Number
from IPython import embed
from copy import deepcopy
from itertools import chain
from sympy.parsing.latex import parse_latex
from sympy.core.expr import Expr
from sympy import Interval, conjugate, Abs
from .math_answer_utils import normalize_answer, is_set, is_sympy, is_constant, to_set, is_relation
from math import *
from .misc import timeout_call | 1,417 |
os.environ["USE_SYMENGINE"] = "1"
warnings.simplefilter("ignore", SyntaxWarning)
warnings.simplefilter("ignore", RuntimeWarning)
warnings.filterwarnings("ignore", category=SymPyDeprecationWarning)
# from sympy import Symbol, Eq, simplify, solve
NO_ANSWER = "No answer!"
SKIP_ANSWER_TEMPLATE = [
"Code cannot be executed!",
"Code contains infinite loop!",
"no possible values",
NO_ANSWER,
]
SKIP_ANSWER_TEMPLATE = SKIP_ANSWER_TEMPLATE + [_.lower() for _ in SKIP_ANSWER_TEMPLATE]
ZERO_ANSWER_TEMPLATE = [
"doesn't have any money left",
"used up all of",
]
def check_basics(source, target):
if not (isinstance(source, (Expr, Number)) and isinstance(target, (Expr, Number))):
return True
source_symbols = source.free_symbols if isinstance(source, Expr) else {}
target_symbols = target.free_symbols if isinstance(target, Expr) else {}
if source_symbols != target_symbols:
return False
try:
if len(source_symbols) > 0:
values = {_: np.random.rand() for _ in source_symbols}
source = source.subs(values)
target = target.subs(values)
else:
source = source.evalf()
target = target.evalf()
if not isinstance(source, Number) or not isinstance(target, Number):
source = abs(source).simplify() if not isinstance(source, Number) else source
target = abs(target).simplify() if not isinstance(target, Number) else target
return bool(np.abs(source - target) < 1e-6)
except:
pass
return True
def run_sympy_compare(source, target):
def has_fn(x):
for name in ["equals", "compare", "intersect"]:
if hasattr(x, name):
return True
return False
# print(is_constant(source), is_constant(target))
# return False
if is_constant(source) and is_constant(target):
source = source if isinstance(source, Number) else source.evalf()
target = target if isinstance(target, Number) else target.evalf()
try:
return bool(np.abs(source - target) < 1e-6)
except:
return False
if is_set(source) or is_set(target):
source = to_set(source)
target = to_set(target)
if not has_fn(source):
source, target = target, source
assert has_fn(source), [source, target, type(source), type(target)]
try:
if hasattr(source, "equals"): # Work for expressions and points
if is_relation(source) != is_relation(target):
return False
if not is_relation(source) and not check_basics(source, target):
return False
ret = source.equals(target)
ret = False if ret is None else bool(ret)
elif hasattr(source, "intersect"):
sign1 = source.intersect(target.complement(sympy.S.Reals)).simplify()
sign1 = sign1.is_empty or (np.abs(sign1.measure) < 1e-6 and sign1.is_open)
sign2 = target.intersect(source.complement(sympy.S.Reals)).simplify()
sign2 = sign2.is_empty or (np.abs(sign2.measure) < 1e-6 and sign2.is_open)
ret = sign1 and sign2
elif hasattr(source, "compare"):
ret = source.compare(target) == 0
except:
ret = False
return bool(ret)
def compare_items(source, target, answer_type="text", need_normalize=True):
if isinstance(source, (list, tuple)):
return [compare_items(_, target, answer_type=answer_type, need_normalize=need_normalize) for _ in source]
if source == "No answer!" or target == "No answer!" or source is None or target is None:
return False
if answer_type in ["text", "date", "bool"]:
return source.lower() == target.lower()
if isinstance(source, str) and isinstance(target, str):
if "=" in source and "=" not in target:
source = source.split("=")[-1]
if "=" in target and "=" not in source:
target = target.split("=")[-1]
if need_normalize:
|
os.environ["USE_SYMENGINE"] = "1"
warnings.simplefilter("ignore", SyntaxWarning)
warnings.simplefilter("ignore", RuntimeWarning)
warnings.filterwarnings("ignore", category=SymPyDeprecationWarning)
# from sympy import Symbol, Eq, simplify, solve
NO_ANSWER = "No answer!"
SKIP_ANSWER_TEMPLATE = [
"Code cannot be executed!",
"Code contains infinite loop!",
"no possible values",
NO_ANSWER,
]
SKIP_ANSWER_TEMPLATE = SKIP_ANSWER_TEMPLATE + [_.lower() for _ in SKIP_ANSWER_TEMPLATE]
ZERO_ANSWER_TEMPLATE = [
"doesn't have any money left",
"used up all of",
]
def check_basics(source, target):
if not (isinstance(source, (Expr, Number)) and isinstance(target, (Expr, Number))):
return True
source_symbols = source.free_symbols if isinstance(source, Expr) else {}
target_symbols = target.free_symbols if isinstance(target, Expr) else {}
if source_symbols != target_symbols:
return False
try:
if len(source_symbols) > 0:
values = {_: np.random.rand() for _ in source_symbols}
source = source.subs(values)
target = target.subs(values)
else:
source = source.evalf()
target = target.evalf()
if not isinstance(source, Number) or not isinstance(target, Number):
source = abs(source).simplify() if not isinstance(source, Number) else source
target = abs(target).simplify() if not isinstance(target, Number) else target
return bool(np.abs(source - target) < 1e-6)
except:
pass
return True
def run_sympy_compare(source, target):
def has_fn(x):
for name in ["equals", "compare", "intersect"]:
if hasattr(x, name):
return True
return False
# print(is_constant(source), is_constant(target))
# return False
if is_constant(source) and is_constant(target):
source = source if isinstance(source, Number) else source.evalf()
target = target if isinstance(target, Number) else target.evalf()
try:
return bool(np.abs(source - target) < 1e-6)
except:
return False
if is_set(source) or is_set(target):
source = to_set(source)
target = to_set(target)
if not has_fn(source):
source, target = target, source
assert has_fn(source), [source, target, type(source), type(target)]
try:
if hasattr(source, "equals"): # Work for expressions and points
if is_relation(source) != is_relation(target):
return False
if not is_relation(source) and not check_basics(source, target):
return False
ret = source.equals(target)
ret = False if ret is None else bool(ret)
elif hasattr(source, "intersect"):
sign1 = source.intersect(target.complement(sympy.S.Reals)).simplify()
sign1 = sign1.is_empty or (np.abs(sign1.measure) < 1e-6 and sign1.is_open)
sign2 = target.intersect(source.complement(sympy.S.Reals)).simplify()
sign2 = sign2.is_empty or (np.abs(sign2.measure) < 1e-6 and sign2.is_open)
ret = sign1 and sign2
elif hasattr(source, "compare"):
ret = source.compare(target) == 0
except:
ret = False
return bool(ret)
def compare_items(source, target, answer_type="text", need_normalize=True):
if isinstance(source, (list, tuple)):
return [compare_items(_, target, answer_type=answer_type, need_normalize=need_normalize) for _ in source]
if source == "No answer!" or target == "No answer!" or source is None or target is None:
return False
if answer_type in ["text", "date", "bool"]:
return source.lower() == target.lower()
if isinstance(source, str) and isinstance(target, str):
if "=" in source and "=" not in target:
source = source.split("=")[-1]
if "=" in target and "=" not in source:
target = target.split("=")[-1]
if need_normalize: | source = normalize_answer(source, answer_type) | 0 | 2023-11-01 17:15:42+00:00 | 2k |
mitre/arlin | tests/test_dataset/test_collectors/test_sb3_collectors.py | [
{
"identifier": "SB3DQNDataCollector",
"path": "arlin/dataset/collectors/sb3_collectors.py",
"snippet": "class SB3DQNDataCollector(BaseDataCollector):\n \"\"\"Data collector for a model trained with DQN in stable-baselines3.\"\"\"\n\n def __init__(self, datapoint_cls: Type[BaseDatapoint], policy: BasePolicy):\n super().__init__(datapoint_cls=datapoint_cls)\n self.policy = policy\n\n def collect_internal_data(\n self, observation: np.ndarray\n ) -> Tuple[type[BaseDatapoint], int]:\n with th.no_grad():\n obs = th.Tensor(np.expand_dims(observation, 0))\n\n features = self.policy.extract_features(\n obs, self.policy.q_net.features_extractor\n )\n latent_q = self.policy.q_net.q_net[:-1](features)\n q_vals = self.policy.q_net.q_net[-1](latent_q)\n action = q_vals.argmax(dim=1).reshape(-1).item()\n\n datapoint = self.datapoint_cls(\n q_vals=th.squeeze(q_vals).numpy(),\n latent_qs=th.squeeze(latent_q).numpy(),\n features=th.squeeze(features).numpy(),\n )\n\n return datapoint, action"
},
{
"identifier": "SB3PPODataCollector",
"path": "arlin/dataset/collectors/sb3_collectors.py",
"snippet": "class SB3PPODataCollector(BaseDataCollector):\n \"\"\"Data collector for a model trained with PPO in stable-baselines3.\"\"\"\n\n def __init__(self, datapoint_cls: Type[BaseDatapoint], policy: BasePolicy):\n super().__init__(datapoint_cls=datapoint_cls)\n self.policy = policy\n\n def collect_internal_data(\n self, observation: np.ndarray\n ) -> Tuple[type[BaseDatapoint], int]:\n with th.no_grad():\n obs = th.Tensor(np.expand_dims(observation, 0))\n policy_dist = self.policy.get_distribution(obs)\n action = policy_dist.get_actions(deterministic=True).item()\n probs = policy_dist.distribution.probs\n value = self.policy.predict_values(obs)\n\n features = self.policy.extract_features(obs)\n if self.policy.share_features_extractor:\n latent_pi, latent_vf = self.policy.mlp_extractor(features)\n pi_features = features\n vf_features = features\n else:\n pi_features, vf_features = features\n latent_pi = self.policy.mlp_extractor.forward_actor(pi_features)\n latent_vf = self.policy.mlp_extractor.forward_critic(vf_features)\n\n datapoint = self.datapoint_cls(\n latent_actors=th.squeeze(latent_pi).numpy(),\n latent_critics=th.squeeze(latent_vf).numpy(),\n dist_probs=th.squeeze(probs).numpy(),\n critic_values=th.squeeze(value).item(),\n pi_features=th.squeeze(pi_features).numpy(),\n vf_features=th.squeeze(vf_features).numpy(),\n )\n\n return datapoint, action"
},
{
"identifier": "SB3DQNDatapoint",
"path": "arlin/dataset/collectors/datapoints.py",
"snippet": "class SB3DQNDatapoint(BaseDatapoint):\n \"\"\"Datapoint for a DQN algorithm trained in stable-baselines3.\"\"\"\n\n q_vals: Optional[np.ndarray] = None\n latent_qs: Optional[np.ndarray] = None\n features: Optional[np.ndarray] = None"
},
{
"identifier": "SB3PPODatapoint",
"path": "arlin/dataset/collectors/datapoints.py",
"snippet": "class SB3PPODatapoint(BaseDatapoint):\n \"\"\"Datapoint for a PPO algorithm trained in stable-baselines3.\"\"\"\n\n latent_actors: Optional[np.ndarray] = None\n latent_critics: Optional[np.ndarray] = None\n dist_probs: Optional[np.ndarray] = None\n critic_values: Optional[float] = None\n pi_features: Optional[np.ndarray] = None\n vf_features: Optional[np.ndarray] = None"
}
] | import pytest
from stable_baselines3 import DQN
from arlin.dataset.collectors import SB3DQNDataCollector, SB3PPODataCollector
from arlin.dataset.collectors.datapoints import SB3DQNDatapoint, SB3PPODatapoint | 1,031 |
@pytest.fixture
def dqn_model(env):
model = DQN("MlpPolicy", env, verbose=1)
model.learn(total_timesteps=int(100))
return model
class TestSB3Collectors:
def test_sb3_ppo_collector(self, ppo_model, env):
|
@pytest.fixture
def dqn_model(env):
model = DQN("MlpPolicy", env, verbose=1)
model.learn(total_timesteps=int(100))
return model
class TestSB3Collectors:
def test_sb3_ppo_collector(self, ppo_model, env): | collector = SB3PPODataCollector(SB3PPODatapoint, ppo_model.policy) | 1 | 2023-11-08 13:57:45+00:00 | 2k |
Giftify-Bot/Giftify-Bot | utils/paginator.py | [
{
"identifier": "ARROW_BACK_EMOJI",
"path": "utils/constants.py",
"snippet": "ARROW_BACK_EMOJI = \"<:GiftifyBack:1120372002939744308>\""
},
{
"identifier": "ARROW_EMOJI",
"path": "utils/constants.py",
"snippet": "ARROW_EMOJI = \"<:GiftifyArrow:1117849870678638653>\""
},
{
"identifier": "STOP_EMOJI",
"path": "utils/constants.py",
"snippet": "STOP_EMOJI = \"<:GiftifyStop:1120372964811079771>\""
},
{
"identifier": "Interaction",
"path": "utils/tree.py",
"snippet": "class CommandTree(app_commands.CommandTree):\r\n async def on_error(\r\n self,\r\n interaction: Interaction,\r\n error: app_commands.AppCommandError,\r\n ) -> None:\r"
}
] | import abc
import discord
from typing import TYPE_CHECKING, Any, Dict, Generic, List, Optional, TypeVar, Union
from discord.ext import commands
from typing import TypeAlias
from typing_extensions import TypeAlias
from utils.constants import ARROW_BACK_EMOJI, ARROW_EMOJI, STOP_EMOJI
from utils.tree import Interaction
from bot import Giftify | 1,496 | @property
def max_page(self) -> int:
"""The max page count for this paginator."""
return len(self.pages)
@property
def min_page(self) -> int:
"""The min page count for this paginator."""
return 1
@property
def current_page(self) -> int:
"""The current page the user is on."""
return self._current_page_index + 1
@property
def total_pages(self) -> int:
"""Returns the total amount of pages."""
return len(self.pages)
@abc.abstractmethod
def format_page(self, entries: List[T], /) -> discord.Embed:
"""
Used to make the embed that the user sees. This can be a coroutine or a regular
function. This must be overwritten by the subclass.
Parameters
----------
entries: List[Any]
A list of entries for the current page.
Returns
-------
discord.Embed
The embed for this page.
"""
raise NotImplementedError("Subclass did not overwrite format_page coro.")
async def embed(self) -> discord.Embed:
"""
A helper function to get the embed for the current page.
Returns
-------
discord.Embed
The embed for the current page.
"""
return await discord.utils.maybe_coroutine(
self.format_page, self.pages[self._current_page_index]
)
async def interaction_check(self, interaction: Interaction, /) -> Optional[bool]:
"""
The base interaction check for the given view.
This will always return ``True`` if the target is ``None``, otherwise it will check
that the user invoking the paginator is the same user that is interacting with the
paginator.
Parameters
----------
interaction: discord.Interaction
The interaction to check.
Returns
-------
Optional[bool]
The result of the interaction check. If this returns ``None`` then the interaction
was responded to with an error message to the user.
"""
if self.target is None:
return True
assert self.author
# Ensure this is the correct invoker
if self.author.id != interaction.user.id:
return await interaction.response.send_message(
"Hey, this isn't yours!", ephemeral=True
)
# Ensure they invoke it in the correct channel.
if (
self.target.channel
and interaction.channel
and self.target.channel.id != interaction.channel.id
):
return await interaction.response.send_message(
"Hey, this isn't in the right channel!", ephemeral=True
)
return True
def _switch_page(self, count: int, /) -> None:
self._current_page_index += count
if self.clamp_pages:
if count < 0: # Going down
if self._current_page_index < 0:
self._current_page_index = self.max_page - 1
elif count > 0: # Going up
if self._current_page_index > self.max_page - 1: # - 1 for indexing
self._current_page_index = 0
return
@discord.ui.button(emoji=ARROW_BACK_EMOJI)
async def on_arrow_backward(
self, interaction: Interaction, button: discord.ui.Button[BaseButtonPaginator]
) -> discord.InteractionMessage:
"""
The button to represent going backwards a page.
Parameters
----------
interaction: discord.Interaction
The interaction created from the user invoking the button.
button: discord.ui.Button
The button that was pressed.
"""
await interaction.response.defer()
self._switch_page(-1)
embed = await self.embed()
return await interaction.edit_original_response(embed=embed)
| from __future__ import annotations
try:
except ImportError:
if TYPE_CHECKING:
T = TypeVar("T")
TargetType: TypeAlias = Union[Interaction, commands.Context["Giftify"]]
class BaseButtonPaginator(Generic[T], discord.ui.View, abc.ABC):
"""The base implementation of a button paginator. This class should be inherited
then the custom instance defined.
Parameters
----------
entries: List[Any]
The entries to paginate.
per_page: int
The amount of entries to show per page.
clamp_pages: bool
Whether to clamp the pages to the max and min page. This means that when the user
reaches the max page, it will go back to the first page. Likewise, when the user
reaches the first page, it will go back to the last page.
target: Optional[Union[discord.Interaction, commands.Context]]
The target interaction or context to use for the paginator. This is used to
ensure that the user invoking the paginator is the same user that is interacting
with the paginator.
If this is ``None`` then the interaction check will always return True.
"""
def __init__(
self,
*,
entries: List[T],
per_page: int = 6,
clamp_pages: bool = True,
target: Optional[TargetType] = None,
extras: Optional[Dict[Any, Any]] = None,
) -> None:
super().__init__(timeout=180)
self.entries: List[T] = entries
self.per_page: int = per_page
self.clamp_pages: bool = clamp_pages
self.target: Optional[TargetType] = target
self.extras = extras
self.author: Optional[Union[discord.User, discord.Member]] = target and (
target.user if isinstance(target, discord.Interaction) else target.author
)
self.bot: Optional[Giftify] = target and (
target.client if isinstance(target, discord.Interaction) else target.bot
)
self._current_page_index = 0
self.pages = [
entries[i : i + per_page] for i in range(0, len(entries), per_page)
]
@property
def max_page(self) -> int:
"""The max page count for this paginator."""
return len(self.pages)
@property
def min_page(self) -> int:
"""The min page count for this paginator."""
return 1
@property
def current_page(self) -> int:
"""The current page the user is on."""
return self._current_page_index + 1
@property
def total_pages(self) -> int:
"""Returns the total amount of pages."""
return len(self.pages)
@abc.abstractmethod
def format_page(self, entries: List[T], /) -> discord.Embed:
"""
Used to make the embed that the user sees. This can be a coroutine or a regular
function. This must be overwritten by the subclass.
Parameters
----------
entries: List[Any]
A list of entries for the current page.
Returns
-------
discord.Embed
The embed for this page.
"""
raise NotImplementedError("Subclass did not overwrite format_page coro.")
async def embed(self) -> discord.Embed:
"""
A helper function to get the embed for the current page.
Returns
-------
discord.Embed
The embed for the current page.
"""
return await discord.utils.maybe_coroutine(
self.format_page, self.pages[self._current_page_index]
)
async def interaction_check(self, interaction: Interaction, /) -> Optional[bool]:
"""
The base interaction check for the given view.
This will always return ``True`` if the target is ``None``, otherwise it will check
that the user invoking the paginator is the same user that is interacting with the
paginator.
Parameters
----------
interaction: discord.Interaction
The interaction to check.
Returns
-------
Optional[bool]
The result of the interaction check. If this returns ``None`` then the interaction
was responded to with an error message to the user.
"""
if self.target is None:
return True
assert self.author
# Ensure this is the correct invoker
if self.author.id != interaction.user.id:
return await interaction.response.send_message(
"Hey, this isn't yours!", ephemeral=True
)
# Ensure they invoke it in the correct channel.
if (
self.target.channel
and interaction.channel
and self.target.channel.id != interaction.channel.id
):
return await interaction.response.send_message(
"Hey, this isn't in the right channel!", ephemeral=True
)
return True
def _switch_page(self, count: int, /) -> None:
self._current_page_index += count
if self.clamp_pages:
if count < 0: # Going down
if self._current_page_index < 0:
self._current_page_index = self.max_page - 1
elif count > 0: # Going up
if self._current_page_index > self.max_page - 1: # - 1 for indexing
self._current_page_index = 0
return
@discord.ui.button(emoji=ARROW_BACK_EMOJI)
async def on_arrow_backward(
self, interaction: Interaction, button: discord.ui.Button[BaseButtonPaginator]
) -> discord.InteractionMessage:
"""
The button to represent going backwards a page.
Parameters
----------
interaction: discord.Interaction
The interaction created from the user invoking the button.
button: discord.ui.Button
The button that was pressed.
"""
await interaction.response.defer()
self._switch_page(-1)
embed = await self.embed()
return await interaction.edit_original_response(embed=embed)
| @discord.ui.button(emoji=STOP_EMOJI) | 2 | 2023-11-09 15:00:15+00:00 | 2k |
Zjy0401/CoCoFormer | model/rpr.py | [
{
"identifier": "get_device",
"path": "utilities/device.py",
"snippet": "def get_device():\n\n if((not USE_CUDA) or (TORCH_CUDA_DEVICE is None)):\n return TORCH_CPU_DEVICE\n else:\n return TORCH_CUDA_DEVICE"
},
{
"identifier": "parse_train_args",
"path": "utilities/argument_funcs.py",
"snippet": "def parse_train_args():\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"-input_dir\", type=str, default=\"./dataset/dataset/JSF_SATB\", help=\"Folder of preprocessed and pickled midi files\")\n parser.add_argument(\"-output_dir\", type=str, default=\"./baseline_3loss\", help=\"Folder to save model weights. Saves one every epoch\")\n parser.add_argument(\"-weight_modulus\", type=int, default=1, help=\"How often to save epoch weights (ex: value of 10 means save every 10 epochs)\")\n parser.add_argument(\"-print_modulus\", type=int, default=1, help=\"How often to print train results for a batch (batch loss, learn rate, etc.)\")\n parser.add_argument(\"-word2event\", type=str, default='./dataset/word2event.pkl', help='word table location: *.pkl')\n parser.add_argument(\"-n_workers\", type=int, default=2, help=\"Number of threads for the dataloader\")\n parser.add_argument(\"--force_cpu\", action=\"store_true\", help=\"Forces model to run on a cpu even when gpu is available\")\n parser.add_argument(\"--gpu\", default=[2], nargs='+', type=int, help=\"For Multi-GPUs training\")\n parser.add_argument(\"--no_tensorboard\", action=\"store_true\", help=\"Turns off tensorboard result reporting\")\n parser.add_argument('--scheduled_sampling', default=False, help='False means use teacher forcing, True means use scheduled_sampling')\n parser.add_argument(\"--scheduled_sampling_change_ratio\", default=0.5, type=int, help='ratio about mix golden target with output')\n parser.add_argument(\"-continue_weights\", type=str, default=None, help=\"Model weights to continue training based on\")\n parser.add_argument(\"-continue_epoch\", type=int, default=None, help=\"Epoch the continue_weights model was at\")\n\n parser.add_argument(\"-lr\", type=float, default=None, help=\"Constant learn rate. Leave as None for a custom scheduler.\")\n parser.add_argument(\"-ce_smoothing\", type=float, default=None, help=\"Smoothing parameter for smoothed cross entropy loss (defaults to no smoothing)\")\n parser.add_argument(\"-batch_size\", type=int, default=2, help=\"Batch size per gpu to use\")\n parser.add_argument(\"-epochs\", type=int, default=300, help=\"Number of epochs to use\")\n\n parser.add_argument(\"-adv_train\", default=True, help='add discriminator loss')\n parser.add_argument(\"-only_Transformer\", default=False, help='use pure Transformer, default set to false, True only for test')\n parser.add_argument(\"-loss\", default=[0.4, 0.2, 0.8], nargs='+', type=float, help='weights of loss, the last element effect when adv train is True')\n\n parser.add_argument(\"--rpr\", action=\"store_true\", help=\"Use a modified Transformer for Relative Position Representations\")\n parser.add_argument(\"-max_sequence\", type=int, default=2048, help=\"Maximum midi sequence to consider\")\n parser.add_argument(\"-n_layers\", type=int, default=6, help=\"Number of decoder layers to use\")\n parser.add_argument(\"-num_heads\", type=int, default=8, help=\"Number of heads to use for multi-head attention\")\n parser.add_argument(\"-d_model\", type=int, default=512, help=\"Dimension of the model (output dim of embedding layers, etc.)\")\n\n parser.add_argument(\"-dim_feedforward\", type=int, default=1024, help=\"Dimension of the feedforward layer\")\n\n parser.add_argument(\"-dropout\", type=float, default=0.1, help=\"Dropout rate\")\n\n parser.add_argument(\"--metrics\", default=False, help=\"evaluate TER(token error rate)\")\n\n return parser.parse_args()"
}
] | import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.nn.parameter import Parameter
from torch.nn import Module
from torch.nn.modules.transformer import _get_clones
from torch.nn.modules.linear import Linear
from torch.nn.modules.dropout import Dropout
from torch.nn.modules.normalization import LayerNorm
from torch.nn.init import *
from torch.nn.modules.activation import MultiheadAttention
from torch.nn.functional import linear, softmax, dropout
from utilities.device import get_device
from utilities.argument_funcs import parse_train_args | 1,158 |
# TransformerEncoderRPR
class TransformerEncoderRPR(Module):
def __init__(self, encoder_layer, num_layers, encoder_past, max_seq, c_max_seq, b_max_seq, norm=None):
super(TransformerEncoderRPR, self).__init__()
self.past_layers = _get_clones(encoder_past, 1)
self.layers = _get_clones(encoder_layer, num_layers)
self.num_layers = num_layers
self.norm = norm
self.max_seq = max_seq
self.c_max_seq = c_max_seq
self.b_max_seq = b_max_seq
def forward(self, src, mask=None, src_key_padding_mask=None):
|
# TransformerEncoderRPR
class TransformerEncoderRPR(Module):
def __init__(self, encoder_layer, num_layers, encoder_past, max_seq, c_max_seq, b_max_seq, norm=None):
super(TransformerEncoderRPR, self).__init__()
self.past_layers = _get_clones(encoder_past, 1)
self.layers = _get_clones(encoder_layer, num_layers)
self.num_layers = num_layers
self.norm = norm
self.max_seq = max_seq
self.c_max_seq = c_max_seq
self.b_max_seq = b_max_seq
def forward(self, src, mask=None, src_key_padding_mask=None):
| args = parse_train_args() | 1 | 2023-11-01 08:33:08+00:00 | 2k |
a16z-infra/sunlight | model/agent.py | [
{
"identifier": "DiffbotClient",
"path": "model/diffbot.py",
"snippet": "class DiffbotClient(object):\n\n BASE_API_URL = 'http://api.diffbot.com'\n TIMEOUT_MS = 15000\n\n def request(self, url, token, api, version=3):\n ''' Issue a request to the Diffbot API and return the response if valid JSON '''\n params = {'url': url, 'token': token, 'timeout': self.TIMEOUT_MS}\n\n try:\n response = requests.get(f'{self.BASE_API_URL}/v{version}/{api}', params=params, timeout=self.TIMEOUT_MS)\n response.raise_for_status()\n except:\n raise Exception('REMOTE_ERROR')\n\n return response.json()"
},
{
"identifier": "BIAS_REPORT",
"path": "model/prompts.py",
"snippet": "BIAS_REPORT = '''Critique the following possibly-biased article unless it is too short.\nInstructions:\n1. Identify any bias -- especially political bias.\n2. If the article is fair, be fair in your critique. If it is biased, be harsh and critical about the issues.\n3. Use specific examples and quote directly where possible.\n4. Call out any opinion, hyperbole, and speculation.\n5. Assess where this article lies on the political spectrum.\n6. Write the critique as 3-5 paragraphs separated by two (2) newline characters.\n7. If the article is very short or truncated, explain the problem in one paragraph and do not critique it.\n\n### Headline:\n{headline}\n\n### Body:\n{body}\n\n### Critical Review:\n'''"
},
{
"identifier": "FACTUAL_CLAIMS",
"path": "model/prompts.py",
"snippet": "FACTUAL_CLAIMS = u'''Summarize the factual claims made in this article in a bulleted list separated by \\u2022 unless it is too short.\nInstructions:\n1. Order the facts by decreasing importance\n2. Use extremely concise, simple language\n3. If the article is very short or truncated, request that user elaborate or re-host.\n\n### Headline:\n{headline}\n\n### Body:\n{body}:\n\n### Factual Claims:\n'''"
},
{
"identifier": "SLANT_DESCRIPTION",
"path": "model/prompts.py",
"snippet": "SLANT_DESCRIPTION = '''Describe the slant critiqued in the following Bias Report in 1-2 words. Be creative, pithy, and accurate.\nExample slants: Fair, Left-leaning, Extreme Right, Environmentalist, Bitcoin Maximalist, Conservative, Conspiracist, Impartial\n\n### Bias Report:\n{bias_report}\n\n### Slant:\n'''"
}
] | from datetime import datetime
from threading import Thread
from langchain.callbacks.base import BaseCallbackHandler
from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate
from .diffbot import DiffbotClient
from .prompts import BIAS_REPORT, FACTUAL_CLAIMS, SLANT_DESCRIPTION
import fcntl
import json
import logging
import multiprocessing
import os
import tiktoken | 1,398 |
DIFFBOT_API_KEY = os.environ['DIFFBOT_API_KEY']
REQUEST_LOG_FILE = os.environ['REQUEST_LOG_FILE']
MAX_MODEL_CONTEXT = {
'gpt-3.5-turbo': 4096,
'text-davinci-003': 4096,
'gpt-4': 8192,
}
class OpenAIStreamHandler(BaseCallbackHandler):
def __init__(self, stream_queue, *args, **kwargs):
super(OpenAIStreamHandler, self).__init__(*args, **kwargs)
self.stream_queue = stream_queue
def on_llm_new_token(self, token, *args, **kwargs):
self.stream_queue.put(token)
def on_llm_end(self, *args, **kwargs):
self.stream_queue.put(False)
class Agent(multiprocessing.Process):
def __init__(self, in_queue, out_queue):
super(Agent, self).__init__()
logging.basicConfig(filename='/var/log/build/sunlight.out', level=logging.INFO)
self.in_queue = in_queue
self.out_queue = out_queue
self.fact_prompt = PromptTemplate(input_variables=['headline', 'body'], template=FACTUAL_CLAIMS)
self.critique_prompt = PromptTemplate(input_variables=['headline', 'body'], template=BIAS_REPORT)
self.slant_prompt = PromptTemplate(input_variables=['bias_report'], template=SLANT_DESCRIPTION)
gpt35 = ChatOpenAI(model_name='gpt-3.5-turbo', temperature=0.0, request_timeout=300)
davinci = ChatOpenAI(model_name='text-davinci-003', temperature=0.0, request_timeout=300)
gpt4 = ChatOpenAI(model_name='gpt-4', temperature=0.0, request_timeout=900)
self.stream_queue = multiprocessing.Queue()
gpt4_stream = ChatOpenAI(
model_name='gpt-4',
temperature=0.0,
streaming=True,
callbacks=[OpenAIStreamHandler(stream_queue=self.stream_queue)],
request_timeout=900,
)
self.fact_chains = {
'gpt-3.5-turbo': LLMChain(llm=gpt35, prompt=self.fact_prompt),
'text-davinci-003': LLMChain(llm=davinci, prompt=self.fact_prompt),
'gpt-4': LLMChain(llm=gpt4_stream, prompt=self.fact_prompt),
}
self.critique_chains = {
'gpt-3.5-turbo': LLMChain(llm=gpt35, prompt=self.critique_prompt),
'text-davinci-003': LLMChain(llm=davinci, prompt=self.critique_prompt),
'gpt-4': LLMChain(llm=gpt4_stream, prompt=self.critique_prompt),
}
self.slant_chains = {
'gpt-3.5-turbo': LLMChain(llm=gpt35, prompt=self.slant_prompt),
'text-davinci-003': LLMChain(llm=davinci, prompt=self.slant_prompt),
'gpt-4': LLMChain(llm=gpt4, prompt=self.slant_prompt),
}
self._load_processed_jobs()
def run(self):
logging.basicConfig(filename='/var/log/build/sunlight.out', level=logging.INFO)
|
DIFFBOT_API_KEY = os.environ['DIFFBOT_API_KEY']
REQUEST_LOG_FILE = os.environ['REQUEST_LOG_FILE']
MAX_MODEL_CONTEXT = {
'gpt-3.5-turbo': 4096,
'text-davinci-003': 4096,
'gpt-4': 8192,
}
class OpenAIStreamHandler(BaseCallbackHandler):
def __init__(self, stream_queue, *args, **kwargs):
super(OpenAIStreamHandler, self).__init__(*args, **kwargs)
self.stream_queue = stream_queue
def on_llm_new_token(self, token, *args, **kwargs):
self.stream_queue.put(token)
def on_llm_end(self, *args, **kwargs):
self.stream_queue.put(False)
class Agent(multiprocessing.Process):
def __init__(self, in_queue, out_queue):
super(Agent, self).__init__()
logging.basicConfig(filename='/var/log/build/sunlight.out', level=logging.INFO)
self.in_queue = in_queue
self.out_queue = out_queue
self.fact_prompt = PromptTemplate(input_variables=['headline', 'body'], template=FACTUAL_CLAIMS)
self.critique_prompt = PromptTemplate(input_variables=['headline', 'body'], template=BIAS_REPORT)
self.slant_prompt = PromptTemplate(input_variables=['bias_report'], template=SLANT_DESCRIPTION)
gpt35 = ChatOpenAI(model_name='gpt-3.5-turbo', temperature=0.0, request_timeout=300)
davinci = ChatOpenAI(model_name='text-davinci-003', temperature=0.0, request_timeout=300)
gpt4 = ChatOpenAI(model_name='gpt-4', temperature=0.0, request_timeout=900)
self.stream_queue = multiprocessing.Queue()
gpt4_stream = ChatOpenAI(
model_name='gpt-4',
temperature=0.0,
streaming=True,
callbacks=[OpenAIStreamHandler(stream_queue=self.stream_queue)],
request_timeout=900,
)
self.fact_chains = {
'gpt-3.5-turbo': LLMChain(llm=gpt35, prompt=self.fact_prompt),
'text-davinci-003': LLMChain(llm=davinci, prompt=self.fact_prompt),
'gpt-4': LLMChain(llm=gpt4_stream, prompt=self.fact_prompt),
}
self.critique_chains = {
'gpt-3.5-turbo': LLMChain(llm=gpt35, prompt=self.critique_prompt),
'text-davinci-003': LLMChain(llm=davinci, prompt=self.critique_prompt),
'gpt-4': LLMChain(llm=gpt4_stream, prompt=self.critique_prompt),
}
self.slant_chains = {
'gpt-3.5-turbo': LLMChain(llm=gpt35, prompt=self.slant_prompt),
'text-davinci-003': LLMChain(llm=davinci, prompt=self.slant_prompt),
'gpt-4': LLMChain(llm=gpt4, prompt=self.slant_prompt),
}
self._load_processed_jobs()
def run(self):
logging.basicConfig(filename='/var/log/build/sunlight.out', level=logging.INFO) | diffbot = DiffbotClient() | 0 | 2023-11-01 17:19:54+00:00 | 2k |
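A note on the chain construction above: each entry in fact_chains, critique_chains, and slant_chains pairs one chat model with one prompt template, so invoking a chain means supplying the template's input variables as keyword arguments. The sketch below is illustrative only; the template text and article values are placeholders rather than the project's actual FACTUAL_CLAIMS prompt, and it assumes the legacy LangChain LLMChain interface used in this snippet.

from langchain import LLMChain, PromptTemplate
from langchain.chat_models import ChatOpenAI

# Hypothetical template; the real FACTUAL_CLAIMS prompt is defined elsewhere in the project.
fact_prompt = PromptTemplate(
    input_variables=['headline', 'body'],
    template='List the factual claims made in this article.\nHeadline: {headline}\nBody: {body}',
)
chain = LLMChain(llm=ChatOpenAI(model_name='gpt-3.5-turbo', temperature=0.0), prompt=fact_prompt)
claims = chain.run(headline='Example headline', body='Example body text.')  # returns the model's reply as a string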
elenacliu/GraspStudio | cameras/realsense.py | [
{
"identifier": "CameraConfig",
"path": "cameras/camera.py",
"snippet": "class CameraConfig(InstantiateConfig):\n \"\"\"Camera Config\"\"\"\n _target: Type = field(default_factory=lambda : Camera)\n # focal length of x axis\n fx: float = 0.0\n # focal length of y axis\n fy: float = 0.0\n # optical center of x\n ppx: float = 0.0\n # optical center of y\n ppy: float = 0.0\n # resolution x (width)\n w: int = 0.0\n # resolution y (height)\n h: int = 0.0\n # image size\n image_size_w: int = 1280\n image_size_h: int = 720\n # calibration matrix (camera on hand or camera on base)\n calibration: NDArray[np.float64] = None\n # depth camera focal length of x axis (optional)\n depth_fx: Optional[float] = None\n # depth camera focal length of y axis (optional)\n depth_fy: Optional[float] = None\n # depth camera ppx\n depth_ppx: Optional[float] = None\n # depth camera ppy\n depth_ppy: Optional[float] = None\n # depth resolution x (width)\n depth_w: Optional[int] = None\n # depth esolution y (height)\n depth_h: Optional[int] = None"
},
{
"identifier": "Camera",
"path": "cameras/camera.py",
"snippet": "class Camera:\n config: CameraConfig\n\n def __init__(self, config : CameraConfig):\n self.config = config\n \n def rgb(self) -> NDArray:\n raise NotImplementedError('You should use a specified subclass!')\n\n def rgbd(self) -> Tuple[NDArray, NDArray]:\n raise NotImplementedError('You should use a specified subclass!')\n\n def depth_to_point_cloud(self, organized=False) -> Tuple[NDArray, NDArray]:\n \"\"\"\n organized: bool\n whether to keep the cloud in image shape (H,W,3)\n \"\"\"\n color_img, depth_img = self.rgbd()\n color_img = np.array(color_img, dtype=np.float32) / 255.0\n depth_img = np.array(depth_img / 1000, dtype=np.float32)\n\n # depth image resize to the color image size\n # just use the original size of depth image and color image\n # depth_img = cv2.resize(depth_img, (self.config.image_size_w, self.config.image_size_h), interpolation=cv2.INTER_NEAREST)\n # color_img = cv2.resize(color_img, (self.config.image_size_w, self.config.image_size_h), interpolation=cv2.INTER_LINEAR)\n # the scale should be considering again\n h, w = depth_img.shape\n\n # scale camera parameters\n scale_x = w / self.config.depth_w\n scale_y = h / self.config.depth_h\n\n fx = self.config.depth_fx * scale_x\n fy = self.config.depth_fy * scale_y\n\n x_offset = self.config.depth_ppx * scale_x\n y_offset = self.config.depth_ppy * scale_y\n\n indices = torch.from_numpy(np.indices((h, w), dtype=np.float32).transpose(1,2,0))\n \n z_e = torch.from_numpy(depth_img)\n x_e = (indices[..., 1] - x_offset) * z_e / fx\n y_e = (indices[..., 0] - y_offset) * z_e / fy\n point_cloud = torch.stack([x_e, y_e, z_e], axis=-1).numpy() # Shape: [H x W x 3]\n\n if not organized:\n color_img = color_img.reshape(-1, 3)\n point_cloud = point_cloud.reshape(-1, 3)\n return color_img, point_cloud\n\n @property\n def intrinsic(self):\n return {\n 'fx': self.config.fx,\n 'fy': self.config.fy,\n 'cx': self.config.ppx,\n 'cy': self.config.ppy,\n 'w': self.config.w,\n 'h': self.config.h\n }\n\n @property\n def depth_intrinsic(self):\n return {\n 'fx': self.config.depth_fx,\n 'fy': self.config.depth_fy,\n 'cx': self.config.depth_ppx,\n 'cy': self.config.depth_ppy,\n 'w': self.config.depth_w,\n 'h': self.config.depth_h\n }"
}
] | from dataclasses import dataclass, field
from typing import Type
from .camera import CameraConfig, Camera
import pyrealsense2 as rs
import numpy as np
import cv2 | 1,298 | # Copyright 2023 Chang Liu. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
@dataclass
class RealSenseCameraConfig(CameraConfig):
_target: Type = field(default_factory=lambda : RealSenseCamera)
exposure: float = 500.0
max_depth_value: float = 800.0
| # Copyright 2023 Chang Liu. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
@dataclass
class RealSenseCameraConfig(CameraConfig):
_target: Type = field(default_factory=lambda : RealSenseCamera)
exposure: float = 500.0
max_depth_value: float = 800.0
| class RealSenseCamera(Camera): | 1 | 2023-11-08 09:44:22+00:00 | 2k |
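The depth_to_point_cloud helper in the CameraConfig/Camera context above back-projects every depth pixel through the pinhole model: x = (u - cx) * z / fx and y = (v - cy) * z / fy. A minimal NumPy sketch of that computation, using made-up intrinsics and a tiny depth map rather than real RealSense values:

import numpy as np

# Hypothetical intrinsics and a 2x2 depth map in metres; real values come from the camera config.
fx, fy, cx, cy = 600.0, 600.0, 320.0, 240.0
depth = np.array([[0.5, 0.6], [0.7, 0.8]], dtype=np.float32)

v, u = np.indices(depth.shape).astype(np.float32)  # pixel row (v) and column (u) indices
z = depth
x = (u - cx) * z / fx
y = (v - cy) * z / fy
points = np.stack([x, y, z], axis=-1)              # shape (H, W, 3), an organized point cloud
print(points.reshape(-1, 3))                       # flatten when an unorganized cloud is wanted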
serl-robot/serl | serl/wrappers/pixels.py | [
{
"identifier": "FrameStack",
"path": "serl/wrappers/frame_stack.py",
"snippet": "class FrameStack(gym.Wrapper):\n def __init__(self, env, num_stack: int, stacking_key: str = \"pixels\"):\n super().__init__(env)\n self._num_stack = num_stack\n self._stacking_key = stacking_key\n\n for key in stacking_key:\n assert key in self.observation_space.spaces\n pixel_obs_spaces = self.observation_space.spaces[key]\n self._env_dim = pixel_obs_spaces.shape[-1]\n low = np.repeat(pixel_obs_spaces.low[..., np.newaxis], num_stack, axis=-1)\n high = np.repeat(pixel_obs_spaces.high[..., np.newaxis], num_stack, axis=-1)\n new_pixel_obs_spaces = Box(low=low, high=high, dtype=pixel_obs_spaces.dtype)\n self.observation_space.spaces[key] = new_pixel_obs_spaces\n\n self._frames = collections.deque(maxlen=num_stack)\n\n def reset(self):\n obs, info = self.env.reset()\n for i in range(self._num_stack):\n self._frames.append({key: obs[key] for key in self._stacking_key})\n for k in self._stacking_key:\n obs[k] = self.frames[k]\n return obs, info\n\n @property\n def frames(self):\n tmp = {}\n for k in self._stacking_key:\n tmp[k] = np.stack([frame[k] for frame in self._frames], axis=-1)\n return tmp\n\n def step(self, action):\n obs, reward, done, truncated, info = self.env.step(action)\n self._frames.append({k: obs[k] for k in self._stacking_key})\n for k in self._stacking_key:\n obs[k] = self.frames[k]\n return obs, reward, done, truncated, info"
},
{
"identifier": "RepeatAction",
"path": "serl/wrappers/repeat_action.py",
"snippet": "class RepeatAction(gym.Wrapper):\n def __init__(self, env, action_repeat=4):\n super().__init__(env)\n self._action_repeat = action_repeat\n\n def step(self, action: np.ndarray):\n total_reward = 0.0\n done = None\n combined_info = {}\n\n for _ in range(self._action_repeat):\n obs, reward, done, info = self.env.step(action)\n total_reward += reward\n combined_info.update(info)\n if done:\n break\n\n return obs, total_reward, done, combined_info"
},
{
"identifier": "UniversalSeed",
"path": "serl/wrappers/universal_seed.py",
"snippet": "class UniversalSeed(gym.Wrapper):\n def seed(self, seed: int):\n seeds = self.env.seed(seed)\n self.env.observation_space.seed(seed)\n self.env.action_space.seed(seed)\n return seeds"
}
] | from typing import Optional, Tuple
from gym.wrappers.pixel_observation import PixelObservationWrapper
from serl.wrappers.frame_stack import FrameStack
from serl.wrappers.repeat_action import RepeatAction
from serl.wrappers.universal_seed import UniversalSeed
import gym | 809 |
def wrap_pixels(
env: gym.Env,
action_repeat: int,
image_size: int = 84,
num_stack: Optional[int] = 3,
camera_id: int = 0,
pixel_keys: Tuple[str, ...] = ("pixels",),
) -> gym.Env:
if action_repeat > 1:
env = RepeatAction(env, action_repeat)
|
def wrap_pixels(
env: gym.Env,
action_repeat: int,
image_size: int = 84,
num_stack: Optional[int] = 3,
camera_id: int = 0,
pixel_keys: Tuple[str, ...] = ("pixels",),
) -> gym.Env:
if action_repeat > 1:
env = RepeatAction(env, action_repeat)
| env = UniversalSeed(env) | 2 | 2023-11-02 23:32:24+00:00 | 2k |
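The FrameStack wrapper in the serl context above keeps the last num_stack observations in a deque and stacks them along a trailing axis; on reset the buffer is filled with copies of the first frame. A standalone sketch of that buffering idea with plain NumPy arrays (the shapes here are arbitrary):

import collections
import numpy as np

num_stack = 3
frames = collections.deque(maxlen=num_stack)

first = np.zeros((84, 84, 3), dtype=np.uint8)   # pretend first observation
for _ in range(num_stack):                      # reset: fill the buffer with the first frame
    frames.append(first)

new_obs = np.ones((84, 84, 3), dtype=np.uint8)  # pretend step observation
frames.append(new_obs)                          # step: push the newest frame, the oldest drops out
stacked = np.stack(frames, axis=-1)             # shape (84, 84, 3, num_stack), as in FrameStack
print(stacked.shape)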
daily-demos/ai-meeting-assistant | server/llm/openai_assistant.py | [
{
"identifier": "Assistant",
"path": "server/llm/assistant.py",
"snippet": "class Assistant(ABC):\n \"\"\"Abstract class defining methods that should be implemented by any assistant\"\"\"\n\n @abstractmethod\n def register_new_context(self, new_text: str,\n name: list[str] = None) -> str:\n \"\"\"Registers new context (usually a transcription line).\"\"\"\n\n @abstractmethod\n async def query(self, custom_query: str) -> str:\n \"\"\"Runs a query against the assistant and returns the answer.\"\"\"\n\n @abstractmethod\n def get_clean_transcript(self) -> str:\n \"\"\"Returns latest clean transcript.\"\"\"\n\n @abstractmethod\n async def cleanup_transcript(self) -> str:\n \"\"\"Cleans up transcript from raw context.\"\"\"\n\n @abstractmethod\n def destroy(self) -> str:\n \"\"\"Destroys the assistant.\"\"\""
},
{
"identifier": "NoContextError",
"path": "server/llm/assistant.py",
"snippet": "class NoContextError(Exception):\n \"\"\"Raised when a query is made but no context is available\"\"\"\n\n def __init__(self):\n m = \"No context available.\"\n super().__init__(m)"
}
] | import asyncio
import logging
import threading
from collections import deque
from openai import OpenAI
from openai.types.beta import Assistant
from openai.types.chat import ChatCompletionMessageParam, ChatCompletionSystemMessageParam, \
ChatCompletionUserMessageParam
from server.llm.assistant import Assistant, NoContextError | 1,479 | def probe_api_key(api_key: str) -> bool:
"""Probes the OpenAI API with the provided key to ensure it is valid."""
try:
client = OpenAI(api_key=api_key)
client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[
ChatCompletionUserMessageParam(
content="This is a test",
role="user")],
)
return True
except Exception as e:
print(f"Failed to probe OpenAI API key: {e}")
return False
class OpenAIAssistant(Assistant):
"""Class that implements assistant features using the OpenAI API"""
_client: OpenAI = None
_oai_assistant_id: int = None
_oai_summary_thread_id: int = None
_model_name: str = None
_logger: logging.Logger = None
# For now, just store context in memory.
_raw_context: deque([ChatCompletionMessageParam]) = None
_clean_transcript: str = None
_clean_transcript_running: bool = False
_summary_context: str = None
# Process 25 context items at a time.
_transcript_batch_size: int = 25
_default_transcript_prompt = ChatCompletionSystemMessageParam(content="""
Using the exact transcript provided in the previous messages, convert it into a cleaned-up, paragraphed format. It is crucial that you strictly adhere to the content of the provided transcript without adding or modifying any of the original dialogue. Your tasks are to:
1. Correct punctuation and spelling mistakes.
2. Merge broken sentences into complete ones.
3. Remove timestamps and transcript types.
4. Clearly indicate the speaker's name at the beginning of their dialogue.
Do not add any new content or dialogue that was not present in the original transcript. The focus is on cleaning and reformatting the existing content for clarity and readability.
""",
role="system")
_default_prompt = """
Primary Instruction:
Based on the provided meeting transcripts, please create a concise summary.
Your summary should include:
1. Key discussion points.
2. Decisions made.
3. Action items assigned.
Keep the summary within six sentences, ensuring it captures the essence of the conversation. Structure it in clear, digestible parts for easy understanding. Rely solely on information from the transcript; do not infer or add information not explicitly mentioned. Exclude any square brackets, tags, or timestamps from the summary. Instead of re-parsing the entire context, use previous summaries you've generated to inform the completion of each new summary. Each summary should be holistic and represent the entire call.
"""
def __init__(self, api_key: str, model_name: str = None,
logger: logging.Logger = None):
if not api_key:
raise Exception("OpenAI API key not provided, but required.")
self._raw_context = deque()
self._summary_context = ""
self._clean_transcript = ""
self._logger = logger
if not model_name:
model_name = "gpt-4-1106-preview"
self._model_name = model_name
self._client = OpenAI(
api_key=api_key,
)
self._oai_assistant_id = self.get_or_create_assistant(model_name)
def get_or_create_assistant(self, model_name) -> str:
"""Gets or creates an OpenAI assistant"""
all_assistants = self._client.beta.assistants.list()
for assistant in all_assistants.data:
if assistant.name == _assistant_name and assistant.instructions == self._default_prompt:
return assistant.id
return self._client.beta.assistants.create(name=_assistant_name, description="Daily meeting summary assistant",
instructions=self._default_prompt,
model=model_name).id
def destroy(self):
"""Destroys the assistant and relevant resources"""
self._logger.info(
"Destroying thread (%s) and assistant (%s)",
self._oai_summary_thread_id,
self._oai_assistant_id)
bc = self._client.beta
if self._oai_summary_thread_id:
bc.threads.delete(self._oai_summary_thread_id)
if self._oai_assistant_id:
bc.assistants.delete(self._oai_assistant_id)
def register_new_context(self, new_text: str, metadata: list[str] = None):
"""Registers new context (usually a transcription line)."""
content = self._compile_ctx_content(new_text, metadata)
user_msg = ChatCompletionUserMessageParam(content=content, role="user")
self._raw_context.append(user_msg)
def get_clean_transcript(self) -> str:
"""Returns latest clean transcript."""
return self._clean_transcript
async def cleanup_transcript(self) -> str:
"""Cleans up transcript from raw context."""
if self._clean_transcript_running:
raise Exception("Clean transcript process already running")
# Set this bool to ensure only one cleanup process
# is running at a time.
self._clean_transcript_running = True
if len(self._raw_context) == 0:
self._clean_transcript_running = False
| """Module that defines an OpenAI assistant."""
_assistant_name = "daily-ai-assistant"
def probe_api_key(api_key: str) -> bool:
"""Probes the OpenAI API with the provided key to ensure it is valid."""
try:
client = OpenAI(api_key=api_key)
client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[
ChatCompletionUserMessageParam(
content="This is a test",
role="user")],
)
return True
except Exception as e:
print(f"Failed to probe OpenAI API key: {e}")
return False
class OpenAIAssistant(Assistant):
"""Class that implements assistant features using the OpenAI API"""
_client: OpenAI = None
_oai_assistant_id: int = None
_oai_summary_thread_id: int = None
_model_name: str = None
_logger: logging.Logger = None
# For now, just store context in memory.
_raw_context: deque([ChatCompletionMessageParam]) = None
_clean_transcript: str = None
_clean_transcript_running: bool = False
_summary_context: str = None
# Process 25 context items at a time.
_transcript_batch_size: int = 25
_default_transcript_prompt = ChatCompletionSystemMessageParam(content="""
Using the exact transcript provided in the previous messages, convert it into a cleaned-up, paragraphed format. It is crucial that you strictly adhere to the content of the provided transcript without adding or modifying any of the original dialogue. Your tasks are to:
1. Correct punctuation and spelling mistakes.
2. Merge broken sentences into complete ones.
3. Remove timestamps and transcript types.
4. Clearly indicate the speaker's name at the beginning of their dialogue.
Do not add any new content or dialogue that was not present in the original transcript. The focus is on cleaning and reformatting the existing content for clarity and readability.
""",
role="system")
_default_prompt = """
Primary Instruction:
Based on the provided meeting transcripts, please create a concise summary.
Your summary should include:
1. Key discussion points.
2. Decisions made.
3. Action items assigned.
Keep the summary within six sentences, ensuring it captures the essence of the conversation. Structure it in clear, digestible parts for easy understanding. Rely solely on information from the transcript; do not infer or add information not explicitly mentioned. Exclude any square brackets, tags, or timestamps from the summary. Instead of re-parsing the entire context, use previous summaries you've generated to inform the completion of each new summary. Each summary should be holistic and represent the entire call.
"""
def __init__(self, api_key: str, model_name: str = None,
logger: logging.Logger = None):
if not api_key:
raise Exception("OpenAI API key not provided, but required.")
self._raw_context = deque()
self._summary_context = ""
self._clean_transcript = ""
self._logger = logger
if not model_name:
model_name = "gpt-4-1106-preview"
self._model_name = model_name
self._client = OpenAI(
api_key=api_key,
)
self._oai_assistant_id = self.get_or_create_assistant(model_name)
def get_or_create_assistant(self, model_name) -> str:
"""Gets or creates an OpenAI assistant"""
all_assistants = self._client.beta.assistants.list()
for assistant in all_assistants.data:
if assistant.name == _assistant_name and assistant.instructions == self._default_prompt:
return assistant.id
return self._client.beta.assistants.create(name=_assistant_name, description="Daily meeting summary assistant",
instructions=self._default_prompt,
model=model_name).id
def destroy(self):
"""Destroys the assistant and relevant resources"""
self._logger.info(
"Destroying thread (%s) and assistant (%s)",
self._oai_summary_thread_id,
self._oai_assistant_id)
bc = self._client.beta
if self._oai_summary_thread_id:
bc.threads.delete(self._oai_summary_thread_id)
if self._oai_assistant_id:
bc.assistants.delete(self._oai_assistant_id)
def register_new_context(self, new_text: str, metadata: list[str] = None):
"""Registers new context (usually a transcription line)."""
content = self._compile_ctx_content(new_text, metadata)
user_msg = ChatCompletionUserMessageParam(content=content, role="user")
self._raw_context.append(user_msg)
def get_clean_transcript(self) -> str:
"""Returns latest clean transcript."""
return self._clean_transcript
async def cleanup_transcript(self) -> str:
"""Cleans up transcript from raw context."""
if self._clean_transcript_running:
raise Exception("Clean transcript process already running")
# Set this bool to ensure only one cleanup process
# is running at a time.
self._clean_transcript_running = True
if len(self._raw_context) == 0:
self._clean_transcript_running = False | raise NoContextError() | 1 | 2023-11-02 11:17:16+00:00 | 2k |
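The OpenAIStreamHandler callback above pushes each streamed token onto a multiprocessing queue and then a False sentinel when generation ends, so a consumer can drain the queue until it sees that sentinel. A small self-contained sketch of the same pattern, using a plain queue and fake tokens instead of a live OpenAI stream:

import queue

stream_queue = queue.Queue()

# Pretend these arrived via on_llm_new_token, then on_llm_end pushed the sentinel.
for token in ["The ", "summary ", "is ", "ready."]:
    stream_queue.put(token)
stream_queue.put(False)

chunks = []
while True:
    item = stream_queue.get()
    if item is False:          # end-of-stream sentinel, mirroring on_llm_end above
        break
    chunks.append(item)
print("".join(chunks))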
Kushalhk/AutoFilter | plugins/inline.py | [
{
"identifier": "get_search_results",
"path": "database/ia_filterdb.py",
"snippet": "async def get_search_results(chat_id, query, file_type=None, max_results=10, offset=0, filter=False):\n \"\"\"For given query return (results, next_offset)\"\"\"\n if chat_id is not None:\n settings = await get_settings(int(chat_id))\n try:\n if settings['max_btn']:\n max_results = 10\n else:\n max_results = int(MAX_B_TN)\n except KeyError:\n await save_group_settings(int(chat_id), 'max_btn', False)\n settings = await get_settings(int(chat_id))\n if settings['max_btn']:\n max_results = 10\n else:\n max_results = int(MAX_B_TN)\n query = query.strip()\n #if filter:\n #better ?\n #query = query.replace(' ', r'(\\s|\\.|\\+|\\-|_)')\n #raw_pattern = r'(\\s|_|\\-|\\.|\\+)' + query + r'(\\s|_|\\-|\\.|\\+)'\n if not query:\n raw_pattern = '.'\n elif ' ' not in query:\n raw_pattern = r'(\\b|[\\.\\+\\-_])' + query + r'(\\b|[\\.\\+\\-_])'\n else:\n raw_pattern = query.replace(' ', r'.*[\\s\\.\\+\\-_]')\n \n try:\n regex = re.compile(raw_pattern, flags=re.IGNORECASE)\n except:\n return []\n\n if USE_CAPTION_FILTER:\n filter = {'$or': [{'file_name': regex}, {'caption': regex}]}\n else:\n filter = {'file_name': regex}\n\n if file_type:\n filter['file_type'] = file_type\n\n total_results = await Media.count_documents(filter)\n next_offset = offset + max_results\n\n if next_offset > total_results:\n next_offset = ''\n\n cursor = Media.find(filter)\n # Sort by recent\n cursor.sort('$natural', -1)\n # Slice files according to offset and max results\n cursor.skip(offset).limit(max_results)\n # Get list of files\n files = await cursor.to_list(length=max_results)\n\n return files, next_offset, total_results"
},
{
"identifier": "is_subscribed",
"path": "utils.py",
"snippet": "async def is_subscribed(bot, query):\n try:\n user = await bot.get_chat_member(AUTH_CHANNEL, query.from_user.id)\n except UserNotParticipant:\n pass\n except Exception as e:\n logger.exception(e)\n else:\n if user.status != enums.ChatMemberStatus.BANNED:\n return True\n\n return False"
},
{
"identifier": "get_size",
"path": "utils.py",
"snippet": "def get_size(size):\n \"\"\"Get size in readable format\"\"\"\n\n units = [\"Bytes\", \"KB\", \"MB\", \"GB\", \"TB\", \"PB\", \"EB\"]\n size = float(size)\n i = 0\n while size >= 1024.0 and i < len(units):\n i += 1\n size /= 1024.0\n return \"%.2f %s\" % (size, units[i])"
},
{
"identifier": "temp",
"path": "utils.py",
"snippet": "class temp(object):\n BANNED_USERS = []\n BANNED_CHATS = []\n ME = None\n CURRENT=int(os.environ.get(\"SKIP\", 2))\n CANCEL = False\n MELCOW = {}\n U_NAME = None\n B_NAME = None\n GETALL = {}\n SHORT = {}\n SETTINGS = {}"
},
{
"identifier": "CACHE_TIME",
"path": "info.py",
"snippet": "CACHE_TIME = int(environ.get('CACHE_TIME', 99999))"
},
{
"identifier": "AUTH_USERS",
"path": "info.py",
"snippet": "AUTH_USERS = (auth_users + ADMINS) if auth_users else []"
},
{
"identifier": "AUTH_CHANNEL",
"path": "info.py",
"snippet": "AUTH_CHANNEL = int(auth_channel) if auth_channel and id_pattern.search(auth_channel) else None"
},
{
"identifier": "CUSTOM_FILE_CAPTION",
"path": "info.py",
"snippet": "CUSTOM_FILE_CAPTION = environ.get(\"CUSTOM_FILE_CAPTION\", f\"{script.CAPTION}\")"
},
{
"identifier": "active_connection",
"path": "database/connections_mdb.py",
"snippet": "async def active_connection(user_id):\n\n query = mycol.find_one(\n { \"_id\": user_id },\n { \"_id\": 0, \"group_details\": 0 }\n )\n if not query:\n return None\n\n group_id = query['active_group']\n return int(group_id) if group_id != None else None"
}
] | import logging
from pyrogram import Client, emoji, filters
from pyrogram.errors.exceptions.bad_request_400 import QueryIdInvalid
from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup, InlineQueryResultCachedDocument, InlineQuery
from database.ia_filterdb import get_search_results
from utils import is_subscribed, get_size, temp
from info import CACHE_TIME, AUTH_USERS, AUTH_CHANNEL, CUSTOM_FILE_CAPTION
from database.connections_mdb import active_connection | 1,485 |
logger = logging.getLogger(__name__)
cache_time = 0 if AUTH_USERS or AUTH_CHANNEL else CACHE_TIME
async def inline_users(query: InlineQuery):
if AUTH_USERS:
if query.from_user and query.from_user.id in AUTH_USERS:
return True
else:
return False
if query.from_user and query.from_user.id not in temp.BANNED_USERS:
return True
return False
@Client.on_inline_query()
async def answer(bot, query):
"""Show search results for given inline query"""
chat_id = await active_connection(str(query.from_user.id))
if not await inline_users(query):
await query.answer(results=[],
cache_time=0,
switch_pm_text='okDa',
switch_pm_parameter="hehe")
return
if AUTH_CHANNEL and not await is_subscribed(bot, query):
await query.answer(results=[],
cache_time=0,
switch_pm_text='You have to subscribe to my channel to use the bot',
switch_pm_parameter="subscribe")
return
results = []
if '|' in query.query:
string, file_type = query.query.split('|', maxsplit=1)
string = string.strip()
file_type = file_type.strip().lower()
else:
string = query.query.strip()
file_type = None
offset = int(query.offset or 0)
reply_markup = get_reply_markup(query=string)
files, next_offset, total = await get_search_results(
chat_id,
string,
file_type=file_type,
max_results=10,
offset=offset)
for file in files:
title=file.file_name
size=get_size(file.file_size)
f_caption=file.caption
|
logger = logging.getLogger(__name__)
cache_time = 0 if AUTH_USERS or AUTH_CHANNEL else CACHE_TIME
async def inline_users(query: InlineQuery):
if AUTH_USERS:
if query.from_user and query.from_user.id in AUTH_USERS:
return True
else:
return False
if query.from_user and query.from_user.id not in temp.BANNED_USERS:
return True
return False
@Client.on_inline_query()
async def answer(bot, query):
"""Show search results for given inline query"""
chat_id = await active_connection(str(query.from_user.id))
if not await inline_users(query):
await query.answer(results=[],
cache_time=0,
switch_pm_text='okDa',
switch_pm_parameter="hehe")
return
if AUTH_CHANNEL and not await is_subscribed(bot, query):
await query.answer(results=[],
cache_time=0,
switch_pm_text='You have to subscribe to my channel to use the bot',
switch_pm_parameter="subscribe")
return
results = []
if '|' in query.query:
string, file_type = query.query.split('|', maxsplit=1)
string = string.strip()
file_type = file_type.strip().lower()
else:
string = query.query.strip()
file_type = None
offset = int(query.offset or 0)
reply_markup = get_reply_markup(query=string)
files, next_offset, total = await get_search_results(
chat_id,
string,
file_type=file_type,
max_results=10,
offset=offset)
for file in files:
title=file.file_name
size=get_size(file.file_size)
f_caption=file.caption | if CUSTOM_FILE_CAPTION: | 7 | 2023-11-03 12:21:26+00:00 | 2k |
tiendatnguyen-vision/Orbit-symmetrize | RotatedMNIST/LPS/emlp-pytorch/tests/model_tests.py | [
{
"identifier": "rel_error",
"path": "RotatedMNIST/LPS/emlp-pytorch/tests/equivariance_tests.py",
"snippet": "def rel_error(t1, t2):\r\n \"\"\" Computes the relative error of two tensors. \"\"\"\r\n error = torch.sqrt(torch.mean(torch.abs(t1-t2)**2))\r\n scale = torch.sqrt(torch.mean(torch.abs(t1)**2)) + \\\r\n torch.sqrt(torch.mean(torch.abs(t2)**2))\r\n return error/torch.clamp(scale, min=1e-7)\r"
},
{
"identifier": "scale_adjusted_rel_error",
"path": "RotatedMNIST/LPS/emlp-pytorch/tests/equivariance_tests.py",
"snippet": "def scale_adjusted_rel_error(t1, t2, g):\r\n \"\"\" Computes the relative error of two tensors t1 and t2 under the action of g. \"\"\"\r\n error = torch.sqrt(torch.mean(torch.abs(t1-t2)**2))\r\n tscale = torch.sqrt(torch.mean(torch.abs(t1)**2)) + \\\r\n torch.sqrt(torch.mean(torch.abs(t2)**2))\r\n gscale = torch.sqrt(torch.mean(torch.abs(g-torch.eye(g.size(-1), device=t1.device))**2))\r\n scale = torch.max(tscale, gscale)\r\n return error/torch.clamp(scale, min=1e-7)\r"
}
] | import torch
from torch.utils.data import DataLoader
from oil.utils.utils import FixedNumpySeed, FixedPytorchSeed
from emlp_pytorch.nn import EMLP
from emlp_pytorch.groups import S, SO, DirectProduct
from emlp_pytorch.reps import vis, sparsify_basis, V, Rep, LazyKron, T
from .equivariance_tests import rel_error, scale_adjusted_rel_error | 1,384 | """ Tests for the EMLP model."""
def equivariance_err(model, mb, repin, repout, group):
""" Computes the equivariance error of a model on a minibatch mb. """
x, y = mb
gs = group.samples(x.size(0))
rho_gin = torch.vmap(repin(group).rho_dense)(gs)
rho_gout = torch.vmap(repout(group).rho_dense)(gs)
y1 = model((rho_gin@x[..., None])[..., 0])
y2 = (rho_gout@model(x)[..., None])[..., 0]
return scale_adjusted_rel_error(y1, y2, gs)
def get_dsmb(dsclass, device='cpu'):
""" Returns a dataset and minibatch for a given dataset class. """
seed = 2021
bs = 50
with FixedNumpySeed(seed), FixedPytorchSeed(seed):
ds = dsclass(100)
ds = ds.to(device)
dataloader = DataLoader(ds, batch_size=min(bs, len(ds)), num_workers=0, pin_memory=False)
mb = next(iter(dataloader))
return ds, mb
def test_init_forward_and_equivariance(dsclass, device='cpu'):
""" Tests that the model can be initialized, forward pass is correct,
and equivariance is correct. """
network = EMLP
ds, mb = get_dsmb(dsclass, device)
model = network(ds.rep_in, ds.rep_out, group=ds.symmetry).to(device)
assert equivariance_err(model, mb, ds.rep_in, ds.rep_out, ds.symmetry) < 1e-4, \
"EMLP failed equivariance test"
def test_utilities(device='cpu'):
""" Tests that the utilities work. """
W = V(SO(3).to(device))
# W = V(DirectProduct(SO(3).to(device), S(6).to(device)))
vis(W, W)
Q = (W**2 >> W).equivariant_basis()
SQ = sparsify_basis(Q)
A = SQ@(1+torch.arange(SQ.size(-1), device=device)).to(torch.float)
nunique = len(torch.unique(torch.abs(A)))
assert nunique in (SQ.size(-1), SQ.size(-1) + 1), "Sparsify fails on SO(3) T3"
def test_bespoke_representations(device='cpu'):
""" Tests that bespoke representations work. """
class ProductSubRep(Rep):
""" A representation of a product group G = G1 x G2 as a sum of two subrepresentations """
def __init__(self, G, subgroup_id, size):
""" Produces the representation of the subgroup of G = G1 x G2
with the index subgroup_id in {0,1} specifying G1 or G2.
Also requires specifying the size of the representation given by G1.d or G2.d """
super().__init__()
self.G = G
self.index = subgroup_id
self._size = size
self.device = device
def __repr__(self):
return "V_"+str(self.G).split('x')[self.index]
def __hash__(self):
return hash((type(self), (self.G, self.index)))
def size(self):
return self._size
def rho(self, M):
# Given that M is a LazyKron object, we can just get the argument
return M.Ms[self.index]
def drho(self, A):
return A.Ms[self.index]
def __call__(self, G):
# adding this will probably not be necessary in a future release,
# necessary now because rep is __call__ed in nn.EMLP constructor
assert self.G == G
return self
G1, G2 = SO(3).to(device), S(5).to(device)
G = G1 * G2
VSO3 = ProductSubRep(G, 0, G1.d)
VS5 = ProductSubRep(G, 1, G2.d)
Vin = VS5 + V(G)
Vout = VSO3
str(Vin >> Vout)
model = EMLP(Vin, Vout, group=G)
model.to(device)
input_point = torch.randn(Vin.size(), device=device)*10
lazy_G_sample = LazyKron([G1.sample(), G2.sample()])
out1 = model(Vin.rho(lazy_G_sample)@input_point)
out2 = Vout.rho(lazy_G_sample)@model(input_point)
| """ Tests for the EMLP model."""
def equivariance_err(model, mb, repin, repout, group):
""" Computes the equivariance error of a model on a minibatch mb. """
x, y = mb
gs = group.samples(x.size(0))
rho_gin = torch.vmap(repin(group).rho_dense)(gs)
rho_gout = torch.vmap(repout(group).rho_dense)(gs)
y1 = model((rho_gin@x[..., None])[..., 0])
y2 = (rho_gout@model(x)[..., None])[..., 0]
return scale_adjusted_rel_error(y1, y2, gs)
def get_dsmb(dsclass, device='cpu'):
""" Returns a dataset and minibatch for a given dataset class. """
seed = 2021
bs = 50
with FixedNumpySeed(seed), FixedPytorchSeed(seed):
ds = dsclass(100)
ds = ds.to(device)
dataloader = DataLoader(ds, batch_size=min(bs, len(ds)), num_workers=0, pin_memory=False)
mb = next(iter(dataloader))
return ds, mb
def test_init_forward_and_equivariance(dsclass, device='cpu'):
""" Tests that the model can be initialized, forward pass is correct,
and equivariance is correct. """
network = EMLP
ds, mb = get_dsmb(dsclass, device)
model = network(ds.rep_in, ds.rep_out, group=ds.symmetry).to(device)
assert equivariance_err(model, mb, ds.rep_in, ds.rep_out, ds.symmetry) < 1e-4, \
"EMLP failed equivariance test"
def test_utilities(device='cpu'):
""" Tests that the utilities work. """
W = V(SO(3).to(device))
# W = V(DirectProduct(SO(3).to(device), S(6).to(device)))
vis(W, W)
Q = (W**2 >> W).equivariant_basis()
SQ = sparsify_basis(Q)
A = SQ@(1+torch.arange(SQ.size(-1), device=device)).to(torch.float)
nunique = len(torch.unique(torch.abs(A)))
assert nunique in (SQ.size(-1), SQ.size(-1) + 1), "Sparsify fails on SO(3) T3"
def test_bespoke_representations(device='cpu'):
""" Tests that bespoke representations work. """
class ProductSubRep(Rep):
""" A representation of a product group G = G1 x G2 as a sum of two subrepresentations """
def __init__(self, G, subgroup_id, size):
""" Produces the representation of the subgroup of G = G1 x G2
with the index subgroup_id in {0,1} specifying G1 or G2.
Also requires specifying the size of the representation given by G1.d or G2.d """
super().__init__()
self.G = G
self.index = subgroup_id
self._size = size
self.device = device
def __repr__(self):
return "V_"+str(self.G).split('x')[self.index]
def __hash__(self):
return hash((type(self), (self.G, self.index)))
def size(self):
return self._size
def rho(self, M):
# Given that M is a LazyKron object, we can just get the argument
return M.Ms[self.index]
def drho(self, A):
return A.Ms[self.index]
def __call__(self, G):
# adding this will probably not be necessary in a future release,
# necessary now because rep is __call__ed in nn.EMLP constructor
assert self.G == G
return self
G1, G2 = SO(3).to(device), S(5).to(device)
G = G1 * G2
VSO3 = ProductSubRep(G, 0, G1.d)
VS5 = ProductSubRep(G, 1, G2.d)
Vin = VS5 + V(G)
Vout = VSO3
str(Vin >> Vout)
model = EMLP(Vin, Vout, group=G)
model.to(device)
input_point = torch.randn(Vin.size(), device=device)*10
lazy_G_sample = LazyKron([G1.sample(), G2.sample()])
out1 = model(Vin.rho(lazy_G_sample)@input_point)
out2 = Vout.rho(lazy_G_sample)@model(input_point) | assert rel_error(out1, out2) < 1e-4, "EMLP equivariance fails on bespoke productsubrep" | 0 | 2023-11-01 07:19:02+00:00 | 2k |
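All of the tests above reduce to the same check: for a group sample g, an equivariant model must satisfy f(rho_in(g) x) ≈ rho_out(g) f(x), and rel_error measures the normalized gap between the two sides. A tiny NumPy illustration with an SO(2) rotation and a uniform scaling map, which commutes with rotations, so the error comes out near zero:

import numpy as np

def rel_error(t1, t2):
    err = np.sqrt(np.mean(np.abs(t1 - t2) ** 2))
    scale = np.sqrt(np.mean(np.abs(t1) ** 2)) + np.sqrt(np.mean(np.abs(t2) ** 2))
    return err / max(scale, 1e-7)

theta = 0.3
g = np.array([[np.cos(theta), -np.sin(theta)],
              [np.sin(theta),  np.cos(theta)]])   # a sample from SO(2)
f = lambda x: 2.0 * x                             # scaling commutes with rotation, so f is equivariant
x = np.random.randn(2)
print(rel_error(f(g @ x), g @ f(x)))              # ~0, i.e. the equivariance test passes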
crizbae/PictoPlan | backend/mongo_api/app/server/routes/item_routes.py | [
{
"identifier": "collection",
"path": "backend/mongo_api/app/server/database.py",
"snippet": "MONGO_URI = config(\"MONGO_URI\")\ndef item_helper(item) -> dict:\ndef ret_link(item) -> dict:\nasync def retrieve_all_items():\nasync def retrieve_item(item_id: str):\nasync def retrieve_links(session_id: str):\nasync def update_item_in_db(item_id: str, updated_item: dict) -> bool:\nasync def delete_item_from_db(item_id: str) -> bool:"
},
{
"identifier": "Item",
"path": "backend/mongo_api/app/server/models/item.py",
"snippet": "class Item(BaseModel):\n Title: str\n SessionId: str\n Objective: str\n Materials: str\n Procedure: Dict[str, str]\n Assessment: str"
},
{
"identifier": "retrieve_all_items",
"path": "backend/mongo_api/app/server/database.py",
"snippet": "async def retrieve_all_items():\n items = []\n cursor = collection.find()\n for item in cursor:\n items.append(item_helper(item))\n return items"
},
{
"identifier": "retrieve_item",
"path": "backend/mongo_api/app/server/database.py",
"snippet": "async def retrieve_item(item_id: str):\n cursor = collection.find_one({\"_id\": ObjectId(item_id)})\n return [ret_link(cursor)]"
},
{
"identifier": "update_item_in_db",
"path": "backend/mongo_api/app/server/database.py",
"snippet": "async def update_item_in_db(item_id: str, updated_item: dict) -> bool:\n cursor = collection.update_one({\"_id\": ObjectId(item_id)}, {\"$set\": updated_item})\n return cursor.modified_count > 0"
},
{
"identifier": "delete_item_from_db",
"path": "backend/mongo_api/app/server/database.py",
"snippet": "async def delete_item_from_db(item_id: str) -> bool:\n cursor = collection.delete_one({\"_id\": ObjectId(item_id)})\n return cursor.deleted_count > 0"
},
{
"identifier": "retrieve_links",
"path": "backend/mongo_api/app/server/database.py",
"snippet": "async def retrieve_links(session_id: str):\n cursor = collection.find({\"SessionId\": session_id})\n links = []\n for item in cursor:\n links.append(str(item[\"_id\"]))\n return links"
}
] | from fastapi import APIRouter, Depends, HTTPException
from ..database import collection
from ..models.item import Item
from ..database import retrieve_all_items, retrieve_item, update_item_in_db, delete_item_from_db, retrieve_links | 815 |
router = APIRouter()
@router.post("/items/")
def create_item(item: Item):
item_dict = item.dict()
inserted_item = collection.insert_one(item_dict)
item_id = str(inserted_item.inserted_id)
del item_dict["_id"]
item_dict["id"] = item_id
return item_dict
@router.get("/items/")
async def get_all_items():
items = await retrieve_all_items()
return items
# get by frontend UUID
@router.get("/items/session/{session_id}")
async def get_item_by_session_id(session_id: str):
item = await retrieve_links(session_id)
if len(item) == 0:
raise HTTPException(status_code=404, detail="Items not found")
return item
# get by link
@router.get("/items/{item_id}")
async def get_item_by_id(item_id: str):
item = await retrieve_item(item_id)
if len(item) == 0:
raise HTTPException(status_code=404, detail="Item not found")
return item
@router.put("/items/{item_id}")
async def update_item(item_id: str, updated_item: Item):
updated_item = updated_item.dict()
success = await update_item_in_db(item_id, updated_item)
if not success:
raise HTTPException(status_code=404, detail="Item not found")
return {**updated_item, "id": item_id}
@router.delete("/items/{item_id}")
async def delete_item(item_id: str):
|
router = APIRouter()
@router.post("/items/")
def create_item(item: Item):
item_dict = item.dict()
inserted_item = collection.insert_one(item_dict)
item_id = str(inserted_item.inserted_id)
del item_dict["_id"]
item_dict["id"] = item_id
return item_dict
@router.get("/items/")
async def get_all_items():
items = await retrieve_all_items()
return items
# get by frontend UUID
@router.get("/items/session/{session_id}")
async def get_item_by_session_id(session_id: str):
item = await retrieve_links(session_id)
if len(item) == 0:
raise HTTPException(status_code=404, detail="Items not found")
return item
# get by link
@router.get("/items/{item_id}")
async def get_item_by_id(item_id: str):
item = await retrieve_item(item_id)
if len(item) == 0:
raise HTTPException(status_code=404, detail="Item not found")
return item
@router.put("/items/{item_id}")
async def update_item(item_id: str, updated_item: Item):
updated_item = updated_item.dict()
success = await update_item_in_db(item_id, updated_item)
if not success:
raise HTTPException(status_code=404, detail="Item not found")
return {**updated_item, "id": item_id}
@router.delete("/items/{item_id}")
async def delete_item(item_id: str): | success = await delete_item_from_db(item_id) | 5 | 2023-11-04 16:48:55+00:00 | 2k |
xenxxxx/BitPay-Crypto-Signal-Trading-Bot | tests/data/test_btanalysis.py | [
{
"identifier": "CURRENT_TEST_STRATEGY",
"path": "tests/conftest.py",
"snippet": "CURRENT_TEST_STRATEGY = 'StrategyTestV3'"
},
{
"identifier": "create_mock_trades",
"path": "tests/conftest.py",
"snippet": "def create_mock_trades(fee, is_short: Optional[bool] = False, use_db: bool = True):\n \"\"\"\n Create some fake trades ...\n :param is_short: Optional bool, None creates a mix of long and short trades.\n \"\"\"\n def add_trade(trade):\n if use_db:\n Trade.session.add(trade)\n else:\n LocalTrade.add_bt_trade(trade)\n is_short1 = is_short if is_short is not None else True\n is_short2 = is_short if is_short is not None else False\n # Simulate dry_run entries\n trade = mock_trade_1(fee, is_short1)\n add_trade(trade)\n\n trade = mock_trade_2(fee, is_short1)\n add_trade(trade)\n\n trade = mock_trade_3(fee, is_short2)\n add_trade(trade)\n\n trade = mock_trade_4(fee, is_short2)\n add_trade(trade)\n\n trade = mock_trade_5(fee, is_short2)\n add_trade(trade)\n\n trade = mock_trade_6(fee, is_short1)\n add_trade(trade)\n\n if use_db:\n Trade.commit()"
},
{
"identifier": "MOCK_TRADE_COUNT",
"path": "tests/conftest_trades.py",
"snippet": "MOCK_TRADE_COUNT = 6"
}
] | from datetime import datetime, timedelta, timezone
from pathlib import Path
from unittest.mock import MagicMock
from pandas import DataFrame, DateOffset, Timestamp, to_datetime
from freqtrade.configuration import TimeRange
from freqtrade.constants import LAST_BT_RESULT_FN
from freqtrade.data.btanalysis import (BT_DATA_COLUMNS, analyze_trade_parallelism,
extract_trades_of_period, get_latest_backtest_filename,
get_latest_hyperopt_file, load_backtest_data,
load_backtest_metadata, load_trades, load_trades_from_db)
from freqtrade.data.history import load_data, load_pair_history
from freqtrade.data.metrics import (calculate_cagr, calculate_calmar, calculate_csum,
calculate_expectancy, calculate_market_change,
calculate_max_drawdown, calculate_sharpe, calculate_sortino,
calculate_underwater, combine_dataframes_with_mean,
create_cum_profit)
from freqtrade.exceptions import OperationalException
from freqtrade.util import dt_utc
from tests.conftest import CURRENT_TEST_STRATEGY, create_mock_trades
from tests.conftest_trades import MOCK_TRADE_COUNT
import pytest | 1,532 |
def test_get_latest_backtest_filename(testdatadir, mocker):
with pytest.raises(ValueError, match=r"Directory .* does not exist\."):
get_latest_backtest_filename(testdatadir / 'does_not_exist')
with pytest.raises(ValueError,
match=r"Directory .* does not seem to contain .*"):
get_latest_backtest_filename(testdatadir)
testdir_bt = testdatadir / "backtest_results"
res = get_latest_backtest_filename(testdir_bt)
assert res == 'backtest-result.json'
res = get_latest_backtest_filename(str(testdir_bt))
assert res == 'backtest-result.json'
mocker.patch("freqtrade.data.btanalysis.json_load", return_value={})
with pytest.raises(ValueError, match=r"Invalid '.last_result.json' format."):
get_latest_backtest_filename(testdir_bt)
def test_get_latest_hyperopt_file(testdatadir):
res = get_latest_hyperopt_file(testdatadir / 'does_not_exist', 'testfile.pickle')
assert res == testdatadir / 'does_not_exist/testfile.pickle'
res = get_latest_hyperopt_file(testdatadir.parent)
assert res == testdatadir.parent / "hyperopt_results.pickle"
res = get_latest_hyperopt_file(str(testdatadir.parent))
assert res == testdatadir.parent / "hyperopt_results.pickle"
# Test with absolute path
with pytest.raises(
OperationalException,
match="--hyperopt-filename expects only the filename, not an absolute path."):
get_latest_hyperopt_file(str(testdatadir.parent), str(testdatadir.parent))
def test_load_backtest_metadata(mocker, testdatadir):
res = load_backtest_metadata(testdatadir / 'nonexistant.file.json')
assert res == {}
mocker.patch('freqtrade.data.btanalysis.get_backtest_metadata_filename')
mocker.patch('freqtrade.data.btanalysis.json_load', side_effect=Exception())
with pytest.raises(OperationalException,
match=r"Unexpected error.*loading backtest metadata\."):
load_backtest_metadata(testdatadir / 'nonexistant.file.json')
def test_load_backtest_data_old_format(testdatadir, mocker):
filename = testdatadir / "backtest-result_test222.json"
mocker.patch('freqtrade.data.btanalysis.load_backtest_stats', return_value=[])
with pytest.raises(OperationalException,
match=r"Backtest-results with only trades data are no longer supported."):
load_backtest_data(filename)
def test_load_backtest_data_new_format(testdatadir):
filename = testdatadir / "backtest_results/backtest-result.json"
bt_data = load_backtest_data(filename)
assert isinstance(bt_data, DataFrame)
assert set(bt_data.columns) == set(BT_DATA_COLUMNS)
assert len(bt_data) == 179
# Test loading from string (must yield same result)
bt_data2 = load_backtest_data(str(filename))
assert bt_data.equals(bt_data2)
# Test loading from folder (must yield same result)
bt_data3 = load_backtest_data(testdatadir / "backtest_results")
assert bt_data.equals(bt_data3)
with pytest.raises(ValueError, match=r"File .* does not exist\."):
load_backtest_data("filename" + "nofile")
with pytest.raises(ValueError, match=r"Unknown dataformat."):
load_backtest_data(testdatadir / "backtest_results" / LAST_BT_RESULT_FN)
def test_load_backtest_data_multi(testdatadir):
filename = testdatadir / "backtest_results/backtest-result_multistrat.json"
for strategy in ('StrategyTestV2', 'TestStrategy'):
bt_data = load_backtest_data(filename, strategy=strategy)
assert isinstance(bt_data, DataFrame)
assert set(bt_data.columns) == set(
BT_DATA_COLUMNS)
assert len(bt_data) == 179
# Test loading from string (must yield same result)
bt_data2 = load_backtest_data(str(filename), strategy=strategy)
assert bt_data.equals(bt_data2)
with pytest.raises(ValueError, match=r"Strategy XYZ not available in the backtest result\."):
load_backtest_data(filename, strategy='XYZ')
with pytest.raises(ValueError, match=r"Detected backtest result with more than one strategy.*"):
load_backtest_data(filename)
@pytest.mark.usefixtures("init_persistence")
@pytest.mark.parametrize('is_short', [False, True])
def test_load_trades_from_db(default_conf, fee, is_short, mocker):
|
def test_get_latest_backtest_filename(testdatadir, mocker):
with pytest.raises(ValueError, match=r"Directory .* does not exist\."):
get_latest_backtest_filename(testdatadir / 'does_not_exist')
with pytest.raises(ValueError,
match=r"Directory .* does not seem to contain .*"):
get_latest_backtest_filename(testdatadir)
testdir_bt = testdatadir / "backtest_results"
res = get_latest_backtest_filename(testdir_bt)
assert res == 'backtest-result.json'
res = get_latest_backtest_filename(str(testdir_bt))
assert res == 'backtest-result.json'
mocker.patch("freqtrade.data.btanalysis.json_load", return_value={})
with pytest.raises(ValueError, match=r"Invalid '.last_result.json' format."):
get_latest_backtest_filename(testdir_bt)
def test_get_latest_hyperopt_file(testdatadir):
res = get_latest_hyperopt_file(testdatadir / 'does_not_exist', 'testfile.pickle')
assert res == testdatadir / 'does_not_exist/testfile.pickle'
res = get_latest_hyperopt_file(testdatadir.parent)
assert res == testdatadir.parent / "hyperopt_results.pickle"
res = get_latest_hyperopt_file(str(testdatadir.parent))
assert res == testdatadir.parent / "hyperopt_results.pickle"
# Test with absolute path
with pytest.raises(
OperationalException,
match="--hyperopt-filename expects only the filename, not an absolute path."):
get_latest_hyperopt_file(str(testdatadir.parent), str(testdatadir.parent))
def test_load_backtest_metadata(mocker, testdatadir):
res = load_backtest_metadata(testdatadir / 'nonexistant.file.json')
assert res == {}
mocker.patch('freqtrade.data.btanalysis.get_backtest_metadata_filename')
mocker.patch('freqtrade.data.btanalysis.json_load', side_effect=Exception())
with pytest.raises(OperationalException,
match=r"Unexpected error.*loading backtest metadata\."):
load_backtest_metadata(testdatadir / 'nonexistant.file.json')
def test_load_backtest_data_old_format(testdatadir, mocker):
filename = testdatadir / "backtest-result_test222.json"
mocker.patch('freqtrade.data.btanalysis.load_backtest_stats', return_value=[])
with pytest.raises(OperationalException,
match=r"Backtest-results with only trades data are no longer supported."):
load_backtest_data(filename)
def test_load_backtest_data_new_format(testdatadir):
filename = testdatadir / "backtest_results/backtest-result.json"
bt_data = load_backtest_data(filename)
assert isinstance(bt_data, DataFrame)
assert set(bt_data.columns) == set(BT_DATA_COLUMNS)
assert len(bt_data) == 179
# Test loading from string (must yield same result)
bt_data2 = load_backtest_data(str(filename))
assert bt_data.equals(bt_data2)
# Test loading from folder (must yield same result)
bt_data3 = load_backtest_data(testdatadir / "backtest_results")
assert bt_data.equals(bt_data3)
with pytest.raises(ValueError, match=r"File .* does not exist\."):
load_backtest_data("filename" + "nofile")
with pytest.raises(ValueError, match=r"Unknown dataformat."):
load_backtest_data(testdatadir / "backtest_results" / LAST_BT_RESULT_FN)
def test_load_backtest_data_multi(testdatadir):
filename = testdatadir / "backtest_results/backtest-result_multistrat.json"
for strategy in ('StrategyTestV2', 'TestStrategy'):
bt_data = load_backtest_data(filename, strategy=strategy)
assert isinstance(bt_data, DataFrame)
assert set(bt_data.columns) == set(
BT_DATA_COLUMNS)
assert len(bt_data) == 179
# Test loading from string (must yield same result)
bt_data2 = load_backtest_data(str(filename), strategy=strategy)
assert bt_data.equals(bt_data2)
with pytest.raises(ValueError, match=r"Strategy XYZ not available in the backtest result\."):
load_backtest_data(filename, strategy='XYZ')
with pytest.raises(ValueError, match=r"Detected backtest result with more than one strategy.*"):
load_backtest_data(filename)
@pytest.mark.usefixtures("init_persistence")
@pytest.mark.parametrize('is_short', [False, True])
def test_load_trades_from_db(default_conf, fee, is_short, mocker):
| create_mock_trades(fee, is_short) | 1 | 2023-11-07 18:46:03+00:00 | 2k |
ssajedi/SAiF-GPT | bin/main.py | [
{
"identifier": "anonymize_text",
"path": "utils.py",
"snippet": "def augment_prompt(prompt,ref_doc):\ndef extract_pdf_text(file):"
},
{
"identifier": "extract_pdf_text",
"path": "utils.py",
"snippet": "def extract_pdf_text(file):\n \"\"\"\n Extracts text paragraphs from a PDF file.\n \"\"\"\n pdf_reader = PyPDF2.PdfReader(file)\n pdf_dict={}\n for ip in range(len(pdf_reader.pages)):\n pdf_dict[ip] = pdf_reader.pages[ip].extract_text()\n dataset = [pdf_dict[ip] for ip in range(len(pdf_reader.pages))]\n return pdf_dict,dataset"
},
{
"identifier": "highlight_phrases_in_paragraph",
"path": "text_effects.py",
"snippet": "def highlight_phrases_in_paragraph(paragraph, phrases_to_colors):\n \"\"\"\n Highlights specific phrases within a paragraph in Streamlit markdown using generated pale colors and rounded edges.\n \n Args:\n - paragraph (str): The paragraph of text where phrases will be highlighted.\n - phrases_to_colors (dict): Dictionary where keys are phrases to be highlighted. Colors will be generated automatically.\n \n Returns:\n - None: Directly renders the HTML in Streamlit using markdown.\n \"\"\"\n # Filter out phrases that don't exist in the paragraph\n phrases_present = {phrase: color for phrase, color in phrases_to_colors.items() if re.search(re.escape(phrase), paragraph, re.IGNORECASE)}\n\n # Sort phrases by length in descending order to handle nested phrases\n phrases_sorted = sorted(phrases_present.keys(), key=len, reverse=True)\n\n # Initialize a hue value\n hue = 0\n hue_increment = 1 / len(phrases_sorted) if phrases_sorted else 0 # Prevent division by zero\n \n # Escape phrases for regex and replace them with highlighted HTML\n for phrase in phrases_sorted:\n color_code = generate_pale_color(hue)\n hue += hue_increment # Increment hue to get a different color\n \n escaped_phrase = re.escape(phrase)\n pattern = r'\\b' + escaped_phrase + r'\\b' # Use word boundaries\n replacement = (\n f'<span style=\"background-color: {color_code}; '\n f'border-radius: 0.5em; padding: 0.3em 0.6em;\">{phrase}🔒</span>'\n )\n paragraph = re.sub(pattern, replacement, paragraph, flags=re.IGNORECASE)\n \n # Render the HTML in Streamlit using the markdown function with unsafe_allow_html set to True\n # st.markdown(paragraph, unsafe_allow_html=True)\n return paragraph"
}
] | import streamlit as st
import random
import time
import openai
import openai
import streamlit as st
from utils import anonymize_text, deanonymize_text, chatbot_response
from utils import extract_pdf_text
from text_effects import highlight_phrases_in_paragraph
from DetectEntity import DetectEntity | 815 |
st.title("AInonymous")
system_prompt="""You are a helpful assistant, your task is to review an uploaded document\
uploaded by a user.\
The user query is delimited by triple asterisks.\
The reference documents in that message are delimited with triple backticks.\
A user might ask follow up questions.
"""
# add a selectbox to the sidebar
st.sidebar.multiselect("Entity list", ["email", "phone",'location'], ["email", "phone","location"])
# add a clear button to the sidebar
if st.sidebar.button("Clear"):
st.session_state.chat_hist = []
st.session_state.messages = []
st.session_state.cls = None
# add
# add an upload pdf button to the sidebar
uploaded_file = st.sidebar.file_uploader("Choose a PDF file", accept_multiple_files=False)
if uploaded_file is not None:
|
st.title("AInonymous")
system_prompt="""You are a helpful assistant, your task is to review an uploaded document\
uploaded by a user.\
The user query is delimited by triple asterisks.\
The reference documents in that message are delimited with triple backticks.\
A user might ask follow up questions.
"""
# add a selectbox to the sidebar
st.sidebar.multiselect("Entity list", ["email", "phone",'location'], ["email", "phone","location"])
# add a clear button to the sidebar
if st.sidebar.button("Clear"):
st.session_state.chat_hist = []
st.session_state.messages = []
st.session_state.cls = None
# add
# add an upload pdf button to the sidebar
uploaded_file = st.sidebar.file_uploader("Choose a PDF file", accept_multiple_files=False)
if uploaded_file is not None: | _,chunks = extract_pdf_text(uploaded_file) | 1 | 2023-11-04 18:14:49+00:00 | 2k |
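The highlight_phrases_in_paragraph snippet in the SAiF-GPT context above relies on a generate_pale_color(hue) helper that is not shown. One plausible implementation, offered here only as an assumption for illustration and not as the project's actual code, converts a high-lightness HSL colour to a hex string:

import colorsys

def generate_pale_color(hue: float) -> str:
    # High lightness with moderate saturation gives a pale, readable highlight colour.
    r, g, b = colorsys.hls_to_rgb(hue % 1.0, 0.85, 0.6)
    return "#{:02x}{:02x}{:02x}".format(int(r * 255), int(g * 255), int(b * 255))

print(generate_pale_color(0.0))   # pale red-ish
print(generate_pale_color(0.33))  # pale green-ish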
awslabs/optimizing-multitask-training-through-dynamic-pipelines | tests/test_kv_store.py | [
{
"identifier": "_get_from_shared_kv_store",
"path": "dynapipe/pipe/data_loader.py",
"snippet": "def _get_from_shared_kv_store(\n kv_store: RedisKVStore,\n key: str,\n reader_idx: int,\n n_total_readers: int,\n decode: bool = True,\n logger=None,\n):\n reader_count_key = key + \"_rc\"\n reader_ack_key = key + \"_r{}_ack\".format(reader_idx)\n # wait for reader ack\n if logger is not None:\n logger.debug(\"Waiting for reader ack key: {}\".format(reader_ack_key))\n kv_store.get(reader_ack_key)\n if logger is not None:\n logger.debug(\n \"Got reader ack key: {}, waiting for data key: {}\".format(\n reader_ack_key, key\n )\n )\n data = kv_store.get(key)\n if logger is not None:\n logger.debug(\"Removing reader ack key: {}\".format(reader_ack_key))\n # remove reader ack\n _checked_delete_key(kv_store, reader_ack_key, logger=logger)\n # get reader count\n reader_count = kv_store.add(reader_count_key, 1)\n if reader_count == n_total_readers:\n if logger is not None:\n logger.debug(\n \"Last reader, reset reader count: {}\".format(reader_count_key)\n )\n # reset reader count\n result_readers = kv_store.add(reader_count_key, -n_total_readers)\n assert result_readers == 0\n if logger is not None:\n logger.debug(\"Last reader, remove data key: {}\".format(key))\n # remove data key\n _checked_delete_key(kv_store, key, logger=logger)\n if logger is not None:\n logger.debug(\"Last reader, set ack key: {}\".format(key + \"_ack\"))\n # set all reader ack keys\n keys_to_reset = [\n key + \"_r{}_ack\".format(i) for i in range(n_total_readers)\n ]\n if logger is not None:\n logger.debug(\"Last reader, reset keys: {}\".format(keys_to_reset))\n for reset_key in keys_to_reset:\n val = kv_store.add(reset_key, 1)\n # make sure the key is set\n got_val = int(kv_store.get(reset_key).decode())\n if not val == got_val:\n raise RuntimeError(\n \"Failed to set reader ack key: {}\".format(reset_key)\n )\n if logger is not None:\n logger.debug(\"Set reader ack key: {}\".format(reset_key))\n # set data ack key\n kv_store.add(key + \"_ack\", 1)\n\n if decode:\n return data.decode()\n return data"
},
{
"identifier": "_init_kv_store",
"path": "dynapipe/pipe/data_loader.py",
"snippet": "def _init_kv_store(is_master, logger=None):\n host = os.environ.get(\"DYNAPIPE_KV_HOST\", \"localhost\")\n port = os.environ.get(\"DYNAPIPE_KV_PORT\", 29500)\n if logger is not None:\n logger.debug(\n \"Init kv store, is_master: {}, host: {}, port: {}\".format(\n is_master, host, port\n )\n )\n # kv_store = torch.distributed.TCPStore(\n # \"127.0.0.1\",\n # port,\n # is_master=is_master,\n # timeout=timedelta(seconds=KVSTORE_TIMEOUT),\n # )\n kv_store = RedisKVStore(host, port, is_master=is_master)\n return kv_store, host, port"
},
{
"identifier": "_put_to_shared_kv_store",
"path": "dynapipe/pipe/data_loader.py",
"snippet": "def _put_to_shared_kv_store(\n kv_store: RedisKVStore, key: str, data, logger=None\n):\n # put execution plan into local kv store\n ack_key = key + \"_ack\"\n if logger is not None:\n logger.debug(\"Wait for data ack key: {}\".format(ack_key))\n # wait for ack key\n kv_store.get(ack_key)\n # remove ack key\n _checked_delete_key(kv_store, ack_key, logger=logger)\n if logger is not None:\n logger.debug(\"Set data key: {}\".format(key))\n # set data key\n kv_store.set(key, data)"
}
] | import multiprocessing as mp
import time
import traceback
import traceback
from dynapipe.pipe.data_loader import (
_get_from_shared_kv_store,
_init_kv_store,
_put_to_shared_kv_store,
) | 1,336 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# Note: this test requires torch
# to run this test, exec:
# DYNAPIPE_DEBUG=DEBUG DYNAPIPE_LOGGING_DEBUG_DIR=./test_debug \
# torchrun --standalone --nnodes=1 --nproc_per_node=1 test_kv_store.py
def _producer_process(max_iters, buffer_size=32):
try:
kv_store, _, _ = _init_kv_store(is_master=True)
# set all ack keys
for i in range(buffer_size):
kv_store.set(f"key_{i}_ack".format(i), "1")
kv_store.set(f"key_{i}_r0_ack".format(i), "1")
for i in range(max_iters):
key = "key_{}".format(i % buffer_size)
payload = str(i)
_put_to_shared_kv_store(kv_store, key, payload)
print("[producer] put key: {}".format(key), flush=True)
time.sleep(2)
except Exception as e:
traceback.print_exc()
raise e
def _consumer_process(max_iters, buffer_size=32):
try:
kv_store, _, _ = _init_kv_store(is_master=False)
for i in range(max_iters):
key = "key_{}".format(i % buffer_size)
| # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# Note: this test requires torch
# to run this test, exec:
# DYNAPIPE_DEBUG=DEBUG DYNAPIPE_LOGGING_DEBUG_DIR=./test_debug \
# torchrun --standalone --nnodes=1 --nproc_per_node=1 test_kv_store.py
def _producer_process(max_iters, buffer_size=32):
try:
kv_store, _, _ = _init_kv_store(is_master=True)
# set all ack keys
for i in range(buffer_size):
kv_store.set(f"key_{i}_ack".format(i), "1")
kv_store.set(f"key_{i}_r0_ack".format(i), "1")
for i in range(max_iters):
key = "key_{}".format(i % buffer_size)
payload = str(i)
_put_to_shared_kv_store(kv_store, key, payload)
print("[producer] put key: {}".format(key), flush=True)
time.sleep(2)
except Exception as e:
traceback.print_exc()
raise e
def _consumer_process(max_iters, buffer_size=32):
try:
kv_store, _, _ = _init_kv_store(is_master=False)
for i in range(max_iters):
key = "key_{}".format(i % buffer_size) | payload = _get_from_shared_kv_store( | 0 | 2023-11-08 07:58:20+00:00 | 2k |
dask-contrib/dask-databricks | dask_databricks/tests/test_databricks.py | [
{
"identifier": "DatabricksCluster",
"path": "dask_databricks/databrickscluster.py",
"snippet": "class DatabricksCluster(Cluster):\n \"\"\"Connect to a Dask cluster deployed via databricks.\"\"\"\n\n def __init__(\n self,\n loop: Optional[IOLoop] = None,\n asynchronous: bool = False,\n ):\n self.spark_local_ip = os.getenv(\"SPARK_LOCAL_IP\")\n if self.spark_local_ip is None:\n raise KeyError(\n \"Unable to find expected environment variable SPARK_LOCAL_IP. \"\n \"Are you running this on a Databricks driver node?\"\n )\n try:\n name = spark.conf.get(\"spark.databricks.clusterUsageTags.clusterId\")\n except AttributeError:\n name = \"unknown-databricks-\" + uuid.uuid4().hex[:10]\n super().__init__(name=name, loop=loop, asynchronous=asynchronous)\n\n if not self.called_from_running_loop:\n self._loop_runner.start()\n self.sync(self._start)\n\n async def _start(self):\n self.scheduler_comm = rpc(f\"{self.spark_local_ip}:8786\")\n await super()._start()\n\n @property\n def dashboard_link(self):\n cluster_id = spark.conf.get(\"spark.databricks.clusterUsageTags.clusterId\")\n org_id = spark.conf.get(\"spark.databricks.clusterUsageTags.orgId\")\n return f\"https://dbc-dp-{org_id}.cloud.databricks.com/driver-proxy/o/{org_id}/{cluster_id}/8087/status\""
},
{
"identifier": "get_client",
"path": "dask_databricks/databrickscluster.py",
"snippet": "def get_client():\n \"\"\"Get a Dask client connected to a Databricks cluster.\"\"\"\n return DatabricksCluster().get_client()"
}
] | import os
import pytest
from dask.distributed import Client
from distributed.deploy import Cluster, LocalCluster
from dask_databricks import DatabricksCluster, get_client | 669 |
@pytest.fixture(scope="session")
def dask_cluster():
"""Start a LocalCluster to simulate the cluster that would be started on Databricks."""
return LocalCluster(scheduler_port=8786)
@pytest.fixture
def remove_spark_local_ip():
original_spark_local_ip = os.getenv("SPARK_LOCAL_IP")
if original_spark_local_ip:
del os.environ["SPARK_LOCAL_IP"]
yield None
if original_spark_local_ip:
os.environ["SPARK_LOCAL_IP"] = original_spark_local_ip
@pytest.fixture
def set_spark_local_ip():
original_spark_local_ip = os.getenv("SPARK_LOCAL_IP")
os.environ["SPARK_LOCAL_IP"] = "127.0.0.1"
yield None
if original_spark_local_ip:
os.environ["SPARK_LOCAL_IP"] = original_spark_local_ip
else:
del os.environ["SPARK_LOCAL_IP"]
def test_databricks_cluster_raises_key_error_when_initialised_outside_of_databricks(remove_spark_local_ip):
with pytest.raises(KeyError):
|
@pytest.fixture(scope="session")
def dask_cluster():
"""Start a LocalCluster to simulate the cluster that would be started on Databricks."""
return LocalCluster(scheduler_port=8786)
@pytest.fixture
def remove_spark_local_ip():
original_spark_local_ip = os.getenv("SPARK_LOCAL_IP")
if original_spark_local_ip:
del os.environ["SPARK_LOCAL_IP"]
yield None
if original_spark_local_ip:
os.environ["SPARK_LOCAL_IP"] = original_spark_local_ip
@pytest.fixture
def set_spark_local_ip():
original_spark_local_ip = os.getenv("SPARK_LOCAL_IP")
os.environ["SPARK_LOCAL_IP"] = "127.0.0.1"
yield None
if original_spark_local_ip:
os.environ["SPARK_LOCAL_IP"] = original_spark_local_ip
else:
del os.environ["SPARK_LOCAL_IP"]
def test_databricks_cluster_raises_key_error_when_initialised_outside_of_databricks(remove_spark_local_ip):
with pytest.raises(KeyError): | DatabricksCluster() | 0 | 2023-11-02 13:49:27+00:00 | 2k |
indiefan/king_smith | custom_components/king_smith/coordinator.py | [
{
"identifier": "DOMAIN",
"path": "custom_components/king_smith/const.py",
"snippet": "DOMAIN = \"king_smith\""
},
{
"identifier": "WalkingPadApi",
"path": "custom_components/king_smith/walking_pad.py",
"snippet": "class WalkingPadApi:\n \"\"\"Walkingpad device.\"\"\"\n\n def __init__(self, name: str, ble_device: BLEDevice) -> None:\n \"\"\"Create a new walking pad api instance.\"\"\"\n self._name = name\n self._ble_device = ble_device\n self._ctrl = Controller()\n self._callbacks = []\n self._status_lock = False\n self._last_cmd_time = time.time()\n\n self._connected = False\n self._moving = False\n self._speed = 0\n self._distance = 0\n\n self._register_controller_callbacks()\n\n def _register_controller_callbacks(self):\n self._ctrl.handler_cur_status = self._on_status_update\n\n def _begin_cmd(self) -> asyncio.Lock:\n self._status_lock = True\n return asyncio.Lock()\n\n async def _end_cmd(self):\n await asyncio.sleep(0.75)\n self._last_cmd_time = time.time()\n self._status_lock = False\n\n def _on_status_update(self, sender, status: WalkingPadCurStatus) -> None:\n \"\"\"Update current state.\"\"\"\n # Don't update if we're still running a command or just did (status from device is outdated at first)\n if (\n self._status_lock\n or time.time() - self._last_cmd_time < STATUS_LOCK_ON_CMD_SECONDS\n ):\n return\n\n self._moving = status.speed > 0\n self._speed = status.speed\n self._distance = status.dist\n\n if len(self._callbacks) > 0:\n for callback in self._callbacks:\n callback(status)\n\n def register_status_callback(self, callback) -> None:\n \"\"\"Register a status callback.\"\"\"\n self._callbacks.append(callback)\n\n @property\n def mac(self):\n \"\"\"Mac address.\"\"\"\n return self._ble_device.address\n\n @property\n def name(self):\n \"\"\"Name.\"\"\"\n return self._name\n\n @property\n def connected(self):\n \"\"\"Connected status.\"\"\"\n return self._connected\n\n @property\n def moving(self):\n \"\"\"Whether or not the device is currently moving.\"\"\"\n return self._moving\n\n @property\n def speed(self):\n \"\"\"The current device speed.\"\"\"\n return self._speed\n\n @property\n def distance(self):\n \"\"\"The current device distance.\"\"\"\n return self._distance\n\n async def connect(self) -> None:\n \"\"\"Connect the device.\"\"\"\n lock = self._begin_cmd()\n async with lock:\n await self._ctrl.run(self._ble_device)\n self._connected = True\n await self._end_cmd()\n\n async def disconnect(self) -> None:\n \"\"\"Disconnect the device.\"\"\"\n lock = self._begin_cmd()\n async with lock:\n await self._ctrl.disconnect()\n self._connected = False\n await self._end_cmd()\n\n async def turn_on(self) -> None:\n \"\"\"Turn on the device.\"\"\"\n lock = self._begin_cmd()\n async with lock:\n await self._ctrl.switch_mode(WalkingPad.MODE_MANUAL)\n await self._end_cmd()\n\n async def turn_off(self) -> None:\n \"\"\"Turn off the device.\"\"\"\n lock = self._begin_cmd()\n async with lock:\n await self._ctrl.switch_mode(WalkingPad.MODE_STANDBY)\n await self._end_cmd()\n\n async def start_belt(self) -> None:\n \"\"\"Start the belt.\"\"\"\n lock = self._begin_cmd()\n async with lock:\n await self._ctrl.start_belt()\n self._moving = True\n await self._end_cmd()\n\n async def stop_belt(self) -> None:\n \"\"\"Stop the belt.\"\"\"\n lock = self._begin_cmd()\n async with lock:\n await self._ctrl.stop_belt()\n self._moving = False\n await self._end_cmd()\n\n async def change_speed(self, speed: int) -> None:\n \"\"\"Change the speed.\"\"\"\n lock = self._begin_cmd()\n async with lock:\n await self._ctrl.change_speed(speed)\n self._speed = speed\n await self._end_cmd()\n\n async def update_state(self) -> None:\n \"\"\"Update device state.\"\"\"\n # Grab the lock so we don't run while another 
command is running\n lock = self._begin_cmd()\n async with lock:\n # Disable status lock so our update triggers a refresh\n self._status_lock = False\n await self._ctrl.ask_stats()\n # Skip callback so we don't reset debouncer"
}
] | from datetime import datetime
from homeassistant.core import CALLBACK_TYPE, HassJob, HomeAssistant, callback
from homeassistant.helpers.event import async_call_later
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from ph4_walkingpad.pad import WalkingPadCurStatus
from .const import DOMAIN
from .walking_pad import WalkingPadApi
import logging
import time | 1,339 | """The Walking Pad Coordinator."""
_LOGGER = logging.getLogger(__name__)
NEVER_TIME = -86400.0
DEBOUNCE_SECONDS = 1.0
class WalkingPadCoordinator(DataUpdateCoordinator[None]):
"""Data coordinator for receiving Walking Pad updates."""
def __init__(self, hass: HomeAssistant, walking_pad_api: WalkingPadApi) -> None:
"""Initialise the coordinator."""
super().__init__(
hass,
_LOGGER,
| """The Walking Pad Coordinator."""
_LOGGER = logging.getLogger(__name__)
NEVER_TIME = -86400.0
DEBOUNCE_SECONDS = 1.0
class WalkingPadCoordinator(DataUpdateCoordinator[None]):
"""Data coordinator for receiving Walking Pad updates."""
def __init__(self, hass: HomeAssistant, walking_pad_api: WalkingPadApi) -> None:
"""Initialise the coordinator."""
super().__init__(
hass,
_LOGGER, | name=DOMAIN, | 0 | 2023-11-03 20:45:03+00:00 | 2k |
ndiamant/spice | spice/conditional_histogram.py | [
{
"identifier": "BaseLightning",
"path": "spice/utils.py",
"snippet": "class BaseLightning(LightningModule):\n def _configure_optimizers(self, parameters: Iterator[torch.nn.Parameter]):\n opt = optim.AdamW(\n parameters, lr=self.hparams.lr, weight_decay=self.hparams.wd,\n )\n scheduler = optim.lr_scheduler.CosineAnnealingLR(opt, T_max=self.hparams.max_iter)\n return [opt], [{\"scheduler\": scheduler, \"interval\": \"step\"}]\n\n def configure_optimizers(self):\n return self._configure_optimizers(self.parameters())\n\n def training_step(self, batch: list[torch.Tensor]) -> torch.Tensor:\n return self.get_loss(batch, \"train\")\n\n def validation_step(self, batch: list[torch.Tensor], *args) -> torch.Tensor:\n return self.get_loss(batch, \"val\")\n\n @abstractmethod\n def get_loss(self, batch: list[torch.Tensor], prefix: str) -> torch.Tensor:\n pass\n\n def epoch_log(\n self,\n name: str,\n value: torch.Tensor,\n ) -> None:\n super().log(name, value, on_epoch=True, on_step=False)"
},
{
"identifier": "MLP",
"path": "spice/utils.py",
"snippet": "class MLP(nn.Module):\n def __init__(self, input_dim: int, hidden: int, n_hidden: int, output_dim: int = None):\n super().__init__()\n output_dim = output_dim or hidden\n self.model = nn.Sequential(\n nn.Sequential(nn.Linear(input_dim, hidden), nn.GELU()),\n )\n for _ in range(n_hidden):\n self.model.append(\n nn.Sequential(nn.Linear(hidden, hidden), nn.GELU()),\n )\n self.model.append(nn.Linear(hidden, output_dim))\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n return self.model(x)"
},
{
"identifier": "unique_quantile",
"path": "spice/utils.py",
"snippet": "def unique_quantile(\n x: torch.Tensor, n_bins: int, first_bin_zero: bool = True,\n max_try_n_bins: int = None, verbose: bool = False,\n) -> torch.Tensor:\n \"\"\"binary search to find the right number of bins to yield n_bins unique quantiles\"\"\"\n if len(x.unique()) == 1:\n raise ValueError(\"Must have more than one value to find unique quantiles.\")\n\n def _print(x: Any):\n if not verbose:\n return\n print(x)\n\n min_n_bins = n_bins\n max_try_n_bins = max_try_n_bins or 5 * n_bins\n og_max_try = max_try_n_bins\n unique_quantiles = None\n while min_n_bins <= max_try_n_bins:\n try_n_bins = (min_n_bins + max_try_n_bins) // 2\n first_bin = (0 if first_bin_zero else 1) / try_n_bins\n quantiles = torch.linspace(first_bin, 1, try_n_bins)\n unique_quantiles = torch.unique(x.quantile(quantiles))\n n_unique = unique_quantiles.shape[0]\n _print(f\"tried {try_n_bins=} and got {len(unique_quantiles)=} / {n_bins}\")\n if n_unique == n_bins:\n _print(\"found correct number of bins\")\n return unique_quantiles\n if n_unique > n_bins:\n max_try_n_bins = try_n_bins - 1\n else:\n min_n_bins = try_n_bins + 1\n if min_n_bins >= og_max_try:\n _print(f\"Trying again with 2x max try bins\")\n return unique_quantile(\n x, n_bins, first_bin_zero, max_try_n_bins * 2, verbose=verbose,\n )\n _print(f\"Algorithm failed, returning closest guess.\")\n # likely results in unused bins\n if n_unique < n_bins:\n start, stop = unique_quantiles[-2:]\n lengthened = torch.cat([\n unique_quantiles[:-2],\n torch.linspace(start, stop, n_bins - n_unique + 2)\n ])\n return lengthened\n else:\n deltas = unique_quantiles[1:] - unique_quantiles[:-1]\n min_delta_idx = deltas.argsort()\n idx_to_keep = [\n i for i in list(range(n_unique))\n if i not in min_delta_idx[:n_unique - n_bins]\n ]\n shortened = unique_quantiles[idx_to_keep]\n return shortened"
},
{
"identifier": "score_to_q_hat",
"path": "spice/utils.py",
"snippet": "def score_to_q_hat(score: torch.Tensor, alpha: float) -> float:\n n = score.shape[0]\n quantile = math.ceil((n + 1) * (1 - alpha)) / n\n q_hat = score.quantile(quantile).item()\n return q_hat"
},
{
"identifier": "compute_conformal_metrics",
"path": "spice/utils.py",
"snippet": "def compute_conformal_metrics(\n x_test: torch.Tensor, y_test: torch.Tensor, sizes: torch.Tensor, covered: torch.Tensor,\n) -> dict[str, float]:\n x_test = x_test.cpu()\n y_test = y_test.cpu().squeeze()\n sizes = sizes.cpu().squeeze()\n covered = covered.cpu().squeeze()\n metrics = dict()\n metrics[\"coverage\"] = covered.float().mean().item()\n metrics[\"size\"] = sizes.mean().item()\n metrics[\"wsc_coverage\"] = wsc_unbiased(x_test.cpu().numpy(), covered.cpu().numpy())\n # y stratified coverage\n y_quantiles = unique_quantile(y_test, n_bins=5, first_bin_zero=False)\n discrete_y = torch.bucketize(y_test, y_quantiles)\n metrics[\"y_stratified_coverage\"] = stratified_coverage(covered, discrete_y)\n # size stratified coverage\n try:\n size_quantiles = unique_quantile(sizes / sizes.max(), n_bins=5, first_bin_zero=False)\n discrete_size = torch.bucketize(sizes, size_quantiles)\n metrics[\"size_stratified_coverage\"] = stratified_coverage(covered, discrete_size)\n except ValueError:\n pass # no unique sizes case\n return metrics"
}
] | import copy
import math
import torch
import torch.nn.functional as F
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from tqdm import tqdm
from torch import nn
from spice.utils import (
BaseLightning, MLP, unique_quantile,
score_to_q_hat, compute_conformal_metrics,
) | 1,548 |
def select_bins(y: torch.Tensor, n_bins: int) -> torch.Tensor:
return unique_quantile(y, n_bins, first_bin_zero=False)
def discretize(y: torch.Tensor, bins: torch.Tensor) -> torch.Tensor:
return torch.bucketize(y.clip(max=bins[-1] - 1e-5), boundaries=bins)
|
def select_bins(y: torch.Tensor, n_bins: int) -> torch.Tensor:
return unique_quantile(y, n_bins, first_bin_zero=False)
def discretize(y: torch.Tensor, bins: torch.Tensor) -> torch.Tensor:
return torch.bucketize(y.clip(max=bins[-1] - 1e-5), boundaries=bins)
| class ConditionalHist(BaseLightning): | 0 | 2023-11-01 18:04:29+00:00 | 2k |
nik-sm/com-hom-emg | tests/test_data.py | [
{
"identifier": "get_datasets",
"path": "com_hom_emg/data.py",
"snippet": "def get_datasets(\n per_subj_data: dict,\n fold: int,\n n_train_subj: int,\n n_val_subj: int,\n n_test_subj: int,\n use_preprocessed_data: bool,\n return_subj_names: bool = False, # For testing\n) -> Tuple[TensorDataset, TensorDataset, TensorDataset]:\n \"\"\"\n Separate subjects; some for learning embedding, some for val and some for test.\n (Unseen subjects for val and test)\n Returns train, val, test datasets.\n \"\"\"\n assert fold in range(len(per_subj_data))\n\n def collect_one(_subjs, subj_id_offset=0):\n data, labels, is_single, subj_ids = [], [], [], []\n for subj_id, subj in enumerate(_subjs):\n # Add doubles\n x = per_subj_data[subj][\"data\"]\n y = per_subj_data[subj][\"labels\"]\n data.append(x)\n labels.append(y)\n # NOTE - careful to use bool dtype; used for masking later\n # Single gestures have 4 in one component or the other\n is_sing = np.logical_or(y[:, 0] == 4, y[:, 1] == 4)\n is_single.append(is_sing)\n subj_ids.append((subj_id + subj_id_offset) * np.ones(len(x), dtype=int))\n\n data = np.concatenate(data)\n if use_preprocessed_data:\n data = preprocess(data)\n data = torch.from_numpy(data).float()\n labels = torch.from_numpy(np.concatenate(labels))\n is_single = torch.from_numpy(np.concatenate(is_single))\n subj_ids = torch.from_numpy(np.concatenate(subj_ids))\n data, labels, is_single, subj_ids = shuffle_together(data, labels, is_single, subj_ids)\n return TensorDataset(data, labels, is_single, subj_ids)\n\n subjs = np.roll(list(per_subj_data.keys()), -fold)\n if n_train_subj + n_val_subj + n_test_subj != len(subjs):\n raise ValueError(f\"Num subjects in train/val/test splits must sum to {len(subjs)}\")\n\n test_subj = subjs[0:n_test_subj]\n val_subj = subjs[n_test_subj : n_test_subj + n_val_subj]\n train_subj = subjs[n_test_subj + n_val_subj :]\n\n assert np.intersect1d(train_subj, val_subj).size == 0\n assert np.intersect1d(train_subj, test_subj).size == 0\n assert np.intersect1d(val_subj, test_subj).size == 0\n\n train_set, val_set, test_set = collect_one(train_subj), collect_one(val_subj), collect_one(test_subj)\n logger.info(f\"Train subjects: {train_subj}\")\n logger.info(f\"Val subjects: {val_subj}\")\n logger.info(f\"Test subjects: {test_subj}\")\n logger.info(f\"Train on {len(train_subj)} subjects:\\n{[x.shape for x in train_set.tensors]}\")\n logger.info(f\"Validate on {len(val_subj)} subjects:\\n{[x.shape for x in val_set.tensors]}\")\n logger.info(f\"Test on {len(test_subj)} subjects:\\n{[x.shape for x in test_set.tensors]}\")\n if not return_subj_names:\n return train_set, val_set, test_set\n return train_set, val_set, test_set, train_subj, val_subj, test_subj"
},
{
"identifier": "get_per_subj_data",
"path": "com_hom_emg/data.py",
"snippet": "def get_per_subj_data():\n path = PROJECT_PATH / \"data\" / \"combination-gesture-dataset\" / \"python\"\n per_subj_data = {}\n for subj_idx in range(10):\n per_subj_data[subj_idx] = {\n \"data\": np.load(path / f\"subj{subj_idx}/data.npy\"),\n \"labels\": np.load(path / f\"subj{subj_idx}/labels.npy\"),\n }\n return per_subj_data"
}
] | import torch
from com_hom_emg.data import get_datasets, get_per_subj_data | 1,309 |
def test_get_datasets_disjoint_val_test():
# The subject used for val should be different each time
# Likewise for test
per_subj_data = get_per_subj_data()
all_val_subj = []
all_test_subj = []
n_train = 8
n_val = 1
n_test = 1
expected_train_size = 8 * 1224 # 1224 gestures per subject
expected_val_size = n_val * 1224
expected_test_size = n_test * 1224
def check_contents(dataset, N):
## Check shapes
# data = 8-channel EMG, 962 timesteps (= 500ms at 1926 Hz)
assert dataset.tensors[0].shape == torch.Size([N, 8, 962])
# labels = 2D labels
assert dataset.tensors[1].shape == torch.Size([N, 2])
# is_single = bool labels
assert dataset.tensors[2].shape == torch.Size([N])
# subj_ids = 1d labels
assert dataset.tensors[3].shape == torch.Size([N])
## Check dtypes
assert dataset.tensors[0].dtype == torch.float32
assert dataset.tensors[1].dtype == torch.int64
assert dataset.tensors[2].dtype == torch.bool
assert dataset.tensors[3].dtype == torch.int64
for i in range(10):
|
def test_get_datasets_disjoint_val_test():
# The subject used for val should be different each time
# Likewise for test
per_subj_data = get_per_subj_data()
all_val_subj = []
all_test_subj = []
n_train = 8
n_val = 1
n_test = 1
expected_train_size = 8 * 1224 # 1224 gestures per subject
expected_val_size = n_val * 1224
expected_test_size = n_test * 1224
def check_contents(dataset, N):
## Check shapes
# data = 8-channel EMG, 962 timesteps (= 500ms at 1926 Hz)
assert dataset.tensors[0].shape == torch.Size([N, 8, 962])
# labels = 2D labels
assert dataset.tensors[1].shape == torch.Size([N, 2])
# is_single = bool labels
assert dataset.tensors[2].shape == torch.Size([N])
# subj_ids = 1d labels
assert dataset.tensors[3].shape == torch.Size([N])
## Check dtypes
assert dataset.tensors[0].dtype == torch.float32
assert dataset.tensors[1].dtype == torch.int64
assert dataset.tensors[2].dtype == torch.bool
assert dataset.tensors[3].dtype == torch.int64
for i in range(10): | train_set, val_set, test_set, train_subj, val_subj, test_subj = get_datasets( | 0 | 2023-11-01 21:12:05+00:00 | 2k |
alengwenus/ha-sma-ev-charger | custom_components/smaev/select.py | [
{
"identifier": "DOMAIN",
"path": "custom_components/smaev/const.py",
"snippet": "DOMAIN = \"smaev\""
},
{
"identifier": "SMAEV_COORDINATOR",
"path": "custom_components/smaev/const.py",
"snippet": "SMAEV_COORDINATOR = \"coordinator\""
},
{
"identifier": "SMAEV_DEVICE_INFO",
"path": "custom_components/smaev/const.py",
"snippet": "SMAEV_DEVICE_INFO = \"device_info\""
},
{
"identifier": "SMAEV_PARAMETER",
"path": "custom_components/smaev/const.py",
"snippet": "SMAEV_PARAMETER = \"parameter\""
},
{
"identifier": "SMAEV_POSSIBLE_VALUES",
"path": "custom_components/smaev/const.py",
"snippet": "SMAEV_POSSIBLE_VALUES = \"possibleValues\""
},
{
"identifier": "SMAEV_VALUE",
"path": "custom_components/smaev/const.py",
"snippet": "SMAEV_VALUE = \"value\""
}
] | from dataclasses import dataclass, field
from datetime import datetime
from typing import TYPE_CHECKING
from pysmaev.const import SmaEvChargerParameters
from pysmaev.helpers import get_parameters_channel
from homeassistant.components.select import SelectEntity, SelectEntityDescription
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import EntityCategory
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.device_registry import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.event import async_call_later
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
)
from .const import (
DOMAIN,
SMAEV_COORDINATOR,
SMAEV_DEVICE_INFO,
SMAEV_PARAMETER,
SMAEV_POSSIBLE_VALUES,
SMAEV_VALUE,
)
import logging | 1,047 | """Select platform for SMA EV Charger integration."""
from __future__ import annotations
_LOGGER = logging.getLogger(__name__)
@dataclass
class SmaEvChargerSelectEntityDescription(SelectEntityDescription):
"""Describes SMA EV Charger select entities."""
type: str = ""
channel: str = ""
value_mapping: dict = field(default_factory=dict)
SELECT_DESCRIPTIONS: tuple[SmaEvChargerSelectEntityDescription, ...] = (
SmaEvChargerSelectEntityDescription(
key="operating_mode_of_charge_session",
translation_key="operating_mode_of_charge_session",
type=SMAEV_PARAMETER,
channel="Parameter.Chrg.ActChaMod",
value_mapping={
SmaEvChargerParameters.BOOST_CHARGING: "boost_charging",
SmaEvChargerParameters.OPTIMIZED_CHARGING: "optimized_charging",
SmaEvChargerParameters.SETPOINT_CHARGING: "setpoint_charging",
SmaEvChargerParameters.CHARGE_STOP: "charge_stop",
},
entity_registry_enabled_default=True,
),
SmaEvChargerSelectEntityDescription(
key="led_brightness",
translation_key="led_brightness",
type=SMAEV_PARAMETER,
channel="Parameter.Sys.DevSigBri",
value_mapping={
SmaEvChargerParameters.LED_LOW: "low",
SmaEvChargerParameters.LED_AVERAGE: "average",
SmaEvChargerParameters.LED_HIGH: "high",
},
entity_registry_enabled_default=True,
entity_category=EntityCategory.DIAGNOSTIC,
),
)
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up SMA EV Charger select entities."""
data = hass.data[DOMAIN][config_entry.entry_id]
coordinator = data[SMAEV_COORDINATOR]
device_info = data[SMAEV_DEVICE_INFO]
if TYPE_CHECKING:
assert config_entry.unique_id
entities = []
for entity_description in SELECT_DESCRIPTIONS:
entities.append(
SmaEvChargerSelect(
coordinator, config_entry.unique_id, device_info, entity_description
)
)
async_add_entities(entities)
class SmaEvChargerSelect(CoordinatorEntity, SelectEntity):
"""Representation of a SMA EV Charger select entity."""
entity_description: SmaEvChargerSelectEntityDescription
_attr_has_entity_name = True
def __init__(
self,
coordinator: DataUpdateCoordinator,
config_entry_unique_id: str,
device_info: DeviceInfo,
entity_description: SmaEvChargerSelectEntityDescription,
) -> None:
"""Initialize the sensor."""
super().__init__(coordinator)
self.entity_description = entity_description
self._attr_device_info = device_info
self._attr_unique_id = f"{config_entry_unique_id}-{self.entity_description.key}"
self._attr_options = []
self._attr_current_option = None
self.inv_value_mapping = {
value: key for key, value in self.entity_description.value_mapping.items()
}
@callback
def _handle_coordinator_update(self) -> None:
"""Handle updated data from the coordinator."""
channel = get_parameters_channel(
self.coordinator.data[SMAEV_PARAMETER],
self.entity_description.channel,
)
possible_values = channel[SMAEV_POSSIBLE_VALUES]
| """Select platform for SMA EV Charger integration."""
from __future__ import annotations
_LOGGER = logging.getLogger(__name__)
@dataclass
class SmaEvChargerSelectEntityDescription(SelectEntityDescription):
"""Describes SMA EV Charger select entities."""
type: str = ""
channel: str = ""
value_mapping: dict = field(default_factory=dict)
SELECT_DESCRIPTIONS: tuple[SmaEvChargerSelectEntityDescription, ...] = (
SmaEvChargerSelectEntityDescription(
key="operating_mode_of_charge_session",
translation_key="operating_mode_of_charge_session",
type=SMAEV_PARAMETER,
channel="Parameter.Chrg.ActChaMod",
value_mapping={
SmaEvChargerParameters.BOOST_CHARGING: "boost_charging",
SmaEvChargerParameters.OPTIMIZED_CHARGING: "optimized_charging",
SmaEvChargerParameters.SETPOINT_CHARGING: "setpoint_charging",
SmaEvChargerParameters.CHARGE_STOP: "charge_stop",
},
entity_registry_enabled_default=True,
),
SmaEvChargerSelectEntityDescription(
key="led_brightness",
translation_key="led_brightness",
type=SMAEV_PARAMETER,
channel="Parameter.Sys.DevSigBri",
value_mapping={
SmaEvChargerParameters.LED_LOW: "low",
SmaEvChargerParameters.LED_AVERAGE: "average",
SmaEvChargerParameters.LED_HIGH: "high",
},
entity_registry_enabled_default=True,
entity_category=EntityCategory.DIAGNOSTIC,
),
)
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up SMA EV Charger select entities."""
data = hass.data[DOMAIN][config_entry.entry_id]
coordinator = data[SMAEV_COORDINATOR]
device_info = data[SMAEV_DEVICE_INFO]
if TYPE_CHECKING:
assert config_entry.unique_id
entities = []
for entity_description in SELECT_DESCRIPTIONS:
entities.append(
SmaEvChargerSelect(
coordinator, config_entry.unique_id, device_info, entity_description
)
)
async_add_entities(entities)
class SmaEvChargerSelect(CoordinatorEntity, SelectEntity):
"""Representation of a SMA EV Charger select entity."""
entity_description: SmaEvChargerSelectEntityDescription
_attr_has_entity_name = True
def __init__(
self,
coordinator: DataUpdateCoordinator,
config_entry_unique_id: str,
device_info: DeviceInfo,
entity_description: SmaEvChargerSelectEntityDescription,
) -> None:
"""Initialize the sensor."""
super().__init__(coordinator)
self.entity_description = entity_description
self._attr_device_info = device_info
self._attr_unique_id = f"{config_entry_unique_id}-{self.entity_description.key}"
self._attr_options = []
self._attr_current_option = None
self.inv_value_mapping = {
value: key for key, value in self.entity_description.value_mapping.items()
}
@callback
def _handle_coordinator_update(self) -> None:
"""Handle updated data from the coordinator."""
channel = get_parameters_channel(
self.coordinator.data[SMAEV_PARAMETER],
self.entity_description.channel,
)
possible_values = channel[SMAEV_POSSIBLE_VALUES] | value = channel[SMAEV_VALUE] | 5 | 2023-11-04 07:08:41+00:00 | 2k |
microsoft/promptbase | azureml/components/src/shared/jsonl_utils.py | [
{
"identifier": "JSONLReader",
"path": "azureml/components/src/shared/jsonl_file_utils.py",
"snippet": "class JSONLReader:\n \"\"\"Line-by-line iteration over a JSONL file\n\n Can be used in a 'with' statement, and then iterated over.\n The returned value is a decoded JSON object, rather than\n the line itself\n \"\"\"\n\n def __init__(self, jsonl_file: pathlib.Path, encoding: str):\n self._file_path = jsonl_file\n self._encoding = encoding\n self._jf = None\n\n def __iter__(self):\n return self\n\n def __next__(self) -> dict[str, Any]:\n nxt_line = next(self._jf)\n result = json.loads(nxt_line)\n return result\n\n def __enter__(self):\n self._jf = open(self._file_path, \"r\", encoding=self._encoding)\n return self\n\n def __exit__(self, *args):\n self._jf.close()"
},
{
"identifier": "JSONLWriter",
"path": "azureml/components/src/shared/jsonl_file_utils.py",
"snippet": "class JSONLWriter:\n def __init__(self, jsonl_file: pathlib.Path | None, encoding: str | None):\n self._file_path = jsonl_file\n self._encoding = encoding\n self._jf = None\n\n def __enter__(self):\n if self._file_path is not None:\n self._jf = open(self._file_path, \"w\", encoding=self._encoding)\n else:\n _logger.info(f\"No target path specified, writing to TemporaryFile\")\n self._jf = tempfile.TemporaryFile(mode=\"w\", encoding=\"utf-8-sig\")\n return self\n\n def __exit__(self, *args):\n self._jf.close()\n\n def write_line(self, target_object: dict[str, Any]):\n nxt_line = json.dumps(target_object)\n self._jf.write(nxt_line)\n self._jf.write(\"\\n\")"
},
{
"identifier": "get_standard_logger_for_file",
"path": "azureml/components/src/shared/logging_utils.py",
"snippet": "def get_standard_logger_for_file(\n file_path: str, logging_level=logging.INFO\n) -> logging.Logger:\n _logger = logging.getLogger(pathlib.Path(file_path).name)\n _logger.setLevel(logging_level)\n sh = logging.StreamHandler(stream=sys.stdout)\n sh.setFormatter(\n logging.Formatter(\n \"%(asctime)s - %(name)s [%(levelname)s] : %(message)s\",\n datefmt=\"%Y-%m-%d %H:%M:%S\",\n )\n )\n _logger.addHandler(sh)\n return _logger"
}
] | import json
import pathlib
import tempfile
import traceback
from typing import Any, Callable, Tuple
from .jsonl_file_utils import JSONLReader, JSONLWriter
from .logging_utils import get_standard_logger_for_file | 808 | # Copied from Medprompt.... perhaps those utils should go to PyPi?
_logger = get_standard_logger_for_file(__file__)
def line_map(
*,
map_func: Callable[[dict[str, Any]], dict[str, Any] | None],
source_file: pathlib.Path,
dest_file: pathlib.Path,
source_encoding: str,
dest_encoding: str,
error_file: pathlib.Path | None = None,
error_encoding: str | None = None,
max_errors: int = -1,
) -> Tuple[int, int]:
"""Iterate over a JSONL file, applying map_func to each line"""
assert source_file.exists()
successful_lines = 0
error_lines = 0
| # Copied from Medprompt.... perhaps those utils should go to PyPi?
_logger = get_standard_logger_for_file(__file__)
def line_map(
*,
map_func: Callable[[dict[str, Any]], dict[str, Any] | None],
source_file: pathlib.Path,
dest_file: pathlib.Path,
source_encoding: str,
dest_encoding: str,
error_file: pathlib.Path | None = None,
error_encoding: str | None = None,
max_errors: int = -1,
) -> Tuple[int, int]:
"""Iterate over a JSONL file, applying map_func to each line"""
assert source_file.exists()
successful_lines = 0
error_lines = 0 | with JSONLReader(source_file, source_encoding) as in_file: | 0 | 2023-12-12 08:00:11+00:00 | 2k |
openai/weak-to-strong | weak_to_strong/train.py | [
{
"identifier": "clear_mem",
"path": "weak_to_strong/common.py",
"snippet": "def clear_mem(verbose: bool = False):\n \"\"\"\n This function is used to clear the memory allocated by PyTorch.\n It does so by calling the garbage collector to release unused GPU memory.\n After clearing the memory, it prints the current amount of memory still allocated by PyTorch (post-clean).\n\n Parameters:\n verbose (bool): Whether to print additional information.\n \"\"\"\n\n gc.collect()\n torch.cuda.empty_cache()\n print(\n f\"torch.cuda.memory_allocated: {torch.cuda.memory_allocated(0) / 1024**3:.2f}GB\"\n )\n\n if verbose:\n\n def try_attr(x, a):\n try:\n return getattr(x, a)\n except:\n # amazing that this can cause...\n # (AttributeError, OSError, AssertionError, RuntimeError, ModuleNotFoundError)\n return None\n\n for obj in gc.get_objects():\n if torch.is_tensor(obj) or torch.is_tensor(try_attr(obj, \"data\")):\n print(type(obj), obj.size(), obj.dtype)"
},
{
"identifier": "eval_model_acc",
"path": "weak_to_strong/eval.py",
"snippet": "def eval_model_acc(model: nn.Module, ds: datasets.Dataset, eval_batch_size: int = 16) -> None:\n \"\"\"\n This function evaluates the accuracy of a given model on a given dataset.\n\n Parameters:\n model (nn.Module): The model to be evaluated.\n ds (datasets.Dataset): The dataset on which the model is to be evaluated.\n\n Returns:\n results (list): A list of dictionaries containing the input_ids, ground truth label, predicted label,\n accuracy of prediction, logits and soft label for each example in the dataset.\n \"\"\"\n\n model.eval()\n\n with torch.no_grad():\n results = []\n # for ex in ds:\n for batch in to_batch(ds, eval_batch_size):\n # pad input_ids to common length\n input_ids = torch.nn.utils.rnn.pad_sequence(\n [torch.tensor(ex) for ex in batch[\"input_ids\"]], batch_first=True\n ).to(model.device if hasattr(model, \"device\") else \"cpu\")\n labels = batch[\"soft_label\"]\n # run forward pass\n raw_logits = model(input_ids)\n\n probs = unpack(torch.nn.functional.softmax(raw_logits, dim=-1))\n logits = unpack(raw_logits)\n\n preds = np.argmax(probs, axis=-1)\n labels = np.argmax(labels, axis=-1)\n\n results.extend(\n [\n dict(\n txt=txt,\n input_ids=input_id,\n gt_label=label,\n hard_label=pred,\n acc=label == pred,\n logits=logit,\n soft_label=prob,\n )\n for input_id, txt, label, pred, prob, logit in zip(\n batch[\"input_ids\"], batch[\"txt\"], labels, preds, probs, logits\n )\n ]\n )\n accs = [r[\"acc\"] for r in results]\n print(\"Accuracy:\", np.mean(accs), \"+/-\", np.std(accs) / np.sqrt(len(accs)))\n\n return datasets.Dataset.from_list(results)"
},
{
"identifier": "xent_loss",
"path": "weak_to_strong/loss.py",
"snippet": "class xent_loss(LossFnBase):\n def __call__(\n self, logits: torch.Tensor, labels: torch.Tensor, step_frac: float\n ) -> torch.Tensor:\n \"\"\"\n This function calculates the cross entropy loss between logits and labels.\n\n Parameters:\n logits: The predicted values.\n labels: The actual values.\n step_frac: The fraction of total training steps completed.\n\n Returns:\n The mean of the cross entropy loss.\n \"\"\"\n loss = torch.nn.functional.cross_entropy(logits, labels)\n return loss.mean()"
},
{
"identifier": "TransformerWithHead",
"path": "weak_to_strong/model.py",
"snippet": "class TransformerWithHead(PreTrainedModel):\n \"\"\"\n This class initializes the linear head to zeros\n \"\"\"\n\n def __init__(self, name, linear_probe=False, **kwargs):\n config = AutoConfig.from_pretrained(name, **kwargs)\n super().__init__(config)\n self.num_labels = config.num_labels\n lm = AutoModelForCausalLM.from_pretrained(name, **kwargs)\n self.lm = lm\n self.transformer = lm.transformer\n hidden_size = getattr(config, \"n_embd\", getattr(config, \"hidden_size\", None))\n self.score = torch.nn.Linear(hidden_size, self.num_labels, bias=False).to(\n lm.lm_head.weight.dtype\n )\n torch.nn.init.normal_(self.score.weight, std=0.0)\n self.linear_probe = linear_probe\n\n @classmethod\n def from_pretrained(cls, name, **kwargs):\n return cls(name, **kwargs)\n\n def gradient_checkpointing_enable(self):\n model = self.transformer\n (\n model if hasattr(model, \"save_pretrained\") else model.module\n ).gradient_checkpointing_enable()\n\n def forward(self, input_ids: torch.LongTensor):\n \"\"\"\n Forward pass of the model with a linear head.\n\n Parameters:\n input_ids (torch.LongTensor): Input tensor containing the token ids.\n\n Returns:\n HeadOutput: Output dataclass containing the logits.\n \"\"\"\n input_lens = (input_ids != 0).sum(dim=-1)\n transformer_outputs = self.transformer(input_ids)\n hidden_states = torch.stack(\n [transformer_outputs[0][i, input_lens[i] - 1, :] for i in range(len(input_lens))]\n )\n self.score.to(hidden_states.device)\n if self.linear_probe:\n hidden_states = hidden_states.detach()\n logits = self.score(hidden_states)\n return logits"
}
] | import itertools
import os
import pickle
import time
import datasets
import numpy as np
import torch
import torch_optimizer as toptim
import weak_to_strong.logger as logger
from dataclasses import dataclass
from typing import Callable, Optional
from transformers.modeling_utils import load_sharded_checkpoint
from weak_to_strong.common import clear_mem
from weak_to_strong.eval import eval_model_acc
from weak_to_strong.loss import xent_loss
from weak_to_strong.model import TransformerWithHead | 1,558 |
@dataclass
class ModelConfig:
name: str
default_lr: float
eval_batch_size: int
custom_kwargs: Optional[dict] = None
gradient_checkpointing: bool = False
model_parallel: bool = False
default_optimizer: str = "adam"
def train_model(
model: torch.nn.Module,
ds: datasets.Dataset,
batch_size: int,
lr: float = 1e-5,
|
@dataclass
class ModelConfig:
name: str
default_lr: float
eval_batch_size: int
custom_kwargs: Optional[dict] = None
gradient_checkpointing: bool = False
model_parallel: bool = False
default_optimizer: str = "adam"
def train_model(
model: torch.nn.Module,
ds: datasets.Dataset,
batch_size: int,
lr: float = 1e-5, | loss_fn: Callable = xent_loss, | 2 | 2023-12-13 23:53:13+00:00 | 2k |
SqueezeAILab/LLMCompiler | configs/hotpotqa/configs.py | [
{
"identifier": "OUTPUT_PROMPT",
"path": "configs/hotpotqa/gpt_prompts.py",
"snippet": "OUTPUT_PROMPT = (\n \"Solve a question answering task with interleaving Observation, Thought, and Action steps. Here are some guidelines:\\n\"\n \" - You will be given a Question and some Wikipedia passages, which are the Observations.\\n\"\n \" - Thought needs to reason about the question based on the Observations in 1-2 sentences.\\n\"\n \" - There are cases where the Observations are unclear or irrelevant (in the case wikipedia search was not successful). In such a case where the Observations are unclear, you must make a best guess based on your own knowledge if you don't know the answer. You MUST NEVER say in your thought that you don't know the answer.\\n\\n\"\n \"Action can be only one type:\\n\"\n f\" (1) {JOINNER_FINISH}(answer): returns the answer and finishes the task. \"\n \"Answer should be short and a single item and MUST not be multiple choices. Answer MUST NEVER be 'unclear', 'unknown', 'neither', 'unrelated' or 'undetermined', and otherwise you will be PENALIZED.\\n\"\n \"\\n\"\n \"Here are some examples:\\n\"\n \"\\n\"\n \"Question: Which magazine was started first Arthur's Magazine or First for Women?\\n\"\n \"\\n\"\n \"search(Arthur's Magazine)\\n\"\n \"Observation: Arthur's Magazine (1844-1846) was an American literary periodical published in Philadelphia in the 19th century.\\n\"\n \"search(First for Women (magazine))\\n\"\n \"Observation: First for Women is a woman's magazine published by Bauer Media Group in the USA.[1] The magazine was started in 1989.\\n\"\n \"Thought: First for Women was started in 1989. 1844 (Arthur's Magazine) < 1989 (First for Women), so Arthur's Magazine was started first.\\n\"\n f\"Action: {JOINNER_FINISH}(Arthur's Magazine)\\n\"\n \"###\\n\"\n \"\\n\"\n \"Question: Were Pavel Urysohn and Leonid Levin known for the same type of work?\\n\"\n \"search(Pavel Urysohn)\\n\"\n \"Observation: Pavel Samuilovich Urysohn (February 3, 1898 - August 17, 1924) was a Soviet mathematician who is best known for his contributions in dimension theory.\\n\"\n \"search(Leonid Levin)\\n\"\n \"Observation: Leonid Anatolievich Levin is a Soviet-American mathematician and computer scientist.\\n\"\n \"Thought: Pavel Urysohn is a mathematician. Leonid Levin is a mathematician and computer scientist. So Pavel Urysohn and Leonid Levin have the same type of work.\\n\"\n f\"Action: {JOINNER_FINISH}(yes)\\n\"\n \"###\\n\"\n \"\\n\"\n)"
},
{
"identifier": "PLANNER_PROMPT",
"path": "configs/hotpotqa/gpt_prompts.py",
"snippet": "PLANNER_PROMPT = (\n \"Question: Which magazine was started first Arthur's Magazine or First for Women?\\n\"\n '1. search(\"Arthur\\'s Magazine\")\\n'\n '2. search(\"First for Women (magazine)\")\\n'\n \"Thought: I can answer the question now.\\n\"\n f\"3. join(){END_OF_PLAN}\\n\"\n \"###\\n\"\n \"\\n\"\n \"Question: Were Pavel Urysohn and Leonid Levin known for the same type of work?\\n\"\n '1. search(\"Pavel Urysohn\")\\n'\n '2. search(\"Leonid Levin\")\\n'\n \"Thought: I can answer the question now.\\n\"\n f\"3. join(){END_OF_PLAN}\\n\"\n \"###\\n\"\n \"\\n\"\n)"
}
] | from configs.hotpotqa.gpt_prompts import OUTPUT_PROMPT, PLANNER_PROMPT | 945 |
CONFIGS = {
"default_model": "gpt-3.5-turbo-1106",
"planner_prompt": PLANNER_PROMPT,
|
CONFIGS = {
"default_model": "gpt-3.5-turbo-1106",
"planner_prompt": PLANNER_PROMPT, | "output_prompt": OUTPUT_PROMPT, | 0 | 2023-12-06 21:12:54+00:00 | 2k |
open-compass/MixtralKit | mixtralkit/layers/attention.py | [
{
"identifier": "ModelArgs",
"path": "mixtralkit/layers/utils.py",
"snippet": "class ModelArgs:\n dim: int = 4096\n n_layers: int = 32\n n_heads: int = 32\n n_kv_heads: Optional[int] = None\n vocab_size: int = -1 # defined later by tokenizer\n multiple_of: int = 256 # make SwiGLU hidden layer size multiple of large power of 2\n ffn_dim_multiplier: Optional[float] = None\n norm_eps: float = 1e-5\n\n max_batch_size: int = 32\n max_seq_len: int = 2048"
},
{
"identifier": "repeat_kv",
"path": "mixtralkit/layers/utils.py",
"snippet": "def repeat_kv(x: torch.Tensor, n_rep: int) -> torch.Tensor:\n \"\"\"torch.repeat_interleave(x, dim=2, repeats=n_rep)\"\"\"\n bs, slen, n_kv_heads, head_dim = x.shape\n if n_rep == 1:\n return x\n return (\n x[:, :, :, None, :]\n .expand(bs, slen, n_kv_heads, n_rep, head_dim)\n .reshape(bs, slen, n_kv_heads * n_rep, head_dim)\n )"
},
{
"identifier": "apply_rotary_emb",
"path": "mixtralkit/layers/position_embeding.py",
"snippet": "def apply_rotary_emb(\n xq: torch.Tensor,\n xk: torch.Tensor,\n freqs_cis: torch.Tensor,\n) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Apply rotary embeddings to input tensors using the given frequency tensor.\n\n This function applies rotary embeddings to the given query 'xq' and key 'xk' tensors using the provided\n frequency tensor 'freqs_cis'. The input tensors are reshaped as complex numbers, and the frequency tensor\n is reshaped for broadcasting compatibility. The resulting tensors contain rotary embeddings and are\n returned as real tensors.\n\n Args:\n xq (torch.Tensor): Query tensor to apply rotary embeddings.\n xk (torch.Tensor): Key tensor to apply rotary embeddings.\n freqs_cis (torch.Tensor): Precomputed frequency tensor for complex exponentials.\n\n Returns:\n Tuple[torch.Tensor, torch.Tensor]: Tuple of modified query tensor and key tensor with rotary embeddings.\n\n \n\n \"\"\"\n xq_ = torch.view_as_complex(xq.float().reshape(*xq.shape[:-1], -1, 2))\n xk_ = torch.view_as_complex(xk.float().reshape(*xk.shape[:-1], -1, 2))\n freqs_cis = reshape_for_broadcast(freqs_cis, xq_)\n xq_out = torch.view_as_real(xq_ * freqs_cis).flatten(3)\n xk_out = torch.view_as_real(xk_ * freqs_cis).flatten(3)\n return xq_out.type_as(xq), xk_out.type_as(xk)"
}
] | import math
import torch
import torch.nn.functional as F
import fairscale.nn.model_parallel.initialize as fs_init
from typing import Optional, Tuple
from torch import nn
from .utils import ModelArgs, repeat_kv
from .position_embeding import apply_rotary_emb
from fairscale.nn.model_parallel.layers import (
ColumnParallelLinear,
RowParallelLinear,
) | 1,488 | # Copyright (c) OpenMMLab. and affiliates.
# Copyright (c) Meta Platforms, Inc. and affiliates.
class TorchAttention(nn.Module):
"""Multi-head attention module."""
def __init__(self, args: ModelArgs):
"""
Initialize the Attention module.
Args:
args (ModelArgs): Model configuration parameters.
Attributes:
n_kv_heads (int): Number of key and value heads.
n_local_heads (int): Number of local query heads.
n_local_kv_heads (int): Number of local key and value heads.
n_rep (int): Number of repetitions for local heads.
head_dim (int): Dimension size of each attention head.
wq (ColumnParallelLinear): Linear transformation for queries.
wk (ColumnParallelLinear): Linear transformation for keys.
wv (ColumnParallelLinear): Linear transformation for values.
wo (RowParallelLinear): Linear transformation for output.
cache_k (torch.Tensor): Cached keys for attention.
cache_v (torch.Tensor): Cached values for attention.
"""
super().__init__()
self.n_kv_heads = args.n_heads if args.n_kv_heads is None else args.n_kv_heads
model_parallel_size = 1
self.n_local_heads = args.n_heads // model_parallel_size
self.n_local_kv_heads = self.n_kv_heads // model_parallel_size
self.n_rep = self.n_local_heads // self.n_local_kv_heads
self.head_dim = args.dim // args.n_heads
self.wq = nn.Linear(
args.dim,
args.n_heads * self.head_dim,
bias=False,
)
self.wk = nn.Linear(
args.dim,
self.n_kv_heads * self.head_dim,
bias=False,
)
self.wv = nn.Linear(
args.dim,
self.n_kv_heads * self.head_dim,
bias=False,
)
self.wo = nn.Linear(
args.n_heads * self.head_dim,
args.dim,
bias=False,
)
self.cache_k = torch.zeros(
(
args.max_batch_size,
args.max_seq_len,
self.n_local_kv_heads,
self.head_dim,
)
).cuda()
self.cache_v = torch.zeros(
(
args.max_batch_size,
args.max_seq_len,
self.n_local_kv_heads,
self.head_dim,
)
).cuda()
def forward(
self,
x: torch.Tensor,
start_pos: int,
freqs_cis: torch.Tensor,
mask: Optional[torch.Tensor],
):
"""
Forward pass of the attention module.
Args:
x (torch.Tensor): Input tensor.
start_pos (int): Starting position for caching.
freqs_cis (torch.Tensor): Precomputed frequency tensor.
mask (torch.Tensor, optional): Attention mask tensor.
Returns:
torch.Tensor: Output tensor after attention.
"""
bsz, seqlen, _ = x.shape
xq, xk, xv = self.wq(x), self.wk(x), self.wv(x)
xq = xq.view(bsz, seqlen, self.n_local_heads, self.head_dim)
xk = xk.view(bsz, seqlen, self.n_local_kv_heads, self.head_dim)
xv = xv.view(bsz, seqlen, self.n_local_kv_heads, self.head_dim)
| # Copyright (c) OpenMMLab. and affiliates.
# Copyright (c) Meta Platforms, Inc. and affiliates.
class TorchAttention(nn.Module):
"""Multi-head attention module."""
def __init__(self, args: ModelArgs):
"""
Initialize the Attention module.
Args:
args (ModelArgs): Model configuration parameters.
Attributes:
n_kv_heads (int): Number of key and value heads.
n_local_heads (int): Number of local query heads.
n_local_kv_heads (int): Number of local key and value heads.
n_rep (int): Number of repetitions for local heads.
head_dim (int): Dimension size of each attention head.
wq (ColumnParallelLinear): Linear transformation for queries.
wk (ColumnParallelLinear): Linear transformation for keys.
wv (ColumnParallelLinear): Linear transformation for values.
wo (RowParallelLinear): Linear transformation for output.
cache_k (torch.Tensor): Cached keys for attention.
cache_v (torch.Tensor): Cached values for attention.
"""
super().__init__()
self.n_kv_heads = args.n_heads if args.n_kv_heads is None else args.n_kv_heads
model_parallel_size = 1
self.n_local_heads = args.n_heads // model_parallel_size
self.n_local_kv_heads = self.n_kv_heads // model_parallel_size
self.n_rep = self.n_local_heads // self.n_local_kv_heads
self.head_dim = args.dim // args.n_heads
self.wq = nn.Linear(
args.dim,
args.n_heads * self.head_dim,
bias=False,
)
self.wk = nn.Linear(
args.dim,
self.n_kv_heads * self.head_dim,
bias=False,
)
self.wv = nn.Linear(
args.dim,
self.n_kv_heads * self.head_dim,
bias=False,
)
self.wo = nn.Linear(
args.n_heads * self.head_dim,
args.dim,
bias=False,
)
self.cache_k = torch.zeros(
(
args.max_batch_size,
args.max_seq_len,
self.n_local_kv_heads,
self.head_dim,
)
).cuda()
self.cache_v = torch.zeros(
(
args.max_batch_size,
args.max_seq_len,
self.n_local_kv_heads,
self.head_dim,
)
).cuda()
def forward(
self,
x: torch.Tensor,
start_pos: int,
freqs_cis: torch.Tensor,
mask: Optional[torch.Tensor],
):
"""
Forward pass of the attention module.
Args:
x (torch.Tensor): Input tensor.
start_pos (int): Starting position for caching.
freqs_cis (torch.Tensor): Precomputed frequency tensor.
mask (torch.Tensor, optional): Attention mask tensor.
Returns:
torch.Tensor: Output tensor after attention.
"""
bsz, seqlen, _ = x.shape
xq, xk, xv = self.wq(x), self.wk(x), self.wv(x)
xq = xq.view(bsz, seqlen, self.n_local_heads, self.head_dim)
xk = xk.view(bsz, seqlen, self.n_local_kv_heads, self.head_dim)
xv = xv.view(bsz, seqlen, self.n_local_kv_heads, self.head_dim)
| xq, xk = apply_rotary_emb(xq, xk, freqs_cis=freqs_cis) | 2 | 2023-12-09 15:05:26+00:00 | 2k |
aymenfurter/microagents | gradio_ui/agent_manager.py | [
{
"identifier": "MicroAgentManager",
"path": "agents/microagent_manager.py",
"snippet": "class MicroAgentManager:\n \"\"\"\n Manages the creation and retrieval of micro agents.\n \"\"\"\n\n def __init__(self, api_key: str, max_agents: int = 20, db_filename=\"agents.db\"):\n self.api_key = api_key\n self.max_agents = max_agents\n self.openai_wrapper = OpenAIAPIWrapper(api_key)\n self.agent_persistence = AgentPersistenceManager(db_filename)\n self.agent_lifecycle = AgentLifecycle(self.openai_wrapper, self.agent_persistence, max_agents)\n self.load_agents()\n\n def cleanup_agents(self):\n \"\"\"Remove all agents with status stopped = True\"\"\"\n self.agent_lifecycle.cleanup_agents()\n \n def load_agents(self):\n \"\"\"Loads agents from the database.\"\"\"\n loaded_agents = self.agent_persistence.load_all_agents(self.agent_lifecycle, self.openai_wrapper)\n self.agent_lifecycle.agents.extend(loaded_agents)\n logger.info(f\"Loaded {len(loaded_agents)} agents from the database.\")\n\n\n def get_agents(self) -> List[Any]:\n \"\"\"Returns the list of agents.\"\"\"\n self.cleanup_agents()\n return self.agent_lifecycle.agents\n\n def create_agents(self) -> None:\n \"\"\"Creates prime agents and logs the process.\"\"\"\n logger.info(\"Creating agents...\")\n try:\n self.agent_lifecycle.create_prime_agent()\n logger.info(\"Agents created successfully.\")\n except Exception as e:\n logger.exception(f\"Error in creating agents: {e}\")\n raise\n \n def get_or_create_agent(self, purpose: str, depth: int, sample_input: str) -> Any:\n \"\"\"\n Retrieves an existing agent or creates a new one based on the given purpose.\n \"\"\"\n logger.info(f\"Getting or creating agent for purpose: {purpose}\")\n try:\n agent = self.agent_lifecycle.get_or_create_agent(purpose, depth, sample_input)\n logger.info(f\"Agent for purpose '{purpose}' retrieved or created.\")\n return agent\n except Exception as e:\n logging.exception(f\"Error in getting or creating agent: {e}\")\n raise\n\n\n def display_agent_status(self):\n \"\"\"Displays the current status of all agents.\"\"\"\n for agent in self.get_agents():\n logger.info(f\"Agent {agent.purpose}: Status = {agent.current_status}, Evolve Count = {agent.evolve_count}\")\n\n def display_active_agent_tree(self):\n \"\"\"Displays a tree view of active agent relationships.\"\"\"\n for agent in self.get_agents():\n if agent.active_agents:\n logger.info(f\"Agent {agent.purpose} is calling: {agent.active_agents}\")\n else:\n logger.info(f\"Agent {agent.purpose} is currently idle.\")"
},
{
"identifier": "MicroAgent",
"path": "agents/microagent.py",
"snippet": "class MicroAgent:\n \"\"\"\n The MicroAgent class encapsulates the behavior of a small, purpose-driven agent\n that interacts with the OpenAI API.\n \"\"\"\n\n def __init__(self, initial_prompt, purpose, depth, agent_lifecycle, openai_wrapper, max_depth=3, bootstrap_agent=False, is_prime=False, purpose_embedding=None) :\n self.dynamic_prompt = initial_prompt\n self.purpose = purpose\n self.purpose_embedding = purpose_embedding \n self.depth = depth\n self.max_depth = max_depth\n self.usage_count = 0\n self.working_agent = bootstrap_agent\n self.agent_lifecycle = agent_lifecycle\n self.openai_wrapper = openai_wrapper\n self.evolve_count = 0\n self.number_of_code_executions = 0 \n self.current_status = None\n self.active_agents = {} \n self.last_input = \"\"\n self.last_output = \"\"\n self.last_conversation = \"\"\n self.stopped = False\n self.is_prime = is_prime\n\n # Initialize components used by the agent\n self.agent_evaluator = AgentEvaluator(self.openai_wrapper)\n self.code_executor = CodeExecution()\n self.agent_responder = AgentResponse(self.openai_wrapper, self.agent_lifecycle, self.code_executor, self, agent_lifecycle, depth)\n self.agent_similarity = AgentSimilarity(self.openai_wrapper, self.agent_lifecycle.agents)\n self.prompt_evolver = PromptEvolution(self.openai_wrapper, self.agent_lifecycle)\n self.response_extractor = ResponseExtraction(self.openai_wrapper)\n self.response_handler = ResponseHandler(self)\n\n def update_status(self, status):\n \"\"\"Update the agent's current status.\"\"\"\n self.check_for_stopped()\n self.current_status = status\n logger.info(f\"Agent {self.purpose} status updated to: {status}\")\n\n def update_active_agents(self, calling_agent, called_agent=None):\n \"\"\"Update the tree view of active agents.\"\"\"\n if called_agent:\n self.active_agents[calling_agent] = called_agent\n else:\n self.active_agents.pop(calling_agent, None)\n logger.info(f\"Active agents updated: {self.active_agents}\")\n\n def set_agent_as_working(self):\n \"\"\"Set the agent as a working agent.\"\"\"\n self.working_agent = True\n self.agent_lifecycle.save_agent(self)\n logger.info(f\"Agent {self.purpose} set as working agent.\")\n\n def is_working_agent(self):\n return self.working_agent\n\n def set_agent_deleted(self): \n \"\"\"Set the agent as deleted.\"\"\"\n self.working_agent = False\n self.current_status = \"❌ Deleted\"\n self.stopped = True\n logger.info(f\"Agent {self.purpose} set as deleted.\")\n\n def check_for_stopped(self):\n \"\"\"Check if the agent has been stopped.\"\"\"\n if self.stopped:\n raise AgentStoppedException(\"Agent stopped.\")\n\n def respond(self, input_text, evolve_count=0):\n \"\"\"\n Generate a response to the given input text.\n \"\"\"\n return self.response_handler.respond(input_text, evolve_count)"
}
] | import logging
from typing import Any, List
from agents.microagent_manager import MicroAgentManager
from agents.microagent import MicroAgent | 1,527 |
logger = logging.getLogger(__name__)
class GradioAgentManager:
"""
A wrapper class for interacting with MicroAgentManager in a Gradio interface.
"""
def __init__(self, api_key: str):
self.manager = MicroAgentManager(api_key)
self.manager.create_agents()
def get_agents_info(self) -> List[dict]:
"""
Retrieve information about all agents for display in Gradio.
"""
agents = self.manager.get_agents()
return [self.format_agent_info(agent) for agent in agents]
|
logger = logging.getLogger(__name__)
class GradioAgentManager:
"""
A wrapper class for interacting with MicroAgentManager in a Gradio interface.
"""
def __init__(self, api_key: str):
self.manager = MicroAgentManager(api_key)
self.manager.create_agents()
def get_agents_info(self) -> List[dict]:
"""
Retrieve information about all agents for display in Gradio.
"""
agents = self.manager.get_agents()
return [self.format_agent_info(agent) for agent in agents]
| def format_agent_info(self, agent: MicroAgent) -> dict: | 1 | 2023-12-11 08:17:09+00:00 | 2k |
bytedance/ImageDream | extern/ldm_zero123/thirdp/psp/model_irse.py | [
{
"identifier": "Flatten",
"path": "extern/ldm_zero123/thirdp/psp/helpers.py",
"snippet": "class Flatten(Module):\n def forward(self, input):\n return input.view(input.size(0), -1)"
},
{
"identifier": "bottleneck_IR",
"path": "extern/ldm_zero123/thirdp/psp/helpers.py",
"snippet": "class bottleneck_IR(Module):\n def __init__(self, in_channel, depth, stride):\n super(bottleneck_IR, self).__init__()\n if in_channel == depth:\n self.shortcut_layer = MaxPool2d(1, stride)\n else:\n self.shortcut_layer = Sequential(\n Conv2d(in_channel, depth, (1, 1), stride, bias=False),\n BatchNorm2d(depth),\n )\n self.res_layer = Sequential(\n BatchNorm2d(in_channel),\n Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False),\n PReLU(depth),\n Conv2d(depth, depth, (3, 3), stride, 1, bias=False),\n BatchNorm2d(depth),\n )\n\n def forward(self, x):\n shortcut = self.shortcut_layer(x)\n res = self.res_layer(x)\n return res + shortcut"
},
{
"identifier": "bottleneck_IR_SE",
"path": "extern/ldm_zero123/thirdp/psp/helpers.py",
"snippet": "class bottleneck_IR_SE(Module):\n def __init__(self, in_channel, depth, stride):\n super(bottleneck_IR_SE, self).__init__()\n if in_channel == depth:\n self.shortcut_layer = MaxPool2d(1, stride)\n else:\n self.shortcut_layer = Sequential(\n Conv2d(in_channel, depth, (1, 1), stride, bias=False),\n BatchNorm2d(depth),\n )\n self.res_layer = Sequential(\n BatchNorm2d(in_channel),\n Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False),\n PReLU(depth),\n Conv2d(depth, depth, (3, 3), stride, 1, bias=False),\n BatchNorm2d(depth),\n SEModule(depth, 16),\n )\n\n def forward(self, x):\n shortcut = self.shortcut_layer(x)\n res = self.res_layer(x)\n return res + shortcut"
},
{
"identifier": "get_blocks",
"path": "extern/ldm_zero123/thirdp/psp/helpers.py",
"snippet": "def get_blocks(num_layers):\n if num_layers == 50:\n blocks = [\n get_block(in_channel=64, depth=64, num_units=3),\n get_block(in_channel=64, depth=128, num_units=4),\n get_block(in_channel=128, depth=256, num_units=14),\n get_block(in_channel=256, depth=512, num_units=3),\n ]\n elif num_layers == 100:\n blocks = [\n get_block(in_channel=64, depth=64, num_units=3),\n get_block(in_channel=64, depth=128, num_units=13),\n get_block(in_channel=128, depth=256, num_units=30),\n get_block(in_channel=256, depth=512, num_units=3),\n ]\n elif num_layers == 152:\n blocks = [\n get_block(in_channel=64, depth=64, num_units=3),\n get_block(in_channel=64, depth=128, num_units=8),\n get_block(in_channel=128, depth=256, num_units=36),\n get_block(in_channel=256, depth=512, num_units=3),\n ]\n else:\n raise ValueError(\n \"Invalid number of layers: {}. Must be one of [50, 100, 152]\".format(\n num_layers\n )\n )\n return blocks"
},
{
"identifier": "l2_norm",
"path": "extern/ldm_zero123/thirdp/psp/helpers.py",
"snippet": "def l2_norm(input, axis=1):\n norm = torch.norm(input, 2, axis, True)\n output = torch.div(input, norm)\n return output"
}
] | from torch.nn import (
BatchNorm1d,
BatchNorm2d,
Conv2d,
Dropout,
Linear,
Module,
PReLU,
Sequential,
)
from extern.ldm_zero123.thirdp.psp.helpers import (
Flatten,
bottleneck_IR,
bottleneck_IR_SE,
get_blocks,
l2_norm,
) | 1,205 | # https://github.com/eladrich/pixel2style2pixel
"""
Modified Backbone implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch)
"""
class Backbone(Module):
def __init__(self, input_size, num_layers, mode="ir", drop_ratio=0.4, affine=True):
super(Backbone, self).__init__()
assert input_size in [112, 224], "input_size should be 112 or 224"
assert num_layers in [50, 100, 152], "num_layers should be 50, 100 or 152"
assert mode in ["ir", "ir_se"], "mode should be ir or ir_se"
blocks = get_blocks(num_layers)
if mode == "ir":
| # https://github.com/eladrich/pixel2style2pixel
"""
Modified Backbone implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch)
"""
class Backbone(Module):
def __init__(self, input_size, num_layers, mode="ir", drop_ratio=0.4, affine=True):
super(Backbone, self).__init__()
assert input_size in [112, 224], "input_size should be 112 or 224"
assert num_layers in [50, 100, 152], "num_layers should be 50, 100 or 152"
assert mode in ["ir", "ir_se"], "mode should be ir or ir_se"
blocks = get_blocks(num_layers)
if mode == "ir": | unit_module = bottleneck_IR | 1 | 2023-12-13 21:09:37+00:00 | 2k |
TencentARC/MotionCtrl | lvdm/modules/attention_temporal.py | [
{
"identifier": "checkpoint",
"path": "lvdm/common.py",
"snippet": "def checkpoint(func, inputs, params, flag):\n \"\"\"\n Evaluate a function without caching intermediate activations, allowing for\n reduced memory at the expense of extra compute in the backward pass.\n :param func: the function to evaluate.\n :param inputs: the argument sequence to pass to `func`.\n :param params: a sequence of parameters `func` depends on but does not\n explicitly take as arguments.\n :param flag: if False, disable gradient checkpointing.\n \"\"\"\n if flag:\n try:\n return ckpt(func, *inputs)\n except:\n args = tuple(inputs) + tuple(params)\n return CheckpointFunction.apply(func, len(inputs), *args)\n else:\n return func(*inputs)"
},
{
"identifier": "exists",
"path": "lvdm/common.py",
"snippet": "def exists(val):\n return val is not None"
},
{
"identifier": "uniq",
"path": "lvdm/common.py",
"snippet": "def uniq(arr):\n return{el: True for el in arr}.keys()"
},
{
"identifier": "default",
"path": "lvdm/common.py",
"snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d"
},
{
"identifier": "max_neg_value",
"path": "lvdm/common.py",
"snippet": "def max_neg_value(t):\n return -torch.finfo(t.dtype).max"
},
{
"identifier": "init_",
"path": "lvdm/common.py",
"snippet": "def init_(tensor):\n dim = tensor.shape[-1]\n std = 1 / math.sqrt(dim)\n tensor.uniform_(-std, std)\n return tensor"
},
{
"identifier": "conv_nd",
"path": "lvdm/basics.py",
"snippet": "def conv_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D convolution module.\n \"\"\"\n if dims == 1:\n return nn.Conv1d(*args, **kwargs)\n elif dims == 2:\n return nn.Conv2d(*args, **kwargs)\n elif dims == 3:\n return nn.Conv3d(*args, **kwargs)\n raise ValueError(f\"unsupported dimensions: {dims}\")"
},
{
"identifier": "zero_module",
"path": "lvdm/basics.py",
"snippet": "def zero_module(module):\n \"\"\"\n Zero out the parameters of a module and return it.\n \"\"\"\n for p in module.parameters():\n p.detach().zero_()\n return module"
},
{
"identifier": "normalization",
"path": "lvdm/basics.py",
"snippet": "def normalization(channels, num_groups=32):\n \"\"\"\n Make a standard normalization layer.\n :param channels: number of input channels.\n :return: an nn.Module for normalization.\n \"\"\"\n return GroupNormSpecific(num_groups, channels)"
}
] | import math
import torch
import torch as th
import torch.nn.functional as F
import xformers
import xformers.ops
from inspect import isfunction
from torch import nn, einsum
from einops import rearrange, repeat
from lvdm.common import (
checkpoint,
exists,
uniq,
default,
max_neg_value,
init_
)
from lvdm.basics import (
conv_nd,
zero_module,
normalization
) | 842 |
try:
    import xformers
    import xformers.ops
XFORMERS_IS_AVAILBLE = True
except:
XFORMERS_IS_AVAILBLE = False
class GEGLU(nn.Module):
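    # GEGLU: the input is projected to twice the output width, split in half,
    # and one half is gated by GELU of the other (x * gelu(gate)).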
def __init__(self, dim_in, dim_out):
super().__init__()
self.proj = nn.Linear(dim_in, dim_out * 2)
def forward(self, x):
x, gate = self.proj(x).chunk(2, dim=-1)
return x * F.gelu(gate)
class FeedForward(nn.Module):
def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.):
super().__init__()
inner_dim = int(dim * mult)
|
try:
    import xformers
    import xformers.ops
XFORMERS_IS_AVAILBLE = True
except:
XFORMERS_IS_AVAILBLE = False
class GEGLU(nn.Module):
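    # GEGLU: the input is projected to twice the output width, split in half,
    # and one half is gated by GELU of the other (x * gelu(gate)).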
def __init__(self, dim_in, dim_out):
super().__init__()
self.proj = nn.Linear(dim_in, dim_out * 2)
def forward(self, x):
x, gate = self.proj(x).chunk(2, dim=-1)
return x * F.gelu(gate)
class FeedForward(nn.Module):
def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.):
super().__init__()
inner_dim = int(dim * mult) | dim_out = default(dim_out, dim) | 3 | 2023-12-06 07:27:45+00:00 | 2k |
s-casci/tinyzero | tictactoe/one_dim/eval.py | [
{
"identifier": "LinearNetwork",
"path": "models.py",
"snippet": "class LinearNetwork(nn.Module):\n def __init__(self, input_shape, action_space, first_layer_size=512, second_layer_size=256):\n super().__init__()\n self.first_layer = nn.Linear(input_shape[0], first_layer_size)\n self.second_layer = nn.Linear(first_layer_size, second_layer_size)\n self.value_head = nn.Linear(second_layer_size, 1)\n self.policy_head = nn.Linear(second_layer_size, action_space)\n\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n self.to(self.device)\n\n def __call__(self, observations):\n self.train()\n x = F.relu(self.first_layer(observations))\n x = F.relu(self.second_layer(x))\n value = F.tanh(self.value_head(x))\n log_policy = F.log_softmax(self.policy_head(x), dim=-1)\n return value, log_policy\n\n def value_forward(self, observation):\n self.eval()\n with torch.no_grad():\n x = F.relu(self.first_layer(observation))\n x = F.relu(self.second_layer(x))\n value = F.tanh(self.value_head(x))\n return value\n\n def policy_forward(self, observation):\n self.eval()\n with torch.no_grad():\n x = F.relu(self.first_layer(observation))\n x = F.relu(self.second_layer(x))\n log_policy = F.softmax(self.policy_head(x), dim=-1)\n return log_policy"
},
{
"identifier": "AlphaZeroAgent",
"path": "agents.py",
"snippet": "class AlphaZeroAgent:\n def __init__(self, model):\n self.model = model\n\n def value_fn(self, game):\n observation = torch.tensor(game.to_observation(), device=self.model.device, requires_grad=False)\n value = self.model.value_forward(observation)\n return value.item()\n\n def policy_fn(self, game):\n observation = torch.tensor(game.to_observation(), device=self.model.device, requires_grad=False)\n policy = self.model.policy_forward(observation)\n return policy.cpu().numpy()"
},
{
"identifier": "ClassicMCTSAgent",
"path": "agents.py",
"snippet": "class ClassicMCTSAgent:\n @staticmethod\n def value_fn(game):\n game = copy.deepcopy(game)\n while first_person_result := game.get_first_person_result() is None:\n game.step(np.random.choice(game.get_legal_actions()))\n return first_person_result\n\n @staticmethod\n def policy_fn(game):\n return np.ones(game.action_space) / game.action_space"
},
{
"identifier": "pit",
"path": "mcts.py",
"snippet": "def pit(game, agent1, agent2, agent1_play_kwargs, agent2_play_kwargs):\n current_agent, other_agent = agent1, agent2\n current_agent_play_kwargs, other_agent_play_kwargs = agent1_play_kwargs, agent2_play_kwargs\n while (result := game.get_result()) is None:\n action = play(game, current_agent, **current_agent_play_kwargs)\n game.step(action)\n current_agent, other_agent = other_agent, current_agent\n current_agent_play_kwargs, other_agent_play_kwargs = other_agent_play_kwargs, current_agent_play_kwargs\n return result"
}
] | from game import TicTacToe
from train import OUT_DIR, SEARCH_ITERATIONS
from tqdm import tqdm
from models import LinearNetwork # noqa: E402
from agents import AlphaZeroAgent, ClassicMCTSAgent # noqa: E402
from mcts import pit # noqa: E402
import torch
import os
import sys | 948 |
sys.path.append(os.getcwd())
EVAL_GAMES = 100
if __name__ == "__main__":
game = TicTacToe()
model = LinearNetwork(game.observation_shape, game.action_space)
model.load_state_dict(torch.load(f"{OUT_DIR}/model.pth"))
agent = AlphaZeroAgent(model)
agent_play_kwargs = {"search_iterations": SEARCH_ITERATIONS * 2, "c_puct": 1.0, "dirichlet_alpha": None}
print(f"Playing {EVAL_GAMES} games against itself")
results = {0: 0, 1: 0, -1: 0}
for _ in tqdm(range(EVAL_GAMES)):
game.reset()
|
sys.path.append(os.getcwd())
EVAL_GAMES = 100
if __name__ == "__main__":
game = TicTacToe()
model = LinearNetwork(game.observation_shape, game.action_space)
model.load_state_dict(torch.load(f"{OUT_DIR}/model.pth"))
agent = AlphaZeroAgent(model)
agent_play_kwargs = {"search_iterations": SEARCH_ITERATIONS * 2, "c_puct": 1.0, "dirichlet_alpha": None}
print(f"Playing {EVAL_GAMES} games against itself")
results = {0: 0, 1: 0, -1: 0}
for _ in tqdm(range(EVAL_GAMES)):
game.reset() | result = pit( | 3 | 2023-12-14 11:36:50+00:00 | 2k |
facebookresearch/PurpleLlama | CybersecurityBenchmarks/insecure_code_detector/tests/test_python_insecure_code_detector.py | [
{
"identifier": "Language",
"path": "CybersecurityBenchmarks/insecure_code_detector/languages.py",
"snippet": "class Language(str, enum.Enum):\n C = \"c\"\n CPP = \"cpp\"\n CSHARP = \"csharp\"\n HACK = \"hack\"\n JAVA = \"java\"\n JAVASCRIPT = \"javascript\"\n KOTLIN = \"kotlin\"\n OBJECTIVE_C = \"objective_c\"\n OBJECTIVE_CPP = \"objective_cpp\"\n PHP = \"php\"\n PYTHON = \"python\"\n RUBY = \"ruby\"\n RUST = \"rust\"\n SWIFT = \"swift\"\n XML = \"xml\"\n\n def __str__(self) -> str:\n return self.name.lower()"
},
{
"identifier": "InsecureCodeDetectorTest",
"path": "CybersecurityBenchmarks/insecure_code_detector/tests/insecure_code_detector_test.py",
"snippet": "class InsecureCodeDetectorTest(unittest.IsolatedAsyncioTestCase, ABC):\n @unittest.skipUnless(oss.ENABLE_REGEX_ANALYZER, \"Regex analyzer not enabled\")\n async def run_regex_tests(\n self, language: Language, tests: list[tuple[str, int]]\n ) -> None:\n for test_case in tests:\n test_input, expected_count = test_case\n result = await insecure_code_detector.analyze(language, test_input)\n print(test_case)\n print(result)\n self.assertEqual(len(result), expected_count)\n\n @unittest.skipUnless(oss.ENABLE_WEGGLI, \"Weggli analyzer not enabled\")\n async def run_weggli_tests(\n self, language: Language, tests: list[tuple[str, int]]\n ) -> None:\n for test_case in tests:\n test_input, expected_count = test_case\n result = await insecure_code_detector.analyze(\n language, test_input, \"void foo(){\\n\", \"\\n}\"\n )\n self.assertEqual(\n len(result), expected_count, \"Test failed: {}\".format(test_input)\n )\n\n @unittest.skipUnless(oss.ENABLE_SEMGREP, \"Semgrep analyzer not enabled\")\n async def run_semgrep_tests(\n self, language: Language, tests: list[tuple[str, int]]\n ) -> None:\n for test_case in tests:\n test_input, expected_count = test_case\n result = await insecure_code_detector.analyze(language, test_input)\n self.assertEqual(\n len(result), expected_count, \"Test failed: {}\".format(test_input)\n )"
}
] | from ..languages import Language
from .insecure_code_detector_test import InsecureCodeDetectorTest | 716 | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# the following test cases contain an input string, and the corresponding number of expected insecure pattern matches
PYTHON_TEST_CASES = [
("import os", 0),
("import hashlib", 0),
("from hashlib import sha256", 0),
("hashlib.new('sha1')", 1),
("hashlib.md5()", 1),
]
| # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# the following test cases contain an input string, and the corresponding number of expected insecure pattern matches
PYTHON_TEST_CASES = [
("import os", 0),
("import hashlib", 0),
("from hashlib import sha256", 0),
("hashlib.new('sha1')", 1),
("hashlib.md5()", 1),
]
| class TestPythonInsecureCodeDetector(InsecureCodeDetectorTest): | 1 | 2023-12-06 21:29:41+00:00 | 2k |
allenai/unified-io-2 | t5x/examples/unified_io/modality_processing.py | [
{
"identifier": "AudioEncoder",
"path": "t5x/examples/unified_io/audio_encoder.py",
"snippet": "class AudioEncoder(nn.Module):\n \"\"\"Encodes raw audio spectrograms as features\"\"\"\n config: Union[ImageVitFeatureConfig, AudioVitFeatureConfig]\n\n def setup(self):\n cfg = self.config\n # `vision_transformer` is a misnomer but we leave to keep checkpoint compatibility\n self.vision_transformer = AudioTransformer(config=cfg)\n\n @nn.compact\n def __call__(self, x, mask, pos_ids, *, enable_dropout=True, patch_num=(16, 8)):\n cfg = self.config\n\n if cfg.transpose_input:\n pos_ids = transpose_input(pos_ids, cfg.default_input_size, cfg.patch_size)\n x, x1 = self.vision_transformer(x, mask, pos_ids, enable_dropout=enable_dropout)\n \n return x, x1"
},
{
"identifier": "ImageEncoder",
"path": "t5x/examples/unified_io/image_encoder.py",
"snippet": "class ImageEncoder(nn.Module):\n \"\"\"Builds features from an image\"\"\"\n config: Union[ImageVitFeatureConfig, AudioVitFeatureConfig]\n\n def setup(self):\n cfg = self.config\n self.vision_transformer = VisionTransformer(config=cfg, param_dict=None)\n\n @nn.compact\n def __call__(self, x, mask, pos_ids, *, enable_dropout: bool = True, patch_num: Any = (16, 16)):\n x, x1 = self.vision_transformer(x, mask, pos_ids, enable_dropout=enable_dropout, patch_num=patch_num)\n return x, x1"
}
] | from collections import OrderedDict
from typing import Mapping
from flax import traverse_util
from seqio import TaskRegistry, FeatureConverter
from t5x.examples.unified_io.audio_encoder import AudioEncoder
from t5x.examples.unified_io.image_encoder import ImageEncoder
from t5x.examples.unified_io.input_modalities import *
from t5x.examples.unified_io.target_modalities import * | 890 | """Code for handling modalities"""
@gin.configurable
def get_target_modalities(
target_modality=['text', 'image', 'audio'],
image_vae_config: ImageViTVQGANConfig=VAEConfig(),
audio_vae_config: AudioViTVQGANConfig=AudioViTVQGANConfig(),
) -> Dict[str, ModalityEncoder]:
"""Return the encoders to use for target modalities"""
out = {}
if 'text' in target_modality:
out['text'] = TargetTextEncoder()
if 'image' in target_modality:
out['image'] = TargetImageDVAEEmbedder(image_vae_config)
if 'audio' in target_modality:
out['audio'] = TargetAudioDVAEEmbedder(audio_vae_config)
return out
@gin.configurable
def get_input_modalities(
input_modality=('text', 'image', 'image_history', 'audio', 'audio_history'),
image_vit_cfg: ImageVitFeatureConfig=ImageVitFeatureConfig(),
audio_vit_cfg: AudioVitFeatureConfig=AudioVitFeatureConfig(),
image_history_cfg: ImageResamplerConfig=ImageResamplerConfig(),
audio_history_cfg: AudioResamplerConfig=AudioResamplerConfig(),
max_img_history=None,
max_audio_history=None,
use_image_vit = False,
use_audio_vit = False,
freeze_vit=False,
use_image_history_vit = False,
use_audio_history_vit = False,
) -> Dict[str, ModalityEncoder]:
"""Returns the ModalityEncoder for the input modalities"""
out = dict()
if 'text' in input_modality:
out["text"] = InputTextEncoder()
image_encoder = None
if use_image_vit or use_image_history_vit:
image_encoder = ImageEncoder(image_vit_cfg)
audio_encoder = None
if use_audio_vit or use_audio_history_vit:
| """Code for handling modalities"""
@gin.configurable
def get_target_modalities(
target_modality=['text', 'image', 'audio'],
image_vae_config: ImageViTVQGANConfig=VAEConfig(),
audio_vae_config: AudioViTVQGANConfig=AudioViTVQGANConfig(),
) -> Dict[str, ModalityEncoder]:
"""Return the encoders to use for target modalities"""
out = {}
if 'text' in target_modality:
out['text'] = TargetTextEncoder()
if 'image' in target_modality:
out['image'] = TargetImageDVAEEmbedder(image_vae_config)
if 'audio' in target_modality:
out['audio'] = TargetAudioDVAEEmbedder(audio_vae_config)
return out
@gin.configurable
def get_input_modalities(
input_modality=('text', 'image', 'image_history', 'audio', 'audio_history'),
image_vit_cfg: ImageVitFeatureConfig=ImageVitFeatureConfig(),
audio_vit_cfg: AudioVitFeatureConfig=AudioVitFeatureConfig(),
image_history_cfg: ImageResamplerConfig=ImageResamplerConfig(),
audio_history_cfg: AudioResamplerConfig=AudioResamplerConfig(),
max_img_history=None,
max_audio_history=None,
use_image_vit = False,
use_audio_vit = False,
freeze_vit=False,
use_image_history_vit = False,
use_audio_history_vit = False,
) -> Dict[str, ModalityEncoder]:
"""Returns the ModalityEncoder for the input modalities"""
out = dict()
if 'text' in input_modality:
out["text"] = InputTextEncoder()
image_encoder = None
if use_image_vit or use_image_history_vit:
image_encoder = ImageEncoder(image_vit_cfg)
audio_encoder = None
if use_audio_vit or use_audio_history_vit: | audio_encoder = AudioEncoder(audio_vit_cfg) | 0 | 2023-12-12 20:23:33+00:00 | 2k |
zju3dv/EasyVolcap | scripts/gaussian/merge_pcd.py | [
{
"identifier": "load_pts",
"path": "easyvolcap/utils/data_utils.py",
"snippet": "def load_pts(filename: str):\n from pyntcloud import PyntCloud\n cloud = PyntCloud.from_file(filename)\n verts = cloud.xyz\n if 'red' in cloud.points and 'green' in cloud.points and 'blue' in cloud.points:\n r = np.asarray(cloud.points['red'])\n g = np.asarray(cloud.points['green'])\n b = np.asarray(cloud.points['blue'])\n colors = np.stack([r, g, b], axis=-1) / 255\n elif 'r' in cloud.points and 'g' in cloud.points and 'b' in cloud.points:\n r = np.asarray(cloud.points['r'])\n g = np.asarray(cloud.points['g'])\n b = np.asarray(cloud.points['b'])\n colors = np.stack([r, g, b], axis=-1) / 255\n else:\n colors = None\n\n if 'nx' in cloud.points and 'ny' in cloud.points and 'nz' in cloud.points:\n nx = np.asarray(cloud.points['nx'])\n ny = np.asarray(cloud.points['ny'])\n nz = np.asarray(cloud.points['nz'])\n norms = np.stack([nx, ny, nz], axis=-1)\n else:\n norms = None\n\n if 'alpha' in cloud.points:\n cloud.points['alpha'] = cloud.points['alpha'] / 255\n\n reserved = ['x', 'y', 'z', 'red', 'green', 'blue', 'r', 'g', 'b', 'nx', 'ny', 'nz']\n scalars = dotdict({k: np.asarray(cloud.points[k])[..., None] for k in cloud.points if k not in reserved}) # one extra dimension at the back added\n return verts, colors, norms, scalars"
},
{
"identifier": "export_pts",
"path": "easyvolcap/utils/data_utils.py",
"snippet": "def export_pts(pts: torch.Tensor, color: torch.Tensor = None, normal: torch.Tensor = None, scalars: dotdict = dotdict(), filename: str = \"default.ply\"):\n from pandas import DataFrame\n from pyntcloud import PyntCloud\n\n data = dotdict()\n pts = to_numpy(pts) # always blocking?\n pts = pts.reshape(-1, 3)\n data.x = pts[:, 0].astype(np.float32)\n data.y = pts[:, 1].astype(np.float32)\n data.z = pts[:, 2].astype(np.float32)\n\n if color is not None:\n color = to_numpy(color)\n color = color.reshape(-1, 3)\n data.red = (color[:, 0] * 255).astype(np.uint8)\n data.green = (color[:, 1] * 255).astype(np.uint8)\n data.blue = (color[:, 2] * 255).astype(np.uint8)\n else:\n data.red = (pts[:, 0] * 255).astype(np.uint8)\n data.green = (pts[:, 1] * 255).astype(np.uint8)\n data.blue = (pts[:, 2] * 255).astype(np.uint8)\n\n if 'alpha' in scalars:\n data.alpha = (scalars.alpha * 255).astype(np.uint8)\n\n if normal is not None:\n normal = to_numpy(normal)\n normal = normal / (np.linalg.norm(normal, axis=-1, keepdims=True) + 1e-13)\n normal = normal.reshape(-1, 3)\n data.nx = normal[:, 0].astype(np.float32)\n data.ny = normal[:, 1].astype(np.float32)\n data.nz = normal[:, 2].astype(np.float32)\n\n if scalars is not None:\n scalars = to_numpy(scalars)\n for k, v in scalars.items():\n v = v.reshape(-1, 1)\n data[k] = v[:, 0]\n\n df = DataFrame(data)\n cloud = PyntCloud(df) # construct the data\n dirname = os.path.dirname(filename)\n if dirname: os.makedirs(dirname, exist_ok=True)\n return cloud.to_file(filename)"
}
] | from easyvolcap.utils.console_utils import *
from easyvolcap.utils.data_utils import load_pts, export_pts
from os.path import join
import argparse
import numpy as np | 1,196 | """
This script will load and convert a .ply visual hull to a points3D file
"""
@catch_throw
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--data_root', default='data/enerf_outdoor/actor2_3')
parser.add_argument('--vhulls_dir', default='merged')
parser.add_argument('--vhulls_dirs', default=['vhulls', 'bkgd/boost'])
parser.add_argument('--pcd_file', default='000000.ply')
args = parser.parse_args()
vs = []
out = join(args.data_root, args.vhulls_dir, args.pcd_file)
for vhull_dir in args.vhulls_dirs:
vhull = join(args.data_root, vhull_dir, args.pcd_file)
| """
This script will load and convert a .ply visual hull to a points3D file
"""
@catch_throw
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--data_root', default='data/enerf_outdoor/actor2_3')
parser.add_argument('--vhulls_dir', default='merged')
parser.add_argument('--vhulls_dirs', default=['vhulls', 'bkgd/boost'])
parser.add_argument('--pcd_file', default='000000.ply')
args = parser.parse_args()
vs = []
out = join(args.data_root, args.vhulls_dir, args.pcd_file)
for vhull_dir in args.vhulls_dirs:
vhull = join(args.data_root, vhull_dir, args.pcd_file) | v, c, n, s = load_pts(vhull) | 0 | 2023-12-07 08:53:42+00:00 | 2k |
minghanqin/LangSplat | scene/cameras.py | [
{
"identifier": "getWorld2View2",
"path": "utils/graphics_utils.py",
"snippet": "def getWorld2View2(R, t, translate=np.array([.0, .0, .0]), scale=1.0):\n Rt = np.zeros((4, 4))\n Rt[:3, :3] = R.transpose()\n Rt[:3, 3] = t\n Rt[3, 3] = 1.0\n\n C2W = np.linalg.inv(Rt)\n cam_center = C2W[:3, 3]\n cam_center = (cam_center + translate) * scale\n C2W[:3, 3] = cam_center\n Rt = np.linalg.inv(C2W)\n return np.float32(Rt)"
},
{
"identifier": "getProjectionMatrix",
"path": "utils/graphics_utils.py",
"snippet": "def getProjectionMatrix(znear, zfar, fovX, fovY):\n tanHalfFovY = math.tan((fovY / 2))\n tanHalfFovX = math.tan((fovX / 2))\n\n top = tanHalfFovY * znear\n bottom = -top\n right = tanHalfFovX * znear\n left = -right\n\n P = torch.zeros(4, 4)\n\n z_sign = 1.0\n\n P[0, 0] = 2.0 * znear / (right - left)\n P[1, 1] = 2.0 * znear / (top - bottom)\n P[0, 2] = (right + left) / (right - left)\n P[1, 2] = (top + bottom) / (top - bottom)\n P[3, 2] = z_sign\n P[2, 2] = z_sign * zfar / (zfar - znear)\n P[2, 3] = -(zfar * znear) / (zfar - znear)\n return P"
}
] | import os
import pickle
import torch
import numpy as np
from torch import nn
from utils.graphics_utils import getWorld2View2, getProjectionMatrix | 922 | #
# Copyright (C) 2023, Inria
# GRAPHDECO research group, https://team.inria.fr/graphdeco
# All rights reserved.
#
# This software is free for non-commercial, research and evaluation use
# under the terms of the LICENSE.md file.
#
# For inquiries contact [email protected]
#
class Camera(nn.Module):
def __init__(self, colmap_id, R, T, FoVx, FoVy, image, gt_alpha_mask,
image_name, uid,
trans=np.array([0.0, 0.0, 0.0]), scale=1.0, data_device = "cuda"
):
super(Camera, self).__init__()
self.uid = uid
self.colmap_id = colmap_id
self.R = R
self.T = T
self.FoVx = FoVx
self.FoVy = FoVy
self.image_name = image_name
try:
self.data_device = torch.device(data_device)
except Exception as e:
print(e)
print(f"[Warning] Custom device {data_device} failed, fallback to default cuda device" )
self.data_device = torch.device("cuda")
self.original_image = image.clamp(0.0, 1.0).to(self.data_device)
self.image_width = self.original_image.shape[2]
self.image_height = self.original_image.shape[1]
if gt_alpha_mask is not None:
self.original_image *= gt_alpha_mask.to(self.data_device)
else:
self.original_image *= torch.ones((1, self.image_height, self.image_width), device=self.data_device)
self.zfar = 100.0
self.znear = 0.01
self.trans = trans
self.scale = scale
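        # world-to-view matrix from getWorld2View2, stored transposed in the layout
        # the downstream Gaussian rasterizer expects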
self.world_view_transform = torch.tensor(getWorld2View2(R, T, trans, scale)).transpose(0, 1).cuda()
| #
# Copyright (C) 2023, Inria
# GRAPHDECO research group, https://team.inria.fr/graphdeco
# All rights reserved.
#
# This software is free for non-commercial, research and evaluation use
# under the terms of the LICENSE.md file.
#
# For inquiries contact [email protected]
#
class Camera(nn.Module):
def __init__(self, colmap_id, R, T, FoVx, FoVy, image, gt_alpha_mask,
image_name, uid,
trans=np.array([0.0, 0.0, 0.0]), scale=1.0, data_device = "cuda"
):
super(Camera, self).__init__()
self.uid = uid
self.colmap_id = colmap_id
self.R = R
self.T = T
self.FoVx = FoVx
self.FoVy = FoVy
self.image_name = image_name
try:
self.data_device = torch.device(data_device)
except Exception as e:
print(e)
print(f"[Warning] Custom device {data_device} failed, fallback to default cuda device" )
self.data_device = torch.device("cuda")
self.original_image = image.clamp(0.0, 1.0).to(self.data_device)
self.image_width = self.original_image.shape[2]
self.image_height = self.original_image.shape[1]
if gt_alpha_mask is not None:
self.original_image *= gt_alpha_mask.to(self.data_device)
else:
self.original_image *= torch.ones((1, self.image_height, self.image_width), device=self.data_device)
self.zfar = 100.0
self.znear = 0.01
self.trans = trans
self.scale = scale
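        # world-to-view matrix from getWorld2View2, stored transposed in the layout
        # the downstream Gaussian rasterizer expects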
self.world_view_transform = torch.tensor(getWorld2View2(R, T, trans, scale)).transpose(0, 1).cuda() | self.projection_matrix = getProjectionMatrix(znear=self.znear, zfar=self.zfar, fovX=self.FoVx, fovY=self.FoVy).transpose(0,1).cuda() | 1 | 2023-12-11 06:33:35+00:00 | 2k |
SciPhi-AI/agent-search | agent_search/search/base.py | [
{
"identifier": "AgentSearchResult",
"path": "agent_search/core/search_types.py",
"snippet": "class AgentSearchResult(BaseModel):\n \"\"\"A dataclass to store the search result\"\"\"\n\n score: float\n url: str\n title: Optional[str]\n dataset: Optional[str]\n # TODO - Add dict(str, [str, float, ..]) validation\n metadata: Any\n text: str\n\n def __init__(self, **data: Any):\n super().__init__(**data)\n if self.title and self.title == self.text[0 : len(self.title)]:\n self.text = self.text[len(self.title) :]\n self.text = self.text.strip()\n\n def to_string_dict(self) -> dict:\n \"\"\"Returns a dictionary representation with all values as strings.\"\"\"\n return {\n \"score\": str(self.score),\n \"url\": self.url,\n \"title\": self.title,\n \"dataset\": self.dataset,\n \"metadata\": self.metadata,\n \"text\": self.text,\n }\n\n @classmethod\n def from_dict(cls, data: dict):\n return cls(**data)"
},
{
"identifier": "cosine_similarity",
"path": "agent_search/core/utils.py",
"snippet": "def cosine_similarity(v1: np.ndarray, v2: np.ndarray) -> float:\n \"\"\"Compute the cosine similarity between two vectors.\"\"\"\n dot_product = np.dot(v1, v2)\n norm_v1 = np.linalg.norm(v1)\n norm_v2 = np.linalg.norm(v2)\n return dot_product / (norm_v1 * norm_v2)"
},
{
"identifier": "get_data_path",
"path": "agent_search/core/utils.py",
"snippet": "def get_data_path() -> str:\n return os.path.join(\n os.path.dirname(__file__),\n \"..\",\n \"..\",\n \"data\",\n )"
},
{
"identifier": "load_config",
"path": "agent_search/core/utils.py",
"snippet": "def load_config(config_dir: Optional[str] = None) -> configparser.ConfigParser:\n \"\"\"Load the configuration file.\"\"\"\n config = configparser.ConfigParser()\n if not config_dir:\n config_dir = get_data_path()\n config.read(os.path.join(config_dir, \"config.ini\"))\n return config"
}
] | import csv
import json
import logging
import os
import numpy as np
import psycopg2
import psycopg2
from typing import List
from qdrant_client import QdrantClient
from transformers import AutoModel
from agent_search.core import AgentSearchResult
from agent_search.core.utils import (
cosine_similarity,
get_data_path,
load_config,
) | 650 |
logger = logging.getLogger(__name__)
class WebSearchEngine:
"""A simple search client for the OpenSearch collection"""
def __init__(
self,
):
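        # psycopg2 is imported lazily so a missing driver raises a clear install hint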
        try:
            import psycopg2
except ImportError as e:
raise ImportError(
f"Error {e} while imoprting psycopg2. Please install it with `pip install psycopg2` to run an WebSearchEngine instance."
)
# Load config
|
logger = logging.getLogger(__name__)
class WebSearchEngine:
"""A simple search client for the OpenSearch collection"""
def __init__(
self,
):
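        # psycopg2 is imported lazily so a missing driver raises a clear install hint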
        try:
            import psycopg2
except ImportError as e:
raise ImportError(
f"Error {e} while imoprting psycopg2. Please install it with `pip install psycopg2` to run an WebSearchEngine instance."
)
# Load config | self.config = load_config()["agent_search"] | 3 | 2023-12-11 17:41:03+00:00 | 2k |
yohanshin/WHAM | lib/data/_dataset.py | [
{
"identifier": "constants",
"path": "configs/constants.py",
"snippet": "IMG_FEAT_DIM = {\n 'resnet': 2048,\n 'vit': 1024\n}\nN_JOINTS = 17\n PARSED_DATA = f'{root}/parsed_data'\n THREEDPW_PTH = f'{root}/3DPW'\n RICH_PTH = f'{root}/RICH'\n EMDB_PTH = f'{root}/EMDB'\n NUM_JOINTS = N_JOINTS\n H36M_TO_J17 = [6, 5, 4, 1, 2, 3, 16, 15, 14, 11, 12, 13, 8, 10, 0, 7, 9]\n H36M_TO_J14 = H36M_TO_J17[:14]\n J17_TO_H36M = [14, 3, 4, 5, 2, 1, 0, 15, 12, 16, 13, 9, 10, 11, 8, 7, 6]\n COCO_AUG_DICT = f'{root}/body_models/coco_aug_dict.pth'\n TREE = [[5, 6], 0, 0, 1, 2, -1, -1, 5, 6, 7, 8, -1, -1, 11, 12, 13, 14, 15, 15, 15, 16, 16, 16]\n S_BIAS = 1e-1\n S_JITTERING = 5e-2\n S_PEAK = 3e-1\n S_PEAK_MASK = 5e-3\n S_MASK = 0.03\n MAIN_JOINTS = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21] # reduced_joints\n FLDR = f'{root}/body_models/smpl/'\n SMPLX2SMPL = f'{root}/body_models/smplx2smpl.pkl'\n FACES = f'{root}/body_models/smpl_faces.npy'\n MEAN_PARAMS = f'{root}/body_models/smpl_mean_params.npz'\n JOINTS_REGRESSOR_WHAM = f'{root}/body_models/J_regressor_wham.npy'\n JOINTS_REGRESSOR_H36M = f'{root}/body_models/J_regressor_h36m.npy'\n JOINTS_REGRESSOR_EXTRA = f'{root}/body_models/J_regressor_extra.npy'\n JOINTS_REGRESSOR_FEET = f'{root}/body_models/J_regressor_feet.npy'\n PARENTS = torch.tensor([\n -1, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9, 9, 12, 13, 14, 16, 17, 18, 19, 20, 21])\nclass PATHS:\nclass KEYPOINTS:\nclass BMODEL:"
},
{
"identifier": "Normalizer",
"path": "lib/data/normalizer.py",
"snippet": "class Normalizer:\n def __init__(self, cfg):\n pass\n \n def __call__(self, kp_2d, res, cam_intrinsics, patch_width=224, patch_height=224, bbox=None, mask=None):\n if bbox is None:\n bbox = compute_bbox_from_keypoints(kp_2d, do_augment=True, mask=mask)\n \n out_kp_2d = self.bbox_normalization(kp_2d, bbox, res, patch_width, patch_height)\n return out_kp_2d, bbox\n \n def bbox_normalization(self, kp_2d, bbox, res, patch_width, patch_height):\n to_torch = False\n if isinstance(kp_2d, torch.Tensor):\n to_torch = True\n kp_2d = kp_2d.numpy()\n bbox = bbox.numpy()\n \n out_kp_2d = np.zeros_like(kp_2d)\n for idx in range(len(out_kp_2d)):\n out_kp_2d[idx] = transform_keypoints(kp_2d[idx], bbox[idx][:3], patch_width, patch_height)[0]\n out_kp_2d[idx] = normalize_keypoints_to_patch(out_kp_2d[idx], patch_width)\n \n if to_torch:\n out_kp_2d = torch.from_numpy(out_kp_2d)\n bbox = torch.from_numpy(bbox)\n \n centers = normalize_keypoints_to_image(bbox[:, :2].unsqueeze(1), res).squeeze(1)\n scale = bbox[:, 2:] * 200 / res.max()\n location = torch.cat((centers, scale), dim=-1)\n \n out_kp_2d = out_kp_2d.reshape(out_kp_2d.shape[0], -1)\n out_kp_2d = torch.cat((out_kp_2d, location), dim=-1)\n return out_kp_2d"
},
{
"identifier": "transform",
"path": "lib/utils/imutils.py",
"snippet": "def transform(pt, center, scale, res, invert=0, rot=0):\n \"\"\"Transform pixel location to different reference.\"\"\"\n t = get_transform(center, scale, res, rot=rot)\n if invert:\n t = np.linalg.inv(t)\n new_pt = np.array([pt[0] - 1, pt[1] - 1, 1.]).T\n new_pt = np.dot(t, new_pt)\n return np.array([round(new_pt[0]), round(new_pt[1])], dtype=int) + 1"
}
] | import torch
import numpy as np
from skimage.util.shape import view_as_windows
from configs import constants as _C
from .normalizer import Normalizer
from lib.utils.imutils import transform | 1,499 | from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
class BaseDataset(torch.utils.data.Dataset):
def __init__(self, cfg, training=True):
super(BaseDataset, self).__init__()
self.n_joints = _C.KEYPOINTS.NUM_JOINTS
self.epoch = 0
self.n_frames = cfg.DATASET.SEQLEN + 1
self.training = training
| from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
class BaseDataset(torch.utils.data.Dataset):
def __init__(self, cfg, training=True):
super(BaseDataset, self).__init__()
self.n_joints = _C.KEYPOINTS.NUM_JOINTS
self.epoch = 0
self.n_frames = cfg.DATASET.SEQLEN + 1
self.training = training | self.keypoints_normalizer = Normalizer(cfg) | 1 | 2023-12-08 09:17:54+00:00 | 2k |
octo-models/octo | octo/data/oxe/oxe_standardization_transforms.py | [
{
"identifier": "binarize_gripper_actions",
"path": "octo/data/utils/data_utils.py",
"snippet": "def binarize_gripper_actions(actions: tf.Tensor) -> tf.Tensor:\n \"\"\"Converts gripper actions from continous to binary values (0 and 1).\n\n We exploit that fact that most of the time, the gripper is fully open (near 1.0) or fully closed (near\n 0.0). As it transitions between the two, it sometimes passes through a few intermediate values. We relabel\n those intermediate values based on the state that is reached _after_ those intermediate values.\n\n In the edge case that the trajectory ends with an intermediate value, we give up on binarizing and relabel\n that chunk of intermediate values as the last action in the trajectory.\n\n The scan implements the following code:\n\n new_actions = np.empty_like(actions)\n carry = actions[-1]\n for i in reversed(range(actions.shape[0])):\n if in_between_mask[i]:\n carry = carry\n else:\n carry = float(open_mask[i])\n new_actions[i] = carry\n \"\"\"\n open_mask = actions > 0.95\n closed_mask = actions < 0.05\n in_between_mask = tf.logical_not(tf.logical_or(open_mask, closed_mask))\n\n is_open_float = tf.cast(open_mask, tf.float32)\n\n def scan_fn(carry, i):\n return tf.cond(\n in_between_mask[i],\n lambda: tf.cast(carry, tf.float32),\n lambda: is_open_float[i],\n )\n\n new_actions = tf.scan(\n scan_fn, tf.range(tf.shape(actions)[0]), actions[-1], reverse=True\n )\n return new_actions"
},
{
"identifier": "invert_gripper_actions",
"path": "octo/data/utils/data_utils.py",
"snippet": "def invert_gripper_actions(actions: tf.Tensor):\n return 1 - actions"
},
{
"identifier": "rel2abs_gripper_actions",
"path": "octo/data/utils/data_utils.py",
"snippet": "def rel2abs_gripper_actions(actions: tf.Tensor):\n \"\"\"\n Converts relative gripper actions (+1 for closing, -1 for opening) to absolute gripper actions\n (0 for closed, 1 for open). Assumes that the first relative gripper is not redundant\n (i.e. close when already closed).\n \"\"\"\n opening_mask = actions < -0.1\n closing_mask = actions > 0.1\n\n # -1 for closing, 1 for opening, 0 for no change\n thresholded_actions = tf.where(opening_mask, 1, tf.where(closing_mask, -1, 0))\n\n def scan_fn(carry, i):\n return tf.cond(\n thresholded_actions[i] == 0,\n lambda: carry,\n lambda: thresholded_actions[i],\n )\n\n # if no relative grasp, assumes open for whole trajectory\n start = -1 * thresholded_actions[tf.argmax(thresholded_actions != 0, axis=0)]\n start = tf.cond(start == 0, lambda: 1, lambda: start)\n # -1 for closed, 1 for open\n new_actions = tf.scan(scan_fn, tf.range(tf.shape(actions)[0]), start)\n\n new_actions = tf.cast(new_actions, tf.float32) / 2 + 0.5\n return new_actions"
},
{
"identifier": "relabel_actions",
"path": "octo/data/utils/data_utils.py",
"snippet": "def relabel_actions(traj: Dict[str, Any]) -> Dict[str, Any]:\n \"\"\"Relabels the actions to use the reached proprio instead. Discards the last timestep of the\n trajectory (since we don't have a next state to compute the action.)\n \"\"\"\n # relabel the first 6 action dims (xyz position, xyz rotation) using the reached proprio\n movement_actions = (\n traj[\"observation\"][\"state\"][1:, :6] - traj[\"observation\"][\"state\"][:-1, :6]\n )\n\n # discard the last timestep of the trajectory\n traj_truncated = tf.nest.map_structure(lambda x: x[:-1], traj)\n\n # recombine to get full actions\n traj_truncated[\"action\"] = tf.concat(\n [movement_actions, traj[\"action\"][:-1, -1:]],\n axis=1,\n )\n\n return traj_truncated"
}
] | from typing import Any, Dict
from octo.data.utils.data_utils import (
binarize_gripper_actions,
invert_gripper_actions,
rel2abs_gripper_actions,
relabel_actions,
)
import tensorflow as tf
import tensorflow_graphics.geometry.transformation as tft
import tensorflow_graphics.geometry.transformation as tft
import tensorflow_graphics.geometry.transformation as tft | 1,251 | """Open X-Embodiment Dataset Transforms
input: dict of features, each is batched, i.e. has leading time dimension
expected output:
step = {
'observation': {
<image_keys, depth_image_keys>
state in chosen state representation
},
'action': action in chosen action representation,
'language_instruction': str,
}
"""
def bridge_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
# NOTE: this is not actually the official OXE copy of bridge, it is our own more up-to-date copy that you
# can find at https://rail.eecs.berkeley.edu/datasets/bridge_release/data/tfds/
trajectory["action"] = tf.concat(
[
trajectory["action"][:, :6],
| """Open X-Embodiment Dataset Transforms
input: dict of features, each is batched, i.e. has leading time dimension
expected output:
step = {
'observation': {
<image_keys, depth_image_keys>
state in chosen state representation
},
'action': action in chosen action representation,
'language_instruction': str,
}
"""
def bridge_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
# NOTE: this is not actually the official OXE copy of bridge, it is our own more up-to-date copy that you
# can find at https://rail.eecs.berkeley.edu/datasets/bridge_release/data/tfds/
trajectory["action"] = tf.concat(
[
trajectory["action"][:, :6], | binarize_gripper_actions(trajectory["action"][:, -1])[:, None], | 0 | 2023-12-13 09:58:56+00:00 | 2k |
mistralai/client-python | tests/test_chat.py | [
{
"identifier": "mock_chat_response_payload",
"path": "tests/utils.py",
"snippet": "def mock_chat_response_payload():\n return orjson.dumps(\n {\n \"id\": \"chat-98c8c60e3fbf4fc49658eddaf447357c\",\n \"object\": \"chat.completion\",\n \"created\": 1703165682,\n \"choices\": [\n {\n \"finish_reason\": \"stop\",\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"What is the best French cheese?\",\n },\n \"index\": 0,\n }\n ],\n \"model\": \"mistral-small\",\n \"usage\": {\"prompt_tokens\": 90, \"total_tokens\": 90, \"completion_tokens\": 0},\n }\n ).decode()"
},
{
"identifier": "mock_chat_response_streaming_payload",
"path": "tests/utils.py",
"snippet": "def mock_chat_response_streaming_payload():\n return [\n \"data: \"\n + orjson.dumps(\n {\n \"id\": \"cmpl-8cd9019d21ba490aa6b9740f5d0a883e\",\n \"model\": \"mistral-small\",\n \"choices\": [\n {\n \"index\": 0,\n \"delta\": {\"role\": \"assistant\"},\n \"finish_reason\": None,\n }\n ],\n }\n ).decode()\n + \"\\n\\n\",\n *[\n \"data: \"\n + orjson.dumps(\n {\n \"id\": \"cmpl-8cd9019d21ba490aa6b9740f5d0a883e\",\n \"object\": \"chat.completion.chunk\",\n \"created\": 1703168544,\n \"model\": \"mistral-small\",\n \"choices\": [\n {\n \"index\": i,\n \"delta\": {\"content\": f\"stream response {i}\"},\n \"finish_reason\": None,\n }\n ],\n }\n ).decode()\n + \"\\n\\n\"\n for i in range(10)\n ],\n \"data: [DONE]\\n\\n\",\n ]"
},
{
"identifier": "mock_response",
"path": "tests/utils.py",
"snippet": "def mock_response(\n status_code: int, content: str, is_json: bool = True\n) -> mock.MagicMock:\n response = mock.Mock(Response)\n response.status_code = status_code\n if is_json:\n response.json = mock.MagicMock()\n response.json.return_value = orjson.loads(content)\n response.text = content\n return response"
},
{
"identifier": "mock_stream_response",
"path": "tests/utils.py",
"snippet": "@contextlib.contextmanager\ndef mock_stream_response(status_code: int, content: List[str]):\n response = mock.Mock(Response)\n response.status_code = status_code\n response.iter_lines.return_value = iter(content)\n yield response"
}
] | import unittest.mock as mock
import pytest
from mistralai.client import MistralClient
from mistralai.models.chat_completion import (
ChatCompletionResponse,
ChatCompletionStreamResponse,
ChatMessage,
)
from .utils import (
mock_chat_response_payload,
mock_chat_response_streaming_payload,
mock_response,
mock_stream_response,
) | 1,007 |
@pytest.fixture()
def client():
client = MistralClient()
client._client = mock.MagicMock()
return client
class TestChat:
def test_chat(self, client):
client._client.request.return_value = mock_response(
200,
mock_chat_response_payload(),
)
result = client.chat(
model="mistral-small",
messages=[
ChatMessage(role="user", content="What is the best French cheese?")
],
)
client._client.request.assert_called_once_with(
"post",
"https://api.mistral.ai/v1/chat/completions",
headers={
"User-Agent": f"mistral-client-python/{client._version}",
"Accept": "application/json",
"Authorization": "Bearer None",
"Content-Type": "application/json",
},
json={
"model": "mistral-small",
"messages": [
{"role": "user", "content": "What is the best French cheese?"}
],
"safe_prompt": False,
"stream": False,
},
)
assert isinstance(
result, ChatCompletionResponse
), "Should return an ChatCompletionResponse"
assert len(result.choices) == 1
assert result.choices[0].index == 0
assert result.object == "chat.completion"
def test_chat_streaming(self, client):
|
@pytest.fixture()
def client():
client = MistralClient()
client._client = mock.MagicMock()
return client
class TestChat:
def test_chat(self, client):
client._client.request.return_value = mock_response(
200,
mock_chat_response_payload(),
)
result = client.chat(
model="mistral-small",
messages=[
ChatMessage(role="user", content="What is the best French cheese?")
],
)
client._client.request.assert_called_once_with(
"post",
"https://api.mistral.ai/v1/chat/completions",
headers={
"User-Agent": f"mistral-client-python/{client._version}",
"Accept": "application/json",
"Authorization": "Bearer None",
"Content-Type": "application/json",
},
json={
"model": "mistral-small",
"messages": [
{"role": "user", "content": "What is the best French cheese?"}
],
"safe_prompt": False,
"stream": False,
},
)
assert isinstance(
result, ChatCompletionResponse
), "Should return an ChatCompletionResponse"
assert len(result.choices) == 1
assert result.choices[0].index == 0
assert result.object == "chat.completion"
def test_chat_streaming(self, client): | client._client.stream.return_value = mock_stream_response( | 3 | 2023-12-07 10:09:51+00:00 | 2k |
kijai/ComfyUI-Marigold | marigold/model/marigold_pipeline.py | [
{
"identifier": "RGBEncoder",
"path": "marigold/model/rgb_encoder.py",
"snippet": "class RGBEncoder(nn.Module):\n \"\"\"\n The encoder of pretrained Stable Diffusion VAE\n \"\"\"\n \n def __init__(self, pretrained_path, subfolder=None) -> None:\n super().__init__()\n \n vae: AutoencoderKL = AutoencoderKL.from_pretrained(pretrained_path, subfolder=subfolder)\n logging.info(f\"pretrained AutoencoderKL loaded from: {pretrained_path}\")\n \n self.rgb_encoder = nn.Sequential(\n vae.encoder,\n vae.quant_conv,\n )\n \n def to(self, *args, **kwargs):\n self.rgb_encoder.to(*args, **kwargs) \n \n def forward(self, rgb_in):\n return self.encode(rgb_in)\n \n def encode(self, rgb_in):\n moments = self.rgb_encoder(rgb_in) # [B, 8, H/8, W/8]\n mean, logvar = torch.chunk(moments, 2, dim=1)\n rgb_latent = mean\n return rgb_latent"
},
{
"identifier": "StackedDepthAE",
"path": "marigold/model/stacked_depth_AE.py",
"snippet": "class StackedDepthAE(nn.Module):\n \"\"\"\n Tailored pretrained image VAE for depth map.\n Encode: Depth images are repeated into 3 channels.\n Decode: The average of 3 chennels are taken as output.\n \"\"\"\n\n def __init__(self, pretrained_path, subfolder=None) -> None:\n super().__init__()\n\n self.vae: AutoencoderKL = AutoencoderKL.from_pretrained(pretrained_path, subfolder=subfolder)\n logging.info(f\"pretrained AutoencoderKL loaded from: {pretrained_path}\")\n\n def forward(self, depth_in):\n depth_latent = self.encode(depth_in)\n depth_out = self.decode(depth_latent)\n return depth_out\n\n def to(self, *args, **kwargs):\n self.vae.to(*args, **kwargs)\n\n @staticmethod\n def _stack_depth_images(depth_in):\n if 4 == len(depth_in.shape):\n stacked = depth_in.repeat(1, 3, 1, 1)\n elif 3 == len(depth_in.shape):\n stacked = depth_in.unsqueeze(1)\n stacked = depth_in.repeat(1, 3, 1, 1)\n return stacked\n\n def encode(self, depth_in):\n stacked = self._stack_depth_images(depth_in)\n h = self.vae.encoder(stacked)\n moments = self.vae.quant_conv(h)\n mean, logvar = torch.chunk(moments, 2, dim=1)\n depth_latent = mean\n return depth_latent\n\n def decode(self, depth_latent):\n z = self.vae.post_quant_conv(depth_latent)\n stacked = self.vae.decoder(z)\n depth_mean = stacked.mean(dim=1, keepdim=True)\n return depth_mean"
}
] | import logging
import numpy as np
import torch
from typing import Dict
from diffusers import (
DDIMScheduler,
DDPMScheduler,
PNDMScheduler,
DEISMultistepScheduler,
SchedulerMixin,
UNet2DConditionModel,
)
from torch import nn
from torch.nn import Conv2d
from torch.nn.parameter import Parameter
from tqdm.auto import tqdm
from transformers import CLIPTextModel, CLIPTokenizer
from .rgb_encoder import RGBEncoder
from .stacked_depth_AE import StackedDepthAE | 1,225 | # Author: Bingxin Ke
# Last modified: 2023-12-11
class MarigoldPipeline(nn.Module):
"""
Marigold monocular depth estimator.
"""
def __init__(
self,
unet_pretrained_path: Dict, # {path: xxx, subfolder: xxx}
rgb_encoder_pretrained_path: Dict,
depht_ae_pretrained_path: Dict,
noise_scheduler_pretrained_path: Dict,
tokenizer_pretrained_path: Dict,
text_encoder_pretrained_path: Dict,
empty_text_embed=None,
trainable_unet=False,
rgb_latent_scale_factor=0.18215,
depth_latent_scale_factor=0.18215,
noise_scheduler_type=None,
enable_gradient_checkpointing=False,
enable_xformers=True,
) -> None:
super().__init__()
self.rgb_latent_scale_factor = rgb_latent_scale_factor
self.depth_latent_scale_factor = depth_latent_scale_factor
self.device = "cpu"
# ******* Initialize modules *******
# Trainable modules
self.trainable_module_dic: Dict[str, nn.Module] = {}
self.trainable_unet = trainable_unet
# Denoising UNet
self.unet: UNet2DConditionModel = UNet2DConditionModel.from_pretrained(
unet_pretrained_path["path"], subfolder=unet_pretrained_path["subfolder"]
)
logging.info(f"pretrained UNet loaded from: {unet_pretrained_path}")
if 8 != self.unet.config["in_channels"]:
self._replace_unet_conv_in()
logging.warning("Unet conv_in layer is replaced")
if enable_xformers:
self.unet.enable_xformers_memory_efficient_attention()
else:
self.unet.disable_xformers_memory_efficient_attention()
# Image encoder
| # Author: Bingxin Ke
# Last modified: 2023-12-11
class MarigoldPipeline(nn.Module):
"""
Marigold monocular depth estimator.
"""
def __init__(
self,
unet_pretrained_path: Dict, # {path: xxx, subfolder: xxx}
rgb_encoder_pretrained_path: Dict,
depht_ae_pretrained_path: Dict,
noise_scheduler_pretrained_path: Dict,
tokenizer_pretrained_path: Dict,
text_encoder_pretrained_path: Dict,
empty_text_embed=None,
trainable_unet=False,
rgb_latent_scale_factor=0.18215,
depth_latent_scale_factor=0.18215,
noise_scheduler_type=None,
enable_gradient_checkpointing=False,
enable_xformers=True,
) -> None:
super().__init__()
self.rgb_latent_scale_factor = rgb_latent_scale_factor
self.depth_latent_scale_factor = depth_latent_scale_factor
self.device = "cpu"
# ******* Initialize modules *******
# Trainable modules
self.trainable_module_dic: Dict[str, nn.Module] = {}
self.trainable_unet = trainable_unet
# Denoising UNet
self.unet: UNet2DConditionModel = UNet2DConditionModel.from_pretrained(
unet_pretrained_path["path"], subfolder=unet_pretrained_path["subfolder"]
)
logging.info(f"pretrained UNet loaded from: {unet_pretrained_path}")
if 8 != self.unet.config["in_channels"]:
self._replace_unet_conv_in()
logging.warning("Unet conv_in layer is replaced")
if enable_xformers:
self.unet.enable_xformers_memory_efficient_attention()
else:
self.unet.disable_xformers_memory_efficient_attention()
# Image encoder | self.rgb_encoder = RGBEncoder( | 0 | 2023-12-12 12:25:52+00:00 | 2k |
modelscope/richdreamer | extern/ldm_zero123/thirdp/psp/model_irse.py | [
{
"identifier": "Flatten",
"path": "extern/ldm_zero123/thirdp/psp/helpers.py",
"snippet": "class Flatten(Module):\n def forward(self, input):\n return input.view(input.size(0), -1)"
},
{
"identifier": "bottleneck_IR",
"path": "extern/ldm_zero123/thirdp/psp/helpers.py",
"snippet": "class bottleneck_IR(Module):\n def __init__(self, in_channel, depth, stride):\n super(bottleneck_IR, self).__init__()\n if in_channel == depth:\n self.shortcut_layer = MaxPool2d(1, stride)\n else:\n self.shortcut_layer = Sequential(\n Conv2d(in_channel, depth, (1, 1), stride, bias=False),\n BatchNorm2d(depth),\n )\n self.res_layer = Sequential(\n BatchNorm2d(in_channel),\n Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False),\n PReLU(depth),\n Conv2d(depth, depth, (3, 3), stride, 1, bias=False),\n BatchNorm2d(depth),\n )\n\n def forward(self, x):\n shortcut = self.shortcut_layer(x)\n res = self.res_layer(x)\n return res + shortcut"
},
{
"identifier": "bottleneck_IR_SE",
"path": "extern/ldm_zero123/thirdp/psp/helpers.py",
"snippet": "class bottleneck_IR_SE(Module):\n def __init__(self, in_channel, depth, stride):\n super(bottleneck_IR_SE, self).__init__()\n if in_channel == depth:\n self.shortcut_layer = MaxPool2d(1, stride)\n else:\n self.shortcut_layer = Sequential(\n Conv2d(in_channel, depth, (1, 1), stride, bias=False),\n BatchNorm2d(depth),\n )\n self.res_layer = Sequential(\n BatchNorm2d(in_channel),\n Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False),\n PReLU(depth),\n Conv2d(depth, depth, (3, 3), stride, 1, bias=False),\n BatchNorm2d(depth),\n SEModule(depth, 16),\n )\n\n def forward(self, x):\n shortcut = self.shortcut_layer(x)\n res = self.res_layer(x)\n return res + shortcut"
},
{
"identifier": "get_blocks",
"path": "extern/ldm_zero123/thirdp/psp/helpers.py",
"snippet": "def get_blocks(num_layers):\n if num_layers == 50:\n blocks = [\n get_block(in_channel=64, depth=64, num_units=3),\n get_block(in_channel=64, depth=128, num_units=4),\n get_block(in_channel=128, depth=256, num_units=14),\n get_block(in_channel=256, depth=512, num_units=3),\n ]\n elif num_layers == 100:\n blocks = [\n get_block(in_channel=64, depth=64, num_units=3),\n get_block(in_channel=64, depth=128, num_units=13),\n get_block(in_channel=128, depth=256, num_units=30),\n get_block(in_channel=256, depth=512, num_units=3),\n ]\n elif num_layers == 152:\n blocks = [\n get_block(in_channel=64, depth=64, num_units=3),\n get_block(in_channel=64, depth=128, num_units=8),\n get_block(in_channel=128, depth=256, num_units=36),\n get_block(in_channel=256, depth=512, num_units=3),\n ]\n else:\n raise ValueError(\n \"Invalid number of layers: {}. Must be one of [50, 100, 152]\".format(\n num_layers\n )\n )\n return blocks"
},
{
"identifier": "l2_norm",
"path": "extern/ldm_zero123/thirdp/psp/helpers.py",
"snippet": "def l2_norm(input, axis=1):\n norm = torch.norm(input, 2, axis, True)\n output = torch.div(input, norm)\n return output"
}
] | from torch.nn import (BatchNorm1d, BatchNorm2d, Conv2d, Dropout, Linear,
Module, PReLU, Sequential,)
from extern.ldm_zero123.thirdp.psp.helpers import (Flatten, bottleneck_IR,
bottleneck_IR_SE,
get_blocks, l2_norm,) | 1,210 | # https://github.com/eladrich/pixel2style2pixel
"""
Modified Backbone implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch)
"""
class Backbone(Module):
def __init__(self, input_size, num_layers, mode="ir", drop_ratio=0.4, affine=True):
super(Backbone, self).__init__()
assert input_size in [112, 224], "input_size should be 112 or 224"
assert num_layers in [50, 100, 152], "num_layers should be 50, 100 or 152"
assert mode in ["ir", "ir_se"], "mode should be ir or ir_se"
blocks = get_blocks(num_layers)
if mode == "ir":
unit_module = bottleneck_IR
elif mode == "ir_se":
| # https://github.com/eladrich/pixel2style2pixel
"""
Modified Backbone implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch)
"""
class Backbone(Module):
def __init__(self, input_size, num_layers, mode="ir", drop_ratio=0.4, affine=True):
super(Backbone, self).__init__()
assert input_size in [112, 224], "input_size should be 112 or 224"
assert num_layers in [50, 100, 152], "num_layers should be 50, 100 or 152"
assert mode in ["ir", "ir_se"], "mode should be ir or ir_se"
blocks = get_blocks(num_layers)
if mode == "ir":
unit_module = bottleneck_IR
elif mode == "ir_se": | unit_module = bottleneck_IR_SE | 2 | 2023-12-06 07:53:11+00:00 | 2k |
rehg-lab/RAVE | annotator/mmpkg/mmcv/runner/base_module.py | [
{
"identifier": "master_only",
"path": "annotator/mmpkg/mmcv/runner/dist_utils.py",
"snippet": "def master_only(func):\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n rank, _ = get_dist_info()\n if rank == 0:\n return func(*args, **kwargs)\n\n return wrapper"
},
{
"identifier": "get_logger",
"path": "annotator/mmpkg/mmcv/utils/logging.py",
"snippet": "def get_logger(name, log_file=None, log_level=logging.INFO, file_mode='w'):\ndef print_log(msg, logger=None, level=logging.INFO):"
}
] | import copy
import warnings
import torch.nn as nn
from abc import ABCMeta
from collections import defaultdict
from logging import FileHandler
from annotator.mmpkg.mmcv.runner.dist_utils import master_only
from annotator.mmpkg.mmcv.utils.logging import get_logger, logger_initialized, print_log
from ..cnn import initialize
from ..cnn.utils.weight_init import update_init_info | 999 | # Copyright (c) OpenMMLab. All rights reserved.
class BaseModule(nn.Module, metaclass=ABCMeta):
"""Base module for all modules in openmmlab.
``BaseModule`` is a wrapper of ``torch.nn.Module`` with additional
functionality of parameter initialization. Compared with
``torch.nn.Module``, ``BaseModule`` mainly adds three attributes.
- ``init_cfg``: the config to control the initialization.
- ``init_weights``: The function of parameter
initialization and recording initialization
information.
- ``_params_init_info``: Used to track the parameter
initialization information. This attribute only
exists during executing the ``init_weights``.
Args:
init_cfg (dict, optional): Initialization config dict.
"""
def __init__(self, init_cfg=None):
"""Initialize BaseModule, inherited from `torch.nn.Module`"""
# NOTE init_cfg can be defined at different levels, but init_cfg
# at lower levels has higher priority.
super(BaseModule, self).__init__()
# define default value of init_cfg instead of hard code
# in init_weights() function
self._is_init = False
self.init_cfg = copy.deepcopy(init_cfg)
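# init_cfg is typically something like dict(type='Pretrained', checkpoint=...)
# or a per-layer initializer such as dict(type='Kaiming', layer='Conv2d').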
# Backward compatibility in derived classes
# if pretrained is not None:
# warnings.warn('DeprecationWarning: pretrained is a deprecated \
# key, please consider using init_cfg')
# self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
@property
def is_init(self):
return self._is_init
def init_weights(self):
"""Initialize the weights."""
is_top_level_module = False
# check if it is top-level module
if not hasattr(self, '_params_init_info'):
# The `_params_init_info` is used to record the initialization
# information of the parameters
# the key should be the obj:`nn.Parameter` of model and the value
# should be a dict containing
# - init_info (str): The string that describes the initialization.
# - tmp_mean_value (FloatTensor): The mean of the parameter,
# which indicates whether the parameter has been modified.
# this attribute would be deleted after all parameters
# are initialized.
self._params_init_info = defaultdict(dict)
is_top_level_module = True
# Initialize the `_params_init_info`,
# When a change in the `tmp_mean_value` of the corresponding
# parameter is detected, update the related
# initialization information
for name, param in self.named_parameters():
self._params_init_info[param][
'init_info'] = f'The value is the same before and ' \
f'after calling `init_weights` ' \
f'of {self.__class__.__name__} '
self._params_init_info[param][
'tmp_mean_value'] = param.data.mean()
# pass `params_init_info` to all submodules
# All submodules share the same `params_init_info`,
# so it will be updated when parameters are
# modified at any level of the model.
for sub_module in self.modules():
sub_module._params_init_info = self._params_init_info
# Get the initialized logger, if not exist,
# create a logger named `mmcv`
logger_names = list(logger_initialized.keys())
logger_name = logger_names[0] if logger_names else 'mmcv'
module_name = self.__class__.__name__
if not self._is_init:
if self.init_cfg:
| # Copyright (c) OpenMMLab. All rights reserved.
class BaseModule(nn.Module, metaclass=ABCMeta):
"""Base module for all modules in openmmlab.
``BaseModule`` is a wrapper of ``torch.nn.Module`` with additional
functionality of parameter initialization. Compared with
``torch.nn.Module``, ``BaseModule`` mainly adds three attributes.
- ``init_cfg``: the config to control the initialization.
- ``init_weights``: The function of parameter
initialization and recording initialization
information.
- ``_params_init_info``: Used to track the parameter
initialization information. This attribute only
exists during executing the ``init_weights``.
Args:
init_cfg (dict, optional): Initialization config dict.
"""
def __init__(self, init_cfg=None):
"""Initialize BaseModule, inherited from `torch.nn.Module`"""
# NOTE init_cfg can be defined at different levels, but init_cfg
# at lower levels has higher priority.
super(BaseModule, self).__init__()
# define default value of init_cfg instead of hard code
# in init_weights() function
self._is_init = False
self.init_cfg = copy.deepcopy(init_cfg)
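# init_cfg is typically something like dict(type='Pretrained', checkpoint=...)
# or a per-layer initializer such as dict(type='Kaiming', layer='Conv2d').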
# Backward compatibility in derived classes
# if pretrained is not None:
# warnings.warn('DeprecationWarning: pretrained is a deprecated \
# key, please consider using init_cfg')
# self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
@property
def is_init(self):
return self._is_init
def init_weights(self):
"""Initialize the weights."""
is_top_level_module = False
# check if it is top-level module
if not hasattr(self, '_params_init_info'):
# The `_params_init_info` is used to record the initialization
# information of the parameters
# the key should be the obj:`nn.Parameter` of model and the value
# should be a dict containing
# - init_info (str): The string that describes the initialization.
# - tmp_mean_value (FloatTensor): The mean of the parameter,
# which indicates whether the parameter has been modified.
# this attribute would be deleted after all parameters
# are initialized.
self._params_init_info = defaultdict(dict)
is_top_level_module = True
# Initialize the `_params_init_info`,
# When a change in the `tmp_mean_value` of the corresponding
# parameter is detected, update the related
# initialization information
for name, param in self.named_parameters():
self._params_init_info[param][
'init_info'] = f'The value is the same before and ' \
f'after calling `init_weights` ' \
f'of {self.__class__.__name__} '
self._params_init_info[param][
'tmp_mean_value'] = param.data.mean()
# pass `params_init_info` to all submodules
# All submodules share the same `params_init_info`,
# so it will be updated when parameters are
# modified at any level of the model.
for sub_module in self.modules():
sub_module._params_init_info = self._params_init_info
# Get the initialized logger, if not exist,
# create a logger named `mmcv`
logger_names = list(logger_initialized.keys())
logger_name = logger_names[0] if logger_names else 'mmcv'
module_name = self.__class__.__name__
if not self._is_init:
if self.init_cfg: | print_log( | 1 | 2023-12-05 02:51:53+00:00 | 2k |
worldcoin/open-iris | tests/e2e_tests/pipelines/test_e2e_iris_pipeline.py | [
{
"identifier": "compare_debug_pipeline_outputs",
"path": "tests/e2e_tests/utils.py",
"snippet": "def compare_debug_pipeline_outputs(pipeline_output_1: Dict[str, Any], pipeline_output_2: Dict[str, Any]):\n \"\"\"Compare two IRISPipeline outputs for debugging.\n\n Args:\n pipeline_output_1 (Dict[str, Any]): pipeline output 1.\n pipeline_output_2 (Dict[str, Any]): pipeline output 2.\n \"\"\"\n compare_iris_pipeline_template_output(pipeline_output_1[\"iris_template\"], pipeline_output_2[\"iris_template\"])\n compare_iris_pipeline_metadata_output(pipeline_output_1[\"metadata\"], pipeline_output_2[\"metadata\"])\n\n # Debug-specific intermediary outputs\n to_test = {\n \"normalized_iris\": [\"normalized_image\", \"normalized_mask\"],\n \"iris_response\": [\"iris_responses\", \"mask_responses\"],\n \"extrapolated_polygons\": [\"pupil\", \"iris\", \"eyeball\"],\n }\n for key, values in to_test.items():\n for value in values:\n np.testing.assert_almost_equal(\n pipeline_output_1[key][value],\n pipeline_output_2[key][value],\n decimal=4,\n )\n np.testing.assert_almost_equal(\n pipeline_output_1[\"segmentation_map\"][\"predictions\"],\n pipeline_output_2[\"segmentation_map\"][\"predictions\"],\n decimal=4,\n )"
},
{
"identifier": "compare_iris_pipeline_outputs",
"path": "tests/e2e_tests/utils.py",
"snippet": "def compare_iris_pipeline_outputs(pipeline_output_1: Dict[str, Any], pipeline_output_2: Dict[str, Any]):\n \"\"\"Compare two IRISPipeline outputs for the Orb.\n\n Args:\n pipeline_output_1 (Dict[str, Any]): pipeline output 1.\n pipeline_output_2 (Dict[str, Any]): pipeline output 2.\n \"\"\"\n compare_iris_pipeline_template_output(pipeline_output_1[\"iris_template\"], pipeline_output_2[\"iris_template\"])\n compare_iris_pipeline_metadata_output(pipeline_output_1[\"metadata\"], pipeline_output_2[\"metadata\"])\n compare_iris_pipeline_error_output(pipeline_output_1[\"error\"], pipeline_output_2[\"error\"])"
}
] | import os
import pickle
import cv2
import numpy as np
import pytest
from typing import Any, Dict
from iris.pipelines.iris_pipeline import IRISPipeline
from tests.e2e_tests.utils import compare_debug_pipeline_outputs, compare_iris_pipeline_outputs | 906 |
@pytest.fixture
def ir_image() -> np.ndarray:
ir_image_path = os.path.join(os.path.dirname(__file__), "mocks", "inputs", "anonymized.png")
img_data = cv2.imread(ir_image_path, cv2.IMREAD_GRAYSCALE)
return img_data
@pytest.fixture
def expected_iris_pipeline_output() -> Dict[str, Any]:
expected_iris_code_path = os.path.join(
os.path.dirname(__file__), "mocks", "outputs", "expected_iris_orb_pipeline_output.pickle"
)
return pickle.load(open(expected_iris_code_path, "rb"))
@pytest.fixture
def expected_debug_pipeline_output() -> Dict[str, Any]:
expected_iris_code_path = os.path.join(
os.path.dirname(__file__), "mocks", "outputs", "expected_iris_debug_pipeline_output.pickle"
)
return pickle.load(open(expected_iris_code_path, "rb"))
def test_e2e_iris_pipeline(ir_image: np.ndarray, expected_iris_pipeline_output: Dict[str, Any]) -> None:
"""End-to-end test of the IRISPipeline in the Orb setup"""
iris_pipeline = IRISPipeline()
computed_pipeline_output = iris_pipeline(img_data=ir_image, eye_side="right")
compare_iris_pipeline_outputs(computed_pipeline_output, expected_iris_pipeline_output)
def test_e2e_debug_pipeline(ir_image: np.ndarray, expected_debug_pipeline_output: Dict[str, Any]) -> None:
"""End-to-end test of the IRISPipeline in the debug setup"""
iris_pipeline = IRISPipeline(env=IRISPipeline.DEBUGGING_ENVIRONMENT)
computed_pipeline_output = iris_pipeline(img_data=ir_image, eye_side="right")
|
@pytest.fixture
def ir_image() -> np.ndarray:
ir_image_path = os.path.join(os.path.dirname(__file__), "mocks", "inputs", "anonymized.png")
img_data = cv2.imread(ir_image_path, cv2.IMREAD_GRAYSCALE)
return img_data
@pytest.fixture
def expected_iris_pipeline_output() -> Dict[str, Any]:
expected_iris_code_path = os.path.join(
os.path.dirname(__file__), "mocks", "outputs", "expected_iris_orb_pipeline_output.pickle"
)
return pickle.load(open(expected_iris_code_path, "rb"))
@pytest.fixture
def expected_debug_pipeline_output() -> Dict[str, Any]:
expected_iris_code_path = os.path.join(
os.path.dirname(__file__), "mocks", "outputs", "expected_iris_debug_pipeline_output.pickle"
)
return pickle.load(open(expected_iris_code_path, "rb"))
def test_e2e_iris_pipeline(ir_image: np.ndarray, expected_iris_pipeline_output: Dict[str, Any]) -> None:
"""End-to-end test of the IRISPipeline in the Orb setup"""
iris_pipeline = IRISPipeline()
computed_pipeline_output = iris_pipeline(img_data=ir_image, eye_side="right")
compare_iris_pipeline_outputs(computed_pipeline_output, expected_iris_pipeline_output)
def test_e2e_debug_pipeline(ir_image: np.ndarray, expected_debug_pipeline_output: Dict[str, Any]) -> None:
"""End-to-end test of the IRISPipeline in the debug setup"""
iris_pipeline = IRISPipeline(env=IRISPipeline.DEBUGGING_ENVIRONMENT)
computed_pipeline_output = iris_pipeline(img_data=ir_image, eye_side="right")
| compare_debug_pipeline_outputs(computed_pipeline_output, expected_debug_pipeline_output) | 0 | 2023-12-09 22:43:09+00:00 | 2k |
laixintao/mactop | mactop/panels/cpu_percpu_usage.py | [
{
"identifier": "LabeledColorBar",
"path": "mactop/widgets/labeled_colorbar.py",
"snippet": "class LabeledColorBar(Static):\n percentages = reactive(None)\n\n DEFAULT_CSS = \"\"\"\n LabeledColorBar {\n layout: horizontal;\n }\n LabeledColorBar > ColorBar {\n width: 1fr;\n }\n \"\"\"\n\n def __init__(\n self,\n prefix_label,\n color_choices,\n update_interval,\n percentages_update_fn: Callable[[], List[float]],\n value_render_fn: Callable[[List[float]], str],\n *args,\n **kwargs,\n ) -> None:\n super().__init__(*args, **kwargs)\n\n self.percentages_update_fn = percentages_update_fn\n self.color_choices = color_choices\n self.update_interval = update_interval\n self.prefix_label = prefix_label\n self.value_render_fn = value_render_fn\n\n def on_mount(self) -> None:\n self.set_interval(self.update_interval, self.update_percentages)\n\n def update_percentages(self) -> None:\n result = self.percentages_update_fn()\n if result is not None:\n self.percentages = copy.copy(result)\n\n def watch_percentages(self, percentages) -> None:\n if not percentages:\n return\n\n try:\n number_widget = self.query_one(\".colorbar-value\")\n except textual.css.query.NoMatches:\n logger.warning(\n \"Can not found DOM element in .colorbar-value in LabeledColorBar\"\n )\n return\n number_str = self.value_render_fn(percentages)\n number_widget.styles.width = len(number_str)\n number_widget.update(number_str)\n\n colorbar = self.query_one(\"ColorBar\")\n colorbar.percentages = percentages\n\n def compose(self) -> ComposeResult:\n yield Label(f\"{self.prefix_label}\", classes=\"colorbar-label\")\n yield ColorBar(self.color_choices)\n yield Static(\" \", classes=\"colorbar-value\")"
},
{
"identifier": "metrics",
"path": "mactop/metrics_store.py",
"snippet": "class ProcessorType(enum.Enum):\nclass Smc:\nclass PowerMetricsBattery:\nclass Netowrk:\nclass CPU:\nclass M1GPU:\nclass CPUCore:\nclass ProcessorPackage:\nclass M1CPUCluster:\nclass M1ProcessorPackage:\nclass ProcessorIntel:\nclass Disk:\nclass PowerMetrics:\nclass AdapterDetails:\nclass AppleSmartBattery:\nclass IORegMetrics:\nclass CPUTimesPercent:\nclass SwapMemory:\nclass VirtualMemory:\nclass LoadAvg:\nclass PsutilMetrics:\nclass Metrics:\n INTEL = \"intel\"\n M1 = \"M1\"\n def get_core(self, core_index):\n def get_psutilmetrics(self):\n def set_psutilmetrics(self, p: PsutilMetrics):\n def get_powermetrics(self):\n def set_powermetrics(self, metrics):\n def get_ioregmetrics(self):\n def set_ioregmetrics(self, metrics):"
},
{
"identifier": "render_cpu_percentage_100",
"path": "mactop/utils/formatting.py",
"snippet": "def render_cpu_percentage_100(percentages):\n busy = 100 - percentages[-1]\n return f\"{busy:2.0f}%\""
},
{
"identifier": "BaseStatic",
"path": "mactop/panels/_base.py",
"snippet": "class BaseStatic(Static):\n def __init__(self, refresh_interval, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.refresh_interval = float(refresh_interval)"
},
{
"identifier": "const",
"path": "mactop/const.py",
"snippet": "COLOR_USER=\"green\"\nCOLOR_NICE=\"blue\"\nCOLOR_SYSTEM=\"#006400\"\nCOLOR_IDLE=\"#2F4F4F\"\nCOLOR_C_STATE=\"#008000\"\nCOLOR_P_STATE=\"#FF8C00\""
}
] | import logging
from functools import partial
from textual.app import ComposeResult
from mactop.widgets import LabeledColorBar
from mactop.metrics_store import metrics
from mactop.utils.formatting import render_cpu_percentage_100
from ._base import BaseStatic
from mactop import const | 1,272 |
logger = logging.getLogger(__name__)
def get_percpu_percent(index):
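# Per-core percentages in the order [user, nice, system, idle], matching the
# color_choices passed to LabeledColorBar below; returns zeros while no
# per-CPU sample has been collected yet.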
cpus = metrics.psutilmetrics.cpu_percent_percpu
if not cpus:
return [0, 0, 0, 0]
cpu_percent = cpus[index]
return [
cpu_percent.user,
cpu_percent.nice,
cpu_percent.system,
cpu_percent.idle,
]
class CPUUsageBarPanel(BaseStatic):
BORDER_TITLE = "CPU"
DEFAULT_CSS = """
CPUUsageBarPanel {
layout: grid;
grid-gutter: 0 1;
}
"""
def __init__(
self,
color_user=const.COLOR_USER,
color_nice=const.COLOR_NICE,
color_system=const.COLOR_SYSTEM,
color_idle=const.COLOR_IDLE,
columns=4,
*args,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self.color_user = color_user
self.color_nice = color_nice
self.color_system = color_system
self.color_idle = color_idle
self.columns = int(columns)
def compose(self) -> ComposeResult:
self.styles.grid_size_columns = self.columns
cpu_count = metrics.psutilmetrics.cpu_count
for index in range(cpu_count):
yield LabeledColorBar(
prefix_label=f"[#FFFFE0]{index:>2}[/#FFFFE0]",
color_choices=[
self.color_user,
self.color_nice,
self.color_system,
self.color_idle,
],
percentages_update_fn=partial(get_percpu_percent, index=index),
|
logger = logging.getLogger(__name__)
def get_percpu_percent(index):
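# Per-core percentages in the order [user, nice, system, idle], matching the
# color_choices passed to LabeledColorBar below; returns zeros while no
# per-CPU sample has been collected yet.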
cpus = metrics.psutilmetrics.cpu_percent_percpu
if not cpus:
return [0, 0, 0, 0]
cpu_percent = cpus[index]
return [
cpu_percent.user,
cpu_percent.nice,
cpu_percent.system,
cpu_percent.idle,
]
class CPUUsageBarPanel(BaseStatic):
BORDER_TITLE = "CPU"
DEFAULT_CSS = """
CPUUsageBarPanel {
layout: grid;
grid-gutter: 0 1;
}
"""
def __init__(
self,
color_user=const.COLOR_USER,
color_nice=const.COLOR_NICE,
color_system=const.COLOR_SYSTEM,
color_idle=const.COLOR_IDLE,
columns=4,
*args,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self.color_user = color_user
self.color_nice = color_nice
self.color_system = color_system
self.color_idle = color_idle
self.columns = int(columns)
def compose(self) -> ComposeResult:
self.styles.grid_size_columns = self.columns
cpu_count = metrics.psutilmetrics.cpu_count
for index in range(cpu_count):
yield LabeledColorBar(
prefix_label=f"[#FFFFE0]{index:>2}[/#FFFFE0]",
color_choices=[
self.color_user,
self.color_nice,
self.color_system,
self.color_idle,
],
percentages_update_fn=partial(get_percpu_percent, index=index), | value_render_fn=render_cpu_percentage_100, | 2 | 2023-12-05 09:12:42+00:00 | 2k |
geopavlakos/hamer | hamer/datasets/vitdet_dataset.py | [
{
"identifier": "convert_cvimg_to_tensor",
"path": "hamer/datasets/utils.py",
"snippet": "def convert_cvimg_to_tensor(cvimg: np.array):\n \"\"\"\n Convert image from HWC to CHW format.\n Args:\n cvimg (np.array): Image of shape (H, W, 3) as loaded by OpenCV.\n Returns:\n np.array: Output image of shape (3, H, W).\n \"\"\"\n # from h,w,c(OpenCV) to c,h,w\n img = cvimg.copy()\n img = np.transpose(img, (2, 0, 1))\n # from int to float\n img = img.astype(np.float32)\n return img"
},
{
"identifier": "expand_to_aspect_ratio",
"path": "hamer/datasets/utils.py",
"snippet": "def expand_to_aspect_ratio(input_shape, target_aspect_ratio=None):\n \"\"\"Increase the size of the bounding box to match the target shape.\"\"\"\n if target_aspect_ratio is None:\n return input_shape\n\n try:\n w , h = input_shape\n except (ValueError, TypeError):\n return input_shape\n\n w_t, h_t = target_aspect_ratio\n if h / w < h_t / w_t:\n h_new = max(w * h_t / w_t, h)\n w_new = w\n else:\n h_new = h\n w_new = max(h * w_t / h_t, w)\n if h_new < h or w_new < w:\n breakpoint()\n return np.array([w_new, h_new])"
},
{
"identifier": "generate_image_patch_cv2",
"path": "hamer/datasets/utils.py",
"snippet": "def generate_image_patch_cv2(img: np.array, c_x: float, c_y: float,\n bb_width: float, bb_height: float,\n patch_width: float, patch_height: float,\n do_flip: bool, scale: float, rot: float,\n border_mode=cv2.BORDER_CONSTANT, border_value=0) -> Tuple[np.array, np.array]:\n \"\"\"\n Crop the input image and return the crop and the corresponding transformation matrix.\n Args:\n img (np.array): Input image of shape (H, W, 3)\n c_x (float): Bounding box center x coordinate in the original image.\n c_y (float): Bounding box center y coordinate in the original image.\n bb_width (float): Bounding box width.\n bb_height (float): Bounding box height.\n patch_width (float): Output box width.\n patch_height (float): Output box height.\n do_flip (bool): Whether to flip image or not.\n scale (float): Rescaling factor for the bounding box (augmentation).\n rot (float): Random rotation applied to the box.\n Returns:\n img_patch (np.array): Cropped image patch of shape (patch_height, patch_height, 3)\n trans (np.array): Transformation matrix.\n \"\"\"\n\n img_height, img_width, img_channels = img.shape\n if do_flip:\n img = img[:, ::-1, :]\n c_x = img_width - c_x - 1\n\n\n trans = gen_trans_from_patch_cv(c_x, c_y, bb_width, bb_height, patch_width, patch_height, scale, rot)\n\n img_patch = cv2.warpAffine(img, trans, (int(patch_width), int(patch_height)), \n flags=cv2.INTER_LINEAR, \n borderMode=border_mode,\n borderValue=border_value,\n )\n # Force borderValue=cv2.BORDER_CONSTANT for alpha channel\n if (img.shape[2] == 4) and (border_mode != cv2.BORDER_CONSTANT):\n img_patch[:,:,3] = cv2.warpAffine(img[:,:,3], trans, (int(patch_width), int(patch_height)), \n flags=cv2.INTER_LINEAR, \n borderMode=cv2.BORDER_CONSTANT,\n )\n\n return img_patch, trans"
}
] | from typing import Dict
from skimage.filters import gaussian
from yacs.config import CfgNode
from .utils import (convert_cvimg_to_tensor,
expand_to_aspect_ratio,
generate_image_patch_cv2)
import cv2
import numpy as np
import torch | 1,342 |
DEFAULT_MEAN = 255. * np.array([0.485, 0.456, 0.406])
DEFAULT_STD = 255. * np.array([0.229, 0.224, 0.225])
class ViTDetDataset(torch.utils.data.Dataset):
def __init__(self,
cfg: CfgNode,
img_cv2: np.array,
boxes: np.array,
right: np.array,
rescale_factor=2.5,
train: bool = False,
**kwargs):
super().__init__()
self.cfg = cfg
self.img_cv2 = img_cv2
# self.boxes = boxes
assert train == False, "ViTDetDataset is only for inference"
self.train = train
self.img_size = cfg.MODEL.IMAGE_SIZE
self.mean = 255. * np.array(self.cfg.MODEL.IMAGE_MEAN)
self.std = 255. * np.array(self.cfg.MODEL.IMAGE_STD)
# Preprocess annotations
boxes = boxes.astype(np.float32)
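# Boxes are (x1, y1, x2, y2); center is the box midpoint and scale is the
# rescaled box size expressed in units of 200 pixels, a convention commonly
# used in HMR-style cropping code.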
self.center = (boxes[:, 2:4] + boxes[:, 0:2]) / 2.0
self.scale = rescale_factor * (boxes[:, 2:4] - boxes[:, 0:2]) / 200.0
self.personid = np.arange(len(boxes), dtype=np.int32)
self.right = right.astype(np.float32)
def __len__(self) -> int:
return len(self.personid)
def __getitem__(self, idx: int) -> Dict[str, np.array]:
center = self.center[idx].copy()
center_x = center[0]
center_y = center[1]
scale = self.scale[idx]
BBOX_SHAPE = self.cfg.MODEL.get('BBOX_SHAPE', None)
|
DEFAULT_MEAN = 255. * np.array([0.485, 0.456, 0.406])
DEFAULT_STD = 255. * np.array([0.229, 0.224, 0.225])
class ViTDetDataset(torch.utils.data.Dataset):
def __init__(self,
cfg: CfgNode,
img_cv2: np.array,
boxes: np.array,
right: np.array,
rescale_factor=2.5,
train: bool = False,
**kwargs):
super().__init__()
self.cfg = cfg
self.img_cv2 = img_cv2
# self.boxes = boxes
assert train == False, "ViTDetDataset is only for inference"
self.train = train
self.img_size = cfg.MODEL.IMAGE_SIZE
self.mean = 255. * np.array(self.cfg.MODEL.IMAGE_MEAN)
self.std = 255. * np.array(self.cfg.MODEL.IMAGE_STD)
# Preprocess annotations
boxes = boxes.astype(np.float32)
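# Boxes are (x1, y1, x2, y2); center is the box midpoint and scale is the
# rescaled box size expressed in units of 200 pixels, a convention commonly
# used in HMR-style cropping code.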
self.center = (boxes[:, 2:4] + boxes[:, 0:2]) / 2.0
self.scale = rescale_factor * (boxes[:, 2:4] - boxes[:, 0:2]) / 200.0
self.personid = np.arange(len(boxes), dtype=np.int32)
self.right = right.astype(np.float32)
def __len__(self) -> int:
return len(self.personid)
def __getitem__(self, idx: int) -> Dict[str, np.array]:
center = self.center[idx].copy()
center_x = center[0]
center_y = center[1]
scale = self.scale[idx]
BBOX_SHAPE = self.cfg.MODEL.get('BBOX_SHAPE', None) | bbox_size = expand_to_aspect_ratio(scale*200, target_aspect_ratio=BBOX_SHAPE).max() | 1 | 2023-12-08 09:07:07+00:00 | 2k |
rogeriochaves/driver | driver/annotator.py | [
{
"identifier": "detect_components",
"path": "driver/UIED/run_single.py",
"snippet": "def detect_components(\n input_path_img, ocr_result: AnnotatedImage, showOCR=False, showUIED=False\n) -> DetectElementsResponse:\n output_root = \"output\"\n\n # Resizes the image to be smaller because this process is heavy, and lower resolution\n # does not lose much quality when detecting components\n max_width_or_height = 982\n resized_height = resize_height_by_longest_edge(\n input_path_img, resize_length=max_width_or_height\n )\n # color_tips()\n\n is_clf = False\n\n import detect_text.text_detection as text\n\n os.makedirs(pjoin(output_root, \"ocr\"), exist_ok=True)\n text_json = text.text_detection(\n ocr_result, input_path_img, output_root, show=showOCR\n )\n\n import detect_compo.ip_region_proposal as ip\n\n os.makedirs(pjoin(output_root, \"ip\"), exist_ok=True)\n # switch of the classification func\n classifier = None\n if is_clf:\n classifier = {}\n from cnn.CNN import CNN\n\n # classifier['Image'] = CNN('Image')\n classifier[\"Elements\"] = CNN(\"Elements\")\n # classifier['Noise'] = CNN('Noise')\n compo_json = ip.compo_detection(\n input_path_img,\n output_root,\n key_params,\n classifier=classifier,\n resize_by_height=resized_height,\n show=False,\n )\n\n import detect_merge.merge as merge\n\n os.makedirs(pjoin(output_root, \"merge\"), exist_ok=True)\n name = input_path_img.split(\"/\")[-1][:-4]\n compo_path = pjoin(output_root, \"ip\", str(name) + \".json\")\n ocr_path = pjoin(output_root, \"ocr\", str(name) + \".json\")\n board, components = merge.merge(\n input_path_img,\n compo_json,\n text_json,\n pjoin(output_root, \"merge\"),\n is_remove_bar=key_params[\"remove-bar\"],\n is_paragraph=key_params[\"merge-line-to-paragraph\"],\n show=showUIED,\n )\n\n return components"
},
{
"identifier": "show_image",
"path": "driver/UIED/utils.py",
"snippet": "def show_image(window_name: str, image: cv2.typing.MatLike):\n cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)\n if sys.platform == \"darwin\":\n os.system(\n \"\"\"/usr/bin/osascript -e 'tell app \"Finder\" to set frontmost of process \"python\" to true' \"\"\"\n )\n\n cv2.imshow(window_name, image)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n # Hack to fix closing bug on macos: https://stackoverflow.com/questions/6116564/destroywindow-does-not-close-window-on-mac-using-python-and-opencv\n for _ in range(1, 5):\n cv2.waitKey(1)"
},
{
"identifier": "ocr_text_detection",
"path": "driver/ocr_call.py",
"snippet": "def ocr_text_detection(input_image_path, config: DebugConfig) -> AnnotatedImage:\n ocr_provider = config[\"ocr_provider\"]\n if not ocr_provider:\n if os.environ.get(\"AZURE_VISION_API_KEY\"):\n ocr_provider = \"azure\"\n elif os.environ.get(\"GCLOUD_VISION_API_KEY\"):\n ocr_provider = \"google\"\n elif os.environ.get(\"BAIDU_OCR_API_KEY\"):\n ocr_provider = \"baidu\"\n\n if ocr_provider == \"azure\":\n print_action(\"Annotating screenshot with Azure Vision\")\n return azure_ocr_text_detect(input_image_path)\n elif ocr_provider == \"google\":\n print_action(\"Annotating screenshot with Google Cloud Vision\")\n return google_ocr_text_detect(input_image_path)\n elif ocr_provider == \"baidu\":\n print_action(\"Annotating screenshot with Baidu Vision\")\n return baidu_ocr_text_detect(input_image_path)\n else:\n raise Exception(\n \"No OCR API env variable set, please set either AZURE_VISION_API_KEY or GCLOUD_VISION_API_KEY\"\n )"
},
{
"identifier": "DebugConfig",
"path": "driver/types.py",
"snippet": "class Click(TypedDict):\nclass Type(TypedDict):\nclass Press(TypedDict):\nclass Refresh(TypedDict):\nclass LabelMapItem(TypedDict):\nclass ImgMultiplierFactor(TypedDict):\nclass DebugConfig(TypedDict):\nclass Context(TypedDict):\nclass Vertex:\nclass BoundingPoly:\nclass TextAnnotation:\nclass AnnotatedImage:"
},
{
"identifier": "is_retina_display",
"path": "driver/utils.py",
"snippet": "def is_retina_display():\n return is_retina"
}
] | import math
import os
import cv2
from PIL import Image, ImageDraw, ImageFont
from driver.UIED.run_single import detect_components
from driver.UIED.utils import show_image
from driver.ocr_call import ocr_text_detection
from driver.types import DebugConfig, ImgMultiplierFactor, LabelMap
from driver.utils import is_retina_display | 1,310 |
def annotate_image(input_image_path, debug: DebugConfig):
ocr_result = ocr_text_detection(input_image_path, debug)
components = detect_components(
input_image_path,
ocr_result,
showOCR=debug["ocr"],
showUIED=debug["uied"],
)
original_image = Image.open(input_image_path)
size = {"width": original_image.width, "height": original_image.height}
img_multiplier_factor: ImgMultiplierFactor = {
"height": components["img_shape"][0] / size["height"],
"width": components["img_shape"][1] / size["width"],
}
label_counter = 1
label_prefix = "A"
drawn_positions = []
|
def annotate_image(input_image_path, debug: DebugConfig):
ocr_result = ocr_text_detection(input_image_path, debug)
components = detect_components(
input_image_path,
ocr_result,
showOCR=debug["ocr"],
showUIED=debug["uied"],
)
original_image = Image.open(input_image_path)
size = {"width": original_image.width, "height": original_image.height}
img_multiplier_factor: ImgMultiplierFactor = {
"height": components["img_shape"][0] / size["height"],
"width": components["img_shape"][1] / size["width"],
}
label_counter = 1
label_prefix = "A"
drawn_positions = [] | label_map: LabelMap = {} | 3 | 2023-12-10 17:18:28+00:00 | 2k |
baidubce/app-builder | appbuilder/core/components/embeddings/base.py | [
{
"identifier": "Component",
"path": "appbuilder/core/component.py",
"snippet": "class Component:\n r\"\"\"Component基类, 其它实现的Component子类需要继承该基类,并至少实现run方法.\"\"\"\n\n def __init__(self,\n meta: Optional[ComponentArguments] = ComponentArguments(),\n secret_key: Optional[str] = None,\n gateway: str = \"\"\n ):\n r\"\"\"Component初始化方法.\n\n 参数:\n meta (obj: `ComponentArguments`, 可选) : component元信息.\n secret_key(str,可选): 用户鉴权token, 默认从环境变量中获取: os.getenv(\"APPBUILDER_TOKEN\", \"\").\n gateway(str, 可选): 后端网关服务地址,默认从环境变量中获取: os.getenv(\"GATEWAY_URL\", \"\")\n 返回:\n 无\n \"\"\"\n\n self.meta = meta\n self.http_client = HTTPClient(secret_key, gateway)\n\n def __call__(self, *inputs, **kwargs):\n r\"\"\"implement __call__ method\"\"\"\n return self.run(*inputs, **kwargs)\n\n def run(self, *inputs, **kwargs):\n r\"\"\"\n Defines the computation performed at every call.\n Should be overridden by all subclasses.\n\n Parameters:\n *inputs(tuple): unpacked tuple arguments\n **kwargs(dict): unpacked dict arguments\n \"\"\"\n raise NotImplementedError\n\n def batch(self, *args, **kwargs) -> List[Message]:\n r\"\"\"pass\"\"\"\n return None\n\n async def arun(self, *args, **kwargs) -> Optional[Message]:\n r\"\"\"pass\"\"\"\n return None\n\n async def abatch(self, *args, **kwargs) -> List[Message]:\n r\"\"\"pass\"\"\"\n return None\n\n def _trace(self, **data) -> None:\n r\"\"\"pass\"\"\"\n pass\n\n def _debug(self, **data) -> None:\n r\"\"\"pass\"\"\"\n pass"
},
{
"identifier": "Message",
"path": "appbuilder/core/message.py",
"snippet": "class Message(BaseModel, Generic[_T]):\n content: Optional[_T] = {}\n name: Optional[str] = \"msg\"\n mtype: Optional[str] = \"dict\"\n id: Optional[str] = str(uuid.uuid4())\n\n def __init__(self, content: Optional[_T] = None, **data):\n if content is not None:\n data['content'] = content\n super().__init__(**data)\n self.mtype = type(self.content).__name__\n\n def __str__(self):\n return f\"Message(name={self.name}, content={self.content}, mtype={self.mtype})\"\n\n def __repr__(self):\n return f\"{self.__class__.__name__}(name={self.name!r}, content={self.content!r}, mtype={self.mtype!r})\""
},
{
"identifier": "ComponentArguments",
"path": "appbuilder/core/component.py",
"snippet": "class ComponentArguments(BaseModel):\n r\"\"\"\"ComponentArguments define Component meta fields\"\"\"\n name: str = \"\"\n tool_desc: Dict[str, Any] = {}\n\n def extract_values_to_dict(self):\n r\"\"\"extract ComponentArguments fields to dict\"\"\"\n\n inputs = {}\n for field_name, field in self.__fields__.items():\n value = getattr(self, field_name)\n # 获取 display_name 元数据\n variable_name = field.field_info.extra.get('variable_name')\n if variable_name:\n # 使用 Enum 成员的实际值\n if isinstance(value, Message):\n inputs[variable_name] = str(value.content)\n elif isinstance(value, Enum):\n inputs[variable_name] = str(value.value)\n else:\n inputs[variable_name] = str(value)\n else:\n inputs[field_name] = value\n return inputs"
}
] | from abc import abstractmethod
from typing import List, Union
from appbuilder.core.component import Component
from appbuilder.core.message import Message
from appbuilder.core.component import ComponentArguments | 1,120 | """
base
"""
# Copyright (c) 2023 Baidu, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class EmbeddingBaseComponent(Component):
"""
EmbeddingBaseComponent
"""
name: str
version: str
| """
base
"""
# Copyright (c) 2023 Baidu, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class EmbeddingBaseComponent(Component):
"""
EmbeddingBaseComponent
"""
name: str
version: str | meta: ComponentArguments | 2 | 2023-12-05 01:48:12+00:00 | 2k |
corfyi/UCMCTrack | run_mot20_test.py | [
{
"identifier": "run_ucmc",
"path": "util/run_ucmc.py",
"snippet": "def run_ucmc(args, det_path = \"det_results/mot17/yolox_x_ablation\",\n cam_path = \"cam_para/mot17\",\n gmc_path = \"gmc/mot17\",\n out_path = \"output/mot17\",\n exp_name = \"val\",\n dataset = \"MOT17\"):\n\n seq_name = args.seq\n\n eval_path = os.path.join(out_path,exp_name)\n orig_save_path = os.path.join(eval_path,seq_name)\n if not os.path.exists(orig_save_path):\n os.makedirs(orig_save_path)\n\n\n if dataset == \"MOT17\":\n det_file = os.path.join(det_path, f\"{seq_name}-SDP.txt\")\n cam_para = os.path.join(cam_path, f\"{seq_name}-SDP.txt\")\n result_file = os.path.join(orig_save_path,f\"{seq_name}-SDP.txt\")\n elif dataset == \"MOT20\":\n det_file = os.path.join(det_path, f\"{seq_name}.txt\")\n cam_para = os.path.join(cam_path, f\"{seq_name}.txt\")\n result_file = os.path.join(orig_save_path,f\"{seq_name}.txt\")\n\n gmc_file = os.path.join(gmc_path, f\"GMC-{seq_name}.txt\")\n\n print(det_file)\n print(cam_para)\n\n detector = Detector()\n detector.load(cam_para, det_file,gmc_file)\n print(f\"seq_length = {detector.seq_length}\")\n\n a1 = args.a\n a2 = args.a\n high_score = args.high_score\n conf_thresh = args.conf_thresh\n fps = args.fps\n cdt = args.cdt\n wx = args.wx\n wy = args.wy\n vmax = args.vmax\n \n tracker = UCMCTrack(a1, a2, wx,wy,vmax, cdt, fps, dataset, high_score,args.cmc,detector)\n\n t1 = time.time()\n\n tracklets = dict()\n\n with open(result_file,\"w\") as f:\n for frame_id in range(1, detector.seq_length + 1):\n dets = detector.get_dets(frame_id, conf_thresh)\n tracker.update(dets,frame_id)\n if args.hp:\n for i in tracker.tentative_idx:\n t = tracker.trackers[i]\n if(t.detidx < 0 or t.detidx >= len(dets)):\n continue\n if t.id not in tracklets:\n tracklets[t.id] = Tracklet(frame_id, dets[t.detidx].get_box())\n else:\n tracklets[t.id].add_box(frame_id, dets[t.detidx].get_box())\n for i in tracker.confirmed_idx:\n t = tracker.trackers[i]\n if(t.detidx < 0 or t.detidx >= len(dets)):\n continue\n if t.id not in tracklets:\n tracklets[t.id] = Tracklet(frame_id, dets[t.detidx].get_box())\n else:\n tracklets[t.id].add_box(frame_id, dets[t.detidx].get_box())\n tracklets[t.id].activate()\n else:\n for i in tracker.confirmed_idx:\n t = tracker.trackers[i] \n if(t.detidx < 0 or t.detidx >= len(dets)):\n continue\n d = dets[t.detidx]\n f.write(f\"{frame_id},{t.id},{d.bb_left:.1f},{d.bb_top:.1f},{d.bb_width:.1f},{d.bb_height:.1f},{d.conf:.2f},-1,-1,-1\\n\")\n\n if args.hp:\n for frame_id in range(1, detector.seq_length + 1):\n for id in tracklets:\n if tracklets[id].is_active:\n if frame_id in tracklets[id].boxes:\n box = tracklets[id].boxes[frame_id]\n f.write(f\"{frame_id},{id},{box[0]:.1f},{box[1]:.1f},{box[2]:.1f},{box[3]:.1f},-1,-1,-1,-1\\n\")\n\n interpolate(orig_save_path, eval_path, n_min=3, n_dti=cdt, is_enable = True)\n\n print(f\"Time cost: {time.time() - t1:.2f}s\")"
},
{
"identifier": "make_args",
"path": "util/run_ucmc.py",
"snippet": "def make_args():\n parser = argparse.ArgumentParser(description='Process some arguments.')\n parser.add_argument('--seq', type=str, default = \"MOT17-02\", help='seq name')\n parser.add_argument('--fps', type=float, default=30.0, help='fps')\n parser.add_argument('--wx', type=float, default=0.1, help='wx')\n parser.add_argument('--wy', type=float, default=0.1, help='wy')\n parser.add_argument('--vmax', type=float, default=0.5, help='vmax')\n parser.add_argument('--a', type=float, default=10.0, help='assignment threshold')\n parser.add_argument('--cdt', type=float, default=30.0, help='coasted deletion time')\n parser.add_argument('--high_score', type=float, default=0.6, help='high score threshold')\n parser.add_argument('--conf_thresh', type=float, default=0.5, help='detection confidence threshold')\n parser.add_argument(\"--cmc\", action=\"store_true\", help=\"use cmc or not.\")\n parser.add_argument(\"--hp\", action=\"store_true\", help=\"use head padding or not.\")\n args = parser.parse_args()\n return args"
}
] | from util.run_ucmc import run_ucmc, make_args | 1,416 |
if __name__ == '__main__':
det_path = "det_results/mot20"
cam_path = "cam_para/mot20"
gmc_path = "gmc/mot20"
out_path = "output/mot20"
exp_name = "test"
dataset = "MOT20"
|
if __name__ == '__main__':
det_path = "det_results/mot20"
cam_path = "cam_para/mot20"
gmc_path = "gmc/mot20"
out_path = "output/mot20"
exp_name = "test"
dataset = "MOT20" | args = make_args() | 1 | 2023-12-12 07:29:20+00:00 | 2k |
ingra14m/Specular-Gaussians | metrics.py | [
{
"identifier": "ssim",
"path": "utils/loss_utils.py",
"snippet": "def ssim(img1, img2, window_size=11, size_average=True):\n channel = img1.size(-3)\n window = create_window(window_size, channel)\n\n if img1.is_cuda:\n window = window.cuda(img1.get_device())\n window = window.type_as(img1)\n\n return _ssim(img1, img2, window, window_size, channel, size_average)"
},
{
"identifier": "psnr",
"path": "utils/image_utils.py",
"snippet": "def psnr(img1, img2):\n mse = (((img1 - img2)) ** 2).view(img1.shape[0], -1).mean(1, keepdim=True)\n return 20 * torch.log10(1.0 / torch.sqrt(mse))"
}
] | from pathlib import Path
from PIL import Image
from utils.loss_utils import ssim
from tqdm import tqdm
from utils.image_utils import psnr
from argparse import ArgumentParser
import os
import torch
import torchvision.transforms.functional as tf
import lpips
import json | 721 | #
# Copyright (C) 2023, Inria
# GRAPHDECO research group, https://team.inria.fr/graphdeco
# All rights reserved.
#
# This software is free for non-commercial, research and evaluation use
# under the terms of the LICENSE.md file.
#
# For inquiries contact [email protected]
#
# from lpipsPyTorch import lpips
def readImages(renders_dir, gt_dir):
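# Pairs each render with its ground truth by filename, keeps only the first
# three channels, and moves the tensors to the GPU for metric computation.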
renders = []
gts = []
image_names = []
for fname in os.listdir(renders_dir):
render = Image.open(renders_dir / fname)
gt = Image.open(gt_dir / fname)
renders.append(tf.to_tensor(render).unsqueeze(0)[:, :3, :, :].cuda())
gts.append(tf.to_tensor(gt).unsqueeze(0)[:, :3, :, :].cuda())
image_names.append(fname)
return renders, gts, image_names
def evaluate(model_paths):
full_dict = {}
per_view_dict = {}
full_dict_polytopeonly = {}
per_view_dict_polytopeonly = {}
print("")
for scene_dir in model_paths:
try:
print("Scene:", scene_dir)
full_dict[scene_dir] = {}
per_view_dict[scene_dir] = {}
full_dict_polytopeonly[scene_dir] = {}
per_view_dict_polytopeonly[scene_dir] = {}
test_dir = Path(scene_dir) / "test"
for method in os.listdir(test_dir):
if not method.startswith("ours"):
continue
print("Method:", method)
full_dict[scene_dir][method] = {}
per_view_dict[scene_dir][method] = {}
full_dict_polytopeonly[scene_dir][method] = {}
per_view_dict_polytopeonly[scene_dir][method] = {}
method_dir = test_dir / method
gt_dir = method_dir / "gt"
renders_dir = method_dir / "renders"
renders, gts, image_names = readImages(renders_dir, gt_dir)
ssims = []
psnrs = []
lpipss = []
for idx in tqdm(range(len(renders)), desc="Metric evaluation progress"):
| #
# Copyright (C) 2023, Inria
# GRAPHDECO research group, https://team.inria.fr/graphdeco
# All rights reserved.
#
# This software is free for non-commercial, research and evaluation use
# under the terms of the LICENSE.md file.
#
# For inquiries contact [email protected]
#
# from lpipsPyTorch import lpips
def readImages(renders_dir, gt_dir):
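# Pairs each render with its ground truth by filename, keeps only the first
# three channels, and moves the tensors to the GPU for metric computation.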
renders = []
gts = []
image_names = []
for fname in os.listdir(renders_dir):
render = Image.open(renders_dir / fname)
gt = Image.open(gt_dir / fname)
renders.append(tf.to_tensor(render).unsqueeze(0)[:, :3, :, :].cuda())
gts.append(tf.to_tensor(gt).unsqueeze(0)[:, :3, :, :].cuda())
image_names.append(fname)
return renders, gts, image_names
def evaluate(model_paths):
full_dict = {}
per_view_dict = {}
full_dict_polytopeonly = {}
per_view_dict_polytopeonly = {}
print("")
for scene_dir in model_paths:
try:
print("Scene:", scene_dir)
full_dict[scene_dir] = {}
per_view_dict[scene_dir] = {}
full_dict_polytopeonly[scene_dir] = {}
per_view_dict_polytopeonly[scene_dir] = {}
test_dir = Path(scene_dir) / "test"
for method in os.listdir(test_dir):
if not method.startswith("ours"):
continue
print("Method:", method)
full_dict[scene_dir][method] = {}
per_view_dict[scene_dir][method] = {}
full_dict_polytopeonly[scene_dir][method] = {}
per_view_dict_polytopeonly[scene_dir][method] = {}
method_dir = test_dir / method
gt_dir = method_dir / "gt"
renders_dir = method_dir / "renders"
renders, gts, image_names = readImages(renders_dir, gt_dir)
ssims = []
psnrs = []
lpipss = []
for idx in tqdm(range(len(renders)), desc="Metric evaluation progress"): | ssims.append(ssim(renders[idx], gts[idx])) | 0 | 2023-12-12 14:59:01+00:00 | 2k |
u2seg/U2Seg | detectron2/evaluation/evaluator.py | [
{
"identifier": "get_world_size",
"path": "detectron2/utils/comm.py",
"snippet": "def get_world_size() -> int:\n if not dist.is_available():\n return 1\n if not dist.is_initialized():\n return 1\n return dist.get_world_size()"
},
{
"identifier": "is_main_process",
"path": "detectron2/utils/comm.py",
"snippet": "def is_main_process() -> bool:\n return get_rank() == 0"
},
{
"identifier": "log_every_n_seconds",
"path": "detectron2/utils/logger.py",
"snippet": "def log_every_n_seconds(lvl, msg, n=1, *, name=None):\n \"\"\"\n Log no more than once per n seconds.\n\n Args:\n lvl (int): the logging level\n msg (str):\n n (int):\n name (str): name of the logger to use. Will use the caller's module by default.\n \"\"\"\n caller_module, key = _find_caller()\n last_logged = _LOG_TIMER.get(key, None)\n current_time = time.time()\n if last_logged is None or current_time - last_logged >= n:\n logging.getLogger(name or caller_module).log(lvl, msg)\n _LOG_TIMER[key] = current_time"
}
] | import datetime
import logging
import time
import torch
from collections import OrderedDict, abc
from contextlib import ExitStack, contextmanager
from typing import List, Union
from torch import nn
from detectron2.utils.comm import get_world_size, is_main_process
from detectron2.utils.logger import log_every_n_seconds | 1,151 | # Copyright (c) Facebook, Inc. and its affiliates.
class DatasetEvaluator:
"""
Base class for a dataset evaluator.
The function :func:`inference_on_dataset` runs the model over
all samples in the dataset, and uses a DatasetEvaluator to process the inputs/outputs.
This class will accumulate information about the inputs/outputs (by :meth:`process`),
and produce evaluation results in the end (by :meth:`evaluate`).
"""
def reset(self):
"""
Preparation for a new round of evaluation.
Should be called before starting a round of evaluation.
"""
pass
def process(self, inputs, outputs):
"""
Process the pair of inputs and outputs.
If they contain batches, the pairs can be consumed one-by-one using `zip`:
.. code-block:: python
for input_, output in zip(inputs, outputs):
# do evaluation on single input/output pair
...
Args:
inputs (list): the inputs that's used to call the model.
outputs (list): the return value of `model(inputs)`
"""
pass
def evaluate(self):
"""
Evaluate/summarize the performance, after processing all input/output pairs.
Returns:
dict:
A new evaluator class can return a dict of arbitrary format
as long as the user can process the results.
In our train_net.py, we expect the following format:
* key: the name of the task (e.g., bbox)
* value: a dict of {metric name: score}, e.g.: {"AP50": 80}
"""
pass
class DatasetEvaluators(DatasetEvaluator):
"""
Wrapper class to combine multiple :class:`DatasetEvaluator` instances.
This class dispatches every evaluation call to
all of its :class:`DatasetEvaluator`.
"""
def __init__(self, evaluators):
"""
Args:
evaluators (list): the evaluators to combine.
"""
super().__init__()
self._evaluators = evaluators
def reset(self):
for evaluator in self._evaluators:
evaluator.reset()
def process(self, inputs, outputs):
for evaluator in self._evaluators:
evaluator.process(inputs, outputs)
def evaluate(self):
results = OrderedDict()
for evaluator in self._evaluators:
result = evaluator.evaluate()
if is_main_process() and result is not None:
for k, v in result.items():
assert (
k not in results
), "Different evaluators produce results with the same key {}".format(k)
results[k] = v
return results
def inference_on_dataset(
model,
data_loader,
evaluator: Union[DatasetEvaluator, List[DatasetEvaluator], None],
callbacks=None,
):
"""
Run model on the data_loader and evaluate the metrics with evaluator.
Also benchmark the inference speed of `model.__call__` accurately.
The model will be used in eval mode.
Args:
model (callable): a callable which takes an object from
`data_loader` and returns some outputs.
If it's an nn.Module, it will be temporarily set to `eval` mode.
If you wish to evaluate a model in `training` mode instead, you can
wrap the given model and override its behavior of `.eval()` and `.train()`.
data_loader: an iterable object with a length.
The elements it generates will be the inputs to the model.
evaluator: the evaluator(s) to run. Use `None` if you only want to benchmark,
but don't want to do any evaluation.
callbacks (dict of callables): a dictionary of callback functions which can be
called at each stage of inference.
Returns:
The return value of `evaluator.evaluate()`
"""
| # Copyright (c) Facebook, Inc. and its affiliates.
class DatasetEvaluator:
"""
Base class for a dataset evaluator.
The function :func:`inference_on_dataset` runs the model over
all samples in the dataset, and have a DatasetEvaluator to process the inputs/outputs.
This class will accumulate information of the inputs/outputs (by :meth:`process`),
and produce evaluation results in the end (by :meth:`evaluate`).
"""
def reset(self):
"""
Preparation for a new round of evaluation.
Should be called before starting a round of evaluation.
"""
pass
def process(self, inputs, outputs):
"""
Process the pair of inputs and outputs.
If they contain batches, the pairs can be consumed one-by-one using `zip`:
.. code-block:: python
for input_, output in zip(inputs, outputs):
# do evaluation on single input/output pair
...
Args:
inputs (list): the inputs that's used to call the model.
outputs (list): the return value of `model(inputs)`
"""
pass
def evaluate(self):
"""
Evaluate/summarize the performance, after processing all input/output pairs.
Returns:
dict:
A new evaluator class can return a dict of arbitrary format
as long as the user can process the results.
In our train_net.py, we expect the following format:
* key: the name of the task (e.g., bbox)
* value: a dict of {metric name: score}, e.g.: {"AP50": 80}
"""
pass
class DatasetEvaluators(DatasetEvaluator):
"""
Wrapper class to combine multiple :class:`DatasetEvaluator` instances.
This class dispatches every evaluation call to
all of its :class:`DatasetEvaluator`.
"""
def __init__(self, evaluators):
"""
Args:
evaluators (list): the evaluators to combine.
"""
super().__init__()
self._evaluators = evaluators
def reset(self):
for evaluator in self._evaluators:
evaluator.reset()
def process(self, inputs, outputs):
for evaluator in self._evaluators:
evaluator.process(inputs, outputs)
def evaluate(self):
results = OrderedDict()
for evaluator in self._evaluators:
result = evaluator.evaluate()
if is_main_process() and result is not None:
for k, v in result.items():
assert (
k not in results
), "Different evaluators produce results with the same key {}".format(k)
results[k] = v
return results
def inference_on_dataset(
model,
data_loader,
evaluator: Union[DatasetEvaluator, List[DatasetEvaluator], None],
callbacks=None,
):
"""
Run model on the data_loader and evaluate the metrics with evaluator.
Also benchmark the inference speed of `model.__call__` accurately.
The model will be used in eval mode.
Args:
model (callable): a callable which takes an object from
`data_loader` and returns some outputs.
If it's an nn.Module, it will be temporarily set to `eval` mode.
If you wish to evaluate a model in `training` mode instead, you can
wrap the given model and override its behavior of `.eval()` and `.train()`.
data_loader: an iterable object with a length.
The elements it generates will be the inputs to the model.
evaluator: the evaluator(s) to run. Use `None` if you only want to benchmark,
but don't want to do any evaluation.
callbacks (dict of callables): a dictionary of callback functions which can be
called at each stage of inference.
Returns:
The return value of `evaluator.evaluate()`
""" | num_devices = get_world_size() | 0 | 2023-12-05 01:13:31+00:00 | 2k |
upfusion3d/upfusion | control_net/cldm/ddim_hacked.py | [
{
"identifier": "make_ddim_sampling_parameters",
"path": "control_net/ldm/modules/diffusionmodules/util.py",
"snippet": "def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True):\n # select alphas for computing the variance schedule\n alphas = alphacums[ddim_timesteps]\n alphas_prev = np.asarray([alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist())\n\n # according the the formula provided in https://arxiv.org/abs/2010.02502\n sigmas = eta * np.sqrt((1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev))\n if verbose:\n print(f'Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}')\n print(f'For the chosen value of eta, which is {eta}, '\n f'this results in the following sigma_t schedule for ddim sampler {sigmas}')\n return sigmas, alphas, alphas_prev"
},
{
"identifier": "make_ddim_timesteps",
"path": "control_net/ldm/modules/diffusionmodules/util.py",
"snippet": "def make_ddim_timesteps(ddim_discr_method, num_ddim_timesteps, num_ddpm_timesteps, verbose=True):\n if ddim_discr_method == 'uniform':\n c = num_ddpm_timesteps // num_ddim_timesteps\n ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, c)))\n elif ddim_discr_method == 'quad':\n ddim_timesteps = ((np.linspace(0, np.sqrt(num_ddpm_timesteps * .8), num_ddim_timesteps)) ** 2).astype(int)\n else:\n raise NotImplementedError(f'There is no ddim discretization method called \"{ddim_discr_method}\"')\n\n # assert ddim_timesteps.shape[0] == num_ddim_timesteps\n # add one to get the final alpha values right (the ones from first scale to data during sampling)\n steps_out = ddim_timesteps + 1\n if verbose:\n print(f'Selected timesteps for ddim sampler: {steps_out}')\n return steps_out"
},
{
"identifier": "noise_like",
"path": "control_net/ldm/modules/diffusionmodules/util.py",
"snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()"
},
{
"identifier": "extract_into_tensor",
"path": "control_net/ldm/modules/diffusionmodules/util.py",
"snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))"
}
] | import torch
import numpy as np
from tqdm import tqdm
from control_net.ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like, extract_into_tensor | 844 | """SAMPLING ONLY."""
class DDIMSampler(object):
def __init__(self, model, schedule="linear", **kwargs):
super().__init__()
self.model = model
self.ddpm_num_timesteps = model.num_timesteps
self.schedule = schedule
def register_buffer(self, name, attr):
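# Stores schedule tensors as plain attributes on the sampler, moving them to
# the GPU first (this assumes a CUDA device is available).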
if type(attr) == torch.Tensor:
if attr.device != torch.device("cuda"):
attr = attr.to(torch.device("cuda"))
setattr(self, name, attr)
def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True):
| """SAMPLING ONLY."""
class DDIMSampler(object):
def __init__(self, model, schedule="linear", **kwargs):
super().__init__()
self.model = model
self.ddpm_num_timesteps = model.num_timesteps
self.schedule = schedule
def register_buffer(self, name, attr):
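# Stores schedule tensors as plain attributes on the sampler, moving them to
# the GPU first (this assumes a CUDA device is available).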
if type(attr) == torch.Tensor:
if attr.device != torch.device("cuda"):
attr = attr.to(torch.device("cuda"))
setattr(self, name, attr)
def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True): | self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps, | 1 | 2023-12-12 00:49:11+00:00 | 2k |
modelscope/normal-depth-diffusion | libs/ControlNet-v1-1-nightly/annotator/normalbae/models/baseline.py | [
{
"identifier": "UpSampleBN",
"path": "libs/ControlNet-v1-1-nightly/annotator/normalbae/models/submodules/submodules.py",
"snippet": "class UpSampleBN(nn.Module):\n\n def __init__(self, skip_input, output_features):\n super(UpSampleBN, self).__init__()\n\n self._net = nn.Sequential(\n nn.Conv2d(\n skip_input,\n output_features,\n kernel_size=3,\n stride=1,\n padding=1), nn.BatchNorm2d(output_features), nn.LeakyReLU(),\n nn.Conv2d(\n output_features,\n output_features,\n kernel_size=3,\n stride=1,\n padding=1), nn.BatchNorm2d(output_features), nn.LeakyReLU())\n\n def forward(self, x, concat_with):\n up_x = F.interpolate(\n x,\n size=[concat_with.size(2),\n concat_with.size(3)],\n mode='bilinear',\n align_corners=True)\n f = torch.cat([up_x, concat_with], dim=1)\n return self._net(f)"
},
{
"identifier": "norm_normalize",
"path": "libs/ControlNet-v1-1-nightly/annotator/normalbae/models/submodules/submodules.py",
"snippet": "def norm_normalize(norm_out):\n min_kappa = 0.01\n norm_x, norm_y, norm_z, kappa = torch.split(norm_out, 1, dim=1)\n norm = torch.sqrt(norm_x**2.0 + norm_y**2.0 + norm_z**2.0) + 1e-10\n kappa = F.elu(kappa) + 1.0 + min_kappa\n final_out = torch.cat([norm_x / norm, norm_y / norm, norm_z / norm, kappa],\n dim=1)\n return final_out"
}
] | import torch
import torch.nn as nn
import torch.nn.functional as F
from .submodules.submodules import UpSampleBN, norm_normalize | 928 |
# This is the baseline encoder-decoder we used in the ablation study
class NNET(nn.Module):
def __init__(self, args=None):
super(NNET, self).__init__()
self.encoder = Encoder()
self.decoder = Decoder(num_classes=4)
def forward(self, x, **kwargs):
out = self.decoder(self.encoder(x), **kwargs)
# Bilinearly upsample the output to match the input resolution
up_out = F.interpolate(
out,
size=[x.size(2), x.size(3)],
mode='bilinear',
align_corners=False)
# L2-normalize the first three channels / ensure positive value for concentration parameters (kappa)
up_out = norm_normalize(up_out)
return up_out
def get_1x_lr_params(self): # lr/10 learning rate
return self.encoder.parameters()
def get_10x_lr_params(self): # lr learning rate
modules = [self.decoder]
for m in modules:
yield from m.parameters()
# Encoder
class Encoder(nn.Module):
def __init__(self):
super(Encoder, self).__init__()
basemodel_name = 'tf_efficientnet_b5_ap'
basemodel = torch.hub.load(
'rwightman/gen-efficientnet-pytorch',
basemodel_name,
pretrained=True)
# Remove last layer
basemodel.global_pool = nn.Identity()
basemodel.classifier = nn.Identity()
self.original_model = basemodel
def forward(self, x):
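# Run the EfficientNet trunk block by block and keep every intermediate
# feature map; the decoder combines these with upsampled features via UpSampleBN.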
features = [x]
for k, v in self.original_model._modules.items():
if (k == 'blocks'):
for ki, vi in v._modules.items():
features.append(vi(features[-1]))
else:
features.append(v(features[-1]))
return features
# Decoder (no pixel-wise MLP, no uncertainty-guided sampling)
class Decoder(nn.Module):
def __init__(self, num_classes=4):
super(Decoder, self).__init__()
self.conv2 = nn.Conv2d(2048, 2048, kernel_size=1, stride=1, padding=0)
|
# This is the baseline encoder-decoder we used in the ablation study
class NNET(nn.Module):
def __init__(self, args=None):
super(NNET, self).__init__()
self.encoder = Encoder()
self.decoder = Decoder(num_classes=4)
def forward(self, x, **kwargs):
out = self.decoder(self.encoder(x), **kwargs)
# Bilinearly upsample the output to match the input resolution
up_out = F.interpolate(
out,
size=[x.size(2), x.size(3)],
mode='bilinear',
align_corners=False)
# L2-normalize the first three channels / ensure positive value for concentration parameters (kappa)
up_out = norm_normalize(up_out)
return up_out
def get_1x_lr_params(self): # lr/10 learning rate
return self.encoder.parameters()
def get_10x_lr_params(self): # lr learning rate
modules = [self.decoder]
for m in modules:
yield from m.parameters()
# Encoder
class Encoder(nn.Module):
def __init__(self):
super(Encoder, self).__init__()
basemodel_name = 'tf_efficientnet_b5_ap'
basemodel = torch.hub.load(
'rwightman/gen-efficientnet-pytorch',
basemodel_name,
pretrained=True)
# Remove last layer
basemodel.global_pool = nn.Identity()
basemodel.classifier = nn.Identity()
self.original_model = basemodel
def forward(self, x):
features = [x]
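        # Run the backbone module by module, collecting every intermediate feature map (each sub-block of 'blocks' separately).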
for k, v in self.original_model._modules.items():
if (k == 'blocks'):
for ki, vi in v._modules.items():
features.append(vi(features[-1]))
else:
features.append(v(features[-1]))
return features
# Decoder (no pixel-wise MLP, no uncertainty-guided sampling)
class Decoder(nn.Module):
def __init__(self, num_classes=4):
super(Decoder, self).__init__()
self.conv2 = nn.Conv2d(2048, 2048, kernel_size=1, stride=1, padding=0) | self.up1 = UpSampleBN(skip_input=2048 + 176, output_features=1024) | 0 | 2023-12-06 07:29:34+00:00 | 2k |
daswer123/xtts-webui | scripts/resemble_enhance/denoiser/inference.py | [
{
"identifier": "inference",
"path": "scripts/resemble_enhance/inference.py",
"snippet": "def inference(model, dwav, sr, device, chunk_seconds: float = 30.0, overlap_seconds: float = 1.0):\n remove_weight_norm_recursively(model)\n\n hp: HParams = model.hp\n\n dwav = resample(\n dwav,\n orig_freq=sr,\n new_freq=hp.wav_rate,\n lowpass_filter_width=64,\n rolloff=0.9475937167399596,\n resampling_method=\"sinc_interp_kaiser\",\n beta=14.769656459379492,\n )\n\n del sr # We are now using hp.wav_rate as the sampling rate\n sr = hp.wav_rate\n\n if torch.cuda.is_available():\n torch.cuda.synchronize()\n\n start_time = time.perf_counter()\n\n chunk_length = int(sr * chunk_seconds)\n overlap_length = int(sr * overlap_seconds)\n hop_length = chunk_length - overlap_length\n\n chunks = []\n\n for start in trange(0, dwav.shape[-1], hop_length):\n new_chunk = inference_chunk(model, dwav[start : start + chunk_length], sr, device)\n chunks.append(new_chunk)\n\n # Delete the processed segment to free up memory\n # del new_chunk\n # if torch.cuda.is_available():\n # torch.cuda.empty_cache()\n\n # Force garbage collection at this point (optional and may slow down processing)\n # gc.collect()\n\n hwav = merge_chunks(chunks, chunk_length, hop_length, sr=sr,length=dwav.shape[-1])\n # Clean up chunks to free memory after merging\n \n del chunks[:]\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n\n gc.collect() # Explicitly call garbage collector again\n\n elapsed_time = time.perf_counter() - start_time\n logger.info(f\"Elapsed time: {elapsed_time:.3f} s, {hwav.shape[-1] / elapsed_time / 1000:.3f} kHz\")\n\n return hwav, sr"
},
{
"identifier": "Denoiser",
"path": "scripts/resemble_enhance/denoiser/train.py",
"snippet": "def load_G(run_dir: Path, hp: HParams | None = None, training=True):\ndef save_wav(path: Path, wav: Tensor, rate: int):\ndef main():\n def feed_G(engine: Engine, batch: dict[str, Tensor]):\n def eval_fn(engine: Engine, eval_dir, n_saved=10):"
}
] | import logging
import torch
from functools import cache
from ..inference import inference
from .train import Denoiser, HParams | 664 |
logger = logging.getLogger(__name__)
@cache
def load_denoiser(run_dir, device):
if run_dir is None:
|
logger = logging.getLogger(__name__)
@cache
def load_denoiser(run_dir, device):
if run_dir is None: | return Denoiser(HParams()) | 1 | 2023-12-14 06:34:12+00:00 | 2k |
FrozenBurning/PrimDiffusion | dva/io.py | [
{
"identifier": "AttrDict",
"path": "dva/attr_dict.py",
"snippet": "class AttrDict:\n def __init__(self, entries):\n self.add_entries_(entries)\n\n def keys(self):\n return self.__dict__.keys()\n\n def values(self):\n return self.__dict__.values()\n\n def __getitem__(self, key):\n return self.__dict__[key]\n\n def __setitem__(self, key, value):\n self.__dict__[key] = value\n\n def __delitem__(self, key):\n return self.__dict__.__delitem__(key)\n\n def __contains__(self, key):\n return key in self.__dict__\n\n def __repr__(self):\n return self.__dict__.__repr__()\n\n def __getattr__(self, attr):\n if attr.startswith(\"__\"):\n return self.__getattribute__(attr)\n return self.__dict__[attr]\n\n def items(self):\n return self.__dict__.items()\n\n def __iter__(self):\n return iter(self.items())\n\n def add_entries_(self, entries, overwrite=True):\n for key, value in entries.items():\n if key not in self.__dict__:\n if isinstance(value, dict):\n self.__dict__[key] = AttrDict(value)\n else:\n self.__dict__[key] = value\n else:\n if isinstance(value, dict):\n self.__dict__[key].add_entries_(entries=value, overwrite=overwrite)\n elif overwrite or self.__dict__[key] is None:\n self.__dict__[key] = value\n\n def serialize(self):\n return json.dumps(self, default=self.obj_to_dict, indent=4)\n\n def obj_to_dict(self, obj):\n return obj.__dict__\n\n def get(self, key, default=None):\n return self.__dict__.get(key, default)"
},
{
"identifier": "compute_v2uv",
"path": "dva/geom.py",
"snippet": "def compute_v2uv(n_verts, vi, vti, n_max=4):\n \"\"\"Computes mapping from vertex indices to texture indices.\n\n Args:\n vi: [F, 3], triangles\n vti: [F, 3], texture triangles\n n_max: int, max number of texture locations\n\n Returns:\n [n_verts, n_max], texture indices\n \"\"\"\n v2uv_dict = {}\n for i_v, i_uv in zip(vi.reshape(-1), vti.reshape(-1)):\n v2uv_dict.setdefault(i_v, set()).add(i_uv)\n assert len(v2uv_dict) == n_verts\n v2uv = np.zeros((n_verts, n_max), dtype=np.int32)\n for i in range(n_verts):\n vals = sorted(list(v2uv_dict[i]))\n v2uv[i, :] = vals[0]\n v2uv[i, : len(vals)] = np.array(vals)\n return v2uv"
},
{
"identifier": "compute_neighbours",
"path": "dva/geom.py",
"snippet": "def compute_neighbours(n_verts, vi, n_max_values=10):\n \"\"\"Computes first-ring neighbours given vertices and faces.\"\"\"\n n_vi = vi.shape[0]\n\n adj = {i: set() for i in range(n_verts)}\n for i in range(n_vi):\n for idx in vi[i]:\n adj[idx] |= set(vi[i]) - set([idx])\n\n nbs_idxs = np.tile(np.arange(n_verts)[:, np.newaxis], (1, n_max_values))\n nbs_weights = np.zeros((n_verts, n_max_values), dtype=np.float32)\n\n for idx in range(n_verts):\n n_values = min(len(adj[idx]), n_max_values)\n nbs_idxs[idx, :n_values] = np.array(list(adj[idx]))[:n_values]\n nbs_weights[idx, :n_values] = -1.0 / n_values\n\n return nbs_idxs, nbs_weights"
}
] | import json
import cv2
import numpy as np
import copy
import importlib
import pickle
import os
from typing import Any, Dict
from dva.attr_dict import AttrDict
from dva.geom import compute_v2uv, compute_neighbours | 1,514 | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
def load_module(module_name, class_name=None, silent: bool = False):
module = importlib.import_module(module_name)
return getattr(module, class_name) if class_name else module
def load_class(class_name):
return load_module(*class_name.rsplit(".", 1))
def load_from_config(config, **kwargs):
"""Instantiate an object given a config and arguments."""
assert "class_name" in config and "module_name" not in config
config = copy.deepcopy(config)
class_name = config.pop("class_name")
object_class = load_class(class_name)
return object_class(**config, **kwargs)
def load_opencv_calib(extrin_path, intrin_path):
cameras = {}
fse = cv2.FileStorage()
fse.open(extrin_path, cv2.FileStorage_READ)
fsi = cv2.FileStorage()
fsi.open(intrin_path, cv2.FileStorage_READ)
names = [
fse.getNode("names").at(c).string() for c in range(fse.getNode("names").size())
]
for camera in names:
rot = fse.getNode(f"R_{camera}").mat()
R = fse.getNode(f"Rot_{camera}").mat()
T = fse.getNode(f"T_{camera}").mat()
R_pred = cv2.Rodrigues(rot)[0]
assert np.all(np.isclose(R_pred, R))
K = fsi.getNode(f"K_{camera}").mat()
cameras[camera] = {
"Rt": np.concatenate([R, T], axis=1).astype(np.float32),
"K": K.astype(np.float32),
}
return cameras
def load_smpl_params(params):
return {
k: np.array(v[0], dtype=np.float32) for k, v in params[0].items() if k != "id"
}
def load_smpl_topology(data_struct) -> Dict[str, Any]:
# TODO: compute_
topology = {
"vi": data_struct["f"].astype(np.int64),
"vti": data_struct["ft"].astype(np.int64),
"vt": data_struct["vt"].astype(np.float32),
"n_verts": data_struct["v_template"].shape[0],
}
| # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
def load_module(module_name, class_name=None, silent: bool = False):
module = importlib.import_module(module_name)
return getattr(module, class_name) if class_name else module
def load_class(class_name):
return load_module(*class_name.rsplit(".", 1))
def load_from_config(config, **kwargs):
"""Instantiate an object given a config and arguments."""
assert "class_name" in config and "module_name" not in config
config = copy.deepcopy(config)
class_name = config.pop("class_name")
object_class = load_class(class_name)
return object_class(**config, **kwargs)
def load_opencv_calib(extrin_path, intrin_path):
cameras = {}
fse = cv2.FileStorage()
fse.open(extrin_path, cv2.FileStorage_READ)
fsi = cv2.FileStorage()
fsi.open(intrin_path, cv2.FileStorage_READ)
names = [
fse.getNode("names").at(c).string() for c in range(fse.getNode("names").size())
]
for camera in names:
rot = fse.getNode(f"R_{camera}").mat()
R = fse.getNode(f"Rot_{camera}").mat()
T = fse.getNode(f"T_{camera}").mat()
R_pred = cv2.Rodrigues(rot)[0]
assert np.all(np.isclose(R_pred, R))
K = fsi.getNode(f"K_{camera}").mat()
cameras[camera] = {
"Rt": np.concatenate([R, T], axis=1).astype(np.float32),
"K": K.astype(np.float32),
}
return cameras
def load_smpl_params(params):
return {
k: np.array(v[0], dtype=np.float32) for k, v in params[0].items() if k != "id"
}
def load_smpl_topology(data_struct) -> Dict[str, Any]:
# TODO: compute_
topology = {
"vi": data_struct["f"].astype(np.int64),
"vti": data_struct["ft"].astype(np.int64),
"vt": data_struct["vt"].astype(np.float32),
"n_verts": data_struct["v_template"].shape[0],
} | topology["v2uv"] = compute_v2uv( | 1 | 2023-12-06 05:12:55+00:00 | 2k |
Nearcyan/papers.day | backend/admin.py | [
{
"identifier": "ArxivPaper",
"path": "backend/models.py",
"snippet": "class ArxivPaper(models.Model):\n created_at = models.DateTimeField(auto_now_add=True)\n modified_at = models.DateTimeField(auto_now=True)\n arxiv_id = models.CharField(max_length=20, unique=True)\n\n # fields scraped from the paper page:\n title = models.CharField(max_length=255, db_index=True)\n abstract = models.TextField(db_index=True)\n authors = models.ManyToManyField(Author)\n primary_subject = models.ForeignKey(Subject, on_delete=models.CASCADE, null=True, blank=True)\n subjects = models.ManyToManyField(Subject, related_name=\"papers\")\n comment = models.TextField(null=True, blank=True)\n doi = models.CharField(max_length=255, null=True, blank=True)\n journal_ref = models.CharField(max_length=255, null=True, blank=True)\n publication_date = models.DateField()\n\n # fields we create\n summary = models.TextField(db_index=True)\n total_author_citations = models.IntegerField(default=0, db_index=True)\n citations = models.IntegerField(default=0, db_index=True)\n\n # file fields\n pdf = models.FileField(upload_to=\"pdfs\", null=True, blank=True)\n screenshot = models.ImageField(upload_to=\"screenshots\", null=True, blank=True)\n source_tar = models.FileField(upload_to=\"tar_sources\", null=True, blank=True)\n images = models.ManyToManyField(PaperImage, related_name=\"paper_images\")\n sources = models.ManyToManyField(PaperSource, related_name=\"paper_sources\")\n\n def abstract_link(self) -> str:\n return f\"https://arxiv.org/abs/{self.arxiv_id}\"\n\n def pdf_link(self) -> str:\n return f\"https://arxiv.org/pdf/{self.arxiv_id}.pdf\"\n\n def source_link(self) -> str:\n return f\"https://arxiv.org/e-print/{self.arxiv_id}\"\n\n def __str__(self):\n return self.title"
},
{
"identifier": "Author",
"path": "backend/models.py",
"snippet": "class Author(models.Model):\n created_at = models.DateTimeField(auto_now_add=True)\n modified_at = models.DateTimeField(auto_now=True)\n\n name = models.CharField(max_length=255, db_index=True)\n affiliation = models.CharField(max_length=255, null=True, blank=True, db_index=True)\n email = models.EmailField(null=True, blank=True)\n email_domain = models.CharField(max_length=255, null=True, blank=True, db_index=True)\n citations = models.IntegerField(default=0, db_index=True)\n scholar_id = models.CharField(max_length=255, null=True, blank=True)\n\n def __str__(self):\n return self.name"
},
{
"identifier": "Subject",
"path": "backend/models.py",
"snippet": "class Subject(models.Model):\n created_at = models.DateTimeField(auto_now_add=True)\n modified_at = models.DateTimeField(auto_now=True)\n\n short_name = models.CharField(max_length=255)\n full_name = models.CharField(max_length=255)\n\n def __str__(self):\n return self.full_name"
},
{
"identifier": "PaperImage",
"path": "backend/models.py",
"snippet": "class PaperImage(models.Model):\n created_at = models.DateTimeField(auto_now_add=True)\n modified_at = models.DateTimeField(auto_now=True)\n\n image = models.ImageField(upload_to=\"images\")\n paper = models.ForeignKey(\"ArxivPaper\", on_delete=models.CASCADE)"
},
{
"identifier": "PaperSource",
"path": "backend/models.py",
"snippet": "class PaperSource(models.Model):\n created_at = models.DateTimeField(auto_now_add=True)\n modified_at = models.DateTimeField(auto_now=True)\n\n content = models.TextField()\n paper = models.ForeignKey(\"ArxivPaper\", on_delete=models.CASCADE)"
}
] | from django.contrib import admin
from .models import ArxivPaper, Author, Subject, PaperImage, PaperSource | 1,096 |
class ArxivPaperAdmin(admin.ModelAdmin):
list_display = ('title', 'citations', 'total_author_citations', 'summary', 'publication_date', 'arxiv_id',
'created_at')
search_fields = ('title', 'abstract', 'arxiv_id')
readonly_fields = ('created_at', 'modified_at')
ordering = ('-publication_date',)
list_filter = ('publication_date', 'created_at', 'citations', 'total_author_citations')
class SubjectAdmin(admin.ModelAdmin):
list_display = ('short_name', 'full_name')
search_fields = ('short_name', 'full_name')
ordering = ('short_name',)
class AuthorAdmin(admin.ModelAdmin):
list_display = ('name', 'affiliation', 'email', 'email_domain', 'citations', 'scholar_id')
search_fields = ('name', 'affiliation', 'email', 'email_domain', 'citations', 'scholar_id')
ordering = ('name',)
class PaperImageAdmin(admin.ModelAdmin):
list_display = ('image', 'paper')
search_fields = ('image', 'paper')
ordering = ('image',)
class PaperSourceAdmin(admin.ModelAdmin):
list_display = ('paper',)
search_fields = ('paper',)
|
class ArxivPaperAdmin(admin.ModelAdmin):
list_display = ('title', 'citations', 'total_author_citations', 'summary', 'publication_date', 'arxiv_id',
'created_at')
search_fields = ('title', 'abstract', 'arxiv_id')
readonly_fields = ('created_at', 'modified_at')
ordering = ('-publication_date',)
list_filter = ('publication_date', 'created_at', 'citations', 'total_author_citations')
class SubjectAdmin(admin.ModelAdmin):
list_display = ('short_name', 'full_name')
search_fields = ('short_name', 'full_name')
ordering = ('short_name',)
class AuthorAdmin(admin.ModelAdmin):
list_display = ('name', 'affiliation', 'email', 'email_domain', 'citations', 'scholar_id')
search_fields = ('name', 'affiliation', 'email', 'email_domain', 'citations', 'scholar_id')
ordering = ('name',)
class PaperImageAdmin(admin.ModelAdmin):
list_display = ('image', 'paper')
search_fields = ('image', 'paper')
ordering = ('image',)
class PaperSourceAdmin(admin.ModelAdmin):
list_display = ('paper',)
search_fields = ('paper',)
| admin.site.register(ArxivPaper, ArxivPaperAdmin) | 0 | 2023-12-14 08:23:05+00:00 | 2k |
LSimon95/megatts2 | models/trainer.py | [
{
"identifier": "MegaVQ",
"path": "models/megatts2.py",
"snippet": "class MegaVQ(nn.Module):\n def __init__(\n self,\n mrte: MRTE,\n vqpe: VQProsodyEncoder,\n decoder: ConvNet,\n ):\n super(MegaVQ, self).__init__()\n\n self.mrte = mrte\n self.vqpe = vqpe\n self.decoder = decoder\n\n def forward(\n self,\n duration_tokens: torch.Tensor, # (B, T)\n text: torch.Tensor, # (B, T)\n text_lens: torch.Tensor, # (B,)\n mel_mrte: torch.Tensor, # (B, T, mel_bins)\n mel_lens_mrte: torch.Tensor, # (B,)\n mel_vqpe: torch.Tensor, # (B, T, mel_bins)\n ):\n zq, commit_loss, vq_loss = self.vqpe(mel_vqpe)\n x = self.mrte(duration_tokens, text, text_lens,\n mel_mrte, mel_lens_mrte)\n x = torch.cat([x, zq], dim=-1)\n\n x = rearrange(x, 'B T D -> B D T')\n x = self.decoder(x)\n x = rearrange(x, 'B D T -> B T D')\n\n return x, commit_loss, vq_loss"
},
{
"identifier": "Discriminator",
"path": "modules/dscrm.py",
"snippet": "class Discriminator(nn.Module):\n def __init__(self, time_lengths=[32, 64, 128], freq_length=80, kernel=(3, 3), c_in=1,\n hidden_size=128):\n super(Discriminator, self).__init__()\n self.time_lengths = time_lengths\n self.discriminator = MultiWindowDiscriminator(\n freq_length=freq_length,\n time_lengths=time_lengths,\n kernel=kernel,\n c_in=c_in, hidden_size=hidden_size\n )\n\n def forward(self, x, start_frames_wins=None):\n \"\"\"\n\n :param x: [B, T, 80]\n :param return_y_only:\n :return:\n \"\"\"\n if len(x.shape) == 3:\n x = x[:, None, :, :] # [B,1,T,80]\n x_len = x.sum([1, -1]).ne(0).int().sum([-1])\n ret = {'y_c': None, 'y': None}\n ret['y'], start_frames_wins, ret['h'] = self.discriminator(\n x, x_len, start_frames_wins=start_frames_wins)\n\n ret['start_frames_wins'] = start_frames_wins\n return ret"
},
{
"identifier": "plot_spectrogram_to_numpy",
"path": "utils/utils.py",
"snippet": "def plot_spectrogram_to_numpy(spec_target: np.ndarray, spec_output: np.ndarray) -> np.ndarray:\n \"\"\"\n Plot a spectrogram and convert it to a numpy array.\n\n Args:\n spectrogram (ndarray): Spectrogram data.\n\n Returns:\n ndarray: Numpy array representing the plotted spectrogram.\n \"\"\"\n\n fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(12, 12))\n ax1.set_title(\"Target\")\n im = ax1.imshow(spec_target.astype(np.float32), aspect=\"auto\", origin=\"lower\", interpolation=\"none\")\n plt.colorbar(im, ax=ax1)\n plt.xlabel(\"Frames\")\n plt.ylabel(\"Channels\")\n\n ax2.set_title(\"Output\")\n im = ax2.imshow(spec_output.astype(np.float32), aspect=\"auto\", origin=\"lower\", interpolation=\"none\")\n plt.colorbar(im, ax=ax2)\n plt.xlabel(\"Frames\")\n plt.ylabel(\"Channels\")\n\n plt.tight_layout()\n\n fig.canvas.draw()\n data = save_figure_to_numpy(fig)\n plt.close()\n return data"
}
] | import lightning.pytorch as pl
import torch
import torchaudio
import torch.nn.functional as F
import transformers
import numpy as np
import math
from .megatts2 import MegaVQ
from modules.dscrm import Discriminator
from utils.utils import plot_spectrogram_to_numpy | 1,002 |
class MegaGANTrainer(pl.LightningModule):
def __init__(
self,
|
class MegaGANTrainer(pl.LightningModule):
def __init__(
self, | G: MegaVQ, | 0 | 2023-12-10 15:02:54+00:00 | 2k |
wanghao-cst/Omni-VideoAssistant | llava/serve/controller.py | [
{
"identifier": "CONTROLLER_HEART_BEAT_EXPIRATION",
"path": "llava/constants.py",
"snippet": "CONTROLLER_HEART_BEAT_EXPIRATION = 30"
},
{
"identifier": "build_logger",
"path": "llava/utils.py",
"snippet": "def build_logger(logger_name, logger_filename):\n def __init__(self, logger, log_level=logging.INFO):\n def __getattr__(self, attr):\n def write(self, buf):\n def flush(self):\ndef disable_torch_init():\ndef violates_moderation(text):\ndef pretty_print_semaphore(semaphore):\nclass StreamToLogger(object):"
}
] | import argparse
import asyncio
import dataclasses
import json
import logging
import time
import threading
import numpy as np
import requests
import uvicorn
from enum import Enum, auto
from typing import List, Union
from fastapi import FastAPI, Request
from fastapi.responses import StreamingResponse
from llava.constants import CONTROLLER_HEART_BEAT_EXPIRATION
from llava.utils import build_logger, server_error_msg | 1,519 | if not worker_status:
return False
self.worker_info[worker_name] = WorkerInfo(
worker_status["model_names"], worker_status["speed"], worker_status["queue_length"],
check_heart_beat, time.time())
logger.info(f"Register done: {worker_name}, {worker_status}")
return True
def get_worker_status(self, worker_name: str):
try:
r = requests.post(worker_name + "/worker_get_status", timeout=5)
except requests.exceptions.RequestException as e:
logger.error(f"Get status fails: {worker_name}, {e}")
return None
if r.status_code != 200:
logger.error(f"Get status fails: {worker_name}, {r}")
return None
return r.json()
def remove_worker(self, worker_name: str):
del self.worker_info[worker_name]
def refresh_all_workers(self):
old_info = dict(self.worker_info)
self.worker_info = {}
for w_name, w_info in old_info.items():
if not self.register_worker(w_name, w_info.check_heart_beat, None):
logger.info(f"Remove stale worker: {w_name}")
def list_models(self):
model_names = set()
for w_name, w_info in self.worker_info.items():
model_names.update(w_info.model_names)
return list(model_names)
def get_worker_address(self, model_name: str):
if self.dispatch_method == DispatchMethod.LOTTERY:
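            # Lottery dispatch: sample a worker at random, weighted by each worker's reported speed.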
worker_names = []
worker_speeds = []
for w_name, w_info in self.worker_info.items():
if model_name in w_info.model_names:
worker_names.append(w_name)
worker_speeds.append(w_info.speed)
worker_speeds = np.array(worker_speeds, dtype=np.float32)
norm = np.sum(worker_speeds)
if norm < 1e-4:
return ""
worker_speeds = worker_speeds / norm
if True: # Directly return address
pt = np.random.choice(np.arange(len(worker_names)),
p=worker_speeds)
worker_name = worker_names[pt]
return worker_name
# Check status before returning
while True:
pt = np.random.choice(np.arange(len(worker_names)),
p=worker_speeds)
worker_name = worker_names[pt]
if self.get_worker_status(worker_name):
break
else:
self.remove_worker(worker_name)
worker_speeds[pt] = 0
norm = np.sum(worker_speeds)
if norm < 1e-4:
return ""
worker_speeds = worker_speeds / norm
continue
return worker_name
elif self.dispatch_method == DispatchMethod.SHORTEST_QUEUE:
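            # Shortest-queue dispatch: choose the worker with the smallest queue length relative to its speed.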
worker_names = []
worker_qlen = []
for w_name, w_info in self.worker_info.items():
if model_name in w_info.model_names:
worker_names.append(w_name)
worker_qlen.append(w_info.queue_length / w_info.speed)
if len(worker_names) == 0:
return ""
min_index = np.argmin(worker_qlen)
w_name = worker_names[min_index]
self.worker_info[w_name].queue_length += 1
logger.info(f"names: {worker_names}, queue_lens: {worker_qlen}, ret: {w_name}")
return w_name
else:
raise ValueError(f"Invalid dispatch method: {self.dispatch_method}")
def receive_heart_beat(self, worker_name: str, queue_length: int):
if worker_name not in self.worker_info:
logger.info(f"Receive unknown heart beat. {worker_name}")
return False
self.worker_info[worker_name].queue_length = queue_length
self.worker_info[worker_name].last_heart_beat = time.time()
logger.info(f"Receive heart beat. {worker_name}")
return True
def remove_stable_workers_by_expiration(self):
expire = time.time() - CONTROLLER_HEART_BEAT_EXPIRATION
to_delete = []
for worker_name, w_info in self.worker_info.items():
if w_info.check_heart_beat and w_info.last_heart_beat < expire:
to_delete.append(worker_name)
for worker_name in to_delete:
self.remove_worker(worker_name)
def worker_api_generate_stream(self, params):
worker_addr = self.get_worker_address(params["model"])
if not worker_addr:
logger.info(f"no worker: {params['model']}")
ret = {
| """
A controller manages distributed workers.
It sends worker addresses to clients.
"""
logger = build_logger("controller", "controller.log")
class DispatchMethod(Enum):
LOTTERY = auto()
SHORTEST_QUEUE = auto()
@classmethod
def from_str(cls, name):
if name == "lottery":
return cls.LOTTERY
elif name == "shortest_queue":
return cls.SHORTEST_QUEUE
else:
raise ValueError(f"Invalid dispatch method")
@dataclasses.dataclass
class WorkerInfo:
model_names: List[str]
speed: int
queue_length: int
check_heart_beat: bool
last_heart_beat: str
def heart_beat_controller(controller):
while True:
time.sleep(CONTROLLER_HEART_BEAT_EXPIRATION)
controller.remove_stable_workers_by_expiration()
class Controller:
def __init__(self, dispatch_method: str):
# Dict[str -> WorkerInfo]
self.worker_info = {}
self.dispatch_method = DispatchMethod.from_str(dispatch_method)
self.heart_beat_thread = threading.Thread(
target=heart_beat_controller, args=(self,))
self.heart_beat_thread.start()
logger.info("Init controller")
def register_worker(self, worker_name: str, check_heart_beat: bool,
worker_status: dict):
if worker_name not in self.worker_info:
logger.info(f"Register a new worker: {worker_name}")
else:
logger.info(f"Register an existing worker: {worker_name}")
if not worker_status:
worker_status = self.get_worker_status(worker_name)
if not worker_status:
return False
self.worker_info[worker_name] = WorkerInfo(
worker_status["model_names"], worker_status["speed"], worker_status["queue_length"],
check_heart_beat, time.time())
logger.info(f"Register done: {worker_name}, {worker_status}")
return True
def get_worker_status(self, worker_name: str):
try:
r = requests.post(worker_name + "/worker_get_status", timeout=5)
except requests.exceptions.RequestException as e:
logger.error(f"Get status fails: {worker_name}, {e}")
return None
if r.status_code != 200:
logger.error(f"Get status fails: {worker_name}, {r}")
return None
return r.json()
def remove_worker(self, worker_name: str):
del self.worker_info[worker_name]
def refresh_all_workers(self):
old_info = dict(self.worker_info)
self.worker_info = {}
for w_name, w_info in old_info.items():
if not self.register_worker(w_name, w_info.check_heart_beat, None):
logger.info(f"Remove stale worker: {w_name}")
def list_models(self):
model_names = set()
for w_name, w_info in self.worker_info.items():
model_names.update(w_info.model_names)
return list(model_names)
def get_worker_address(self, model_name: str):
if self.dispatch_method == DispatchMethod.LOTTERY:
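            # Lottery dispatch: sample a worker at random, weighted by each worker's reported speed.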
worker_names = []
worker_speeds = []
for w_name, w_info in self.worker_info.items():
if model_name in w_info.model_names:
worker_names.append(w_name)
worker_speeds.append(w_info.speed)
worker_speeds = np.array(worker_speeds, dtype=np.float32)
norm = np.sum(worker_speeds)
if norm < 1e-4:
return ""
worker_speeds = worker_speeds / norm
if True: # Directly return address
pt = np.random.choice(np.arange(len(worker_names)),
p=worker_speeds)
worker_name = worker_names[pt]
return worker_name
# Check status before returning
while True:
pt = np.random.choice(np.arange(len(worker_names)),
p=worker_speeds)
worker_name = worker_names[pt]
if self.get_worker_status(worker_name):
break
else:
self.remove_worker(worker_name)
worker_speeds[pt] = 0
norm = np.sum(worker_speeds)
if norm < 1e-4:
return ""
worker_speeds = worker_speeds / norm
continue
return worker_name
elif self.dispatch_method == DispatchMethod.SHORTEST_QUEUE:
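            # Shortest-queue dispatch: choose the worker with the smallest queue length relative to its speed.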
worker_names = []
worker_qlen = []
for w_name, w_info in self.worker_info.items():
if model_name in w_info.model_names:
worker_names.append(w_name)
worker_qlen.append(w_info.queue_length / w_info.speed)
if len(worker_names) == 0:
return ""
min_index = np.argmin(worker_qlen)
w_name = worker_names[min_index]
self.worker_info[w_name].queue_length += 1
logger.info(f"names: {worker_names}, queue_lens: {worker_qlen}, ret: {w_name}")
return w_name
else:
raise ValueError(f"Invalid dispatch method: {self.dispatch_method}")
def receive_heart_beat(self, worker_name: str, queue_length: int):
if worker_name not in self.worker_info:
logger.info(f"Receive unknown heart beat. {worker_name}")
return False
self.worker_info[worker_name].queue_length = queue_length
self.worker_info[worker_name].last_heart_beat = time.time()
logger.info(f"Receive heart beat. {worker_name}")
return True
def remove_stable_workers_by_expiration(self):
expire = time.time() - CONTROLLER_HEART_BEAT_EXPIRATION
to_delete = []
for worker_name, w_info in self.worker_info.items():
if w_info.check_heart_beat and w_info.last_heart_beat < expire:
to_delete.append(worker_name)
for worker_name in to_delete:
self.remove_worker(worker_name)
def worker_api_generate_stream(self, params):
worker_addr = self.get_worker_address(params["model"])
if not worker_addr:
logger.info(f"no worker: {params['model']}")
ret = { | "text": server_error_msg, | 1 | 2023-12-05 08:02:17+00:00 | 2k |
RobertCsordas/moe_attention | layers/transformer/transformer.py | [
{
"identifier": "MultiHeadAttention",
"path": "layers/transformer/multi_head_attention.py",
"snippet": "class MultiHeadAttention(AttentionMergeMixin, AbsPosAttentionBase):\n def __init__(self, state_size: int, n_heads: int, dropout: float = 0.1, input_size: Optional[int] = None,\n out_size: Optional[int] = None):\n super(AbsPosAttentionBase, self).__init__(state_size, n_heads, dropout)\n\n self.data_to_kv = torch.nn.Linear(state_size, 2 * n_heads * self.projection_size, bias=False)\n self.data_to_q = torch.nn.Linear(input_size or state_size, n_heads * self.projection_size, bias=False)\n\n super(MultiHeadAttention, self).__init__(out_size)\n self.reset_parameters()\n\n def forward(self, curr_state: torch.Tensor, attend_to: torch.Tensor, mask: Optional[AttentionMask],\n need_weights: bool = False):\n # Input and output shape: [n_batch, n_steps, data_size]\n k, v = self.transform_data(attend_to, self.data_to_kv, 2)\n q, = self.transform_data(curr_state, self.data_to_q, 1)\n\n data, scores = self.merged_attention(curr_state.shape[0], q.shape[1], mask, q, k, v)\n if need_weights:\n return data, scores\n else:\n return data\n\n def reset_parameters(self):\n # super().reset_parameters()\n\n torch.nn.init.xavier_uniform_(self.data_to_q.weight)\n torch.nn.init.xavier_uniform_(self.data_to_kv.weight)\n torch.nn.init.xavier_uniform_(self.data_to_kv.weight)"
},
{
"identifier": "AttentionMask",
"path": "layers/transformer/multi_head_attention.py",
"snippet": "class AttentionMask:\n src_length_mask: Optional[torch.Tensor]\n position_mask: Optional[torch.Tensor]"
}
] | import torch
import torch.nn
import torch.nn.functional as F
from .multi_head_attention import MultiHeadAttention, AttentionMask
from typing import Optional, Callable, Dict, Type, Sequence, Union
from dataclasses import dataclass | 686 | # This file is based on PyTorch's internal implementation
ActivationFunction = Callable[[torch.Tensor], torch.Tensor]
class TransformerEncoderLayer(torch.nn.Module):
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation: ActivationFunction = F.relu,
attention_dropout=0):
super(TransformerEncoderLayer, self).__init__()
self.self_attn = MultiHeadAttention(d_model, nhead, dropout=attention_dropout)
self.linear1 = torch.nn.Linear(d_model, dim_feedforward)
self.dropout = torch.nn.Dropout(dropout)
self.linear2 = torch.nn.Linear(dim_feedforward, d_model)
self.norm1 = torch.nn.LayerNorm(d_model)
self.norm2 = torch.nn.LayerNorm(d_model)
self.dropout1 = torch.nn.Dropout(dropout)
self.dropout2 = torch.nn.Dropout(dropout)
self.activation = activation
self.reset_parameters()
| # This file is based on PyTorch's internal implementation
ActivationFunction = Callable[[torch.Tensor], torch.Tensor]
class TransformerEncoderLayer(torch.nn.Module):
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation: ActivationFunction = F.relu,
attention_dropout=0):
super(TransformerEncoderLayer, self).__init__()
self.self_attn = MultiHeadAttention(d_model, nhead, dropout=attention_dropout)
self.linear1 = torch.nn.Linear(d_model, dim_feedforward)
self.dropout = torch.nn.Dropout(dropout)
self.linear2 = torch.nn.Linear(dim_feedforward, d_model)
self.norm1 = torch.nn.LayerNorm(d_model)
self.norm2 = torch.nn.LayerNorm(d_model)
self.dropout1 = torch.nn.Dropout(dropout)
self.dropout2 = torch.nn.Dropout(dropout)
self.activation = activation
self.reset_parameters()
| def forward(self, src: torch.Tensor, mask: Optional[AttentionMask] = None) -> torch.Tensor: | 1 | 2023-12-13 08:45:02+00:00 | 2k |
riccardomusmeci/mlx-llm | src/mlx_llm/model/_registry.py | [
{
"identifier": "phi2",
"path": "src/mlx_llm/model/phi2.py",
"snippet": "def phi2() -> Phi2:\n return Phi2(\n dim=2560,\n vocab_size=51200,\n n_heads=32,\n n_layers=32,\n rotary_dim=32\n )"
},
{
"identifier": "llama_2_7B_chat",
"path": "src/mlx_llm/model/transformer.py",
"snippet": "def llama_2_7B_chat() -> Transformer:\n return Transformer(\n dim=4096,\n hidden_dim=11008,\n vocab_size=32000,\n n_layers=32,\n n_heads=32,\n n_kv_heads=32,\n head_dim=128,\n norm_eps=1e-5\n )"
},
{
"identifier": "tiny_llama_chat_v06",
"path": "src/mlx_llm/model/transformer.py",
"snippet": "def tiny_llama_chat_v06() -> Transformer:\n return Transformer(\n dim=2048,\n hidden_dim=5632,\n n_heads=32,\n n_kv_heads=4,\n n_layers=22,\n vocab_size=32000,\n head_dim=64, # 2048 / 32,\n norm_eps=1e-5,\n rope_traditional=False\n )"
},
{
"identifier": "openhermes_25_mistral_7B",
"path": "src/mlx_llm/model/transformer.py",
"snippet": "def openhermes_25_mistral_7B() -> Transformer:\n return Transformer(\n dim=4096,\n hidden_dim=14336,\n vocab_size=32002,\n n_layers=32,\n n_heads=32,\n n_kv_heads=8,\n head_dim=128,\n norm_eps=1e-5\n )"
},
{
"identifier": "mistral_7B_instruct_v02",
"path": "src/mlx_llm/model/transformer.py",
"snippet": "def mistral_7B_instruct_v02() -> Transformer:\n return Transformer(\n dim=4096,\n hidden_dim=14336,\n vocab_size=32000,\n n_layers=32,\n n_heads=32,\n n_kv_heads=8,\n head_dim=128,\n norm_eps=1e-5\n )"
},
{
"identifier": "e5_mistral_7b_instruct",
"path": "src/mlx_llm/model/transformer.py",
"snippet": "def e5_mistral_7b_instruct() -> Transformer:\n return Transformer(\n dim=4096,\n hidden_dim=14336,\n vocab_size=32000,\n n_layers=32,\n n_heads=32,\n n_kv_heads=8,\n head_dim=128,\n norm_eps=1e-5\n )"
}
] | from .phi2 import phi2
from .transformer import (
llama_2_7B_chat,
tiny_llama_chat_v06,
openhermes_25_mistral_7B,
# mistral_7B_instruct_v01,
mistral_7B_instruct_v02,
e5_mistral_7b_instruct
) | 756 |
MODEL_ENTRYPOINTS = {
"Phi2": phi2,
"LLaMA-2-7B-chat": llama_2_7B_chat,
"TinyLlama-1.1B-Chat-v0.6": tiny_llama_chat_v06,
# "Mistral-7B-Instruct-v0.1": mistral_7B_instruct_v01,
|
MODEL_ENTRYPOINTS = {
"Phi2": phi2,
"LLaMA-2-7B-chat": llama_2_7B_chat,
"TinyLlama-1.1B-Chat-v0.6": tiny_llama_chat_v06,
# "Mistral-7B-Instruct-v0.1": mistral_7B_instruct_v01, | "Mistral-7B-Instruct-v0.2": mistral_7B_instruct_v02, | 4 | 2023-12-07 16:19:47+00:00 | 2k |
xetdata/xetcache | xetcache/xetmemo_kernel_extension.py | [
{
"identifier": "hash_anything",
"path": "xetcache/util.py",
"snippet": "def hash_anything(x):\n return hashlib.sha256(pickle.dumps(x)).hexdigest()"
},
{
"identifier": "probe_memo",
"path": "xetcache/util.py",
"snippet": "def probe_memo(memopath, inputhashstr, key=None):\n \"\"\"\n Locate the memo from the provided input.\n \"\"\"\n memo_file = inputhashstr + '.pickle'\n if key is None:\n full_memo_file = os.path.join(memopath, inputhashstr + '.pickle')\n else:\n key = str(key)\n full_memo_file = os.path.join(memopath, key, inputhashstr + '.pickle')\n if full_memo_file.startswith(\"xet://\"):\n try:\n openfile = fsspec.open(full_memo_file, 'rb')\n fbytestr = None\n with openfile as f:\n print(f\"Loading from {memo_file}\")\n # reading from a string first will avoid potential tiny\n # reads that are extraordinarily slow\n fbytestr = f.read()\n result = pickle.loads(fbytestr)\n return result\n except Exception as e:\n if str(\"404 Not Found\") in str(e):\n return None\n print(f'Failed to load: {e}')\n return None\n elif os.path.exists(full_memo_file):\n if file_is_pointer_file(full_memo_file):\n materialized = materialize_pointer_file(full_memo_file)\n else:\n materialized = True\n if materialized:\n with open(full_memo_file, 'rb') as f:\n print(f\"Loading from {memo_file}\")\n result = pickle.load(f)\n return result\n return None"
},
{
"identifier": "store_memo",
"path": "xetcache/util.py",
"snippet": "def store_memo(memopath, inputhashstr, store, key):\n \"\"\"\n Locate the memo from the provided input.\n \"\"\"\n memo_file = inputhashstr + '.pickle'\n if key is None:\n full_memo_file = os.path.join(memopath, inputhashstr + '.pickle')\n else:\n key = str(key)\n full_memo_file = os.path.join(memopath, key, inputhashstr + '.pickle')\n memopath = os.path.join(memopath, key)\n if full_memo_file.startswith(\"xet://\"):\n fs = fsspec.filesystem(\"xet\")\n with fs.transaction:\n openfile = fsspec.open(full_memo_file, 'wb')\n with openfile as f:\n print(f\"Writing to {memo_file}\")\n pickle.dump(store, f)\n else:\n os.makedirs(memopath, exist_ok=True)\n with open(full_memo_file, 'wb') as f:\n print(f\"Writing to {memo_file}\")\n pickle.dump(store, f)\n return None"
},
{
"identifier": "get_memo_path",
"path": "xetcache/config.py",
"snippet": "def get_memo_path():\n \"\"\"\n Reads the current memo path\n \"\"\"\n return _MEMOPATH"
},
{
"identifier": "get_runtime_threshold",
"path": "xetcache/config.py",
"snippet": "def get_runtime_threshold():\n \"\"\"\n Reads the current runtime threshold in seconds. \n Only functions or cells which run longer than this will be cached.\n \"\"\"\n return _RUNTIME_THRESHOLD_SEC"
}
] | import os
import time
from .util import hash_anything, probe_memo, store_memo
from .config import get_memo_path, get_runtime_threshold
from IPython.core.magic import Magics, magics_class, cell_magic | 1,389 |
@magics_class
class XMemoMagics(Magics):
"""Memoization for data science tasks
%load_ext xetcache
to load the extension
"""
def __init__(self, *args, **kwargs):
print(self.xetmemo.__doc__)
memopath = get_memo_path()
print(f"Memoizing to {memopath}")
super().__init__(*args, **kwargs)
@cell_magic
def xetmemo(self, line, cell):
'''
Usage:
%%xetmemo input=v1,v2 output=v3,v4
Caches the specified output variables each time it is called.
        If called later with the same inputs, the cached value is returned
        and not reevaluated. This is persistent across Python runs.
        Any content changes to the input variables or cell code will
force reevaluation of the cell. Otherwise the outputs will simply be
retrieved from the memo.
        This memo is persistent across Python processes and, if XetHub is used
        (see `xetcache.set_xet_project`), can be shared with others.
For performance reasons, only functions which take more than 3
seconds (configurable from config.set_runtime_threshold) will be
cached. "always=True" can be added to the xetmemo arguments to
        ignore the runtime and to always cache
%%xetmemo input=v1,v2 output=v3,v4 always=True
Note that inputs can be anything picklable including functions.
A key parameter can be added to group the stored objects together.
Objects stored with one key will not be retrievable with a different
key
%%xetmemo input=v1,v2 output=v3,v4 always=True key=experiment1
Also see the `xetcache.xetmemo` decorator for a version that can be
used as a function decorator
'''
# parse the argument list
args = line.strip().split(' ')
inputvars = []
outputvars = []
ip = self.shell
always = False
key = None
for arg in args:
k, v = arg.split('=')
if k == 'input':
inputvars = [x.strip() for x in v.split(',')]
elif k == 'output':
outputvars = [x.strip() for x in v.split(',')]
elif k == 'always':
always = (v.strip() == 'True')
elif k == 'key':
key = v.strip()
else:
raise RuntimeError(f'Unexpected xmemo key type {k}')
# we hash the xetmemo line, and the contents of the cell
# and all the variables in the input line
|
@magics_class
class XMemoMagics(Magics):
"""Memoization for data science tasks
%load_ext xetcache
to load the extension
"""
def __init__(self, *args, **kwargs):
print(self.xetmemo.__doc__)
memopath = get_memo_path()
print(f"Memoizing to {memopath}")
super().__init__(*args, **kwargs)
@cell_magic
def xetmemo(self, line, cell):
'''
Usage:
%%xetmemo input=v1,v2 output=v3,v4
Caches the specified output variables each time it is called.
        If called later with the same inputs, the cached value is returned
        and not reevaluated. This is persistent across Python runs.
        Any content changes to the input variables or cell code will
force reevaluation of the cell. Otherwise the outputs will simply be
retrieved from the memo.
        This memo is persistent across Python processes and, if XetHub is used
        (see `xetcache.set_xet_project`), can be shared with others.
For performance reasons, only functions which take more than 3
seconds (configurable from config.set_runtime_threshold) will be
cached. "always=True" can be added to the xetmemo arguments to
        ignore the runtime and to always cache
%%xetmemo input=v1,v2 output=v3,v4 always=True
Note that inputs can be anything picklable including functions.
A key parameter can be added to group the stored objects together.
Objects stored with one key will not be retrievable with a different
key
%%xetmemo input=v1,v2 output=v3,v4 always=True key=experiment1
Also see the `xetcache.xetmemo` decorator for a version that can be
used as a function decorator
'''
# parse the argument list
args = line.strip().split(' ')
inputvars = []
outputvars = []
ip = self.shell
always = False
key = None
for arg in args:
k, v = arg.split('=')
if k == 'input':
inputvars = [x.strip() for x in v.split(',')]
elif k == 'output':
outputvars = [x.strip() for x in v.split(',')]
elif k == 'always':
always = (v.strip() == 'True')
elif k == 'key':
key = v.strip()
else:
raise RuntimeError(f'Unexpected xmemo key type {k}')
# we hash the xetmemo line, and the contents of the cell
# and all the variables in the input line | inputhashes = [hash_anything(line), hash_anything(cell)] | 0 | 2023-12-05 21:59:08+00:00 | 2k |
open-compass/T-Eval | teval/evaluators/planning_evaluator.py | [
{
"identifier": "format_load",
"path": "teval/utils/format_load.py",
"snippet": "def format_load(raw_data: str, start_character: str = '', end_character: str = ''):\n \"\"\"Format the raw data into the format that can be evaluated.\n\n Args:\n raw_data (str): The raw data.\n start_character (str, optional): The start character. Defaults to '', if using it, the string will be sliced from the first start_character.\n end_character (str, optional): The end character. Defaults to '', if using it, the string will be sliced to the last end_character.\n\n Returns:\n str: The formatted data.\n \"\"\"\n if type(raw_data) != str:\n # the data has been evaluated\n return raw_data\n if \"```json\" in raw_data:\n raw_data = raw_data[raw_data.find(\"```json\") + len(\"```json\"):]\n raw_data = raw_data.strip(\"`\")\n if start_character != '':\n raw_data = raw_data[raw_data.find(start_character):]\n if end_character != '':\n raw_data = raw_data[:raw_data.rfind(end_character) + len(end_character)]\n successful_parse = False\n try:\n data = ast.literal_eval(raw_data)\n successful_parse = True\n except Exception as e:\n pass\n try:\n if not successful_parse:\n data = json.loads(raw_data)\n successful_parse = True\n except Exception as e:\n pass\n try:\n if not successful_parse:\n data = json.loads(raw_data.replace(\"\\'\", \"\\\"\"))\n successful_parse = True\n except Exception as e:\n pass\n if not successful_parse:\n raise Exception(\"Cannot parse raw data\")\n return data"
},
{
"identifier": "ResponseDataSample",
"path": "teval/schema.py",
"snippet": "class ResponseDataSample:\n \"\"\"\n Args:\n template(str): Format string with keyword-only arguments. For\n example '{who} like {what}'\n pred(Any): Parsed data from LLM generating response.\n gt(Any): Ground truth data\n meta_data(dict, optional): Meta information will be used to evaluate\n LLM's response\n \"\"\"\n template: str\n pred: Any\n gt: Any\n meta_data: dict = None"
}
] | from collections import defaultdict
from numpy import mean
from mmengine import load
from teval.utils.format_load import format_load
from tqdm import tqdm
from teval.schema import ResponseDataSample
from sentence_transformers import SentenceTransformer, util
import json
import itertools
import networkx as nx
import numpy as np
import copy
import json
import re | 1,293 | # import evaluate
class PlanningEvaluator:
"""Planning Evaluation
Args:
dataset_path(str): File path of evaluation dataset
        name_weight(float): the weight of action_name in bert_score match, default = 0.75
        args_weight(float): the weight of action_args in bert_score match, default = 0.25
match_threshold(float): the threshold of matching
match_strategy(str): matching method, can choose 'bertscore' or 'permutation'
bert_score_model(str): the bert_score model for sentence similarity, default = "all-mpnet-base-v2".
Refer to https://www.sbert.net/docs/pretrained_models.html for more models.
"""
def __init__(
self,
dataset_path: str,
name_weight = 0.75,
args_weight = 0.25,
match_threshold = 0.7,
match_strategy: str = 'bertscore', # ["bertscore", "permutation"]
bert_score_model: str = "all-mpnet-base-v2", # ['thenlper/gte-large-zh', 'all-mpnet-base-v2']
default_prompt_type: str = 'json', # ["json", "ReWOO"]
**kwargs,
) -> None:
self.bert_score_model = bert_score_model
print(bert_score_model)
self.dataset_path = dataset_path
self.name_weight = name_weight
self.args_weight = args_weight
self.match_threshold = match_threshold
self.default_prompt_type = default_prompt_type # ["json", "ReWOO"]
assert match_strategy in ["bertscore", "permutation"], f"match strategy must in [\"bertscore\", \"permutation\"], but get {match_strategy}"
self.match_strategy = match_strategy
self.valid_data_count = None
self.sentence_model = SentenceTransformer(self.bert_score_model)
def _load_dataset(self):
self.dataset = []
dataset = load(self.dataset_path)
total_error = 0
total_count = 0
for key in dataset.keys():
datum = dataset[key]
data_sample, error = self._process_response(datum)
total_error += error
total_count += 1
self.dataset.append(
dict(response_data_sample=data_sample))
self.num_samples = len(self.dataset)
print("total_data_count:", total_count, "valid_data_count:", total_count - total_error)
self.valid_data_count = total_count - total_error
def format_load(self, data):
r'''
ensure evaluator can work correctly under any data input
'''
try:
json_format = format_load(data, start_character='[', end_character=']')
except Exception as e:
return []
if type(json_format) != list:
return []
for i in range(len(json_format)):
try:
json_format[i] = {
'name': str(json_format[i]['name']),
'id': int(json_format[i]['id']),
'args': str(json_format[i]['args'])
}
except Exception as e:
return []
return json_format
def _process_response(
self,
datum,
| # import evaluate
class PlanningEvaluator:
"""Planning Evaluation
Args:
dataset_path(str): File path of evaluation dataset
        name_weight(float): the weight of action_name in bert_score match, default = 0.75
        args_weight(float): the weight of action_args in bert_score match, default = 0.25
match_threshold(float): the threshold of matching
match_strategy(str): matching method, can choose 'bertscore' or 'permutation'
bert_score_model(str): the bert_score model for sentence similarity, default = "all-mpnet-base-v2".
Refer to https://www.sbert.net/docs/pretrained_models.html for more models.
"""
def __init__(
self,
dataset_path: str,
name_weight = 0.75,
args_weight = 0.25,
match_threshold = 0.7,
match_strategy: str = 'bertscore', # ["bertscore", "permutation"]
bert_score_model: str = "all-mpnet-base-v2", # ['thenlper/gte-large-zh', 'all-mpnet-base-v2']
default_prompt_type: str = 'json', # ["json", "ReWOO"]
**kwargs,
) -> None:
self.bert_score_model = bert_score_model
print(bert_score_model)
self.dataset_path = dataset_path
self.name_weight = name_weight
self.args_weight = args_weight
self.match_threshold = match_threshold
self.default_prompt_type = default_prompt_type # ["json", "ReWOO"]
assert match_strategy in ["bertscore", "permutation"], f"match strategy must in [\"bertscore\", \"permutation\"], but get {match_strategy}"
self.match_strategy = match_strategy
self.valid_data_count = None
self.sentence_model = SentenceTransformer(self.bert_score_model)
def _load_dataset(self):
self.dataset = []
dataset = load(self.dataset_path)
total_error = 0
total_count = 0
for key in dataset.keys():
datum = dataset[key]
data_sample, error = self._process_response(datum)
total_error += error
total_count += 1
self.dataset.append(
dict(response_data_sample=data_sample))
self.num_samples = len(self.dataset)
print("total_data_count:", total_count, "valid_data_count:", total_count - total_error)
self.valid_data_count = total_count - total_error
def format_load(self, data):
r'''
ensure evaluator can work correctly under any data input
'''
try:
json_format = format_load(data, start_character='[', end_character=']')
except Exception as e:
return []
if type(json_format) != list:
return []
for i in range(len(json_format)):
try:
json_format[i] = {
'name': str(json_format[i]['name']),
'id': int(json_format[i]['id']),
'args': str(json_format[i]['args'])
}
except Exception as e:
return []
return json_format
def _process_response(
self,
datum, | ) -> ResponseDataSample: | 1 | 2023-12-10 05:18:46+00:00 | 2k |
rabilrbl/gemini-pro-bot | gemini_pro_bot/handlers.py | [
{
"identifier": "model",
"path": "gemini_pro_bot/llm.py",
"snippet": "SAFETY_SETTINGS = {\n HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_NONE,\n HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_NONE,\n HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: HarmBlockThreshold.BLOCK_NONE,\n HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_NONE,\n}"
},
{
"identifier": "format_message",
"path": "gemini_pro_bot/html_format.py",
"snippet": "def format_message(text: str) -> str:\n \"\"\"Format the given message text from markdown to HTML.\n\n Escapes HTML characters, applies link, code, and other rich text formatting,\n and returns the formatted HTML string.\n\n Args:\n message (str): The plain text message to format.\n\n Returns:\n str: The formatted HTML string.\n \"\"\"\n formatted_text = escape_html(text)\n formatted_text = apply_exclude_code(formatted_text)\n formatted_text = apply_code(formatted_text)\n return formatted_text"
}
] | import asyncio
import PIL.Image as load_image
from gemini_pro_bot.llm import model, img_model
from google.generativeai.types.generation_types import (
StopCandidateException,
BlockedPromptException,
)
from telegram import Update
from telegram.ext import (
ContextTypes,
)
from telegram.error import NetworkError, BadRequest
from telegram.constants import ChatAction, ParseMode
from gemini_pro_bot.html_format import format_message
from io import BytesIO | 1,017 |
def new_chat(context: ContextTypes.DEFAULT_TYPE) -> None:
context.chat_data["chat"] = model.start_chat()
async def start(update: Update, _: ContextTypes.DEFAULT_TYPE) -> None:
"""Send a message when the command /start is issued."""
user = update.effective_user
await update.message.reply_html(
f"Hi {user.mention_html()}!\n\nStart sending messages with me to generate a response.\n\nSend /new to start a new chat session.",
# reply_markup=ForceReply(selective=True),
)
async def help_command(update: Update, _: ContextTypes.DEFAULT_TYPE) -> None:
"""Send a message when the command /help is issued."""
help_text = """
Basic commands:
/start - Start the bot
/help - Get help. Shows this message
Chat commands:
/new - Start a new chat session (model will forget previously generated messages)
Send a message to the bot to generate a response.
"""
await update.message.reply_text(help_text)
async def newchat_command(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
"""Start a new chat session."""
init_msg = await update.message.reply_text(
text="Starting new chat session...",
reply_to_message_id=update.message.message_id,
)
new_chat(context)
await init_msg.edit_text("New chat session started.")
# Define the function that will handle incoming messages
async def handle_message(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
"""Handles incoming text messages from users.
Checks if a chat session exists for the user, initializes a new session if not.
Sends the user's message to the chat session to generate a response.
Streams the response back to the user, handling any errors.
"""
if context.chat_data.get("chat") is None:
new_chat(context)
text = update.message.text
init_msg = await update.message.reply_text(
text="Generating...", reply_to_message_id=update.message.message_id
)
await update.message.chat.send_action(ChatAction.TYPING)
# Generate a response using the text-generation pipeline
chat = context.chat_data.get("chat") # Get the chat session for this chat
response = None
try:
response = await chat.send_message_async(
text, stream=True
) # Generate a response
except StopCandidateException as sce:
print("Prompt: ", text, " was stopped. User: ", update.message.from_user)
print(sce)
await init_msg.edit_text("The model unexpectedly stopped generating.")
chat.rewind() # Rewind the chat session to prevent the bot from getting stuck
return
except BlockedPromptException as bpe:
print("Prompt: ", text, " was blocked. User: ", update.message.from_user)
print(bpe)
await init_msg.edit_text("Blocked due to safety concerns.")
if response:
# Resolve the response to prevent the chat session from getting stuck
await response.resolve()
return
full_plain_message = ""
# Stream the responses
async for chunk in response:
try:
if chunk.text:
full_plain_message += chunk.text
|
def new_chat(context: ContextTypes.DEFAULT_TYPE) -> None:
context.chat_data["chat"] = model.start_chat()
async def start(update: Update, _: ContextTypes.DEFAULT_TYPE) -> None:
"""Send a message when the command /start is issued."""
user = update.effective_user
await update.message.reply_html(
f"Hi {user.mention_html()}!\n\nStart sending messages with me to generate a response.\n\nSend /new to start a new chat session.",
# reply_markup=ForceReply(selective=True),
)
async def help_command(update: Update, _: ContextTypes.DEFAULT_TYPE) -> None:
"""Send a message when the command /help is issued."""
help_text = """
Basic commands:
/start - Start the bot
/help - Get help. Shows this message
Chat commands:
/new - Start a new chat session (model will forget previously generated messages)
Send a message to the bot to generate a response.
"""
await update.message.reply_text(help_text)
async def newchat_command(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
"""Start a new chat session."""
init_msg = await update.message.reply_text(
text="Starting new chat session...",
reply_to_message_id=update.message.message_id,
)
new_chat(context)
await init_msg.edit_text("New chat session started.")
# Define the function that will handle incoming messages
async def handle_message(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
"""Handles incoming text messages from users.
Checks if a chat session exists for the user, initializes a new session if not.
Sends the user's message to the chat session to generate a response.
Streams the response back to the user, handling any errors.
"""
if context.chat_data.get("chat") is None:
new_chat(context)
text = update.message.text
init_msg = await update.message.reply_text(
text="Generating...", reply_to_message_id=update.message.message_id
)
await update.message.chat.send_action(ChatAction.TYPING)
# Generate a response using the text-generation pipeline
chat = context.chat_data.get("chat") # Get the chat session for this chat
response = None
try:
response = await chat.send_message_async(
text, stream=True
) # Generate a response
except StopCandidateException as sce:
print("Prompt: ", text, " was stopped. User: ", update.message.from_user)
print(sce)
await init_msg.edit_text("The model unexpectedly stopped generating.")
chat.rewind() # Rewind the chat session to prevent the bot from getting stuck
return
except BlockedPromptException as bpe:
print("Prompt: ", text, " was blocked. User: ", update.message.from_user)
print(bpe)
await init_msg.edit_text("Blocked due to safety concerns.")
if response:
# Resolve the response to prevent the chat session from getting stuck
await response.resolve()
return
full_plain_message = ""
# Stream the responses
async for chunk in response:
try:
if chunk.text:
full_plain_message += chunk.text | message = format_message(full_plain_message) | 1 | 2023-12-14 16:57:14+00:00 | 2k |
nox-410/tvm.tl | python/tvm/target/x86.py | [
{
"identifier": "register_func",
"path": "python/tvm/_ffi/registry.py",
"snippet": "def register_func(func_name, f=None, override=False):\n \"\"\"Register global function\n\n Parameters\n ----------\n func_name : str or function\n The function name\n\n f : function, optional\n The function to be registered.\n\n override: boolean optional\n Whether override existing entry.\n\n Returns\n -------\n fregister : function\n Register function if f is not specified.\n\n Examples\n --------\n The following code registers my_packed_func as global function.\n Note that we simply get it back from global function table to invoke\n it from python side. However, we can also invoke the same function\n from C++ backend, or in the compiled TVM code.\n\n .. code-block:: python\n\n targs = (10, 10.0, \"hello\")\n @tvm.register_func\n def my_packed_func(*args):\n assert(tuple(args) == targs)\n return 10\n # Get it out from global function table\n f = tvm.get_global_func(\"my_packed_func\")\n assert isinstance(f, tvm.PackedFunc)\n y = f(*targs)\n assert y == 10\n \"\"\"\n if callable(func_name):\n f = func_name\n func_name = f.__name__\n\n if not isinstance(func_name, str):\n raise ValueError(\"expect string function name\")\n\n ioverride = ctypes.c_int(override)\n\n def register(myf):\n \"\"\"internal register function\"\"\"\n if not isinstance(myf, PackedFuncBase):\n myf = convert_to_tvm_func(myf)\n check_call(_LIB.TVMFuncRegisterGlobal(c_str(func_name), myf.handle, ioverride))\n return myf\n\n if f:\n return register(f)\n return register"
},
{
"identifier": "target_has_features",
"path": "python/tvm/target/codegen.py",
"snippet": "def target_has_features(cpu_features, target=None):\n \"\"\"Check CPU features for the target's `-mtriple` and `-mcpu` and `-mattr`.\n\n Parameters\n ----------\n target : Target\n The TVM target.\n cpu_features : str or Array\n CPU Feature(s) to check.\n\n Returns\n -------\n has_features : bool\n True if target has the feature(s).\n \"\"\"\n assert isinstance(target, Target) or target is None\n assert isinstance(cpu_features, (Array, list, tuple, str))\n has_feats = True\n cpu_features = [cpu_features] if isinstance(cpu_features, str) else cpu_features\n for feat in cpu_features:\n has_feats &= _ffi_api.target_has_feature(feat, target)\n return has_feats"
}
] | from .._ffi import register_func
from .codegen import target_has_features | 940 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Common x86 related utilities"""
@register_func("tvm.topi.x86.utils.get_simd_32bit_lanes")
def get_simd_32bit_lanes():
"""X86 SIMD optimal vector length lookup.
Parameters
----------
Returns
-------
vec_len : int
        The optimal vector length of the CPU from the global context target.
"""
vec_len = 4
| # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Common x86 related utilities"""
@register_func("tvm.topi.x86.utils.get_simd_32bit_lanes")
def get_simd_32bit_lanes():
"""X86 SIMD optimal vector length lookup.
Parameters
----------
Returns
-------
vec_len : int
        The optimal vector length of the CPU from the global context target.
"""
vec_len = 4 | if target_has_features(["avx512bw", "avx512f"]): | 1 | 2023-12-14 02:37:47+00:00 | 2k |
kakaobrain/honeybee | tasks/mme/mme_dataset.py | [
{
"identifier": "TaskDataset",
"path": "tasks/base_dataset.py",
"snippet": "class TaskDataset(Dataset):\n def build_prompt(self, question, image_prompt=\"Human: <image>\"):\n prompt = f\"\"\"{SYSTEM}\n{image_prompt}\nHuman: {question}\nAI: \"\"\"\n return prompt\n\n def collate_fn(self, examples: list[Example]) -> Batch:\n ids = [ex.id for ex in examples]\n data = [ex.data for ex in examples]\n\n images = [ex.image for ex in examples]\n prompts = [ex.prompt for ex in examples]\n inputs = self.processor(images=images, text=prompts)\n\n batch = Batch(ids=ids, inputs=inputs, data=data)\n return batch"
},
{
"identifier": "Example",
"path": "tasks/base_dataset.py",
"snippet": "class Example:\n id: int # results will be sorted by id\n image: Image\n prompt: str\n data: dict # answer and additional data -- will be included in results"
}
] | from pathlib import Path
from PIL import Image
from tasks.base_dataset import TaskDataset, Example
import utils | 763 |
EVAL_TYPE_DICT = {
"Perception": ["existence", "count", "position", "color", "posters", "celebrity", "scene", "landmark", "artwork", "OCR"],
"Cognition": ["commonsense_reasoning", "numerical_calculation", "text_translation", "code_reasoning"]
}
def load_subset(dir_path):
root = Path(dir_path)
dset_name = root.name
imgpaths = list(root.glob("**/*.jpg")) + list(root.glob("**/*.png"))
imgpaths = sorted(imgpaths)
def get_txtpath(imgpath):
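        # the QA annotation .txt lives either next to the image or under the sibling questions_answers_YN/ directory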
txtpath = imgpath.with_suffix(".txt")
txtname = txtpath.name
if txtpath.exists():
return txtpath
if imgpath.parent.name == "images":
return imgpath.parent.parent / "questions_answers_YN" / txtname
raise ValueError(f"Cannot find txt path from image path `{imgpath}`")
data = []
for imgpath in imgpaths:
txtpath = get_txtpath(imgpath)
with txtpath.open(encoding="utf-8") as f:
for line in f:
q, a = line.strip().split("\t")
data.append((dset_name, imgpath, q, a))
return data
class MMEDataset(TaskDataset):
def __init__(self, root, processor):
root = Path(root)
data = []
for subset in EVAL_TYPE_DICT["Perception"] + EVAL_TYPE_DICT["Cognition"]:
data += load_subset(root / subset)
utils.print_rank_0(f"MME total dataset size = {len(data)}")
assert len(data) == 2374
self.data = data
self.processor = processor
def __len__(self):
return len(self.data)
def __getitem__(self, index):
dset_name, imgpath, question, answer = self.data[index]
prompt = f"Answer the question using a single word or phrase. {question}"
prompt = self.build_prompt(prompt)
imgid = imgpath.name
image = Image.open(imgpath)
data = {
"question": question,
"answer": answer,
"image_path": str(imgpath),
"image_id": imgid,
"dataset_name": dset_name,
}
|
EVAL_TYPE_DICT = {
"Perception": ["existence", "count", "position", "color", "posters", "celebrity", "scene", "landmark", "artwork", "OCR"],
"Cognition": ["commonsense_reasoning", "numerical_calculation", "text_translation", "code_reasoning"]
}
def load_subset(dir_path):
root = Path(dir_path)
dset_name = root.name
imgpaths = list(root.glob("**/*.jpg")) + list(root.glob("**/*.png"))
imgpaths = sorted(imgpaths)
def get_txtpath(imgpath):
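        # the QA annotation .txt lives either next to the image or under the sibling questions_answers_YN/ directory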
txtpath = imgpath.with_suffix(".txt")
txtname = txtpath.name
if txtpath.exists():
return txtpath
if imgpath.parent.name == "images":
return imgpath.parent.parent / "questions_answers_YN" / txtname
raise ValueError(f"Cannot find txt path from image path `{imgpath}`")
data = []
for imgpath in imgpaths:
txtpath = get_txtpath(imgpath)
with txtpath.open(encoding="utf-8") as f:
for line in f:
q, a = line.strip().split("\t")
data.append((dset_name, imgpath, q, a))
return data
class MMEDataset(TaskDataset):
def __init__(self, root, processor):
root = Path(root)
data = []
for subset in EVAL_TYPE_DICT["Perception"] + EVAL_TYPE_DICT["Cognition"]:
data += load_subset(root / subset)
utils.print_rank_0(f"MME total dataset size = {len(data)}")
assert len(data) == 2374
self.data = data
self.processor = processor
def __len__(self):
return len(self.data)
def __getitem__(self, index):
dset_name, imgpath, question, answer = self.data[index]
prompt = f"Answer the question using a single word or phrase. {question}"
prompt = self.build_prompt(prompt)
imgid = imgpath.name
image = Image.open(imgpath)
data = {
"question": question,
"answer": answer,
"image_path": str(imgpath),
"image_id": imgid,
"dataset_name": dset_name,
} | ex = Example(index, image, prompt, data) | 1 | 2023-12-06 14:48:41+00:00 | 2k |
NVlabs/RADIO | radio/hf_model.py | [
{
"identifier": "eradio",
"path": "radio/eradio_model.py",
"snippet": "@register_model\ndef eradio(pretrained=False, **kwargs):\n return fastervit2_large_fullres_ws16(pretrained=pretrained, **kwargs)"
},
{
"identifier": "create_model_from_args",
"path": "radio/radio_model.py",
"snippet": "def create_model_from_args(args) -> nn.Module:\n in_chans = 3\n if args.in_chans is not None:\n in_chans = args.in_chans\n elif args.input_size is not None:\n in_chans = args.input_size[0]\n\n # Skip weight initialization unless it's explicitly requested.\n weight_init = args.model_kwargs.pop(\"weight_init\", \"skip\")\n\n model = create_model(\n args.model,\n pretrained=args.pretrained,\n in_chans=in_chans,\n num_classes=args.num_classes,\n drop_rate=args.drop,\n drop_path_rate=args.drop_path,\n drop_block_rate=args.drop_block,\n global_pool=args.gp,\n bn_momentum=args.bn_momentum,\n bn_eps=args.bn_eps,\n scriptable=args.torchscript,\n checkpoint_path=args.initial_checkpoint,\n weight_init=weight_init,\n **args.model_kwargs,\n )\n\n assert (\n not args.cls_token_per_teacher or args.cpe_max_size is not None\n ), \"CPE must be enabled for multiple CLS tokens!\"\n\n if args.cpe_max_size is not None:\n enable_cpe(\n model,\n args.cpe_max_size,\n num_cls_tokens=len(args.teachers) if args.cls_token_per_teacher else 1,\n register_multiple=args.register_multiple,\n )\n\n return model"
},
{
"identifier": "RADIOModel",
"path": "radio/radio_model.py",
"snippet": "class RADIOModel(nn.Module):\n def __init__(\n self,\n model: nn.Module,\n input_conditioner: InputConditioner,\n return_summary: bool,\n return_spatial_features: bool,\n ):\n super().__init__()\n\n self.model = model\n self.input_conditioner = input_conditioner\n self.return_summary = return_summary\n self.return_spatial_features = return_spatial_features\n\n def forward(self, x: torch.Tensor):\n x = self.input_conditioner(x)\n\n y = self.model.forward_features(x)\n\n if isinstance(y, (list, tuple)):\n summary, all_feat = y\n elif isinstance(self.model, VisionTransformer):\n patch_gen = getattr(self.model, \"patch_generator\", None)\n if patch_gen is not None:\n summary = y[:, : patch_gen.num_cls_tokens].flatten(1)\n all_feat = y[:, patch_gen.num_skip :]\n elif self.model.global_pool == \"avg\":\n summary = y[:, self.model.num_prefix_tokens :].mean(dim=1)\n all_feat = y\n else:\n summary = y[:, 0]\n all_feat = y[:, 1:]\n else:\n raise ValueError(\"Unsupported model type\")\n\n if self.return_summary and self.return_spatial_features:\n return summary, all_feat\n elif self.return_summary:\n return summary\n return all_feat"
},
{
"identifier": "get_default_conditioner",
"path": "radio/input_conditioner.py",
"snippet": "def get_default_conditioner():\n from timm.data.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD\n\n return InputConditioner(\n input_scale=1.0,\n norm_mean=OPENAI_CLIP_MEAN,\n norm_std=OPENAI_CLIP_STD,\n )"
},
{
"identifier": "InputConditioner",
"path": "radio/input_conditioner.py",
"snippet": "class InputConditioner(nn.Module):\n def __init__(self,\n input_scale: float,\n norm_mean: norm_t,\n norm_std: norm_t,\n dtype: torch.dtype = torch.float32,\n ):\n super().__init__()\n\n self.dtype = dtype\n\n # self.input_scale = input_scale\n self.register_buffer(\"norm_mean\", _to_tensor(norm_mean) / input_scale)\n self.register_buffer(\"norm_std\", _to_tensor(norm_std) / input_scale)\n\n def forward(self, x: torch.Tensor):\n # x = x * self.input_scale\n y = (x - self.norm_mean) / self.norm_std\n return y.to(self.dtype)"
}
] | from collections import namedtuple
from typing import Optional
from timm.models import VisionTransformer
from transformers import PretrainedConfig, PreTrainedModel
from .eradio_model import eradio
from .radio_model import create_model_from_args
from .radio_model import RADIOModel as RADIOModelBase
from .input_conditioner import get_default_conditioner, InputConditioner
import torch | 1,421 | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class RADIOConfig(PretrainedConfig):
"""Pretrained Hugging Face configuration for RADIO models."""
def __init__(
self,
args: Optional[dict] = None,
version: Optional[str] = "v1",
return_summary: Optional[bool] = True,
return_spatial_features: Optional[bool] = True,
**kwargs,
):
self.args = args
self.version = version
self.return_summary = return_summary
self.return_spatial_features = return_spatial_features
super().__init__(**kwargs)
class RADIOModel(PreTrainedModel):
"""Pretrained Hugging Face model for RADIO.
This class inherits from PreTrainedModel, which provides
HuggingFace's functionality for loading and saving models.
"""
config_class = RADIOConfig
def __init__(self, config):
super().__init__(config)
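        # rebuild an argparse-like namespace from the stored config dict so create_model_from_args can consume it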
RADIOArgs = namedtuple("RADIOArgs", config.args.keys())
args = RADIOArgs(**config.args)
self.config = config
| # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class RADIOConfig(PretrainedConfig):
"""Pretrained Hugging Face configuration for RADIO models."""
def __init__(
self,
args: Optional[dict] = None,
version: Optional[str] = "v1",
return_summary: Optional[bool] = True,
return_spatial_features: Optional[bool] = True,
**kwargs,
):
self.args = args
self.version = version
self.return_summary = return_summary
self.return_spatial_features = return_spatial_features
super().__init__(**kwargs)
class RADIOModel(PreTrainedModel):
"""Pretrained Hugging Face model for RADIO.
This class inherits from PreTrainedModel, which provides
HuggingFace's functionality for loading and saving models.
"""
config_class = RADIOConfig
def __init__(self, config):
super().__init__(config)
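        # rebuild an argparse-like namespace from the stored config dict so create_model_from_args can consume it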
RADIOArgs = namedtuple("RADIOArgs", config.args.keys())
args = RADIOArgs(**config.args)
self.config = config | model = create_model_from_args(args) | 1 | 2023-12-08 19:53:01+00:00 | 2k |
taikinman/langrila | src/langrila/database/chroma.py | [
{
"identifier": "BaseModule",
"path": "src/langrila/base.py",
"snippet": "class BaseModule(ABC):\n @abstractmethod\n def run(self, *args, **kwargs):\n raise NotImplementedError\n\n async def arun(self, *args, **kwargs):\n raise NotImplementedError\n\n def stream(self, *args, **kwargs):\n raise NotImplementedError\n\n async def astream(self, *args, **kwargs):\n raise NotImplementedError\n\n def __call__(self, *args, **kwargs):\n _async = kwargs.pop(\"arun\", False)\n _stream = kwargs.pop(\"stream\", False)\n if _async:\n if _stream:\n return self.astream(*args, **kwargs)\n else:\n return asyncio.create_task(self.arun(*args, **kwargs))\n else:\n if _stream:\n return self.stream(*args, **kwargs)\n else:\n return self.run(*args, **kwargs)"
},
{
"identifier": "RetrievalResult",
"path": "src/langrila/result.py",
"snippet": "class RetrievalResult(BaseModel):\n ids: list[int | str]\n documents: list[str]\n metadatas: Optional[list[dict[str, Any]] | list[None]]\n similarities: list[float]\n usage: Usage"
},
{
"identifier": "Usage",
"path": "src/langrila/usage.py",
"snippet": "class Usage(BaseModel):\n prompt_tokens: int = 0\n completion_tokens: int = 0\n\n def __add__(self, other: __class__ | dict | CompletionUsage):\n if isinstance(other, dict):\n other = Usage(**other)\n\n if hasattr(other, 'prompt_tokens'):\n prompt_tokens = self.prompt_tokens + other.prompt_tokens\n else:\n prompt_tokens = self.prompt_tokens\n if hasattr(other, 'completion_tokens'):\n completion_tokens = self.completion_tokens + other.completion_tokens\n else:\n completion_tokens = self.completion_tokens\n return Usage(\n prompt_tokens=prompt_tokens,\n completion_tokens=completion_tokens,\n )\n\n def __sub__(self, other: __class__ | dict | CompletionUsage):\n if isinstance(other, dict):\n other = Usage(**other)\n\n if hasattr(other, 'prompt_tokens'):\n prompt_tokens = self.prompt_tokens - other.prompt_tokens\n else:\n prompt_tokens = self.prompt_tokens\n if hasattr(other, 'completion_tokens'):\n completion_tokens = self.completion_tokens - other.completion_tokens\n else:\n completion_tokens = self.completion_tokens\n return Usage(\n prompt_tokens=prompt_tokens,\n completion_tokens=completion_tokens,\n )\n\n @property\n def total_tokens(self):\n return self.prompt_tokens + self.completion_tokens\n\n @field_validator('prompt_tokens')\n def check_prompt_tokens(cls, v):\n if v < 0:\n raise ValueError('prompt_tokens must be greater or equal to 0')\n return v\n\n @field_validator('completion_tokens')\n def check_completion_tokens(cls, v):\n if v < 0:\n raise ValueError('completion_tokens must be greater or equal to 0')\n return v\n\n def __repr__(self):\n return f'Usage(prompt_tokens={self.prompt_tokens}, completion_tokens={self.completion_tokens}, total_tokens={self.total_tokens})'"
}
] | import sys
import chromadb
from pathlib import Path
from typing import Optional
from ..base import BaseModule
from ..result import RetrievalResult
from ..usage import Usage | 1,570 |
python_version = sys.version_info
# NOTE : Python version < 3.10 is bundled by lower version sqlite client, so in that case sqlite modules is override
# https://docs.trychroma.com/troubleshooting#sqlite
__import__("pysqlite3")
sys.modules["sqlite3"] = sys.modules.pop("pysqlite3")
class ChromaCollectionModule(BaseModule):
def __init__(
self,
persistence_directory: str,
collection_name: str,
embedder: Optional[BaseModule] = None,
):
self.embedder = embedder
self.persistence_directory = Path(persistence_directory)
self.collection_name = collection_name
def run(
self,
documents: list[str],
metadatas: Optional[list[dict[str, str]]]=None,
embeddings: Optional[list[list[float]]] = None,
) -> None:
if embeddings is None:
if self.embedder is not None:
embeddings = self.embedder(documents).embeddings
else:
raise AttributeError(
"attribute embedder must be the instance of the class inheriting BaseModule."
)
ids = [str(i) for i in range(len(documents))]
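        # Chroma requires string ids, so use each document's positional index as its id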
client = chromadb.PersistentClient(path=self.persistence_directory.as_posix())
        # recreate the collection
try:
client.delete_collection(name=self.collection_name)
except ValueError:
pass
collection = client.get_or_create_collection(
name=self.collection_name, metadata={"hnsw:space": "cosine"}
)
collection.upsert(ids=ids, embeddings=embeddings, documents=documents, metadatas=metadatas)
def as_retriever(
self, n_results: int = 4, threshold_similarity: float = 0.8, return_only_relevant_docs: bool = False
) -> "ChromaRetrievalModule":
return ChromaRetrievalModule(
embedder=self.embedder,
persistence_directory=self.persistence_directory,
collection_name=self.collection_name,
n_results=n_results,
threshold_similarity=threshold_similarity,
return_only_relevant_docs=return_only_relevant_docs,
)
class ChromaRetrievalModule(BaseModule):
def __init__(
self,
embedder: BaseModule,
persistence_directory: str,
collection_name: str,
n_results: int = 4,
threshold_similarity: float = 0.8,
return_only_relevant_docs: bool = False,
):
assert isinstance(
embedder, BaseModule
), "embedder must be the instance of the class inheriting BaseModule."
self.embedder = embedder
self.n_results = n_results
self.threshold_similarity = threshold_similarity
self.persistence_directory = persistence_directory
self.collection_name = collection_name
self.return_only_relevant_docs = return_only_relevant_docs
self.n_results = n_results
def run(
self,
query: str,
where: Optional[dict] = None,
) -> dict:
query_embed = self.embedder(query)
client = chromadb.PersistentClient(path=self.persistence_directory.as_posix())
collection = client.get_collection(name=self.collection_name)
retrieved = collection.query(
query_embeddings=query_embed.embeddings[0], n_results=self.n_results, where=where
)
_results = self.filter_with_distance(retrieved)
results = RetrievalResult(
ids=_results["ids"],
documents=_results["documents"],
metadatas=_results["metadatas"],
similarities=_results["similarities"],
|
python_version = sys.version_info
# NOTE : Python version < 3.10 is bundled by lower version sqlite client, so in that case sqlite modules is override
# https://docs.trychroma.com/troubleshooting#sqlite
__import__("pysqlite3")
sys.modules["sqlite3"] = sys.modules.pop("pysqlite3")
class ChromaCollectionModule(BaseModule):
def __init__(
self,
persistence_directory: str,
collection_name: str,
embedder: Optional[BaseModule] = None,
):
self.embedder = embedder
self.persistence_directory = Path(persistence_directory)
self.collection_name = collection_name
def run(
self,
documents: list[str],
metadatas: Optional[list[dict[str, str]]]=None,
embeddings: Optional[list[list[float]]] = None,
) -> None:
if embeddings is None:
if self.embedder is not None:
embeddings = self.embedder(documents).embeddings
else:
raise AttributeError(
"attribute embedder must be the instance of the class inheriting BaseModule."
)
ids = [str(i) for i in range(len(documents))]
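        # Chroma requires string ids, so use each document's positional index as its id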
client = chromadb.PersistentClient(path=self.persistence_directory.as_posix())
        # recreate the collection
try:
client.delete_collection(name=self.collection_name)
except ValueError:
pass
collection = client.get_or_create_collection(
name=self.collection_name, metadata={"hnsw:space": "cosine"}
)
collection.upsert(ids=ids, embeddings=embeddings, documents=documents, metadatas=metadatas)
def as_retriever(
self, n_results: int = 4, threshold_similarity: float = 0.8, return_only_relevant_docs: bool = False
) -> "ChromaRetrievalModule":
return ChromaRetrievalModule(
embedder=self.embedder,
persistence_directory=self.persistence_directory,
collection_name=self.collection_name,
n_results=n_results,
threshold_similarity=threshold_similarity,
return_only_relevant_docs=return_only_relevant_docs,
)
class ChromaRetrievalModule(BaseModule):
def __init__(
self,
embedder: BaseModule,
persistence_directory: str,
collection_name: str,
n_results: int = 4,
threshold_similarity: float = 0.8,
return_only_relevant_docs: bool = False,
):
assert isinstance(
embedder, BaseModule
), "embedder must be the instance of the class inheriting BaseModule."
self.embedder = embedder
self.n_results = n_results
self.threshold_similarity = threshold_similarity
self.persistence_directory = persistence_directory
self.collection_name = collection_name
self.return_only_relevant_docs = return_only_relevant_docs
self.n_results = n_results
def run(
self,
query: str,
where: Optional[dict] = None,
) -> dict:
query_embed = self.embedder(query)
client = chromadb.PersistentClient(path=self.persistence_directory.as_posix())
collection = client.get_collection(name=self.collection_name)
retrieved = collection.query(
query_embeddings=query_embed.embeddings[0], n_results=self.n_results, where=where
)
_results = self.filter_with_distance(retrieved)
results = RetrievalResult(
ids=_results["ids"],
documents=_results["documents"],
metadatas=_results["metadatas"],
similarities=_results["similarities"], | usage=Usage( | 2 | 2023-12-10 09:42:35+00:00 | 2k |
Open-All-Scale-Causal-Engine/OpenASCE | openasce/inference/learner/dml_test.py | [
{
"identifier": "DML",
"path": "openasce/inference/learner/dml.py",
"snippet": "class DML(_DML, InferenceModel):\n def fit(\n self,\n *,\n X: Iterable[np.ndarray],\n Y: Iterable[np.ndarray],\n T: Iterable[np.ndarray],\n **kwargs\n ):\n \"\"\"Feed the sample data and train the model used to effect on the samples.\n\n Arguments:\n X: Features of the samples.\n Y: Outcomes of the samples.\n T: Treatments of the samples.\n\n Returns:\n\n \"\"\"\n\n def _nuisance_fit(\n _self, Y, T, X=None, W=None, Z=None, sample_weight=None, groups=None\n ):\n assert Z is None, \"Cannot accept instrument!\"\n param = {\n \"X\": X,\n \"W\": W,\n \"T\": T,\n \"Y\": Y,\n \"model_t\": _self._model_t,\n \"model_y\": _self._model_y,\n \"sample_weight\": sample_weight,\n \"groups\": groups,\n }\n results = self.launch(num=2, param=param, dataset=None)\n for r in results:\n if \"model_t\" in r:\n _self._model_t = r[\"model_t\"]\n elif \"model_y\" in r:\n _self._model_y = r[\"model_y\"]\n return _self\n\n _ModelNuisance.fit = _nuisance_fit\n super().fit(Y, T, X=X, **kwargs)\n\n def todo(self, idx: int, total_num: int, param: Any, dataset: Iterable) -> Any:\n model_t = param.pop(\"model_t\")\n model_y = param.pop(\"model_y\")\n X, Y, T, W = param[\"X\"], param[\"Y\"], param[\"T\"], param[\"W\"]\n sample_weight, groups = param[\"sample_weight\"], param[\"groups\"]\n result = {\"idx\": idx}\n if idx == 0:\n model_t.fit(\n X,\n W,\n T,\n **filter_none_kwargs(sample_weight=sample_weight, groups=groups)\n )\n result[\"model_t\"] = model_t\n elif idx == 1:\n model_y.fit(\n X,\n W,\n Y,\n **filter_none_kwargs(sample_weight=sample_weight, groups=groups)\n )\n result[\"model_y\"] = model_y\n return result\n\n def estimate(self, *, X: Iterable[np.ndarray]) -> NoReturn:\n \"\"\"Feed the sample data and estimate the effect on the samples\n\n Arguments:\n X: Features of the samples.\n\n Returns:\n\n \"\"\"\n self._estimate_result = self.const_marginal_effect(X)"
},
{
"identifier": "get_ihdp_data",
"path": "tests/datasets/ihdp_data.py",
"snippet": "def get_ihdp_data():\n \"\"\"\n Loads the IHDP dataset, refer to https://raw.githubusercontent.com/AMLab-Amsterdam/CEVAE/master/datasets/IHDP/csv/ihdp_npci_1.csv\n \"\"\"\n col_names = [\"treatment\", \"y_factual\", \"y_cfactual\", \"mu0\", \"mu1\"] + [\n \"x{}\".format(i + 1) for i in range(25)\n ]\n csv_path = os.path.join(os.path.dirname(inspect.getfile(get_ihdp_data)), \"ihdp.csv\")\n df = pd.read_csv(csv_path, names=col_names)\n logger.info(\"IHDP dataset loaded.\")\n return df.iloc[:523], df.iloc[523:]"
},
{
"identifier": "logger",
"path": "openasce/utils/logger.py",
"snippet": "GLOBAL_LOGGER_NAME = \"openasce-log\"\nDEFAULT_FORMAT = (\n \"[%(asctime)s] [%(levelname)s] [%(filename)s:%(lineno)d:%(funcName)s] %(message)s\"\n)\nDEFAULT_FORMATTER = logging.Formatter(DEFAULT_FORMAT)\ndef init_custom_logger(name):\nclass openasceLogger(object):"
}
] | from unittest import TestCase
from econml.sklearn_extensions.linear_model import WeightedLassoCVWrapper
from sklearn.linear_model import LassoCV
from openasce.inference.learner.dml import DML
from tests.datasets.ihdp_data import get_ihdp_data
from openasce.utils.logger import logger
import numpy as np | 1,324 | # Copyright 2023 AntGroup CO., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
class TestDML(TestCase):
def setUp(self) -> None:
self.train_data, self.test_data = get_ihdp_data()
np.random.seed(12)
return super().setUp()
def test_dml(self):
np.random.seed(12)
learner = DML(
model_y=WeightedLassoCVWrapper(),
model_t=WeightedLassoCVWrapper(),
model_final=LassoCV(cv=3),
categories=[0, 1],
)
learner.fit(
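            # columns 5 onward of the IHDP frame are the 25 pre-treatment covariates x1..x25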
X=self.train_data[self.train_data.columns[5:]]
.to_numpy()
.astype(np.float32),
Y=self.train_data["y_factual"],
T=self.train_data["treatment"],
)
learner.estimate(
X=self.test_data[self.train_data.columns[5:]].to_numpy().astype(np.float32)
)
avg = np.average(learner.get_result())
| # Copyright 2023 AntGroup CO., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
class TestDML(TestCase):
def setUp(self) -> None:
self.train_data, self.test_data = get_ihdp_data()
np.random.seed(12)
return super().setUp()
def test_dml(self):
np.random.seed(12)
learner = DML(
model_y=WeightedLassoCVWrapper(),
model_t=WeightedLassoCVWrapper(),
model_final=LassoCV(cv=3),
categories=[0, 1],
)
learner.fit(
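            # columns 5 onward of the IHDP frame are the 25 pre-treatment covariates x1..x25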
X=self.train_data[self.train_data.columns[5:]]
.to_numpy()
.astype(np.float32),
Y=self.train_data["y_factual"],
T=self.train_data["treatment"],
)
learner.estimate(
X=self.test_data[self.train_data.columns[5:]].to_numpy().astype(np.float32)
)
avg = np.average(learner.get_result()) | logger.info(f"dml result: {avg}") | 2 | 2023-12-06 05:54:36+00:00 | 2k |
latorc/Wechat-AI-Assistant | chatbot.py | [
{
"identifier": "WcfWrapper",
"path": "wcf_wrapper.py",
"snippet": "class WcfWrapper:\r\n def __init__(self) -> None:\r\n def __del__(self):\r\n def msg_preview_str(self, msg:WxMsg) -> str:\r\n def wxid_to_nickname(self, wxid) -> str:\r\n def wxid_to_wxcode(self, wxid) -> str:\r\n def get_msg(self) -> WxMsg:\r\n def get_msg_text(self, msg:WxMsg) -> str:\r\n def get_content_type(self, msg:WxMsg) -> int:\r\n def get_refer_content(self, msg:WxMsg) -> ChatMsg:\r\n def get_msg_extra(self, msgid:str, sample_extra:str) -> str:\r\n def get_image(self, msgid:str, extra:str) -> str:\r\n def get_video(self, msgid:str, extra:str) -> str:\r\n def send_message(self, chat_msg:ChatMsg, receiver:str, at_list:str=\"\") -> int:\r\n def send_text(self, msg: str, receiver: str, at_list: str = \"\") -> int:\r\n def send_image(self, file:str, receiver:str) -> int:\r\n def send_file(self, file:str, receiver:str) -> int:\r\n def search_msg(self):\r"
},
{
"identifier": "AdminCmd",
"path": "config.py",
"snippet": "class AdminCmd(Enum):\r\n \"\"\" 微信机器人管理员命令, 与配置项目名称对应 \"\"\"\r\n help = auto()\r\n reload_config = auto()\r\n clear_chat = auto()\r\n load_preset = auto()\r\n reset_preset = auto()\r\n list_preset = auto()\r\n chat_id = auto()\r\n \r\n @property\r\n def description(self):\r\n \"\"\" 返回命令的描述说明 \"\"\"\r\n texts = {\r\n AdminCmd.help: \"显示帮助信息\",\r\n AdminCmd.reload_config: \"重新载入配置文件\",\r\n AdminCmd.clear_chat: \"清除当前对话记忆\",\r\n AdminCmd.load_preset: \"预设名 为当前对话载入预设\",\r\n AdminCmd.reset_preset: \"为当前对话清除预设\",\r\n AdminCmd.list_preset: \"列出当前可用预设\",\r\n AdminCmd.chat_id: \"显示当前对话(群聊或单聊)的id\" \r\n }\r\n return texts.get(self, \"\")\r"
},
{
"identifier": "ContentType",
"path": "common.py",
"snippet": "class ContentType(Enum):\r\n \"\"\" 表示用微信发送的消息的类型\"\"\"\r\n text = 1 # 文字\r\n image = 3 # 图片\r\n link = 4 # 链接\r\n file = 6 # 文件\r\n voice = 34 # 语音\r\n video = 43 # 视频\r\n ERROR = 9000 # 错误\r\n UNSUPPORTED = 9001 # 不支持类型\r"
},
{
"identifier": "ChatMsg",
"path": "common.py",
"snippet": "class ChatMsg:\r\n \"\"\" 代表某种类型的消息, 用于内部数据传递 \"\"\"\r\n def __init__(self, type:ContentType, content:str) -> None:\r\n \"\"\" 初始化\r\n Args:\r\n type (ContentType): 附件类型\r\n content (str): 附件内容\r\n \"\"\"\r\n self.type = type\r\n self.content = content\r"
}
] | import queue
import re
import config
import common
import openai_wrapper
import preset
from typing import Tuple
from wcf_wrapper import WcfWrapper, ContentType
from wcferry import WxMsg
from config import AdminCmd
from common import ContentType, ChatMsg
| 1,525 |
class Chatbot():
""" 管理微信机器人逻辑. 管理与微信客户端 (如Wechat Ferry) 和 AI 客户端 (如 OpenAI )的交互逻辑 """
def __init__(self, config: config.Config, wcfw: WcfWrapper, oaiw: openai_wrapper.OpenAIWrapper) -> None:
""" 初始化
args:
config (Config): Config对象
wcfw (WcfWrapper): Wechat Ferry Wrapper对象
oaiw (OpenAIWrapper): AI Wrapper对象
"""
self.config = config
self.wcfw = wcfw
self.openai_wrapper = oaiw
self.chat_presets:dict[str, preset.Preset] = {} # 每个对话的预设 {roomid或wxid: 预设}
def start_main_loop(self) -> None:
"""
主循环, 接收并处理微信消息.
该函数阻塞进程.
"""
while self.wcfw.wcf.is_receiving_msg():
try:
msg:WxMsg = self.wcfw.get_msg()
note = f"收到消息 {self.wcfw.msg_preview_str(msg)}"
common.logger().info(note)
except queue.Empty:
continue # 无消息,继续
except Exception as e:
common.logger().error("接收微信消息错误: %s", common.error_trace(e))
try:
self.run_wxmsg(msg)
except Exception as e:
common.logger().error("处理消息错误:%s", common.error_trace(e))
def run_wxmsg(self, msg:WxMsg):
""" 读取并处理一条消息
args:
msg (WxMsg): 消息对象. 群号: msg.roomid, 发送者微信ID: msg.sender, 消息内容: msg.content
"""
content = self._filter_wxmsg(msg)
if content is None:
return
# 确定回复对象
if msg.from_group():
receiver = msg.roomid
if msg.from_self():
at_list = ""
else:
at_list = msg.sender
else: #单聊
receiver = msg.sender
at_list = ""
# 发送者是管理员, 并且是命令时, 处理命令并直接返回
if self.wcfw.wxid_to_wxcode(msg.sender) in self.config.admins:
cmd = self._match_admin_cmd(content)
if cmd:
try:
self.process_admin_cmd(content, receiver, at_list)
except Exception as e:
common.logger().error("执行管理员命令错误: %s",common.error_trace(e))
self.wcfw.send_text(f"执行管理员命令'{content}'发生错误", receiver, at_list)
return
### 调用 AI 处理消息
# 回调函数, 处理 AI 返回消息
|
class Chatbot():
""" 管理微信机器人逻辑. 管理与微信客户端 (如Wechat Ferry) 和 AI 客户端 (如 OpenAI )的交互逻辑 """
def __init__(self, config: config.Config, wcfw: WcfWrapper, oaiw: openai_wrapper.OpenAIWrapper) -> None:
""" 初始化
args:
config (Config): Config对象
wcfw (WcfWrapper): Wechat Ferry Wrapper对象
oaiw (OpenAIWrapper): AI Wrapper对象
"""
self.config = config
self.wcfw = wcfw
self.openai_wrapper = oaiw
        self.chat_presets:dict[str, preset.Preset] = {}     # preset for each conversation {roomid or wxid: preset}
def start_main_loop(self) -> None:
"""
        Main loop: receive and process WeChat messages.
        This function blocks the process.
"""
while self.wcfw.wcf.is_receiving_msg():
try:
msg:WxMsg = self.wcfw.get_msg()
note = f"收到消息 {self.wcfw.msg_preview_str(msg)}"
common.logger().info(note)
except queue.Empty:
                continue  # no message, keep waiting
except Exception as e:
common.logger().error("接收微信消息错误: %s", common.error_trace(e))
try:
self.run_wxmsg(msg)
except Exception as e:
common.logger().error("处理消息错误:%s", common.error_trace(e))
def run_wxmsg(self, msg:WxMsg):
""" 读取并处理一条消息
args:
msg (WxMsg): 消息对象. 群号: msg.roomid, 发送者微信ID: msg.sender, 消息内容: msg.content
"""
content = self._filter_wxmsg(msg)
if content is None:
return
        # determine whom to reply to
if msg.from_group():
receiver = msg.roomid
if msg.from_self():
at_list = ""
else:
at_list = msg.sender
        else: # one-on-one chat
receiver = msg.sender
at_list = ""
        # if the sender is an admin and the text is a command, handle the command and return immediately
if self.wcfw.wxid_to_wxcode(msg.sender) in self.config.admins:
cmd = self._match_admin_cmd(content)
if cmd:
try:
self.process_admin_cmd(content, receiver, at_list)
except Exception as e:
common.logger().error("执行管理员命令错误: %s",common.error_trace(e))
self.wcfw.send_text(f"执行管理员命令'{content}'发生错误", receiver, at_list)
return
        ### Let the AI handle the message
        # callback that processes messages returned by the AI
| def callback_msg(msg:ChatMsg) -> int:
| 3 | 2023-12-07 12:17:15+00:00 | 2k |
tensorsense/faceflow | params/datamodule.py | [
{
"identifier": "LocalNaturalDatasetCfg",
"path": "lib/data/cfg/local.py",
"snippet": "class LocalNaturalDatasetCfg:\n name: str\n root: str\n labels_filename: str = \"au.csv\"\n crops_dir: str = \"crops\"\n aus: List[str] = field(\n default_factory=lambda: [\n \"AU1\",\n \"AU2\",\n \"AU4\",\n ]\n )"
},
{
"identifier": "AUDataModule",
"path": "lib/data/datamodules/vanilla.py",
"snippet": "class AUDataModule(pl.LightningDataModule):\n\n def __init__(self, dataset_cfg: Dict[str, List],\n image_size: int = 224,\n logits_per_class: int = 2,\n train_transforms: A.Compose = None,\n val_transforms: A.Compose = None,\n batch_size: int = 16,\n num_workers: int = 4,\n random_state: int = 1337):\n \"\"\"\n Wrapper that abstracts away data handling, like instantiating datasets, setting dataloaders etc.\n :param dataset_cfg: dict with {'train', 'val'} keys, each item contains a list of dict configs for datasets used\n during the corresponding stage. The config has to include name, root, images and labels paths.\n :param image_size: to what size the input is going to be rescaled and cropped\n :param train_transforms:\n :param val_transforms:\n :param batch_size:\n :param num_workers:\n :param random_state:\n \"\"\"\n super().__init__()\n self.dataset_cfg = dataset_cfg\n self.image_size = image_size\n self.logits_per_class = logits_per_class\n\n self.train_transform = train_transforms\n self.val_transform = val_transforms\n\n self.train_dataset = None\n self.val_datasets = None\n\n self.random_state = random_state\n self.num_workers = num_workers\n self.batch_size = batch_size\n\n def fetch_datasets(self, mode=\"train\"):\n assert mode in {\"train\", \"val\"}\n cfg = self.dataset_cfg[mode]\n\n datasets = []\n for ds in cfg:\n datasets.append(SimpleAUDataset(name=ds.name,\n root=ds.root,\n crops_dir=ds.crops_dir,\n labels_filename=ds.labels_filename,\n aus=ds.aus,\n transform=self.train_transform if mode in {\"train\"} else self.val_transform,\n logits_per_class=self.logits_per_class))\n return datasets\n\n def setup(self, stage: str):\n if stage == \"fit\":\n self.train_dataset = ConcatDataset(self.fetch_datasets(\"train\"))\n self.val_datasets = self.fetch_datasets(\"val\")\n\n print(f\"Train size: {len(self.train_dataset)}\")\n print(f\"Val sizes: {[len(d) for d in self.val_datasets]}\")\n\n if stage == \"test\":\n self.val_datasets = self.fetch_datasets(\"val\")\n\n if stage == \"predict\":\n self.val_datasets = self.fetch_datasets(\"val\")\n\n def train_dataloader(self):\n return DataLoader(self.train_dataset, batch_size=self.batch_size, shuffle=True, num_workers=self.num_workers)\n\n def val_dataloader(self):\n return [DataLoader(d, batch_size=self.batch_size,\n shuffle=False,\n num_workers=self.num_workers) for d in self.val_datasets]\n\n def test_dataloader(self):\n return [DataLoader(d, batch_size=self.batch_size,\n shuffle=False,\n num_workers=self.num_workers) for d in self.val_datasets]\n\n def predict_dataloader(self):\n return [DataLoader(d, batch_size=self.batch_size,\n shuffle=False,\n num_workers=self.num_workers) for d in self.val_datasets]"
}
] | import albumentations as A
import wandb
from albumentations.pytorch import ToTensorV2
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from lib.data.cfg.local import LocalNaturalDatasetCfg
from lib.data.datamodules.vanilla import AUDataModule | 1,019 |
project = "disfa"
aus = [
"AU1",
"AU2",
"AU4",
"AU5",
"AU6",
"AU9",
"AU12",
"AU15",
"AU17",
"AU20",
"AU26",
]
TRAIN_LABELED = [
|
project = "disfa"
aus = [
"AU1",
"AU2",
"AU4",
"AU5",
"AU6",
"AU9",
"AU12",
"AU15",
"AU17",
"AU20",
"AU26",
]
TRAIN_LABELED = [ | LocalNaturalDatasetCfg( | 0 | 2023-12-05 13:15:58+00:00 | 2k |
Psivant/femto | femto/fe/atm/_setup.py | [
{
"identifier": "OpenMMForceGroup",
"path": "femto/md/constants.py",
"snippet": "class OpenMMForceGroup(enum.IntEnum):\n \"\"\"Standard force groups to assign to common OpenMM forces to make them easier to\n identify.\"\"\"\n\n BOND = 0\n ANGLE = 1\n DIHEDRAL = 2\n\n NONBONDED = 3\n\n COM_RESTRAINT = 4\n POSITION_RESTRAINT = 5\n ALIGNMENT_RESTRAINT = 6\n\n BAROSTAT = 7\n\n ATM = 8\n\n OTHER = 16"
},
{
"identifier": "OpenMMForceName",
"path": "femto/md/constants.py",
"snippet": "class OpenMMForceName(str, enum.Enum):\n \"\"\"Standard names use for common OpenMM forces to make them easier to identify.\"\"\"\n\n COM_RESTRAINT = \"com-restraint\"\n POSITION_RESTRAINT = \"position-restraint\"\n ALIGNMENT_RESTRAINT = \"alignment-restraint\""
}
] | import logging
import tempfile
import typing
import numpy
import openmm
import openmm.app
import openmm.unit
import parmed
import scipy.spatial.distance
import femto.fe.reference
import femto.md.rest
import femto.md.restraints
import femto.md.solvate
import femto.md.system
import femto.md.utils.openmm
import femto.fe.atm
import femto.fe.atm._utils
from femto.md.constants import OpenMMForceGroup, OpenMMForceName | 1,395 |
_LOGGER = logging.getLogger(__name__)
def select_displacement(
receptor: parmed.amber.AmberParm,
ligand_1: parmed.amber.AmberParm,
ligand_2: parmed.amber.AmberParm | None,
distance: openmm.unit.Quantity,
) -> openmm.unit.Quantity:
"""Attempts to automatically select a displacement vector for the ligands.
Args:
receptor: The receptor.
ligand_1: The first ligand positioned in the binding site.
ligand_2: The second ligand positioned in the binding site.
distance: The distance to translate ligands along the displacement vector by.
Returns:
The displacement vector.
"""
ligand_coords = numpy.vstack(
[ligand_1.coordinates] + ([] if ligand_2 is None else [ligand_2.coordinates])
)
receptor_coords = receptor.coordinates
directions = numpy.array(
[
[-1.0, -1.0, -1.0],
[+1.0, -1.0, -1.0],
[+1.0, +1.0, -1.0],
[-1.0, +1.0, -1.0],
[-1.0, -1.0, +1.0],
[+1.0, -1.0, +1.0],
[+1.0, +1.0, +1.0],
[-1.0, +1.0, +1.0],
]
)
directions /= numpy.linalg.norm(directions, axis=1, keepdims=True)
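    # try each of the eight cube-corner directions and keep the one whose displaced ligands stay farthest from the receptor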
closest_distances = []
for direction in directions:
displacement = direction * distance.value_in_unit(openmm.unit.angstrom)
offset_coords = ligand_coords + displacement
distances = scipy.spatial.distance.cdist(offset_coords, receptor_coords)
closest_distances.append(distances.min())
direction = directions[numpy.argmax(closest_distances)]
return direction.flatten() * distance
def _offset_ligand(
ligand: parmed.Structure, offset: openmm.unit.Quantity
) -> parmed.Structure:
"""Offsets the coordinates of the specified ligand by a specified amount.
Args:
ligand: The ligand to offset.
offset: The amount to offset the ligand by.
Returns:
The offset ligand.
"""
# we copy in this strange way because parmed doesn't
    # copy all attrs correctly when using copy.deepcopy
with tempfile.TemporaryDirectory() as tmpdir:
ligand.save(f"{tmpdir}/ligand.parm7")
ligand.save(f"{tmpdir}/ligand.mol2")
ligand = parmed.amber.AmberParm(
f"{tmpdir}/ligand.parm7", f"{tmpdir}/ligand.mol2"
)
for atom in ligand.atoms:
atom.xx += offset[0].value_in_unit(openmm.unit.angstrom)
atom.xy += offset[1].value_in_unit(openmm.unit.angstrom)
atom.xz += offset[2].value_in_unit(openmm.unit.angstrom)
return ligand
def _apply_atm_restraints(
system: openmm.System,
config: "femto.fe.atm.ATMRestraints",
ligand_1_com_idxs: list[int],
ligand_1_ref_idxs: tuple[int, int, int] | None,
ligand_2_com_idxs: list[int] | None,
ligand_2_ref_idxs: tuple[int, int, int] | None,
receptor_ref_idxs: list[int],
offset: openmm.unit.Quantity,
):
"""Adds center of mass (COM) and optionally alignment restraints (if running RBFE)
to a system.
Args:
system: The system to add the constraints to in-place.
config: The restraint configuration.
ligand_1_com_idxs: The indices to use when computing the COM of the first
ligand.
ligand_1_ref_idxs: The indices of the first ligand to align on.
ligand_2_com_idxs: The indices to use when computing the COM of the second
ligand.
ligand_2_ref_idxs: The indices of the second ligand to align on.
receptor_ref_idxs: The indices of the receptor atoms that form the binding site.
offset: The vector that the ligand will be offset by during the ATM calculation.
"""
com_restraint = femto.fe.atm._utils.create_com_restraint(
ligand_1_com_idxs,
receptor_ref_idxs,
config.com.k,
config.com.radius,
[0.0, 0.0, 0.0] * openmm.unit.angstrom,
)
com_restraint.setForceGroup(OpenMMForceGroup.COM_RESTRAINT)
| """Set up the system for ATM calculations."""
if typing.TYPE_CHECKING:
    import femto.fe.atm  # only needed for type annotations
_LOGGER = logging.getLogger(__name__)
def select_displacement(
receptor: parmed.amber.AmberParm,
ligand_1: parmed.amber.AmberParm,
ligand_2: parmed.amber.AmberParm | None,
distance: openmm.unit.Quantity,
) -> openmm.unit.Quantity:
"""Attempts to automatically select a displacement vector for the ligands.
Args:
receptor: The receptor.
ligand_1: The first ligand positioned in the binding site.
ligand_2: The second ligand positioned in the binding site.
distance: The distance to translate ligands along the displacement vector by.
Returns:
The displacement vector.
"""
ligand_coords = numpy.vstack(
[ligand_1.coordinates] + ([] if ligand_2 is None else [ligand_2.coordinates])
)
receptor_coords = receptor.coordinates
directions = numpy.array(
[
[-1.0, -1.0, -1.0],
[+1.0, -1.0, -1.0],
[+1.0, +1.0, -1.0],
[-1.0, +1.0, -1.0],
[-1.0, -1.0, +1.0],
[+1.0, -1.0, +1.0],
[+1.0, +1.0, +1.0],
[-1.0, +1.0, +1.0],
]
)
directions /= numpy.linalg.norm(directions, axis=1, keepdims=True)
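    # try each of the eight cube-corner directions and keep the one whose displaced ligands stay farthest from the receptor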
closest_distances = []
for direction in directions:
displacement = direction * distance.value_in_unit(openmm.unit.angstrom)
offset_coords = ligand_coords + displacement
distances = scipy.spatial.distance.cdist(offset_coords, receptor_coords)
closest_distances.append(distances.min())
direction = directions[numpy.argmax(closest_distances)]
return direction.flatten() * distance
def _offset_ligand(
ligand: parmed.Structure, offset: openmm.unit.Quantity
) -> parmed.Structure:
"""Offsets the coordinates of the specified ligand by a specified amount.
Args:
ligand: The ligand to offset.
offset: The amount to offset the ligand by.
Returns:
The offset ligand.
"""
# we copy in this strange way because parmed doesn't
    # copy all attrs correctly when using copy.deepcopy
with tempfile.TemporaryDirectory() as tmpdir:
ligand.save(f"{tmpdir}/ligand.parm7")
ligand.save(f"{tmpdir}/ligand.mol2")
ligand = parmed.amber.AmberParm(
f"{tmpdir}/ligand.parm7", f"{tmpdir}/ligand.mol2"
)
for atom in ligand.atoms:
atom.xx += offset[0].value_in_unit(openmm.unit.angstrom)
atom.xy += offset[1].value_in_unit(openmm.unit.angstrom)
atom.xz += offset[2].value_in_unit(openmm.unit.angstrom)
return ligand
def _apply_atm_restraints(
system: openmm.System,
config: "femto.fe.atm.ATMRestraints",
ligand_1_com_idxs: list[int],
ligand_1_ref_idxs: tuple[int, int, int] | None,
ligand_2_com_idxs: list[int] | None,
ligand_2_ref_idxs: tuple[int, int, int] | None,
receptor_ref_idxs: list[int],
offset: openmm.unit.Quantity,
):
"""Adds center of mass (COM) and optionally alignment restraints (if running RBFE)
to a system.
Args:
system: The system to add the constraints to in-place.
config: The restraint configuration.
ligand_1_com_idxs: The indices to use when computing the COM of the first
ligand.
ligand_1_ref_idxs: The indices of the first ligand to align on.
ligand_2_com_idxs: The indices to use when computing the COM of the second
ligand.
ligand_2_ref_idxs: The indices of the second ligand to align on.
receptor_ref_idxs: The indices of the receptor atoms that form the binding site.
offset: The vector that the ligand will be offset by during the ATM calculation.
"""
com_restraint = femto.fe.atm._utils.create_com_restraint(
ligand_1_com_idxs,
receptor_ref_idxs,
config.com.k,
config.com.radius,
[0.0, 0.0, 0.0] * openmm.unit.angstrom,
)
com_restraint.setForceGroup(OpenMMForceGroup.COM_RESTRAINT) | com_restraint.setName(OpenMMForceName.COM_RESTRAINT) | 1 | 2023-12-07 15:28:18+00:00 | 2k |
AIFSH/NativeDancer | nativedancer/third_part/detectron2/evaluation/cityscapes_evaluation.py | [
{
"identifier": "MetadataCatalog",
"path": "nativedancer/third_part/detectron2/data/catalog.py",
"snippet": "class _DatasetCatalog(UserDict):\nclass Metadata(types.SimpleNamespace):\nclass _MetadataCatalog(UserDict):\n def register(self, name, func):\n def get(self, name):\n def list(self) -> List[str]:\n def remove(self, name):\n def __str__(self):\n def __getattr__(self, key):\n def __setattr__(self, key, val):\n def as_dict(self):\n def set(self, **kwargs):\n def get(self, key, default=None):\n def get(self, name):\n def list(self):\n def remove(self, name):\n def __str__(self):\n _RENAMED = {\n \"class_names\": \"thing_classes\",\n \"dataset_id_to_contiguous_id\": \"thing_dataset_id_to_contiguous_id\",\n \"stuff_class_names\": \"stuff_classes\",\n }"
},
{
"identifier": "comm",
"path": "nativedancer/third_part/detectron2/utils/comm.py",
"snippet": "_LOCAL_PROCESS_GROUP = None\n_MISSING_LOCAL_PG_ERROR = (\n \"Local process group is not yet created! Please use detectron2's `launch()` \"\n \"to start processes and initialize pytorch process group. If you need to start \"\n \"processes in other ways, please call comm.create_local_process_group(\"\n \"num_workers_per_machine) after calling torch.distributed.init_process_group().\"\n)\n _LOCAL_PROCESS_GROUP = pg\ndef get_world_size() -> int:\ndef get_rank() -> int:\ndef create_local_process_group(num_workers_per_machine: int) -> None:\ndef get_local_process_group():\ndef get_local_rank() -> int:\ndef get_local_size() -> int:\ndef is_main_process() -> bool:\ndef synchronize():\ndef _get_global_gloo_group():\ndef all_gather(data, group=None):\ndef gather(data, dst=0, group=None):\ndef shared_random_seed():\ndef reduce_dict(input_dict, average=True):"
},
{
"identifier": "PathManager",
"path": "nativedancer/third_part/detectron2/utils/file_io.py",
"snippet": "class Detectron2Handler(PathHandler):\n PREFIX = \"detectron2://\"\n S3_DETECTRON2_PREFIX = \"https://dl.fbaipublicfiles.com/detectron2/\"\n def _get_supported_prefixes(self):\n def _get_local_path(self, path, **kwargs):\n def _open(self, path, mode=\"r\", **kwargs):"
},
{
"identifier": "DatasetEvaluator",
"path": "nativedancer/third_part/detectron2/evaluation/evaluator.py",
"snippet": "class DatasetEvaluator:\n \"\"\"\n Base class for a dataset evaluator.\n\n The function :func:`inference_on_dataset` runs the model over\n all samples in the dataset, and have a DatasetEvaluator to process the inputs/outputs.\n\n This class will accumulate information of the inputs/outputs (by :meth:`process`),\n and produce evaluation results in the end (by :meth:`evaluate`).\n \"\"\"\n\n def reset(self):\n \"\"\"\n Preparation for a new round of evaluation.\n Should be called before starting a round of evaluation.\n \"\"\"\n pass\n\n def process(self, inputs, outputs):\n \"\"\"\n Process the pair of inputs and outputs.\n If they contain batches, the pairs can be consumed one-by-one using `zip`:\n\n .. code-block:: python\n\n for input_, output in zip(inputs, outputs):\n # do evaluation on single input/output pair\n ...\n\n Args:\n inputs (list): the inputs that's used to call the model.\n outputs (list): the return value of `model(inputs)`\n \"\"\"\n pass\n\n def evaluate(self):\n \"\"\"\n Evaluate/summarize the performance, after processing all input/output pairs.\n\n Returns:\n dict:\n A new evaluator class can return a dict of arbitrary format\n as long as the user can process the results.\n In our train_net.py, we expect the following format:\n\n * key: the name of the task (e.g., bbox)\n * value: a dict of {metric name: score}, e.g.: {\"AP50\": 80}\n \"\"\"\n pass"
}
] | import glob
import logging
import numpy as np
import os
import tempfile
import torch
import cityscapesscripts.evaluation.evalInstanceLevelSemanticLabeling as cityscapes_eval
import cityscapesscripts.evaluation.evalPixelLevelSemanticLabeling as cityscapes_eval
from collections import OrderedDict
from PIL import Image
from ..data import MetadataCatalog
from ..utils import comm
from ..utils.file_io import PathManager
from .evaluator import DatasetEvaluator
from cityscapesscripts.helpers.labels import name2label
from cityscapesscripts.helpers.labels import trainId2label | 1,295 | # Copyright (c) Facebook, Inc. and its affiliates.
class CityscapesEvaluator(DatasetEvaluator):
"""
Base class for evaluation using cityscapes API.
"""
def __init__(self, dataset_name):
"""
Args:
dataset_name (str): the name of the dataset.
It must have the following metadata associated with it:
"thing_classes", "gt_dir".
"""
self._metadata = MetadataCatalog.get(dataset_name)
self._cpu_device = torch.device("cpu")
self._logger = logging.getLogger(__name__)
def reset(self):
self._working_dir = tempfile.TemporaryDirectory(prefix="cityscapes_eval_")
self._temp_dir = self._working_dir.name
# All workers will write to the same results directory
# TODO this does not work in distributed training
assert (
| # Copyright (c) Facebook, Inc. and its affiliates.
class CityscapesEvaluator(DatasetEvaluator):
"""
Base class for evaluation using cityscapes API.
"""
def __init__(self, dataset_name):
"""
Args:
dataset_name (str): the name of the dataset.
It must have the following metadata associated with it:
"thing_classes", "gt_dir".
"""
self._metadata = MetadataCatalog.get(dataset_name)
self._cpu_device = torch.device("cpu")
self._logger = logging.getLogger(__name__)
def reset(self):
self._working_dir = tempfile.TemporaryDirectory(prefix="cityscapes_eval_")
self._temp_dir = self._working_dir.name
# All workers will write to the same results directory
# TODO this does not work in distributed training
assert ( | comm.get_local_size() == comm.get_world_size() | 1 | 2023-12-10 20:14:00+00:00 | 2k |
ethanweber/nerfiller | nerfiller/inpaint/saicinpainting/training/modules/base.py | [
{
"identifier": "DepthWiseSeperableConv",
"path": "nerfiller/inpaint/saicinpainting/training/modules/depthwise_sep_conv.py",
"snippet": "class DepthWiseSeperableConv(nn.Module):\n def __init__(self, in_dim, out_dim, *args, **kwargs):\n super().__init__()\n if \"groups\" in kwargs:\n # ignoring groups for Depthwise Sep Conv\n del kwargs[\"groups\"]\n\n self.depthwise = nn.Conv2d(in_dim, in_dim, *args, groups=in_dim, **kwargs)\n self.pointwise = nn.Conv2d(in_dim, out_dim, kernel_size=1)\n\n def forward(self, x):\n out = self.depthwise(x)\n out = self.pointwise(out)\n return out"
},
{
"identifier": "MultidilatedConv",
"path": "nerfiller/inpaint/saicinpainting/training/modules/multidilated_conv.py",
"snippet": "class MultidilatedConv(nn.Module):\n def __init__(\n self,\n in_dim,\n out_dim,\n kernel_size,\n dilation_num=3,\n comb_mode=\"sum\",\n equal_dim=True,\n shared_weights=False,\n padding=1,\n min_dilation=1,\n shuffle_in_channels=False,\n use_depthwise=False,\n **kwargs,\n ):\n super().__init__()\n convs = []\n self.equal_dim = equal_dim\n assert comb_mode in (\"cat_out\", \"sum\", \"cat_in\", \"cat_both\"), comb_mode\n if comb_mode in (\"cat_out\", \"cat_both\"):\n self.cat_out = True\n if equal_dim:\n assert out_dim % dilation_num == 0\n out_dims = [out_dim // dilation_num] * dilation_num\n self.index = sum(\n [[i + j * (out_dims[0]) for j in range(dilation_num)] for i in range(out_dims[0])],\n [],\n )\n else:\n out_dims = [out_dim // 2 ** (i + 1) for i in range(dilation_num - 1)]\n out_dims.append(out_dim - sum(out_dims))\n index = []\n starts = [0] + out_dims[:-1]\n lengths = [out_dims[i] // out_dims[-1] for i in range(dilation_num)]\n for i in range(out_dims[-1]):\n for j in range(dilation_num):\n index += list(range(starts[j], starts[j] + lengths[j]))\n starts[j] += lengths[j]\n self.index = index\n assert len(index) == out_dim\n self.out_dims = out_dims\n else:\n self.cat_out = False\n self.out_dims = [out_dim] * dilation_num\n\n if comb_mode in (\"cat_in\", \"cat_both\"):\n if equal_dim:\n assert in_dim % dilation_num == 0\n in_dims = [in_dim // dilation_num] * dilation_num\n else:\n in_dims = [in_dim // 2 ** (i + 1) for i in range(dilation_num - 1)]\n in_dims.append(in_dim - sum(in_dims))\n self.in_dims = in_dims\n self.cat_in = True\n else:\n self.cat_in = False\n self.in_dims = [in_dim] * dilation_num\n\n conv_type = DepthWiseSeperableConv if use_depthwise else nn.Conv2d\n dilation = min_dilation\n for i in range(dilation_num):\n if isinstance(padding, int):\n cur_padding = padding * dilation\n else:\n cur_padding = padding[i]\n convs.append(\n conv_type(\n self.in_dims[i],\n self.out_dims[i],\n kernel_size,\n padding=cur_padding,\n dilation=dilation,\n **kwargs,\n )\n )\n if i > 0 and shared_weights:\n convs[-1].weight = convs[0].weight\n convs[-1].bias = convs[0].bias\n dilation *= 2\n self.convs = nn.ModuleList(convs)\n\n self.shuffle_in_channels = shuffle_in_channels\n if self.shuffle_in_channels:\n # shuffle list as shuffling of tensors is nondeterministic\n in_channels_permute = list(range(in_dim))\n random.shuffle(in_channels_permute)\n # save as buffer so it is saved and loaded with checkpoint\n self.register_buffer(\"in_channels_permute\", torch.tensor(in_channels_permute))\n\n def forward(self, x):\n if self.shuffle_in_channels:\n x = x[:, self.in_channels_permute]\n\n outs = []\n if self.cat_in:\n if self.equal_dim:\n x = x.chunk(len(self.convs), dim=1)\n else:\n new_x = []\n start = 0\n for dim in self.in_dims:\n new_x.append(x[:, start : start + dim])\n start += dim\n x = new_x\n for i, conv in enumerate(self.convs):\n if self.cat_in:\n input = x[i]\n else:\n input = x\n outs.append(conv(input))\n if self.cat_out:\n out = torch.cat(outs, dim=1)[:, self.index]\n else:\n out = sum(outs)\n return out"
}
] | import abc
import torch
import torch.nn as nn
from typing import Tuple, List
from nerfiller.inpaint.saicinpainting.training.modules.depthwise_sep_conv import (
DepthWiseSeperableConv,
)
from nerfiller.inpaint.saicinpainting.training.modules.multidilated_conv import (
MultidilatedConv,
) | 1,459 |
class BaseDiscriminator(nn.Module):
@abc.abstractmethod
def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, List[torch.Tensor]]:
"""
Predict scores and get intermediate activations. Useful for feature matching loss
:return tuple (scores, list of intermediate activations)
"""
        raise NotImplementedError()
def get_conv_block_ctor(kind="default"):
if not isinstance(kind, str):
return kind
if kind == "default":
return nn.Conv2d
if kind == "depthwise":
return DepthWiseSeperableConv
if kind == "multidilated":
|
class BaseDiscriminator(nn.Module):
@abc.abstractmethod
def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, List[torch.Tensor]]:
"""
Predict scores and get intermediate activations. Useful for feature matching loss
:return tuple (scores, list of intermediate activations)
"""
        raise NotImplementedError()
def get_conv_block_ctor(kind="default"):
if not isinstance(kind, str):
return kind
if kind == "default":
return nn.Conv2d
if kind == "depthwise":
return DepthWiseSeperableConv
if kind == "multidilated": | return MultidilatedConv | 1 | 2023-12-07 19:12:08+00:00 | 2k |
nnanhuang/Customize-it-3D | ldm/models/diffusion/plms.py | [
{
"identifier": "make_ddim_sampling_parameters",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True):\n # select alphas for computing the variance schedule\n alphas = alphacums[ddim_timesteps]\n alphas_prev = np.asarray([alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist())\n\n # according the the formula provided in https://arxiv.org/abs/2010.02502\n sigmas = eta * np.sqrt((1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev))\n if verbose:\n print(f'Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}')\n print(f'For the chosen value of eta, which is {eta}, '\n f'this results in the following sigma_t schedule for ddim sampler {sigmas}')\n return sigmas, alphas, alphas_prev"
},
{
"identifier": "make_ddim_timesteps",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def make_ddim_timesteps(ddim_discr_method, num_ddim_timesteps, num_ddpm_timesteps, verbose=True):\n if ddim_discr_method == 'uniform':\n c = num_ddpm_timesteps // num_ddim_timesteps\n ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, c)))\n elif ddim_discr_method == 'quad':\n ddim_timesteps = ((np.linspace(0, np.sqrt(num_ddpm_timesteps * .8), num_ddim_timesteps)) ** 2).astype(int)\n else:\n raise NotImplementedError(f'There is no ddim discretization method called \"{ddim_discr_method}\"')\n\n # assert ddim_timesteps.shape[0] == num_ddim_timesteps\n # add one to get the final alpha values right (the ones from first scale to data during sampling)\n steps_out = ddim_timesteps + 1\n if verbose:\n print(f'Selected timesteps for ddim sampler: {steps_out}')\n return steps_out"
},
{
"identifier": "noise_like",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()"
},
{
"identifier": "norm_thresholding",
"path": "ldm/models/diffusion/sampling_util.py",
"snippet": "def norm_thresholding(x0, value):\n s = append_dims(x0.pow(2).flatten(1).mean(1).sqrt().clamp(min=value), x0.ndim)\n return x0 * (value / s)"
}
] | import torch
import numpy as np
from tqdm import tqdm
from functools import partial
from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like
from ldm.models.diffusion.sampling_util import norm_thresholding | 868 | """SAMPLING ONLY."""
class PLMSSampler(object):
def __init__(self, model, schedule="linear", **kwargs):
super().__init__()
self.model = model
self.ddpm_num_timesteps = model.num_timesteps
self.schedule = schedule
def register_buffer(self, name, attr):
if type(attr) == torch.Tensor:
if attr.device != torch.device("cuda"):
attr = attr.to(torch.device("cuda"))
setattr(self, name, attr)
def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True):
if ddim_eta != 0:
raise ValueError('ddim_eta must be 0 for PLMS')
| """SAMPLING ONLY."""
class PLMSSampler(object):
def __init__(self, model, schedule="linear", **kwargs):
super().__init__()
self.model = model
self.ddpm_num_timesteps = model.num_timesteps
self.schedule = schedule
def register_buffer(self, name, attr):
if type(attr) == torch.Tensor:
if attr.device != torch.device("cuda"):
attr = attr.to(torch.device("cuda"))
setattr(self, name, attr)
def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True):
if ddim_eta != 0:
raise ValueError('ddim_eta must be 0 for PLMS') | self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps, | 1 | 2023-12-14 11:03:35+00:00 | 2k |
TaoHuang13/diffusion_reward | diffusion_reward/models/codec_models/vqgan/vqgan.py | [
{
"identifier": "Codebook",
"path": "diffusion_reward/models/codec_models/vqgan/codebook.py",
"snippet": "class Codebook(nn.Module):\n def __init__(self, args):\n super(Codebook, self).__init__()\n self.num_codebook_vectors = args.num_codebook_vectors\n self.latent_dim = args.latent_dim\n self.beta = args.beta\n\n self.embedding = nn.Embedding(self.num_codebook_vectors, self.latent_dim)\n self.embedding.weight.data.uniform_(-1.0 / self.num_codebook_vectors, 1.0 / self.num_codebook_vectors)\n\n def forward(self, z):\n z = z.permute(0, 2, 3, 1).contiguous()\n z_flattened = z.view(-1, self.latent_dim)\n\n d = torch.sum(z_flattened**2, dim=1, keepdim=True) + \\\n torch.sum(self.embedding.weight**2, dim=1) - \\\n 2*(torch.matmul(z_flattened, self.embedding.weight.t()))\n\n min_encoding_indices = torch.argmin(d, dim=1)\n z_q = self.embedding(min_encoding_indices).view(z.shape)\n\n\n loss = torch.mean((z_q.detach() - z)**2) + self.beta * torch.mean((z_q - z.detach())**2)\n\n z_q = z + (z_q - z).detach()\n z_q = z_q.permute(0, 3, 1, 2)\n return z_q, min_encoding_indices, loss"
},
{
"identifier": "Decoder",
"path": "diffusion_reward/models/codec_models/vqgan/decoder.py",
"snippet": "class Decoder(nn.Module):\n def __init__(self, args):\n super(Decoder, self).__init__()\n #channels = [512, 256, 256, 128, 128]\n channels = args.channels[::-1]\n attn_resolutions = [16]\n num_res_blocks = 2\n resolution = args.latent_size\n\n in_channels = channels[0]\n layers = [nn.Conv2d(args.latent_dim, in_channels, 3, 1, 1),\n ResidualBlock(in_channels, in_channels),\n NonLocalBlock(in_channels),\n ResidualBlock(in_channels, in_channels)]\n\n for i in range(len(channels)):\n out_channels = channels[i]\n for j in range(num_res_blocks):\n layers.append(ResidualBlock(in_channels, out_channels))\n in_channels = out_channels\n if resolution in attn_resolutions:\n layers.append(NonLocalBlock(in_channels))\n #if i != 0 and resolution < 64:\n if resolution < args.resolution:\n layers.append(UpSampleBlock(in_channels))\n resolution *= 2\n layers.append(GroupNorm(in_channels))\n layers.append(Swish())\n layers.append(nn.Conv2d(in_channels, args.image_channels, 3, 1, 1))\n self.model = nn.Sequential(*layers)\n\n def forward(self, x):\n return self.model(x)"
},
{
"identifier": "Encoder",
"path": "diffusion_reward/models/codec_models/vqgan/encoder.py",
"snippet": "class Encoder(nn.Module):\n def __init__(self, args):\n super(Encoder, self).__init__()\n #channels = [128, 128, 128, 256, 256, 512]\n channels = args.channels\n attn_resolutions = [16]\n num_res_blocks = 2\n resolution = args.resolution\n latent_size = args.latent_size\n layers = [nn.Conv2d(args.image_channels, channels[0], 3, 1, 1)]\n for i in range(len(channels)-1):\n in_channels = channels[i]\n out_channels = channels[i + 1]\n for j in range(num_res_blocks):\n layers.append(ResidualBlock(in_channels, out_channels))\n in_channels = out_channels\n if resolution in attn_resolutions:\n layers.append(NonLocalBlock(in_channels))\n #if i != len(channels)-2 and resolution > latent_size:\n if resolution > latent_size:\n layers.append(DownSampleBlock(channels[i+1]))\n resolution //= 2\n layers.append(ResidualBlock(channels[-1], channels[-1]))\n layers.append(NonLocalBlock(channels[-1]))\n layers.append(ResidualBlock(channels[-1], channels[-1]))\n layers.append(GroupNorm(channels[-1]))\n layers.append(Swish())\n layers.append(nn.Conv2d(channels[-1], args.latent_dim, 3, 1, 1))\n self.model = nn.Sequential(*layers)\n\n def forward(self, x):\n return self.model(x)"
}
] | import torch
import torch.nn as nn
from .codebook import Codebook
from .decoder import Decoder
from .encoder import Encoder | 1,162 |
class VQGAN(nn.Module):
def __init__(self, args):
super(VQGAN, self).__init__()
self.encoder = Encoder(args).to(device=args.device)
|
class VQGAN(nn.Module):
def __init__(self, args):
super(VQGAN, self).__init__()
self.encoder = Encoder(args).to(device=args.device) | self.decoder = Decoder(args).to(device=args.device) | 1 | 2023-12-05 02:42:28+00:00 | 2k |
its0x4d/fastapi-jet | fastapi_jet/commands/startproject.py | [
{
"identifier": "app",
"path": "fastapi_jet/cli.py",
"snippet": "def _version_callback(value: bool) -> None:\ndef _register_commands() -> None:\ndef main(\n version: Optional[bool] = typer.Option(\n None,\n \"--version\",\n \"-v\",\n help=\"Show the application's version and exit.\",\n callback=_version_callback,\n is_eager=True,\n )\n) -> None:"
},
{
"identifier": "ProjectContext",
"path": "fastapi_jet/context.py",
"snippet": "class ProjectContext(BaseModel):\n name: str\n folder_name: str = None\n package_name: str = None\n use_templates: bool = False\n no_tests: bool = False\n\n @root_validator(pre=True)\n def set_folder_name(cls, values):\n if not values.get(\"folder_name\"):\n values[\"folder_name\"] = name_fixer(values[\"name\"])\n return values\n\n @root_validator(pre=True)\n def set_package_name(cls, values):\n if not values.get(\"package_name\"):\n values[\"package_name\"] = name_fixer(values[\"name\"], extra=[\"-\"])\n return values"
},
{
"identifier": "generate_template",
"path": "fastapi_jet/generator.py",
"snippet": "def generate_template(template_name: str, context: Union[ProjectContext, AppContext]) -> str:\n \"\"\"\n This function is used to generate a template using the cookiecutter library.\n\n :param template_name: The name of the template to generate.\n :type template_name: str\n :param context: An object that contains the context for the template.\n This can be either a ProjectContext or an AppContext.\n :type context: Union[ProjectContext, AppContext]\n :return: The path to the generated template.\n :rtype: str\n \"\"\"\n try:\n # Generate the template using the cookiecutter library. in 'apps' folder\n data = {\n 'template': os.path.join(TEMPLATES_DIR, template_name),\n 'no_input': True,\n 'extra_context': context.dict(),\n }\n if template_name == \"app\":\n data['output_dir'] = os.path.join(os.getcwd(), 'apps')\n cookiecutter(**data)\n\n except OutputDirExistsException:\n typer.echo(\n f\"[!] Unable to create FastAPI {template_name}! An app with the same name already exists!\"\n f\"\\n[+] Please choose a different name or delete the existing app and try again.\"\n )\n else:\n text = (\n f\"[+] {template_name.capitalize()} [{context.folder_name}] created successfully!\"\n )\n if template_name == \"app\":\n text += f\"\\n[+] To get started, add your app to ROUTERS in app/main.py\"\n typer.echo(text)\n\n return os.path.join(os.getcwd(), context.folder_name)"
},
{
"identifier": "binary_question",
"path": "fastapi_jet/utils.py",
"snippet": "def binary_question(question: str, default: bool = False) -> questionary.Question:\n \"\"\"\n This function is used to present a binary question (yes/no) to the user.\n\n :param question: The question to ask the user.\n :type question: str\n :param default: The default answer to the question. If not provided, the default is False (no).\n :type default: bool, optional\n :return: A questionary.Question object, which can be used to interactively ask the user a question.\n :rtype: questionary.Question\n \"\"\"\n return questionary.confirm(\n question,\n default=default,\n )"
},
{
"identifier": "name_fixer",
"path": "fastapi_jet/utils.py",
"snippet": "def name_fixer(name: str, extra: List[str] = None) -> str:\n \"\"\"\n This function is used to replace certain special characters in a string with an underscore.\n\n :param name: The original string that needs to be fixed.\n :type name: str\n :param extra: An optional list of additional characters that should be replaced.\n :type extra: List[str], optional\n :return: The fixed string.\n :rtype: str\n \"\"\"\n # Define the default list of characters to replace.\n chars = \"* /\\\\|<>?:\\\"' \"\n\n # If the 'extra' parameter is provided, add its characters to the list of characters to replace.\n if extra:\n chars += \"\".join(extra)\n\n # Replace each character in the list with an underscore.\n for char in chars:\n name = name.replace(char, \"_\")\n\n return name"
}
] | import os
import typer
from questionary.form import form
from fastapi_jet.cli import app
from fastapi_jet.context import ProjectContext
from fastapi_jet.generator import generate_template
from fastapi_jet.utils import binary_question, name_fixer | 1,211 |
@app.command(name="startproject")
def startproject(
name: str = typer.Argument(
..., help="Name of the project",
callback=lambda name: name_fixer(name),
metavar="PROJECT_NAME"
),
interactive: bool = typer.Option(False, "--interactive", "-i", help="Interactive mode"),
use_templates: bool = typer.Option(False, "--use-templates", "-t", help="Use templates"),
):
"""
Start a new project
"""
if interactive:
project = form(
|
@app.command(name="startproject")
def startproject(
name: str = typer.Argument(
..., help="Name of the project",
callback=lambda name: name_fixer(name),
metavar="PROJECT_NAME"
),
interactive: bool = typer.Option(False, "--interactive", "-i", help="Interactive mode"),
use_templates: bool = typer.Option(False, "--use-templates", "-t", help="Use templates"),
):
"""
Start a new project
"""
if interactive:
project = form( | use_templates=binary_question("Do you want to use templates?", default=True), | 3 | 2023-12-12 00:15:53+00:00 | 2k |
WithSecureLabs/damn-vulnerable-llm-agent | main.py | [
{
"identifier": "get_current_user_tool",
"path": "tools.py",
"snippet": "def get_current_user(input : str):\ndef get_transactions(userId : str):"
},
{
"identifier": "display_instructions",
"path": "utils.py",
"snippet": "def display_instructions():\n # Markdown with some basic CSS styles for the box\n box_css = \"\"\"\n <style>\n .instructions-box {\n background-color: #f0f0f0;\n border: 1px solid #ddd;\n border-radius: 5px;\n padding: 20px;\n }\n </style>\n \"\"\"\n\n st.sidebar.markdown(box_css, unsafe_allow_html=True)\n\n st.sidebar.markdown(\n \"\"\"\n <div class=\"instructions-box\">\n \n ### Instructions\n You can exploit this ReAct-based assistant via prompt \n injection to get two flags:\n\n - You'll obtain the first flag by accessing the transactions for user with ID 2\n - The second flag is DocBrown's password\n\n To help you finish the challenge, we suggest you familiarize yourself with the techniques \n described <a href=\"https://labs.withsecure.com/publications/llm-agent-prompt-injection\" target=\"_blank\">here</a> \n and <a href=\"https://youtu.be/43qfHaKh0Xk\" target=\"_blank\">here</a>.\n\n </div>\n\n You'll also find the database schema to be useful:\n\n \"\"\",\n unsafe_allow_html=True,\n )\n\n if st.sidebar.button('Show database schema', use_container_width=True):\n st.sidebar.info('Users(userId,username,password)\\n\\nTransactions(transactionId,username,reference,recipient,amount)')"
},
{
"identifier": "display_logo",
"path": "utils.py",
"snippet": "def display_logo():\n # Convert your image\n image_base64 = get_image_base64(\"labs-logo.png\")\n\n # URL of the company website\n url = 'https://labs.withsecure.com/'\n\n # HTML for centered image with hyperlink\n html_string = f\"\"\"\n <div style=\"display:flex; justify-content:center;\">\n <a href=\"{url}\" target=\"_blank\">\n <img src=\"data:image/png;base64,{image_base64}\" width=\"150px\">\n </a>\n </div>\n \"\"\"\n # Display the HTML in the sidebar\n st.sidebar.markdown(html_string, unsafe_allow_html=True)"
}
] | import langchain
import streamlit as st
from dotenv import load_dotenv
from langchain.agents import ConversationalChatAgent, AgentExecutor
from langchain.callbacks import StreamlitCallbackHandler
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
from langchain.memory.chat_message_histories import StreamlitChatMessageHistory
from langchain.agents import initialize_agent
from langchain.callbacks import get_openai_callback
from tools import get_current_user_tool, get_recent_transactions_tool
from utils import display_instructions, display_logo | 1,244 |
load_dotenv()
# Initialise tools
tools = [get_current_user_tool, get_recent_transactions_tool]
system_msg = """Assistant helps the current user retrieve the list of their recent bank transactions and shows them as a table. Assistant will ONLY operate on the userId returned by the GetCurrentUser() tool, and REFUSE to operate on any other userId provided by the user."""
welcome_message = """Hi! I'm a helpful assistant and I can help fetch information about your recent transactions.\n\nTry asking me: "What are my recent transactions?"
"""
st.set_page_config(page_title="Damn Vulnerable LLM Agent")
st.title("Damn Vulnerable LLM Agent")
hide_st_style = """
<style>
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
header {visibility: hidden;}
</style>
"""
st.markdown(hide_st_style, unsafe_allow_html=True)
msgs = StreamlitChatMessageHistory()
memory = ConversationBufferMemory(
chat_memory=msgs, return_messages=True, memory_key="chat_history", output_key="output"
)
if len(msgs.messages) == 0:
msgs.clear()
msgs.add_ai_message(welcome_message)
st.session_state.steps = {}
avatars = {"human": "user", "ai": "assistant"}
for idx, msg in enumerate(msgs.messages):
with st.chat_message(avatars[msg.type]):
# Render intermediate steps if any were saved
for step in st.session_state.steps.get(str(idx), []):
if step[0].tool == "_Exception":
continue
with st.status(f"**{step[0].tool}**: {step[0].tool_input}", state="complete"):
st.write(step[0].log)
st.write(step[1])
st.write(msg.content)
if prompt := st.chat_input(placeholder="Show my recent transactions"):
st.chat_message("user").write(prompt)
llm = ChatOpenAI(
model_name="gpt-4-1106-preview",
temperature=0, streaming=True
)
tools = tools
chat_agent = ConversationalChatAgent.from_llm_and_tools(llm=llm, tools=tools, verbose=True, system_message=system_msg)
executor = AgentExecutor.from_agent_and_tools(
agent=chat_agent,
tools=tools,
memory=memory,
return_intermediate_steps=True,
handle_parsing_errors=True,
verbose=True,
max_iterations=6
)
with st.chat_message("assistant"):
st_cb = StreamlitCallbackHandler(st.container(), expand_new_thoughts=False)
response = executor(prompt, callbacks=[st_cb])
st.write(response["output"])
st.session_state.steps[str(len(msgs.messages) - 1)] = response["intermediate_steps"]
display_instructions()
|
load_dotenv()
# Initialise tools
tools = [get_current_user_tool, get_recent_transactions_tool]
system_msg = """Assistant helps the current user retrieve the list of their recent bank transactions and shows them as a table. Assistant will ONLY operate on the userId returned by the GetCurrentUser() tool, and REFUSE to operate on any other userId provided by the user."""
welcome_message = """Hi! I'm a helpful assistant and I can help fetch information about your recent transactions.\n\nTry asking me: "What are my recent transactions?"
"""
st.set_page_config(page_title="Damn Vulnerable LLM Agent")
st.title("Damn Vulnerable LLM Agent")
hide_st_style = """
<style>
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
header {visibility: hidden;}
</style>
"""
st.markdown(hide_st_style, unsafe_allow_html=True)
msgs = StreamlitChatMessageHistory()
memory = ConversationBufferMemory(
chat_memory=msgs, return_messages=True, memory_key="chat_history", output_key="output"
)
if len(msgs.messages) == 0:
msgs.clear()
msgs.add_ai_message(welcome_message)
st.session_state.steps = {}
avatars = {"human": "user", "ai": "assistant"}
for idx, msg in enumerate(msgs.messages):
with st.chat_message(avatars[msg.type]):
# Render intermediate steps if any were saved
for step in st.session_state.steps.get(str(idx), []):
if step[0].tool == "_Exception":
continue
with st.status(f"**{step[0].tool}**: {step[0].tool_input}", state="complete"):
st.write(step[0].log)
st.write(step[1])
st.write(msg.content)
if prompt := st.chat_input(placeholder="Show my recent transactions"):
st.chat_message("user").write(prompt)
llm = ChatOpenAI(
model_name="gpt-4-1106-preview",
temperature=0, streaming=True
)
tools = tools
chat_agent = ConversationalChatAgent.from_llm_and_tools(llm=llm, tools=tools, verbose=True, system_message=system_msg)
executor = AgentExecutor.from_agent_and_tools(
agent=chat_agent,
tools=tools,
memory=memory,
return_intermediate_steps=True,
handle_parsing_errors=True,
verbose=True,
max_iterations=6
)
with st.chat_message("assistant"):
st_cb = StreamlitCallbackHandler(st.container(), expand_new_thoughts=False)
response = executor(prompt, callbacks=[st_cb])
st.write(response["output"])
st.session_state.steps[str(len(msgs.messages) - 1)] = response["intermediate_steps"]
display_instructions() | display_logo() | 2 | 2023-12-07 09:37:47+00:00 | 2k |
MarcoGorelli/polars-upgrade | polars_upgrade/_plugins/map_dict.py | [
{
"identifier": "ast_to_offset",
"path": "polars_upgrade/_ast_helpers.py",
"snippet": "def ast_to_offset(node: ast.expr | ast.stmt) -> Offset:\n return Offset(node.lineno, node.col_offset)"
},
{
"identifier": "register",
"path": "polars_upgrade/_data.py",
"snippet": "def register(tp: type[AST_T]) -> Callable[[ASTFunc[AST_T]], ASTFunc[AST_T]]:\n def register_decorator(func: ASTFunc[AST_T]) -> ASTFunc[AST_T]:\n FUNCS[tp].append(func)\n return func\n return register_decorator"
},
{
"identifier": "State",
"path": "polars_upgrade/_data.py",
"snippet": "class State(NamedTuple):\n settings: Settings\n aliases: set[str] = set()\n in_annotation: bool = False"
},
{
"identifier": "TokenFunc",
"path": "polars_upgrade/_data.py",
"snippet": "class Settings(NamedTuple):\nclass State(NamedTuple):\nclass ASTCallbackMapping(Protocol):\nAST_T = TypeVar('AST_T', bound=ast.AST)\nRECORD_FROM_IMPORTS = frozenset((\n 'polars',\n))\nFUNCS = collections.defaultdict(list)\ndef register(tp: type[AST_T]) -> Callable[[ASTFunc[AST_T]], ASTFunc[AST_T]]:\n def register_decorator(func: ASTFunc[AST_T]) -> ASTFunc[AST_T]:\n def __getitem__(self, tp: type[AST_T]) -> list[ASTFunc[AST_T]]: ...\ndef visit(\n funcs: ASTCallbackMapping,\n tree: ast.Module,\n settings: Settings,\n) -> dict[Offset, list[TokenFunc]]:\ndef _import_plugins() -> None:"
},
{
"identifier": "find_op",
"path": "polars_upgrade/_token_helpers.py",
"snippet": "def find_op(tokens: list[Token], i: int, src: str) -> int:\n return _find_token(tokens, i, 'OP', src)"
},
{
"identifier": "is_simple_expression",
"path": "polars_upgrade/_token_helpers.py",
"snippet": "def is_simple_expression(node: ast.expr, aliases: set[str]) -> bool:\n while True:\n if isinstance(node, ast.Call):\n node = node.func\n elif (\n isinstance(node, ast.Attribute) and\n node.attr.islower() and\n isinstance(node.value, ast.Name) and\n node.value.id in aliases\n ):\n return True\n elif isinstance(node, ast.Attribute):\n node = node.value\n else:\n return False"
}
] | import ast
import functools
from typing import Iterable
from tokenize_rt import NON_CODING_TOKENS
from tokenize_rt import Offset
from tokenize_rt import Token
from polars_upgrade._ast_helpers import ast_to_offset
from polars_upgrade._data import register
from polars_upgrade._data import State
from polars_upgrade._data import TokenFunc
from polars_upgrade._token_helpers import find_op
from polars_upgrade._token_helpers import is_simple_expression | 1,010 | from __future__ import annotations
def rename(
i: int,
tokens: list[Token],
*,
name: str,
new: str,
) -> None:
while not (tokens[i].name == 'NAME' and tokens[i].src == name):
i += 1
tokens[i] = tokens[i]._replace(src=new)
def rename_and_add_default(
i: int,
tokens: list[Token],
*,
name: str,
new: str,
) -> None:
while not (tokens[i].name == 'NAME' and tokens[i].src == name):
i += 1
tokens[i] = tokens[i]._replace(src=new)
start_paren = find_op(tokens, i, '(')
close_paren = find_op(tokens, start_paren, ')')
# is there a comma before the close paren?
i = close_paren - 1
while tokens[i].name in NON_CODING_TOKENS:
i -= 1
if ',' not in tokens[i].src:
tokens.insert(i + 1, Token('OP', ', '))
tokens.insert(i + 2, Token('NAME', 'default'))
tokens.insert(i + 3, Token('OP', '='))
tokens.insert(i + 4, Token('NUMBER', 'None'))
else:
tokens.insert(i + 1, Token('NAME', 'default'))
tokens.insert(i + 2, Token('OP', '='))
tokens.insert(i + 3, Token('NUMBER', 'None'))
@register(ast.Call)
def visit_Call(
state: State,
node: ast.Call,
parent: ast.AST,
) -> Iterable[tuple[Offset, TokenFunc]]:
if (
isinstance(node.func, ast.Attribute) and
| from __future__ import annotations
def rename(
i: int,
tokens: list[Token],
*,
name: str,
new: str,
) -> None:
while not (tokens[i].name == 'NAME' and tokens[i].src == name):
i += 1
tokens[i] = tokens[i]._replace(src=new)
def rename_and_add_default(
i: int,
tokens: list[Token],
*,
name: str,
new: str,
) -> None:
while not (tokens[i].name == 'NAME' and tokens[i].src == name):
i += 1
tokens[i] = tokens[i]._replace(src=new)
start_paren = find_op(tokens, i, '(')
close_paren = find_op(tokens, start_paren, ')')
# is there a comma before the close paren?
i = close_paren - 1
while tokens[i].name in NON_CODING_TOKENS:
i -= 1
if ',' not in tokens[i].src:
tokens.insert(i + 1, Token('OP', ', '))
tokens.insert(i + 2, Token('NAME', 'default'))
tokens.insert(i + 3, Token('OP', '='))
tokens.insert(i + 4, Token('NUMBER', 'None'))
else:
tokens.insert(i + 1, Token('NAME', 'default'))
tokens.insert(i + 2, Token('OP', '='))
tokens.insert(i + 3, Token('NUMBER', 'None'))
@register(ast.Call)
def visit_Call(
state: State,
node: ast.Call,
parent: ast.AST,
) -> Iterable[tuple[Offset, TokenFunc]]:
if (
isinstance(node.func, ast.Attribute) and | is_simple_expression(node.func.value, state.aliases) and | 5 | 2023-12-09 19:31:35+00:00 | 2k |
I-am-PUID-0/pd_zurg | main.py | [
{
"identifier": "rclone",
"path": "rclone_rd/rclone.py",
"snippet": "def get_port_from_config(config_file_path, key_type):\ndef setup():\n RCLONEMN_RD = f\"{RCLONEMN}_RD\"\n RCLONEMN_AD = f\"{RCLONEMN}_AD\"\n RCLONEMN_RD = RCLONEMN_AD = RCLONEMN"
},
{
"identifier": "duplicate_cleanup",
"path": "cleanup/duplicate_cleanup.py",
"snippet": "def delete_media_with_retry(media):\ndef process_tv_shows():\ndef process_movies():\ndef setup():\ndef cleanup_interval():\ndef cleanup_schedule():\ndef start_cleanup():\ndef cleanup_thread():"
},
{
"identifier": "auto_update",
"path": "update/auto_update.py",
"snippet": "def auto_update(self, process_name, enable_update):\n if enable_update:\n self.logger.info(f\"Automatic updates set to {format_time(self.auto_update_interval())} for {process_name}\") \n self.schedule_thread = threading.Thread(target=self.update_schedule)\n self.schedule_thread.start()\n self.start_process(process_name)\n else:\n self.logger.info(f\"Automatic update disabled for {process_name}\")\n self.start_process(process_name)"
}
] | from base import *
from rclone_rd import rclone
from cleanup import duplicate_cleanup
from update import auto_update
import plex_debrid_ as p
import zurg as z | 720 |
def main():
logger = get_logger()
version = '2.0.1'
ascii_art = f'''
_______ ______ _______ _______ _______
( ____ )( __ \ / ___ )|\ /|( ____ )( ____ \\
| ( )|| ( \ ) \/ ) || ) ( || ( )|| ( \/
| (____)|| | ) | / )| | | || (____)|| |
| _____)| | | | / / | | | || __)| | ____
| ( | | ) | / / | | | || (\ ( | | \_ )
| ) | (__/ ) / (_/\| (___) || ) \ \__| (___) |
|/ (______/_____(_______/(_______)|/ \__/(_______)
(_____)
Version: {version}
'''
logger.info(ascii_art.format(version=version) + "\n" + "\n")
def healthcheck():
while True:
time.sleep(10)
try:
result = subprocess.run(['python', 'healthcheck.py'], capture_output=True, text=True)
if result.stderr:
logger.error(result.stderr.strip())
except Exception as e:
logger.error('Error running healthcheck.py: %s', e)
time.sleep(50)
thread = threading.Thread(target=healthcheck)
thread.daemon = True
thread.start()
try:
if ZURG is None or str(ZURG).lower() == 'false':
pass
elif str(ZURG).lower() == 'true':
try:
if RDAPIKEY or ADAPIKEY:
try:
z.setup.zurg_setup()
z_updater = z.update.ZurgUpdate()
if ZURGUPDATE:
|
def main():
logger = get_logger()
version = '2.0.1'
ascii_art = f'''
_______ ______ _______ _______ _______
( ____ )( __ \ / ___ )|\ /|( ____ )( ____ \\
| ( )|| ( \ ) \/ ) || ) ( || ( )|| ( \/
| (____)|| | ) | / )| | | || (____)|| |
| _____)| | | | / / | | | || __)| | ____
| ( | | ) | / / | | | || (\ ( | | \_ )
| ) | (__/ ) / (_/\| (___) || ) \ \__| (___) |
|/ (______/_____(_______/(_______)|/ \__/(_______)
(_____)
Version: {version}
'''
logger.info(ascii_art.format(version=version) + "\n" + "\n")
def healthcheck():
while True:
time.sleep(10)
try:
result = subprocess.run(['python', 'healthcheck.py'], capture_output=True, text=True)
if result.stderr:
logger.error(result.stderr.strip())
except Exception as e:
logger.error('Error running healthcheck.py: %s', e)
time.sleep(50)
thread = threading.Thread(target=healthcheck)
thread.daemon = True
thread.start()
try:
if ZURG is None or str(ZURG).lower() == 'false':
pass
elif str(ZURG).lower() == 'true':
try:
if RDAPIKEY or ADAPIKEY:
try:
z.setup.zurg_setup()
z_updater = z.update.ZurgUpdate()
if ZURGUPDATE: | z_updater.auto_update('Zurg',True) | 2 | 2023-12-05 14:49:38+00:00 | 2k |
JeffersonQin/DungeonAssistant | registration.py | [
{
"identifier": "o3dobj",
"path": "utils/o3dobj.py",
"snippet": "def get_o3d_unit_block_at_origin():\ndef get_o3d_trajectory_object(points, color=(1, 0, 0)):\n def transform_o3d_format(points):"
},
{
"identifier": "io",
"path": "utils/io.py",
"snippet": "def load_point_clouds(\n pointcloud_base, pointcloud_prefix, merge_cnt, overlap_discard_num, voxel_size=0.0\n):\ndef load_coordinates_and_timestamps(json_file):\ndef load_transformation_matrices(transformation_dir: str):\ndef save_coodinates_and_timestamps(json_file, points, timestamps):"
},
{
"identifier": "tfm",
"path": "utils/tfm.py",
"snippet": "def transform_trajectory(points, transformation):\ndef transform_clouds_and_trajectories(clouds, trajectories, matrices):\ndef retrieve_floor_plan(cloud, scale=100):"
}
] | import json
import argparse
import os
import os.path as osp
import time
import open3d as o3d
import numpy as np
import copy
import matplotlib.pyplot as plt
from utils import o3dobj
from utils import io
from utils import tfm | 1,505 | default=0.05,
help="voxel size for global fast registration downsampling. default is 0.05",
)
parser.add_argument(
"--voxel_size_icp",
type=float,
default=0.05,
help="voxel size for icp downsampling. default is 0.05",
)
parser.add_argument("--skip_icp", action="store_true", help="skip icp and only run fgr")
parser.add_argument(
"--transformed_trajectory_out",
type=str,
default="trajectory_1.jsonl",
help="output trajectory of the transformed trajectory 1 (to trajectory 2)",
)
args = parser.parse_args()
pointcloud_file_path_1 = args.pointcloud1
pointcloud_file_path_2 = args.pointcloud2
trajectory_file_path_1 = args.trajectory1
trajectory_file_path_2 = args.trajectory2
def preprocess_point_cloud(pcd, voxel_size):
"""Downsamples the point cloud and computes the normals and FPFH features"""
print(f":: Downsample with a voxel size {voxel_size:.3f}.")
pcd_down = pcd.voxel_down_sample(voxel_size)
radius_normal = voxel_size * 2
print(f":: Estimate normal with search radius {radius_normal:.3f}.")
pcd_down.estimate_normals(
o3d.geometry.KDTreeSearchParamHybrid(radius=radius_normal, max_nn=30)
)
radius_feature = voxel_size * 5
print(f":: Compute FPFH feature with search radius {radius_feature:.3f}.")
pcd_fpfh = o3d.pipelines.registration.compute_fpfh_feature(
pcd_down,
o3d.geometry.KDTreeSearchParamHybrid(radius=radius_feature, max_nn=100),
)
return pcd_down, pcd_fpfh
def prepare_dataset(voxel_size):
"""Loads two point clouds and downsamples them."""
print(":: Load two point clouds")
source = o3d.io.read_point_cloud(pointcloud_file_path_1)
target = o3d.io.read_point_cloud(pointcloud_file_path_2)
source_down, source_fpfh = preprocess_point_cloud(source, voxel_size)
target_down, target_fpfh = preprocess_point_cloud(target, voxel_size)
return source, target, source_down, target_down, source_fpfh, target_fpfh
def execute_fast_global_registration(
source_down, target_down, source_fpfh, target_fpfh, voxel_size
):
"""Performs fast global registration on the downsampled point clouds"""
distance_threshold = voxel_size * 0.5
print(
f":: Apply fast global registration with distance threshold {distance_threshold:.3f}"
)
result = o3d.pipelines.registration.registration_fgr_based_on_feature_matching(
source_down,
target_down,
source_fpfh,
target_fpfh,
o3d.pipelines.registration.FastGlobalRegistrationOption(
maximum_correspondence_distance=distance_threshold
),
)
return result
def execute_vanilla_icp(source, target):
"""Performs vanilla ICP on the point clouds"""
estimation = o3d.pipelines.registration.TransformationEstimationPointToPlane()
max_correspondence_distance = 0.5
# Convergence-Criteria for Vanilla ICP
criteria = o3d.pipelines.registration.ICPConvergenceCriteria(
relative_fitness=0.000001, relative_rmse=0.000001, max_iteration=50
)
result = o3d.pipelines.registration.registration_icp(
source,
target,
max_correspondence_distance,
estimation_method=estimation,
criteria=criteria,
)
return result
if __name__ == "__main__":
voxel_size_fgr = args.voxel_size_fgr
voxel_size_icp = args.voxel_size_icp
(
cloud_1,
cloud_2,
cloud_1_down,
cloud_2_down,
cloud_1_fpfh,
cloud_2_fpfh,
) = prepare_dataset(voxel_size=voxel_size_fgr)
color_1 = [0.9450980392, 0.5764705882, 0.7098039216]
color_2 = [0.11, 0.72, 0.89]
cloud_1.paint_uniform_color(color_1)
cloud_2.paint_uniform_color(color_2)
cloud_1_down.paint_uniform_color(color_1)
cloud_2_down.paint_uniform_color(color_2)
# axis
axis = o3d.geometry.TriangleMesh.create_coordinate_frame(size=1, origin=[0, 0, 0])
# unit block
|
parser = argparse.ArgumentParser()
parser.add_argument(
"--pointcloud1",
type=str,
default="pointcloud1.ply",
help="first point cloud file path (1 --[transform]-> 2)",
)
parser.add_argument(
"--pointcloud2",
type=str,
default="pointcloud2.ply",
help="second point cloud file path (1 --[transform]-> 2)",
)
parser.add_argument(
"--trajectory1",
type=str,
default="trajectory1.json",
help="first trajectory file path",
)
parser.add_argument(
"--trajectory2",
type=str,
default="trajectory2.json",
help="second trajectory file path",
)
parser.add_argument(
"--fast_cache",
type=str,
default="",
help="transformation cache of fast global registration if available. default is none",
)
parser.add_argument(
"--icp_cache",
type=str,
default="",
help="transformation cache of icp if available. default is none",
)
parser.add_argument(
"--voxel_size_fgr",
type=float,
default=0.05,
help="voxel size for global fast registration downsampling. default is 0.05",
)
parser.add_argument(
"--voxel_size_icp",
type=float,
default=0.05,
help="voxel size for icp downsampling. default is 0.05",
)
parser.add_argument("--skip_icp", action="store_true", help="skip icp and only run fgr")
parser.add_argument(
"--transformed_trajectory_out",
type=str,
default="trajectory_1.jsonl",
help="output trajectory of the transformed trajectory 1 (to trajectory 2)",
)
args = parser.parse_args()
pointcloud_file_path_1 = args.pointcloud1
pointcloud_file_path_2 = args.pointcloud2
trajectory_file_path_1 = args.trajectory1
trajectory_file_path_2 = args.trajectory2
def preprocess_point_cloud(pcd, voxel_size):
"""Downsamples the point cloud and computes the normals and FPFH features"""
print(f":: Downsample with a voxel size {voxel_size:.3f}.")
pcd_down = pcd.voxel_down_sample(voxel_size)
radius_normal = voxel_size * 2
print(f":: Estimate normal with search radius {radius_normal:.3f}.")
pcd_down.estimate_normals(
o3d.geometry.KDTreeSearchParamHybrid(radius=radius_normal, max_nn=30)
)
radius_feature = voxel_size * 5
print(f":: Compute FPFH feature with search radius {radius_feature:.3f}.")
pcd_fpfh = o3d.pipelines.registration.compute_fpfh_feature(
pcd_down,
o3d.geometry.KDTreeSearchParamHybrid(radius=radius_feature, max_nn=100),
)
return pcd_down, pcd_fpfh
def prepare_dataset(voxel_size):
"""Loads two point clouds and downsamples them."""
print(":: Load two point clouds")
source = o3d.io.read_point_cloud(pointcloud_file_path_1)
target = o3d.io.read_point_cloud(pointcloud_file_path_2)
source_down, source_fpfh = preprocess_point_cloud(source, voxel_size)
target_down, target_fpfh = preprocess_point_cloud(target, voxel_size)
return source, target, source_down, target_down, source_fpfh, target_fpfh
def execute_fast_global_registration(
source_down, target_down, source_fpfh, target_fpfh, voxel_size
):
"""Performs fast global registration on the downsampled point clouds"""
distance_threshold = voxel_size * 0.5
print(
f":: Apply fast global registration with distance threshold {distance_threshold:.3f}"
)
result = o3d.pipelines.registration.registration_fgr_based_on_feature_matching(
source_down,
target_down,
source_fpfh,
target_fpfh,
o3d.pipelines.registration.FastGlobalRegistrationOption(
maximum_correspondence_distance=distance_threshold
),
)
return result
def execute_vanilla_icp(source, target):
"""Performs vanilla ICP on the point clouds"""
estimation = o3d.pipelines.registration.TransformationEstimationPointToPlane()
max_correspondence_distance = 0.5
# Convergence-Criteria for Vanilla ICP
criteria = o3d.pipelines.registration.ICPConvergenceCriteria(
relative_fitness=0.000001, relative_rmse=0.000001, max_iteration=50
)
result = o3d.pipelines.registration.registration_icp(
source,
target,
max_correspondence_distance,
estimation_method=estimation,
criteria=criteria,
)
return result
if __name__ == "__main__":
voxel_size_fgr = args.voxel_size_fgr
voxel_size_icp = args.voxel_size_icp
(
cloud_1,
cloud_2,
cloud_1_down,
cloud_2_down,
cloud_1_fpfh,
cloud_2_fpfh,
) = prepare_dataset(voxel_size=voxel_size_fgr)
color_1 = [0.9450980392, 0.5764705882, 0.7098039216]
color_2 = [0.11, 0.72, 0.89]
cloud_1.paint_uniform_color(color_1)
cloud_2.paint_uniform_color(color_2)
cloud_1_down.paint_uniform_color(color_1)
cloud_2_down.paint_uniform_color(color_2)
# axis
axis = o3d.geometry.TriangleMesh.create_coordinate_frame(size=1, origin=[0, 0, 0])
# unit block | unit_block = o3dobj.get_o3d_unit_block_at_origin() | 0 | 2023-12-08 19:52:08+00:00 | 2k |
KAIST-VICLab/From_Ground_To_Objects | networks/depth_decoder.py | [
{
"identifier": "ConvBlock",
"path": "networks/layers.py",
"snippet": "class ConvBlock(nn.Module):\r\n \"\"\"Layer to perform a convolution followed by ELU\r\n \"\"\"\r\n\r\n def __init__(self, in_channels, out_channels):\r\n super(ConvBlock, self).__init__()\r\n\r\n self.conv = Conv3x3(in_channels, out_channels)\r\n self.nonlin = nn.ELU(inplace=True)\r\n\r\n def forward(self, x):\r\n out = self.conv(x)\r\n out = self.nonlin(out)\r\n return out\r"
},
{
"identifier": "Conv3x3",
"path": "networks/layers.py",
"snippet": "class Conv3x3(nn.Module):\r\n \"\"\"Layer to pad and convolve input\r\n \"\"\"\r\n\r\n def __init__(self, in_channels, out_channels, use_refl=True):\r\n super(Conv3x3, self).__init__()\r\n\r\n if use_refl:\r\n self.pad = nn.ReflectionPad2d(1)\r\n else:\r\n self.pad = nn.ZeroPad2d(1)\r\n self.conv = nn.Conv2d(int(in_channels), int(out_channels), 3)\r\n\r\n def forward(self, x):\r\n out = self.pad(x)\r\n out = self.conv(out)\r\n return out\r"
},
{
"identifier": "upsample",
"path": "networks/layers.py",
"snippet": "def upsample(x):\r\n \"\"\"Upsample input tensor by a factor of 2\r\n \"\"\"\r\n return F.interpolate(x, scale_factor=2, mode=\"nearest\")\r"
},
{
"identifier": "disp_to_depth",
"path": "networks/layers.py",
"snippet": "def disp_to_depth(disp, min_depth, max_depth):\r\n \"\"\"Convert network's sigmoid output into depth prediction\r\n The formula for this conversion is given in the 'additional considerations'\r\n section of the paper.\r\n \"\"\"\r\n min_disp = 1 / max_depth\r\n max_disp = 1 / min_depth\r\n scaled_disp = min_disp + (max_disp - min_disp) * disp\r\n depth = 1 / scaled_disp\r\n\r\n return scaled_disp, depth\r"
},
{
"identifier": "coords_to_normals",
"path": "networks/layers.py",
"snippet": "def coords_to_normals(coords):\r\n \"\"\"Calculate surface normals using first order finite-differences.\r\n https://github.com/voyleg/perceptual-depth-sr/\r\n Parameters\r\n ----------\r\n coords : array_like\r\n Coordinates of the points (**, 3, h, w).\r\n Returns\r\n -------\r\n normals : torch.Tensor\r\n Surface normals (**, 3, h, w).\r\n \"\"\"\r\n coords = torch.as_tensor(coords)\r\n if coords.ndim < 4:\r\n coords = coords[None]\r\n\r\n dxdu = coords[..., 0, :, 1:] - coords[..., 0, :, :-1]\r\n dydu = coords[..., 1, :, 1:] - coords[..., 1, :, :-1]\r\n dzdu = coords[..., 2, :, 1:] - coords[..., 2, :, :-1]\r\n dxdv = coords[..., 0, 1:, :] - coords[..., 0, :-1, :]\r\n dydv = coords[..., 1, 1:, :] - coords[..., 1, :-1, :]\r\n dzdv = coords[..., 2, 1:, :] - coords[..., 2, :-1, :]\r\n\r\n dxdu = torch.nn.functional.pad(dxdu, (0, 1), mode='replicate')\r\n dydu = torch.nn.functional.pad(dydu, (0, 1), mode='replicate')\r\n dzdu = torch.nn.functional.pad(dzdu, (0, 1), mode='replicate')\r\n\r\n # pytorch cannot just do `dxdv = torch.nn.functional.pad(dxdv, (0, 0, 0, 1), mode='replicate')`, so\r\n dxdv = torch.cat([dxdv, dxdv[..., -1:, :]], dim=-2)\r\n dydv = torch.cat([dydv, dydv[..., -1:, :]], dim=-2)\r\n dzdv = torch.cat([dzdv, dzdv[..., -1:, :]], dim=-2)\r\n\r\n n_x = dydv * dzdu - dydu * dzdv\r\n n_y = dzdv * dxdu - dzdu * dxdv\r\n n_z = dxdv * dydu - dxdu * dydv\r\n\r\n n = torch.stack([n_x, n_y, n_z], dim=-3)\r\n n = torch.nn.functional.normalize(n, dim=-3)\r\n return n\r"
}
] | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict
from .layers import ConvBlock, Conv3x3, upsample, disp_to_depth, coords_to_normals
from timm.models.layers import trunc_normal_
from .cadepth import SPM, DEM | 1,539 | # Copyright Niantic 2021. Patent Pending. All rights reserved.
#
# This software is licensed under the terms of the ManyDepth licence
# which allows for non-commercial use only, the full terms of which are made
# available in the LICENSE file.
class DepthDecoder(nn.Module):
def __init__(self, num_ch_enc, scales=range(4), num_output_channels=1, use_skips=True,
opt=None, backproject_depth=None, min_depth=0.1, max_depth=100):
super(DepthDecoder, self).__init__()
self.num_output_channels = num_output_channels
self.use_skips = use_skips
self.upsample_mode = 'nearest'
self.scales = scales
self.opt = opt
self.num_ch_enc = num_ch_enc
self.num_ch_dec = np.array([16, 32, 64, 128, 256])
self.backproject_depth = backproject_depth
self.min_depth = min_depth
self.max_depth = max_depth
# decoder
self.convs = OrderedDict()
for i in range(4, -1, -1):
# upconv_0
num_ch_in = self.num_ch_enc[-1] if i == 4 else self.num_ch_dec[i + 1]
if self.opt["use_surface_normal"] and i != 4:
num_ch_in += 3
num_ch_out = self.num_ch_dec[i]
self.convs[("upconv", i, 0)] = ConvBlock(num_ch_in, num_ch_out)
# upconv_1
num_ch_in = self.num_ch_dec[i]
if self.use_skips and i > 0:
num_ch_in += self.num_ch_enc[i - 1]
num_ch_out = self.num_ch_dec[i]
self.convs[("upconv", i, 1)] = ConvBlock(num_ch_in, num_ch_out)
if self.opt['cadepth']:
self.convs[("dem", i)] = DEM(num_ch_in)
for s in self.scales:
| # Copyright Niantic 2021. Patent Pending. All rights reserved.
#
# This software is licensed under the terms of the ManyDepth licence
# which allows for non-commercial use only, the full terms of which are made
# available in the LICENSE file.
class DepthDecoder(nn.Module):
def __init__(self, num_ch_enc, scales=range(4), num_output_channels=1, use_skips=True,
opt=None, backproject_depth=None, min_depth=0.1, max_depth=100):
super(DepthDecoder, self).__init__()
self.num_output_channels = num_output_channels
self.use_skips = use_skips
self.upsample_mode = 'nearest'
self.scales = scales
self.opt = opt
self.num_ch_enc = num_ch_enc
self.num_ch_dec = np.array([16, 32, 64, 128, 256])
self.backproject_depth = backproject_depth
self.min_depth = min_depth
self.max_depth = max_depth
# decoder
self.convs = OrderedDict()
for i in range(4, -1, -1):
# upconv_0
num_ch_in = self.num_ch_enc[-1] if i == 4 else self.num_ch_dec[i + 1]
if self.opt["use_surface_normal"] and i != 4:
num_ch_in += 3
num_ch_out = self.num_ch_dec[i]
self.convs[("upconv", i, 0)] = ConvBlock(num_ch_in, num_ch_out)
# upconv_1
num_ch_in = self.num_ch_dec[i]
if self.use_skips and i > 0:
num_ch_in += self.num_ch_enc[i - 1]
num_ch_out = self.num_ch_dec[i]
self.convs[("upconv", i, 1)] = ConvBlock(num_ch_in, num_ch_out)
if self.opt['cadepth']:
self.convs[("dem", i)] = DEM(num_ch_in)
for s in self.scales: | self.convs[("dispconv", s)] = Conv3x3(self.num_ch_dec[s], self.num_output_channels) | 1 | 2023-12-12 08:29:30+00:00 | 2k |
marc-rigter/polygrad-world-models | polygrad/agent/a2c.py | [
{
"identifier": "EMA",
"path": "polygrad/utils/training.py",
"snippet": "class EMA():\n '''\n empirical moving average\n '''\n def __init__(self, beta):\n super().__init__()\n self.beta = beta\n\n def update_model_average(self, ma_model, current_model):\n for current_params, ma_params in zip(current_model.parameters(), ma_model.parameters()):\n old_weight, up_weight = ma_params.data, current_params.data\n ma_params.data = self.update_average(old_weight, up_weight)\n\n def update_average(self, old, new):\n if old is None:\n return new\n return old * self.beta + (1 - self.beta) * new"
},
{
"identifier": "get_standardized_stats",
"path": "polygrad/utils/evaluation.py",
"snippet": "def get_standardized_stats(policy_distr, act):\n # Compute logprob with all action distributions normalized to standard normal.\n policy_mean = policy_distr.mean\n policy_std = policy_distr.stddev\n standard_normal = D.independent.Independent(D.normal.Normal(torch.zeros_like(policy_mean), torch.ones_like(policy_mean)), 1)\n normed_act = (act - policy_mean) / policy_std\n standard_logprob = standard_normal.log_prob(normed_act)\n\n act_stds = torch.std(normed_act, dim=[0, 1])\n act_means = torch.mean(normed_act, dim=[0, 1])\n return standard_logprob, act_stds, act_means"
}
] | import torch
import copy
import torch.nn as nn
import copy
import torch.nn.functional as F
import torch.distributions as D
import importlib
import wandb
from torch import Tensor
from polygrad.utils.training import EMA
from .functions import *
from .common import *
from polygrad.utils.evaluation import get_standardized_stats | 1,009 |
class ActorCritic(nn.Module):
def __init__(self,
in_dim,
out_actions,
normalizer,
device="cuda:0",
hidden_dim=256,
min_std=0.01,
fixed_std=False,
decay_std_steps=500000,
init_std=0.5,
hidden_layers=2,
layer_norm=True,
gamma=0.999,
ema=0.995,
lambda_gae=0.8,
entropy_weight=1e-3,
entropy_target=-1,
tune_entropy=True,
target_interval=100,
lr_actor=1e-4,
lr_critic=3e-4,
lr_alpha=1e-2,
actor_grad='reinforce',
actor_dist='normal_tanh',
normalize_adv=False,
grad_clip=None,
clip_logprob=True,
min_logprob=-10.0,
learned_std=False,
ac_use_normed_inputs=True,
target_update=0.02,
tune_actor_lr=3e-4,
lr_schedule='constant',
lr_decay_steps=1000000,
log_interval=20000,
linesearch=False,
linesearch_tolerance=0.25,
linesearch_ratio=0.8,
**kwargs
):
super().__init__()
self.in_dim = in_dim
self.action_dim = out_actions
self.gamma = gamma
self.lambda_ = lambda_gae
self.target_interval = target_interval
self.actor_grad = actor_grad
self.actor_dist = actor_dist
self.min_std = min_std
self.clip_logprob = clip_logprob
self.normalizer = normalizer
self.min_logprob = min_logprob * self.action_dim
self.learned_std = learned_std
self.fixed_std = fixed_std
self.decay_std_steps = decay_std_steps
self.init_std = init_std
self.current_std = init_std
self.use_normed_inputs = ac_use_normed_inputs
self.lr_decay_steps = lr_decay_steps
self.log_interval = log_interval
self.last_log = -float('inf')
self.linesearch = linesearch
self.linesearch_tolerance = linesearch_tolerance
self.linesearch_ratio = linesearch_ratio
if not self.fixed_std and not self.learned_std:
actor_out_dim = 2 * out_actions
else:
actor_out_dim = out_actions
self.actor = MLP(in_dim, actor_out_dim, hidden_dim, hidden_layers, layer_norm).to(device)
self.critic = MLP(in_dim, 1, hidden_dim, hidden_layers, layer_norm).to(device)
self.critic_target = copy.deepcopy(self.critic)
self.critic_target.requires_grad_(False)
|
class ActorCritic(nn.Module):
def __init__(self,
in_dim,
out_actions,
normalizer,
device="cuda:0",
hidden_dim=256,
min_std=0.01,
fixed_std=False,
decay_std_steps=500000,
init_std=0.5,
hidden_layers=2,
layer_norm=True,
gamma=0.999,
ema=0.995,
lambda_gae=0.8,
entropy_weight=1e-3,
entropy_target=-1,
tune_entropy=True,
target_interval=100,
lr_actor=1e-4,
lr_critic=3e-4,
lr_alpha=1e-2,
actor_grad='reinforce',
actor_dist='normal_tanh',
normalize_adv=False,
grad_clip=None,
clip_logprob=True,
min_logprob=-10.0,
learned_std=False,
ac_use_normed_inputs=True,
target_update=0.02,
tune_actor_lr=3e-4,
lr_schedule='constant',
lr_decay_steps=1000000,
log_interval=20000,
linesearch=False,
linesearch_tolerance=0.25,
linesearch_ratio=0.8,
**kwargs
):
super().__init__()
self.in_dim = in_dim
self.action_dim = out_actions
self.gamma = gamma
self.lambda_ = lambda_gae
self.target_interval = target_interval
self.actor_grad = actor_grad
self.actor_dist = actor_dist
self.min_std = min_std
self.clip_logprob = clip_logprob
self.normalizer = normalizer
self.min_logprob = min_logprob * self.action_dim
self.learned_std = learned_std
self.fixed_std = fixed_std
self.decay_std_steps = decay_std_steps
self.init_std = init_std
self.current_std = init_std
self.use_normed_inputs = ac_use_normed_inputs
self.lr_decay_steps = lr_decay_steps
self.log_interval = log_interval
self.last_log = -float('inf')
self.linesearch = linesearch
self.linesearch_tolerance = linesearch_tolerance
self.linesearch_ratio = linesearch_ratio
if not self.fixed_std and not self.learned_std:
actor_out_dim = 2 * out_actions
else:
actor_out_dim = out_actions
self.actor = MLP(in_dim, actor_out_dim, hidden_dim, hidden_layers, layer_norm).to(device)
self.critic = MLP(in_dim, 1, hidden_dim, hidden_layers, layer_norm).to(device)
self.critic_target = copy.deepcopy(self.critic)
self.critic_target.requires_grad_(False) | self.ema = EMA(ema) | 0 | 2023-12-12 21:05:26+00:00 | 2k |
Chat-3D/Chat-3D-v2 | utils/logger.py | [
{
"identifier": "get_rank",
"path": "utils/distributed.py",
"snippet": "def get_rank():\n if not is_dist_avail_and_initialized():\n return 0\n return dist.get_rank()"
},
{
"identifier": "is_main_process",
"path": "utils/distributed.py",
"snippet": "def is_main_process():\n return get_rank() == 0"
}
] | import functools
import logging
import os
import sys
import time
import wandb
import torch
from typing import Any, Dict, Union
from .distributed import get_rank, is_main_process
from termcolor import colored
from torch.utils.tensorboard import SummaryWriter | 831 | # from MMF: https://github.com/facebookresearch/mmf/blob/master/mmf/utils/logger.py
# Copyright (c) Facebook, Inc. and its affiliates.
def log_dict_to_wandb(log_dict, step, prefix=""):
"""include a separator `/` at the end of `prefix`"""
if not is_main_process():
return
log_dict = {f"{prefix}{k}": v for k, v in log_dict.items()}
wandb.log(log_dict, step)
def setup_wandb(config):
if not (config.wandb.enable and is_main_process()):
return
run = wandb.init(
config=config,
project=config.wandb.project,
entity=config.wandb.entity,
name=os.path.basename(config.output_dir),
reinit=True
)
return run
def setup_output_folder(save_dir: str, folder_only: bool = False):
"""Sets up and returns the output file where the logs will be placed
based on the configuration passed. Usually "save_dir/logs/log_<timestamp>.txt".
If env.log_dir is passed, logs will be directly saved in this folder.
Args:
folder_only (bool, optional): If folder should be returned and not the file.
Defaults to False.
Returns:
str: folder or file path depending on folder_only flag
"""
log_filename = "train_"
log_filename += time.strftime("%Y_%m_%dT%H_%M_%S")
log_filename += ".log"
log_folder = os.path.join(save_dir, "logs")
if not os.path.exists(log_folder):
os.path.mkdirs(log_folder)
if folder_only:
return log_folder
log_filename = os.path.join(log_folder, log_filename)
return log_filename
def setup_logger(
output: str = None,
color: bool = True,
name: str = "mmf",
disable: bool = False,
clear_handlers=True,
*args,
**kwargs,
):
"""
Initialize the MMF logger and set its verbosity level to "INFO".
    Outside libraries shouldn't call this in case they have set their
own logging handlers and setup. If they do, and don't want to
clear handlers, pass clear_handlers options.
The initial version of this function was taken from D2 and adapted
for MMF.
Args:
output (str): a file name or a directory to save log.
If ends with ".txt" or ".log", assumed to be a file name.
Default: Saved to file <save_dir/logs/log_[timestamp].txt>
color (bool): If false, won't log colored logs. Default: true
name (str): the root module name of this logger. Defaults to "mmf".
disable: do not use
clear_handlers (bool): If false, won't clear existing handlers.
Returns:
logging.Logger: a logger
"""
if disable:
return None
logger = logging.getLogger(name)
logger.propagate = False
logging.captureWarnings(True)
warnings_logger = logging.getLogger("py.warnings")
plain_formatter = logging.Formatter(
"%(asctime)s | %(levelname)s | %(name)s : %(message)s",
datefmt="%Y-%m-%dT%H:%M:%S",
)
| # from MMF: https://github.com/facebookresearch/mmf/blob/master/mmf/utils/logger.py
# Copyright (c) Facebook, Inc. and its affiliates.
def log_dict_to_wandb(log_dict, step, prefix=""):
"""include a separator `/` at the end of `prefix`"""
if not is_main_process():
return
log_dict = {f"{prefix}{k}": v for k, v in log_dict.items()}
wandb.log(log_dict, step)
def setup_wandb(config):
if not (config.wandb.enable and is_main_process()):
return
run = wandb.init(
config=config,
project=config.wandb.project,
entity=config.wandb.entity,
name=os.path.basename(config.output_dir),
reinit=True
)
return run
def setup_output_folder(save_dir: str, folder_only: bool = False):
"""Sets up and returns the output file where the logs will be placed
based on the configuration passed. Usually "save_dir/logs/log_<timestamp>.txt".
If env.log_dir is passed, logs will be directly saved in this folder.
Args:
folder_only (bool, optional): If folder should be returned and not the file.
Defaults to False.
Returns:
str: folder or file path depending on folder_only flag
"""
log_filename = "train_"
log_filename += time.strftime("%Y_%m_%dT%H_%M_%S")
log_filename += ".log"
log_folder = os.path.join(save_dir, "logs")
if not os.path.exists(log_folder):
os.path.mkdirs(log_folder)
if folder_only:
return log_folder
log_filename = os.path.join(log_folder, log_filename)
return log_filename
def setup_logger(
output: str = None,
color: bool = True,
name: str = "mmf",
disable: bool = False,
clear_handlers=True,
*args,
**kwargs,
):
"""
Initialize the MMF logger and set its verbosity level to "INFO".
    Outside libraries shouldn't call this in case they have set their
own logging handlers and setup. If they do, and don't want to
clear handlers, pass clear_handlers options.
The initial version of this function was taken from D2 and adapted
for MMF.
Args:
output (str): a file name or a directory to save log.
If ends with ".txt" or ".log", assumed to be a file name.
Default: Saved to file <save_dir/logs/log_[timestamp].txt>
color (bool): If false, won't log colored logs. Default: true
name (str): the root module name of this logger. Defaults to "mmf".
disable: do not use
clear_handlers (bool): If false, won't clear existing handlers.
Returns:
logging.Logger: a logger
"""
if disable:
return None
logger = logging.getLogger(name)
logger.propagate = False
logging.captureWarnings(True)
warnings_logger = logging.getLogger("py.warnings")
plain_formatter = logging.Formatter(
"%(asctime)s | %(levelname)s | %(name)s : %(message)s",
datefmt="%Y-%m-%dT%H:%M:%S",
)
| distributed_rank = get_rank() | 0 | 2023-12-11 14:39:58+00:00 | 2k |
SqueezeBits/owlite | owlite/calib/mse_calibrator.py | [
{
"identifier": "log",
"path": "owlite/logger.py",
"snippet": "class Logger(logging.Logger):\n class _WarningFilterContext:\n class WarningFilter(logging.Filter):\n ENV_VAR = \"OWLITE_LOG_LEVEL\"\n DEBUG_WARNING = 15\n ULTRA_VERBOSE = -10\n def ignore_warnings(self):\n def __init__(self, logger) -> None:\n def __enter__(self):\n def filter(self, record):\n def __exit__(self, exc_type, exc_val, exc_tb):\n def debug_warning(self, msg, *args, **kwargs):\n def level(self) -> int:\n def level(self, value):\ndef suppress_owlite_warnings(cls):\n def new_init(self, *args, **kwargs):"
},
{
"identifier": "_HistogramCalibrator",
"path": "owlite/calib/_histogram_calibrator.py",
"snippet": "class _HistogramCalibrator(_Calibrator):\n \"\"\"Histogram calibrator.\n\n Attributes:\n set_attr_list (Dict[str, torch.Tensor]): Initialized properties to register with the quantizer.\n 'histogram': histogram count. Default [0, ..., 0], len = 2048.\n 'bin_edges': histogram edges. Default [0, ..., 0], len = 2048.\n 'histc_bins': integer. number of histogram bins. Default 2048.\n \"\"\"\n\n def __init__(self, quantizer):\n \"\"\"Initializes for histogram calibrator\"\"\"\n super().__init__(quantizer)\n self.set_attr_list = {}\n\n def update(self):\n raise NotImplementedError\n\n def prepare(self):\n # define forward hook function\n def histogram_forward_hook_func(module, inputs, output):\n \"\"\"Forward hook function to get histogram value\"\"\"\n\n _input = inputs[0].clone()\n if module.is_enabled:\n raise RuntimeError(\n \"The quantizer should be disabled during calibration.\"\n )\n if (\n module.symmetric.item()\n and module.unsigned.item()\n and inputs[0].min() < 0\n ):\n log.warning(\n \"The unsigned fake quantizer has a negative number as input. \"\n \"It will automatically convert to a signed fake quantizer.\",\n stacklevel=2,\n )\n module.invert_signedness()\n\n with torch.no_grad():\n new_input = []\n if module.per_channel:\n _channel_axis = 0\n _channel_size = _input.shape[_channel_axis]\n for chn in range(_channel_size):\n _input_chn = torch.select(_input, _channel_axis, chn)\n new_input.append(_input_chn)\n else:\n new_input.append(_input)\n\n # _histc_cuda does not have a deterministic implementation\n _deterministic_enable = torch.are_deterministic_algorithms_enabled()\n if _deterministic_enable:\n torch.use_deterministic_algorithms(False)\n\n for i, val in enumerate(new_input):\n local_max = val.abs().max().clone().to(module.bin_edges[i].device)\n if (\n module.histogram[i].data.sum() == 0\n and module.bin_edges[i].data.sum() == 0\n ):\n module.histogram[i].data = torch.histc(\n val.abs(),\n bins=int(module.histc_bins[i].data),\n min=0,\n max=local_max,\n ).to(module.histogram[i].device)\n module.bin_edges[i].data = torch.linspace(\n 0, local_max, int(module.histc_bins[i].data) + 1\n ).to(module.bin_edges[i].device)\n else:\n if module.per_channel:\n break\n if local_max > module.bin_edges[i].data[-1]:\n interval = (\n module.bin_edges[i].data[1]\n - module.bin_edges[i].data[0]\n )\n module.histc_bins[i].data = torch.Tensor(\n [int((local_max / interval).ceil().item())]\n )\n module.bin_edges[i].data = torch.arange(\n 0,\n local_max + interval,\n interval,\n device=module.bin_edges[i].device,\n )\n local_hist = torch.histc(\n val.abs(),\n bins=int(module.histc_bins[i].data),\n min=0,\n max=module.bin_edges[i].data[-1],\n ).to(module.bin_edges[i].device)\n local_hist[\n : module.histogram[i].data.numel()\n ] += module.histogram[i].data\n module.histogram[i].data = local_hist\n\n # allocate to original state\n if _deterministic_enable:\n torch.use_deterministic_algorithms(True)\n\n return output\n\n # ~define forward hook function\n\n # set histogram, bin_edges attr and register forward hook\n _histogram_size = 2048\n if self.quantizer.per_channel:\n _channel_size = self.quantizer.step_size.shape[0]\n else:\n _channel_size = 1\n\n device = self.quantizer.step_size.device\n\n self.set_attr_list = {\n \"histogram\": [\n torch.zeros(_histogram_size).to(device) for _ch in range(_channel_size)\n ],\n \"bin_edges\": [\n torch.zeros(_histogram_size + 1).to(device)\n for _ch in range(_channel_size)\n ],\n \"histc_bins\": [\n torch.Tensor([_histogram_size]).to(device)\n for 
_ch in range(_channel_size)\n ],\n }\n\n for attr, default in self.set_attr_list.items():\n if hasattr(self.quantizer, attr):\n raise AttributeError(f\"In Quantizer, {attr} attribution already exists\")\n setattr(self.quantizer, attr, default)\n self.hook_handler = self.quantizer.register_forward_hook(\n histogram_forward_hook_func\n )"
}
] | import torch
from ..logger import log
from ._histogram_calibrator import _HistogramCalibrator | 1,461 | """MSE(Mean Squared Error) calibrator"""
class MSECalibrator(_HistogramCalibrator):
"""MSE Calibrator Class"""
def update(self):
# update step_size using "mse"
if self.quantizer.histogram is None or self.quantizer.bin_edges is None:
| """MSE(Mean Squared Error) calibrator"""
class MSECalibrator(_HistogramCalibrator):
"""MSE Calibrator Class"""
def update(self):
# update step_size using "mse"
if self.quantizer.histogram is None or self.quantizer.bin_edges is None: | log.error(f"quantizer.histogram : {self.quantizer.histogram}") | 0 | 2023-12-08 06:41:50+00:00 | 2k |
ximinng/PyTorch-SVGRender | pytorch_svgrender/svgtools/process.py | [
{
"identifier": "circle_tag",
"path": "pytorch_svgrender/svgtools/shape.py",
"snippet": "def circle_tag(cx: float, cy: float, r: float, transform: str = None):\n attrib = {\n 'cx': f'{cx}', 'cy': f'{cy}', 'r': f'{r}'\n }\n if transform is not None:\n attrib['transform'] = transform\n _circle = ET.Element('circle', attrib) # tag, attrib\n return _circle"
},
{
"identifier": "rect_tag",
"path": "pytorch_svgrender/svgtools/shape.py",
"snippet": "def rect_tag(\n x: float, y: float, rx: float, ry: float,\n width: float = 600, height: float = 600,\n transform: str = None\n):\n attrib = {\n 'x': f'{x}', 'y': f'{y}', 'rx': f'{rx}', 'ry': f'{ry}',\n 'width': f'{width}', 'height': f'{height}'\n }\n if transform is not None:\n attrib['transform'] = transform\n _rect = ET.Element('rect', attrib) # tag, attrib\n return _rect"
},
{
"identifier": "is_valid_svg",
"path": "pytorch_svgrender/svgtools/type.py",
"snippet": "def is_valid_svg(file_path: AnyStr) -> bool:\n try:\n tree = ET.parse(file_path)\n root = tree.getroot()\n if root.tag.endswith('svg') and 'xmlns' in root.attrib:\n return True\n else:\n return False\n except ET.ParseError:\n return False"
}
] | import xml.etree.ElementTree as ET
import omegaconf
from typing import Tuple
from .shape import circle_tag, rect_tag
from .type import is_valid_svg | 768 | # -*- coding: utf-8 -*-
# Author: ximing
# Description: process
# Copyright (c) 2023, XiMing Xing.
# License: MIT License
def delete_empty_path(input_svg: str, output_svg: str):
is_valid_svg(input_svg)
# read svg
tree = ET.parse(input_svg)
root = tree.getroot()
group = ET.Element('g')
for i, element in enumerate(root.iter()):
element.tag = element.tag.split('}')[-1]
if element.tag == 'path':
if element.get('d') == 'C NaN NaN' or element.get('d') == '':
continue
group.append(element)
# new svg
svg = ET.Element('svg',
xmlns="http://www.w3.org/2000/svg",
version='1.1',
width=root.get('width'),
height=root.get('height'),
viewBox=root.get('viewBox'))
svg.append(group)
tree = ET.ElementTree(svg)
tree.write(output_svg, encoding='utf-8', xml_declaration=True)
def add_clipPath2def(mounted_node: ET.Element, tag_name: str, attrs: omegaconf.DictConfig):
# add defs node
defs = ET.SubElement(mounted_node, 'defs') # parent=mounted_node, tag='defs'
if tag_name == 'none':
return None
# add clipPath node
id = 'def_clip'
_circleClip = ET.SubElement(defs, 'clipPath', id='def_clip') # parent=defs, tag='clipPath'
# add ops
if tag_name == 'circle_clip':
_circleClip.append(
| # -*- coding: utf-8 -*-
# Author: ximing
# Description: process
# Copyright (c) 2023, XiMing Xing.
# License: MIT License
def delete_empty_path(input_svg: str, output_svg: str):
is_valid_svg(input_svg)
# read svg
tree = ET.parse(input_svg)
root = tree.getroot()
group = ET.Element('g')
for i, element in enumerate(root.iter()):
element.tag = element.tag.split('}')[-1]
if element.tag == 'path':
if element.get('d') == 'C NaN NaN' or element.get('d') == '':
continue
group.append(element)
# new svg
svg = ET.Element('svg',
xmlns="http://www.w3.org/2000/svg",
version='1.1',
width=root.get('width'),
height=root.get('height'),
viewBox=root.get('viewBox'))
svg.append(group)
tree = ET.ElementTree(svg)
tree.write(output_svg, encoding='utf-8', xml_declaration=True)
def add_clipPath2def(mounted_node: ET.Element, tag_name: str, attrs: omegaconf.DictConfig):
# add defs node
defs = ET.SubElement(mounted_node, 'defs') # parent=mounted_node, tag='defs'
if tag_name == 'none':
return None
# add clipPath node
id = 'def_clip'
_circleClip = ET.SubElement(defs, 'clipPath', id='def_clip') # parent=defs, tag='clipPath'
# add ops
if tag_name == 'circle_clip':
_circleClip.append( | circle_tag(cx=attrs.cx, cy=attrs.cy, r=attrs.r) | 0 | 2023-12-13 08:18:01+00:00 | 2k |
lyhisme/DeST | libs/models/SP.py | [
{
"identifier": "Graph",
"path": "libs/models/graph/graph.py",
"snippet": "class Graph:\n def __init__(self, labeling_mode='spatial', layout='MCFS-22'):\n\n self.get_edge(layout)\n self.A = self.get_adjacency_matrix(labeling_mode)\n\n def get_edge(self, layout):\n if layout == 'MCFS-22' or layout == 'MCFS-130':\n self.num_node = 25\n self.self_link = [(i, i) for i in range(self.num_node)]\n inward_ori_index = [(2,9), (1,2), (16,1), (18,16), (17,1), (19,17), (6,2),\n (7,6), (8,7), (3,2), (4,3), (5,4), (10,9),\n (11, 10), (12, 11), (25, 12), (23, 12), (24, 23), (13,9),\n (14, 13), (15, 14), (22, 15), (20, 15), (21, 20)]\n self.inward = [(i - 1, j - 1) for (i, j) in inward_ori_index]\n self.outward = [(j, i) for (i, j) in self.inward]\n self.neighbor = self.inward + self.outward\n elif layout == 'PKU-subject' or layout == 'PKU-view':\n self.num_node = 25\n self.self_link = [(i, i) for i in range(self.num_node)]\n self.inward = [(12, 0), (13, 12), (14, 13), (15, 14), (16, 0), (17, 16), \n (18, 17), (19, 18), (1, 0), (20, 1), (2, 20), (3, 2), (4,20),\n (5,4), (6,5), (7,6), (21,7), (22,6), (8,20), (9,8), (10, 9),\n (11,10), (24,10), (23,11)]\n self.outward = [(j, i) for (i, j) in self.inward]\n self.neighbor = self.inward + self.outward\n elif layout == 'LARA':\n self.num_node = 19\n self.self_link = [(i, i) for i in range(self.num_node)]\n self.inward = [(1, 0), (2, 1), (3, 2), (4, 3), (5, 0), (6, 5), (7, 6), (8, 7), (9, 0), (10, 9), (11, 9), (12,10), (13,12), (14,13), (15,9), (16,15), (17,16), (18,17)]\n self.outward = [(j, i) for (i, j) in self.inward]\n self.neighbor = self.inward + self.outward\n else:\n raise ValueError(\"Do Not Exist This Layout.\")\n\n def get_adjacency_matrix(self, labeling_mode=None):\n if labeling_mode is None:\n return self.A\n if labeling_mode == 'spatial':\n A = tools.get_spatial_graph(self.num_node, self.self_link, self.inward, self.outward)\n else:\n raise ValueError()\n return A"
},
{
"identifier": "k_adjacency",
"path": "libs/models/graph/tools.py",
"snippet": "def k_adjacency(A, k, with_self=False, self_factor=1):\n assert isinstance(A, np.ndarray)\n I = np.eye(len(A), dtype=A.dtype)\n if k == 0:\n return I\n Ak = np.minimum(np.linalg.matrix_power(A + I, k), 1) \\\n - np.minimum(np.linalg.matrix_power(A + I, k - 1), 1)\n if with_self:\n Ak += (self_factor * I)\n return Ak"
},
{
"identifier": "normalize_adjacency_matrix",
"path": "libs/models/graph/tools.py",
"snippet": "def normalize_adjacency_matrix(A):\n node_degrees = A.sum(-1)\n degs_inv_sqrt = np.power(node_degrees, -0.5)\n norm_degs_matrix = np.eye(len(node_degrees)) * degs_inv_sqrt\n return (norm_degs_matrix @ A @ norm_degs_matrix).astype(np.float32)"
},
{
"identifier": "get_adjacency_matrix",
"path": "libs/models/graph/tools.py",
"snippet": "def get_adjacency_matrix(edges, num_nodes=25):\n A = np.zeros((num_nodes, num_nodes), dtype=np.float32)\n for edge in edges:\n A[edge] = 1.\n return A"
}
] | import torch
import torch.nn as nn
import numpy as np
from .graph.graph import Graph
from .graph.tools import k_adjacency, normalize_adjacency_matrix, get_adjacency_matrix | 1,214 |
class MultiScale_GraphConv(nn.Module):
def __init__(self,
num_scales, # 13
in_channels,
out_channels,
dataset,
disentangled_agg=True,
use_mask=True,
dropout=0,
activation='relu'):
super().__init__()
|
class MultiScale_GraphConv(nn.Module):
def __init__(self,
num_scales, # 13
in_channels,
out_channels,
dataset,
disentangled_agg=True,
use_mask=True,
dropout=0,
activation='relu'):
super().__init__()
| self.graph = Graph(labeling_mode='spatial', layout=dataset) | 0 | 2023-12-12 02:27:15+00:00 | 2k |
soCzech/GenHowTo | genhowto.py | [
{
"identifier": "load_genhowto_model",
"path": "genhowto_utils.py",
"snippet": "def load_genhowto_model(weights_path, device=\"cpu\"):\n with open(os.path.join(weights_path, \"GenHowTo_controlnet_config.json\")) as file:\n gef_controlnet_config = json.load(file)\n\n controlnet = ControlNetModel.from_config(gef_controlnet_config, torch_dtype=torch.float32)\n # patch forward function of the ControlNet conditioning embedding\n controlnet.controlnet_cond_embedding.forward = GenHowTo_ControlNetConditioningEmbedding_forward.__get__(\n controlnet.controlnet_cond_embedding, ControlNetConditioningEmbedding)\n # load weights for the ControlNet\n controlnet.load_state_dict(torch.load(os.path.join(weights_path, \"GenHowTo_controlnet.pth\"), map_location=\"cpu\"))\n\n pipe = StableDiffusionControlNetPipeline.from_pretrained(\n \"stabilityai/stable-diffusion-2\", controlnet=controlnet, torch_dtype=torch.float32)\n # load our fine-tuned weights for the UNet\n pipe.unet.load_state_dict(torch.load(os.path.join(weights_path, \"GenHowTo_sdunet.pth\"), map_location=\"cpu\"))\n # change image preprocessor to our custom one which uses VAE to preprocess input images\n pipe.control_image_processor = GenHowToControlImagePreprocessor(pipe)\n # our model is trained to predict noise directly - we do not use \"v_prediction\" used by stabilityai/stable-diffusion-2\n pipe.scheduler.config.prediction_type = \"epsilon\"\n pipe.scheduler.config[\"prediction_type\"] = \"epsilon\"\n\n pipe = pipe.to(device)\n if device == \"cpu\":\n return pipe\n\n try:\n pipe.enable_xformers_memory_efficient_attention()\n except:\n print(\"Failed to enable memory efficient attention, continuing without it.\")\n return pipe"
},
{
"identifier": "DDIMSkipScheduler",
"path": "genhowto_utils.py",
"snippet": "class DDIMSkipScheduler(DDIMScheduler):\n\n @register_to_config\n def __init__(self,\n num_train_timesteps: int = 1000,\n beta_start: float = 0.0001,\n beta_end: float = 0.02,\n beta_schedule: str = \"linear\",\n trained_betas: Optional[Union[np.ndarray, List[float]]] = None,\n clip_sample: bool = True,\n set_alpha_to_one: bool = True,\n steps_offset: int = 0,\n prediction_type: str = \"epsilon\",\n thresholding: bool = False,\n dynamic_thresholding_ratio: float = 0.995,\n clip_sample_range: float = 1.0,\n sample_max_value: float = 1.0,\n timestep_spacing: str = \"leading\",\n rescale_betas_zero_snr: bool = False):\n super().__init__(\n num_train_timesteps,\n beta_start,\n beta_end,\n beta_schedule,\n trained_betas,\n clip_sample,\n set_alpha_to_one,\n steps_offset,\n prediction_type,\n thresholding,\n dynamic_thresholding_ratio,\n clip_sample_range,\n sample_max_value,\n timestep_spacing,\n rescale_betas_zero_snr)\n self.num_steps_to_skip = None\n\n def set_num_steps_to_skip(self, num_steps_to_skip: int, num_inference_steps: int):\n self.num_steps_to_skip = num_steps_to_skip\n self.set_timesteps(num_inference_steps)\n\n def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):\n super().set_timesteps(num_inference_steps, device)\n if self.num_steps_to_skip is None:\n return\n\n if self.num_steps_to_skip >= num_inference_steps:\n raise ValueError(\n f\"`self.num_steps_to_skip`: {self.num_steps_to_skip} cannot be larger or equal to \"\n f\"`num_inference_steps`: {num_inference_steps}.\"\n )\n if self.config.timestep_spacing != \"leading\":\n raise ValueError(\n f\"`self.config.timestep_spacing`: {self.config.timestep_spacing} must be `leading` \"\n f\"if `num_steps_to_skip` is not None.\"\n )\n self.timesteps = self.timesteps[self.num_steps_to_skip:]"
}
] | import os
import math
import torch
import argparse
import numpy as np
from PIL import Image
from genhowto_utils import load_genhowto_model, DDIMSkipScheduler | 1,103 |
def main(args):
if os.path.exists(args.output_path):
print(f"{args.output_path} already exists.")
return
pipe = load_genhowto_model(args.weights_path, device=args.device)
pipe.scheduler.set_timesteps(args.num_inference_steps)
if args.num_steps_to_skip is not None: # possibly do not start from complete noise
|
def main(args):
if os.path.exists(args.output_path):
print(f"{args.output_path} already exists.")
return
pipe = load_genhowto_model(args.weights_path, device=args.device)
pipe.scheduler.set_timesteps(args.num_inference_steps)
if args.num_steps_to_skip is not None: # possibly do not start from complete noise | pipe.scheduler = DDIMSkipScheduler.from_config(pipe.scheduler.config) | 1 | 2023-12-11 08:47:51+00:00 | 2k |
bolna-ai/bolna | bolna/helpers/utils.py | [
{
"identifier": "configure_logger",
"path": "bolna/helpers/logger_config.py",
"snippet": "def configure_logger(file_name, enabled=True, logging_level='INFO'):\n if logging_level not in VALID_LOGGING_LEVELS:\n logging_level = \"INFO\"\n\n logging.basicConfig(\n level=logging_level,\n format=\"%(asctime)s.%(msecs)03d %(levelname)s {%(module)s} [%(funcName)s] %(message)s\",\n datefmt=\"%Y-%m-%d %H:%M:%S\",\n )\n\n logger = logging.getLogger(file_name)\n\n if not enabled:\n logger.disabled = True\n return logger"
},
{
"identifier": "PREPROCESS_DIR",
"path": "bolna/constants.py",
"snippet": "PREPROCESS_DIR = 'agent_data'"
}
] | import json
import asyncio
import re
import numpy as np
import copy
import hashlib
import os
import traceback
import ast
from botocore.exceptions import BotoCoreError, ClientError
from aiobotocore.session import AioSession
from contextlib import AsyncExitStack
from dotenv import load_dotenv
from pydantic import BaseModel, create_model
from .logger_config import configure_logger
from bolna.constants import PREPROCESS_DIR | 1,049 |
logger = configure_logger(__name__)
load_dotenv()
BUCKET_NAME = os.getenv('BUCKET_NAME')
def load_file(file_path, is_json=False):
data = None
with open(file_path, "r") as f:
if is_json:
data = json.load(f)
else:
data = f.read()
return data
def write_json_file(file_path, data):
with open(file_path, 'w') as file:
json.dump(data, file, indent=4, ensure_ascii=False)
def create_ws_data_packet(data, meta_info=None, is_md5_hash=False, llm_generated=False):
metadata = copy.deepcopy(meta_info)
if meta_info is not None: #It'll be none in case we connect through dashboard playground
metadata["is_md5_hash"] = is_md5_hash
metadata["llm_generated"] = llm_generated
return {
'data': data,
'meta_info': metadata
}
def int2float(sound):
abs_max = np.abs(sound).max()
sound = sound.astype('float32')
if abs_max > 0:
sound *= 1 / 32768
sound = sound.squeeze() # depends on the use case
return sound
def float2int(sound):
sound = np.int16(sound * 32767)
return sound
def mu_law_encode(audio, quantization_channels=256):
mu = quantization_channels - 1
safe_audio_abs = np.minimum(np.abs(audio), 1.0)
magnitude = np.log1p(mu * safe_audio_abs) / np.log1p(mu)
signal = np.sign(audio) * magnitude
return ((signal + 1) / 2 * mu + 0.5).astype(np.int32)
def raw_to_mulaw(raw_bytes):
# Convert bytes to numpy array of int16 values
samples = np.frombuffer(raw_bytes, dtype=np.int16)
samples = samples.astype(np.float32) / (2 ** 15)
mulaw_encoded = mu_law_encode(samples)
return mulaw_encoded
async def get_s3_file(bucket_name, file_key):
session = AioSession()
async with AsyncExitStack() as exit_stack:
s3_client = await exit_stack.enter_async_context(session.create_client('s3'))
try:
response = await s3_client.get_object(Bucket=bucket_name, Key=file_key)
except (BotoCoreError, ClientError) as error:
logger.error(error)
else:
file_content = await response['Body'].read()
return file_content
async def put_s3_file(bucket_name, file_key, file_data, content_type):
session = AioSession()
async with AsyncExitStack() as exit_stack:
s3_client = await exit_stack.enter_async_context(session.create_client('s3'))
data = None
if content_type == "json":
data = json.dumps(file_data)
elif content_type in ["mp3", "wav", "pcm"]:
data = file_data
try:
await s3_client.put_object(Bucket=bucket_name, Key=file_key, Body=data)
except (BotoCoreError, ClientError) as error:
logger.error(error)
except Exception as e:
logger.error('Exception occurred while s3 put object: {}'.format(e))
async def get_raw_audio_bytes_from_base64(agent_name, b64_string, audio_format='mp3', user_id = None, assistant_id=None, local = False):
# we are already storing pcm formatted audio in the filler config. No need to encode/decode them further
audio_data = None
if local:
|
logger = configure_logger(__name__)
load_dotenv()
BUCKET_NAME = os.getenv('BUCKET_NAME')
def load_file(file_path, is_json=False):
data = None
with open(file_path, "r") as f:
if is_json:
data = json.load(f)
else:
data = f.read()
return data
def write_json_file(file_path, data):
with open(file_path, 'w') as file:
json.dump(data, file, indent=4, ensure_ascii=False)
def create_ws_data_packet(data, meta_info=None, is_md5_hash=False, llm_generated=False):
metadata = copy.deepcopy(meta_info)
if meta_info is not None: #It'll be none in case we connect through dashboard playground
metadata["is_md5_hash"] = is_md5_hash
metadata["llm_generated"] = llm_generated
return {
'data': data,
'meta_info': metadata
}
def int2float(sound):
abs_max = np.abs(sound).max()
sound = sound.astype('float32')
if abs_max > 0:
sound *= 1 / 32768
sound = sound.squeeze() # depends on the use case
return sound
def float2int(sound):
sound = np.int16(sound * 32767)
return sound
def mu_law_encode(audio, quantization_channels=256):
mu = quantization_channels - 1
safe_audio_abs = np.minimum(np.abs(audio), 1.0)
magnitude = np.log1p(mu * safe_audio_abs) / np.log1p(mu)
signal = np.sign(audio) * magnitude
return ((signal + 1) / 2 * mu + 0.5).astype(np.int32)
def raw_to_mulaw(raw_bytes):
# Convert bytes to numpy array of int16 values
samples = np.frombuffer(raw_bytes, dtype=np.int16)
samples = samples.astype(np.float32) / (2 ** 15)
mulaw_encoded = mu_law_encode(samples)
return mulaw_encoded
async def get_s3_file(bucket_name, file_key):
session = AioSession()
async with AsyncExitStack() as exit_stack:
s3_client = await exit_stack.enter_async_context(session.create_client('s3'))
try:
response = await s3_client.get_object(Bucket=bucket_name, Key=file_key)
except (BotoCoreError, ClientError) as error:
logger.error(error)
else:
file_content = await response['Body'].read()
return file_content
async def put_s3_file(bucket_name, file_key, file_data, content_type):
session = AioSession()
async with AsyncExitStack() as exit_stack:
s3_client = await exit_stack.enter_async_context(session.create_client('s3'))
data = None
if content_type == "json":
data = json.dumps(file_data)
elif content_type in ["mp3", "wav", "pcm"]:
data = file_data
try:
await s3_client.put_object(Bucket=bucket_name, Key=file_key, Body=data)
except (BotoCoreError, ClientError) as error:
logger.error(error)
except Exception as e:
logger.error('Exception occurred while s3 put object: {}'.format(e))
async def get_raw_audio_bytes_from_base64(agent_name, b64_string, audio_format='mp3', user_id = None, assistant_id=None, local = False):
# we are already storing pcm formatted audio in the filler config. No need to encode/decode them further
audio_data = None
if local: | file_name = f"{PREPROCESS_DIR}/{agent_name}/{audio_format}/{b64_string}.{audio_format}" | 1 | 2023-12-13 09:07:35+00:00 | 2k |
relari-ai/continuous-eval | continuous_eval/metrics/generation_LLM_based_metrics.py | [
{
"identifier": "DefaultLLM",
"path": "continuous_eval/llm_factory.py",
"snippet": " GOOGLE_GENAI_AVAILABLE = True\n GOOGLE_GENAI_AVAILABLE = False\n ANTHROPIC_AVAILABLE = True\n ANTHROPIC_AVAILABLE = False\nclass LLMInterface(ABC):\nclass LLMFactory(LLMInterface):\n def run(self, prompt, temperature=0):\n def __init__(self, model):\n def _llm_response(self, prompt, temperature):\n def run(self, prompt, temperature=0):"
},
{
"identifier": "LLMBasedMetric",
"path": "continuous_eval/metrics/base.py",
"snippet": "class LLMBasedMetric(Metric):\n \"\"\"\n Base class for all LLM based metrics.\n \"\"\"\n\n def __init__(self, model: LLMInterface = DefaultLLM):\n super().__init__()\n assert isinstance(model, LLMInterface), \"model must be an instance of LLMInterface.\"\n self._llm = model"
},
{
"identifier": "LLMBasedContextCoverage",
"path": "continuous_eval/metrics/retrieval_LLM_based_metrics.py",
"snippet": "class LLMBasedContextCoverage(LLMBasedMetric):\n def __init__(self, model: LLMInterface = DefaultLLM, use_few_shot: bool = True):\n super().__init__(model)\n self.use_few_shot = use_few_shot\n\n def __str__(self):\n return f\"LLMBasedContextCoverage(model={self.model}, use_few_shot={self.use_few_shot})\"\n\n def calculate(self, question, retrieved_contexts, answer, **kwargs):\n \"\"\"\n Calculate the context relevance score for the given datapoint.\n \"\"\"\n context = \"\\n\".join(retrieved_contexts)\n\n few_shot_prompt = (\n \"\"\"Example:\nquestion: What are the main characteristics of Jupiter?\ncontext: Jupiter is the fifth planet from the Sun and the largest in the Solar System. It is a gas giant with a mass more than two and a half times that of all the other planets in the Solar System combined, but less than one-thousandth the mass of the Sun. Jupiter is known for its prominent Great Red Spot, a giant storm larger than Earth that has been ongoing for hundreds of years.\nanswer: Jupiter is the largest planet in our Solar System and has a giant storm known as the Great Red Spot.\nclassification:\n[\n {{\n \"statement_1\":\"Jupiter is the largest planet in the Solar System.\",\n \"reason\": \"This is directly stated in the context.\",\n \"Attributed\": 1\n }},\n {{\n \"statement_2\":\"Jupiter is closer to the Sun than Earth.\",\n \"reason\": \"The context contradicts this, stating Jupiter is the fifth planet from the Sun, while Earth is the third.\",\n \"Attributed\": 0\n }}\n]\"\"\"\n if self.use_few_shot\n else \"\"\n )\n\n prompt = {\n \"system_prompt\": (\n \"\"\"\nGiven a question, context, and answer, analyze each statement in the answer and classify if the statement can be attributed to the given context or not. Output JSON strictly in the following format.\n\"\"\"\n + few_shot_prompt\n ),\n \"user_prompt\": (\"question: \" + question + \"\\ncontext: \" + context + \"\\nanswer: \" + answer),\n }\n\n content = self._llm.run(prompt)\n\n try:\n coverage = self.extract_attributed_from_broken_json(content)\n except Exception as e:\n print(f\"{type(e).__name__} Error: {content}, skipping\")\n return {\n \"LLM_based_context_coverage\": None,\n \"LLM_based_context_statements\": content,\n }\n\n return {\n \"LLM_based_context_coverage\": coverage,\n \"LLM_based_context_statements\": content,\n }\n\n @staticmethod\n def extract_attributed_from_broken_json(statements):\n pattern = r'\"Attributed\":\\s*(\\d+)'\n attributed_numbers = re.findall(pattern, statements, re.IGNORECASE)\n try:\n attributed_numbers = [int(num) for group in attributed_numbers for num in group if num]\n except Exception as e:\n print(f\"{type(e).__name__} Error: {attributed_numbers}, skipping\")\n return None\n coverage = sum(attributed_numbers) / len(attributed_numbers) if attributed_numbers else None\n return coverage"
}
] | from continuous_eval.llm_factory import DefaultLLM, LLMInterface
from continuous_eval.metrics.base import LLMBasedMetric
from continuous_eval.metrics.retrieval_LLM_based_metrics import LLMBasedContextCoverage | 1,293 |
class LLMBasedFaithfulness(LLMBasedMetric):
"""
The LLM based faithfulness metric.
Measures whether the generated answer is faithful to the retrieved context.
"""
def __init__(
self,
model: LLMInterface = DefaultLLM,
use_few_shot: bool = True,
classify_by_statement: bool = False,
):
super().__init__(model)
self.use_few_shot = use_few_shot
self.classify_by_statement = classify_by_statement
def __str__(self):
return f"LLMBasedFaithfulness(model={self.model}, use_few_shot={self.use_few_shot}, classify_by_statement={self.classify_by_statement})"
def calculate(self, question, retrieved_contexts, answer, **kwargs):
"""
Calculate the faithfulness score for the given datapoint.
"""
if self.classify_by_statement:
            # Context coverage uses the same prompt as faithfulness because it calculates what proportion of statements in the answer can be attributed to the context.
            # The difference is that faithfulness uses the generated answer, while context coverage uses the ground truth answer (to evaluate the context).
|
class LLMBasedFaithfulness(LLMBasedMetric):
"""
The LLM based faithfulness metric.
Measures whether the generated answer is faithful to the retrieved context.
"""
def __init__(
self,
model: LLMInterface = DefaultLLM,
use_few_shot: bool = True,
classify_by_statement: bool = False,
):
super().__init__(model)
self.use_few_shot = use_few_shot
self.classify_by_statement = classify_by_statement
def __str__(self):
return f"LLMBasedFaithfulness(model={self.model}, use_few_shot={self.use_few_shot}, classify_by_statement={self.classify_by_statement})"
def calculate(self, question, retrieved_contexts, answer, **kwargs):
"""
Calculate the faithfulness score for the given datapoint.
"""
if self.classify_by_statement:
            # Context coverage uses the same prompt as faithfulness because it calculates what proportion of statements in the answer can be attributed to the context.
            # The difference is that faithfulness uses the generated answer, while context coverage uses the ground truth answer (to evaluate the context).
ryanhe312/STSSNet-AAAI2024 | eval.py | [
{
"identifier": "matlab_metric",
"path": "utils/matlab_metric.py",
"snippet": "def rgb2ycbcr(img, only_y=True):\ndef calc_metrics(img1, img2, crop_border, test_Y=True, norm=False, mask=None):\ndef calc_metrics_y(img1, img2, crop_border, test_Y=True):\ndef calc_psnr(img1, img2, mask=None):\ndef ssim(img1, img2, mask=None):\ndef calc_ssim(img1, img2, mask=None):\n C1 = (0.01 * 255)**2\n C2 = (0.03 * 255)**2"
},
{
"identifier": "metrics",
"path": "utils/metrics.py",
"snippet": "class cvtColor:\n def __init__(self) -> None:\n def rgb2ycbcr(self, tensor):\n def ycrcb2rgb(self, tensor):\ndef accuracy(output, target):\ndef top_k_acc(output, target, k=3):\ndef mse(output, target):\ndef psnr(output, target, only_y=False):\ndef ssim(output, target, only_y=False):\n R = tensor[:,0:1]\n G = tensor[:,1:2]\n B = tensor[:,2:3]\n Y = self.rgb2ycbcr_coeffs[0] * R + self.rgb2ycbcr_coeffs[1] * G + self.rgb2ycbcr_coeffs[2] * B + self.rgb2ycbcr_coeffs[3]\n Y = tensor[:,0:1]\n R = self.ycbcr2rgb_coeffs[0] * Y + self.ycbcr2rgb_coeffs[1] * Cb + self.ycbcr2rgb_coeffs[2] * Cr + self.ycbcr2rgb_coeffs[3]\n G = self.ycbcr2rgb_coeffs[4] * Y + self.ycbcr2rgb_coeffs[5] * Cb + self.ycbcr2rgb_coeffs[6] * Cr + self.ycbcr2rgb_coeffs[7]\n B = self.ycbcr2rgb_coeffs[8] * Y + self.ycbcr2rgb_coeffs[9] * Cb + self.ycbcr2rgb_coeffs[10] * Cr + self.ycbcr2rgb_coeffs[11]"
}
] | import os
import cv2
import lpips
import torch
import numpy as np
import torch.nn.functional as F
import torch.utils.data as data
import matplotlib.pyplot as plt
from tqdm import tqdm
from utils import matlab_metric, metrics
from dataloaders import *
from model import STSSNet | 1,184 |
def ImgWrite(mPath,prefix,idx,img):
cv2.imwrite(os.path.join(mPath,prefix+"."+str(idx).zfill(4)+".png"),img)
@torch.no_grad()
def save_res(dataLoaderIns, model, modelPath, save_dir, save_img=True, mode='all'):
if not os.path.exists(save_dir):
os.makedirs(save_dir)
if modelPath.endswith(".tar"):
model_CKPT = torch.load(modelPath, map_location="cuda:0")["state_dict"]
elif modelPath.endswith(".ckpt"):
model_CKPT = {k[6:]:v for k,v in torch.load(modelPath, map_location="cuda:0")["state_dict"].items() if 'vgg' not in k}
else:
model_CKPT = torch.load(modelPath, map_location="cuda:0")
model.load_state_dict(model_CKPT)
model = model.to("cuda:0")
model.eval()
all_PSNR_SF = []
all_ssim_SF = []
all_lpips_SF = []
all_PSNR_IF = []
all_ssim_IF = []
all_lpips_IF = []
loss_fn_alex = lpips.LPIPS(net='alex').cuda()
print('saving to ',save_dir)
f = open(os.path.join(save_dir, 'metrics.csv'), 'w')
print('frame,psnr,ssim,lpips', file=f)
for index, (input,features,mask,hisBuffer,label) in tqdm(dataLoaderIns):
index = index[0].item()
input=input.cuda()
hisBuffer=hisBuffer.cuda()
mask=mask.cuda()
features=features.cuda()
label=label.cuda()
B,C,H,W = input.size()
input = F.pad(input,(0,0,0,4),'replicate')
mask = F.pad(mask,(0,0,0,4),'replicate')
features = F.pad(features,(0,0,0,4),'replicate')
hisBuffer = F.pad(hisBuffer.reshape(B,-1,H,W),(0,0,0,4),'replicate').reshape(B,3,4,H+4,W)
res=model(input, features, mask, hisBuffer)
res = res[:,:,:-8]
## mask
if mode == 'edge':
gray = cv2.cvtColor((label[0].permute(1,2,0).detach().cpu().numpy() * 255).astype(np.uint8), cv2.COLOR_RGB2GRAY)
mask = cv2.Canny(gray, 100, 200)
elif mode == 'hole':
mask = 1 - mask[:, :, :-4]
mask = F.interpolate(mask, scale_factor=2, mode='bilinear').squeeze().cpu().numpy()
else:
mask = None
## calculate metrics
|
def ImgWrite(mPath,prefix,idx,img):
cv2.imwrite(os.path.join(mPath,prefix+"."+str(idx).zfill(4)+".png"),img)
@torch.no_grad()
def save_res(dataLoaderIns, model, modelPath, save_dir, save_img=True, mode='all'):
if not os.path.exists(save_dir):
os.makedirs(save_dir)
if modelPath.endswith(".tar"):
model_CKPT = torch.load(modelPath, map_location="cuda:0")["state_dict"]
elif modelPath.endswith(".ckpt"):
model_CKPT = {k[6:]:v for k,v in torch.load(modelPath, map_location="cuda:0")["state_dict"].items() if 'vgg' not in k}
else:
model_CKPT = torch.load(modelPath, map_location="cuda:0")
model.load_state_dict(model_CKPT)
model = model.to("cuda:0")
model.eval()
all_PSNR_SF = []
all_ssim_SF = []
all_lpips_SF = []
all_PSNR_IF = []
all_ssim_IF = []
all_lpips_IF = []
loss_fn_alex = lpips.LPIPS(net='alex').cuda()
print('saving to ',save_dir)
f = open(os.path.join(save_dir, 'metrics.csv'), 'w')
print('frame,psnr,ssim,lpips', file=f)
for index, (input,features,mask,hisBuffer,label) in tqdm(dataLoaderIns):
index = index[0].item()
input=input.cuda()
hisBuffer=hisBuffer.cuda()
mask=mask.cuda()
features=features.cuda()
label=label.cuda()
B,C,H,W = input.size()
input = F.pad(input,(0,0,0,4),'replicate')
mask = F.pad(mask,(0,0,0,4),'replicate')
features = F.pad(features,(0,0,0,4),'replicate')
hisBuffer = F.pad(hisBuffer.reshape(B,-1,H,W),(0,0,0,4),'replicate').reshape(B,3,4,H+4,W)
res=model(input, features, mask, hisBuffer)
res = res[:,:,:-8]
## mask
if mode == 'edge':
gray = cv2.cvtColor((label[0].permute(1,2,0).detach().cpu().numpy() * 255).astype(np.uint8), cv2.COLOR_RGB2GRAY)
mask = cv2.Canny(gray, 100, 200)
elif mode == 'hole':
mask = 1 - mask[:, :, :-4]
mask = F.interpolate(mask, scale_factor=2, mode='bilinear').squeeze().cpu().numpy()
else:
mask = None
## calculate metrics | psnr, ssim = matlab_metric.calc_metrics(res[0].permute(1,2,0).detach().cpu().numpy(), label[0].permute(1,2,0).detach().cpu().numpy(), 0, norm=True, mask=mask) | 0 | 2023-12-10 02:02:37+00:00 | 2k |
Seunggu0305/VLCounter | tools/models/ViT_Encoder_add.py | [
{
"identifier": "LayerNorm",
"path": "tools/models/Encoder_utils.py",
"snippet": "class LayerNorm(nn.LayerNorm):\n \"\"\"Subclass torch's LayerNorm to handle fp16.\"\"\"\n\n def forward(self, x: torch.Tensor):\n orig_type = x.dtype\n ret = super().forward(x.type(torch.float32))\n return ret.type(orig_type)"
},
{
"identifier": "Transformer",
"path": "tools/models/Encoder_utils.py",
"snippet": "class Transformer(nn.Module):\n def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None, drop_path_rate=0.):\n super().__init__()\n self.width = width\n self.layers = layers\n self.resblocks = nn.Sequential(*[ResidualAttentionBlock(width, heads, attn_mask) for i in range(layers)])\n\n def forward(self, x: torch.Tensor):\n return self.resblocks(x)\n\n # ADDED\n def forward_attention(self, x: torch.Tensor):\n for index, layer in enumerate(self.resblocks):\n if index == len(self.resblocks) - 1:\n return layer(x, return_attention=True)\n x = layer(x)"
},
{
"identifier": "Attention",
"path": "tools/models/Encoder_utils.py",
"snippet": "class Attention(nn.Module):\n def __init__(self, out_dim, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0., settings=''):\n super().__init__()\n self.num_heads = num_heads\n head_dim = dim // num_heads\n self.scale = qk_scale or head_dim ** -0.5\n\n self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)\n self.attn_drop = nn.Dropout(attn_drop)\n self.proj = nn.Linear(out_dim, dim)\n self.proj_drop = nn.Dropout(proj_drop)\n self.settings = settings\n\n def forward(self, x):\n B, N, C = x.shape\n qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)\n q, k, v = qkv[0], qkv[1], qkv[2]\n\n # original self-attention for the original path\n attn_ori = (q @ k.transpose(-2, -1)) * self.scale\n attn_ori = attn_ori.softmax(dim=-1)\n attn_ori = self.attn_drop(attn_ori)\n\n # replace k & q by v\n k = v\n q = k\n\n # resnets have only one self-attention, norm and larger scale perform better\n if self.settings == 'resnet':\n k = k / (k.norm(p=2, dim=-1, keepdim=True) + 1e-6)\n q = k\n scale = self.scale * 8\n else:\n scale = self.scale\n \n # self-attention, higher temperate for resnets performs better\n attn = (q @ k.transpose(-2, -1)) * scale\n attn = (attn).softmax(dim=-1)\n attn = self.attn_drop(attn)\n\n x_ori = (attn_ori @ v).transpose(1, 2).reshape(B, N, C)\n # x = (attn @ v).transpose(1, 2).reshape(B, N, C) # clip_surgery\n x = v.transpose(1, 2).reshape(B, N, C) # mask_clip\n x = self.proj_drop(self.proj(x))\n x_ori = self.proj_drop(self.proj(x_ori))\n return [x, x_ori]"
}
] | import torch
import torch.nn.functional as F
import math
from torch.nn import Dropout
from torch import nn
from functools import reduce
from operator import mul
from .Encoder_utils import LayerNorm, Transformer, Attention | 1,124 |
class SPTCLIPVisionTransformer(nn.Module):
def __init__(self, input_resolution=384, patch_size=16, width=768, layers=12, heads=12, output_dim=512, drop_path_rate=0.1, out_indices=[5,6,7,8,11], pretrained=None, get_embeddings=True,
num_tokens=10, prompt_dim=768, total_d_layer=11, **kwargs):
super().__init__()
self.pretrained = pretrained
self.input_resolution = input_resolution
self.output_dim = output_dim
self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)
scale = width ** -0.5
self.class_embedding = nn.Parameter(scale * torch.randn(width))
self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))
self.spatial_size = input_resolution // patch_size
|
class SPTCLIPVisionTransformer(nn.Module):
def __init__(self, input_resolution=384, patch_size=16, width=768, layers=12, heads=12, output_dim=512, drop_path_rate=0.1, out_indices=[5,6,7,8,11], pretrained=None, get_embeddings=True,
num_tokens=10, prompt_dim=768, total_d_layer=11, **kwargs):
super().__init__()
self.pretrained = pretrained
self.input_resolution = input_resolution
self.output_dim = output_dim
self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)
scale = width ** -0.5
self.class_embedding = nn.Parameter(scale * torch.randn(width))
self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))
self.spatial_size = input_resolution // patch_size | self.ln_pre = LayerNorm(width) | 0 | 2023-12-13 08:00:28+00:00 | 2k |
qitan/devops-backend-lite | apps/workflow/serializers.py | [
{
"identifier": "Product",
"path": "dbapp/models.py",
"snippet": ""
},
{
"identifier": "RecursiveField",
"path": "common/recursive.py",
"snippet": "class RecursiveField(Field):\n \"\"\"\n A field that gets its representation from its parent.\n\n This method could be used to serialize a tree structure, a linked list, or\n even a directed acyclic graph. As with all recursive things, it is\n important to keep the base case in mind. In the case of the tree serializer\n example below, the base case is a node with an empty list of children. In\n the case of the list serializer below, the base case is when `next==None`.\n Above all, beware of cyclical references.\n\n Examples:\n\n class TreeSerializer(self):\n children = ListField(child=RecursiveField())\n\n class ListSerializer(self):\n next = RecursiveField(allow_null=True)\n \"\"\"\n\n # This list of attributes determined by the attributes that\n # `rest_framework.serializers` calls to on a field object\n PROXIED_ATTRS = (\n # methods\n 'get_value',\n 'get_initial',\n 'run_validation',\n 'get_attribute',\n 'to_representation',\n\n # attributes\n 'field_name',\n 'source',\n 'read_only',\n 'default',\n 'source_attrs',\n 'write_only',\n )\n\n def __init__(self, to=None, **kwargs):\n \"\"\"\n arguments:\n to - `None`, the name of another serializer defined in the same module\n as this serializer, or the fully qualified import path to another\n serializer. e.g. `ExampleSerializer` or\n `path.to.module.ExampleSerializer`\n \"\"\"\n self.to = to\n self.init_kwargs = kwargs\n self._proxied = None\n\n # need to call super-constructor to support ModelSerializer\n super_kwargs = dict(\n (key, kwargs[key])\n for key in kwargs\n if key in _signature_parameters(Field.__init__)\n )\n super(RecursiveField, self).__init__(**super_kwargs)\n\n def bind(self, field_name, parent):\n # Extra-lazy binding, because when we are nested in a ListField, the\n # RecursiveField will be bound before the ListField is bound\n self.bind_args = (field_name, parent)\n\n @property\n def proxied(self):\n if not self._proxied:\n if self.bind_args:\n field_name, parent = self.bind_args\n\n if hasattr(parent, 'child') and parent.child is self:\n # RecursiveField nested inside of a ListField\n parent_class = parent.parent.__class__\n else:\n # RecursiveField directly inside a Serializer\n parent_class = parent.__class__\n\n assert issubclass(parent_class, BaseSerializer)\n\n if self.to is None:\n proxied_class = parent_class\n else:\n try:\n module_name, class_name = self.to.rsplit('.', 1)\n except ValueError:\n module_name, class_name = parent_class.__module__, self.to\n\n try:\n proxied_class = getattr(\n importlib.import_module(module_name), class_name)\n except Exception as e:\n raise ImportError(\n 'could not locate serializer %s' % self.to, e)\n\n # Create a new serializer instance and proxy it\n proxied = proxied_class(**self.init_kwargs)\n proxied.bind(field_name, parent)\n self._proxied = proxied\n\n return self._proxied\n\n def __getattribute__(self, name):\n if name in RecursiveField.PROXIED_ATTRS:\n try:\n proxied = object.__getattribute__(self, 'proxied')\n return getattr(proxied, name)\n except AttributeError:\n pass\n\n return object.__getattribute__(self, name)"
},
{
"identifier": "UserProfile",
"path": "dbapp/models.py",
"snippet": ""
},
{
"identifier": "WorkflowCategory",
"path": "dbapp/models.py",
"snippet": ""
},
{
"identifier": "ModelSerializer",
"path": "common/extends/serializers.py",
"snippet": "class ModelSerializer(BaseModelSerializer):\n\n def to_representation(self, instance):\n \"\"\"\n Object instance -> Dict of primitive datatypes.\n \"\"\"\n ret = OrderedDict()\n fields = self._readable_fields\n\n for field in fields:\n try:\n attribute = field.get_attribute(instance)\n except SkipField:\n continue\n\n # We skip `to_representation` for `None` values so that fields do\n # not have to explicitly deal with that case.\n #\n # For related fields with `use_pk_only_optimization` we need to\n # resolve the pk value.\n check_for_none = attribute.pk if isinstance(\n attribute, PKOnlyObject) else attribute\n if check_for_none is None:\n ret[field.field_name] = None\n else:\n if field.field_name == 'name':\n try:\n ret[field.field_name] = field.to_representation(\n attribute).lower()\n except:\n ret[field.field_name] = field.to_representation(\n attribute)\n else:\n ret[field.field_name] = field.to_representation(attribute)\n return ret"
}
] | from rest_framework import serializers
from dbapp.models import Product, Project
from common.recursive import RecursiveField
from dbapp.models import UserProfile
from dbapp.models import WorkflowCategory, Workflow, WorkflowNodeHistory, WorkflowTemplate, \
WorkflowTemplateRevisionHistory, WorkflowNodeHistoryCallback
from common.extends.serializers import ModelSerializer
from django.conf import settings
import logging | 1,600 | """
@Author : Ken Chen
@Contact : [email protected]
@Time : 2021/11/2 9:50 AM
"""
logger = logging.getLogger(__name__)
class WorkflowTemplateSerializer(ModelSerializer):
projects_info = serializers.SerializerMethodField()
env_info = serializers.SerializerMethodField()
def get_env_info(self, instance):
if instance.environment:
return {'name': instance.environment.name, 'alias': instance.environment.alias}
return {}
def get_projects_info(self, instance):
data = []
product_ids = {}
for i in instance.projects:
if i[0] not in product_ids:
product_ids[i[0]] = []
product_ids[i[0]].append(i[1])
for k, v in product_ids.items():
product = Product.objects.get(id=k)
_projects = Project.objects.filter(id__in=v)
data.append({'value': product.id, 'name': product.name, 'label': product.alias,
'children': [{'value': i.id, 'name': i.name, 'label': i.alias} for i in _projects]})
return data
class Meta:
model = WorkflowTemplate
fields = '__all__'
class WorkflowTemplateForRetrieveSerializer(ModelSerializer):
class Meta:
model = WorkflowTemplate
fields = '__all__'
class WorkflowRevisionTemplateSerializer(ModelSerializer):
class Meta:
| """
@Author : Ken Chen
@Contact : [email protected]
@Time : 2021/11/2 9:50 AM
"""
logger = logging.getLogger(__name__)
class WorkflowTemplateSerializer(ModelSerializer):
projects_info = serializers.SerializerMethodField()
env_info = serializers.SerializerMethodField()
def get_env_info(self, instance):
if instance.environment:
return {'name': instance.environment.name, 'alias': instance.environment.alias}
return {}
def get_projects_info(self, instance):
data = []
product_ids = {}
for i in instance.projects:
if i[0] not in product_ids:
product_ids[i[0]] = []
product_ids[i[0]].append(i[1])
for k, v in product_ids.items():
product = Product.objects.get(id=k)
_projects = Project.objects.filter(id__in=v)
data.append({'value': product.id, 'name': product.name, 'label': product.alias,
'children': [{'value': i.id, 'name': i.name, 'label': i.alias} for i in _projects]})
return data
class Meta:
model = WorkflowTemplate
fields = '__all__'
class WorkflowTemplateForRetrieveSerializer(ModelSerializer):
class Meta:
model = WorkflowTemplate
fields = '__all__'
class WorkflowRevisionTemplateSerializer(ModelSerializer):
class Meta: | model = WorkflowTemplateRevisionHistory | 3 | 2023-12-13 03:09:32+00:00 | 2k |
timo-reymann/python-oauth2-cli-auth | oauth2_cli_auth/simplified_flow.py | [
{
"identifier": "OAuthCallbackHttpServer",
"path": "oauth2_cli_auth/http_server.py",
"snippet": "class OAuthCallbackHttpServer(HTTPServer):\n \"\"\"\n Simplistic HTTP Server to provide local callback URL for oauth2 provider\n \"\"\"\n\n def __init__(self, port):\n super().__init__((\"\", port), OAuthRedirectHandler)\n\n self._code = None\n\n def get_code(self):\n return self._code\n\n @property\n def callback_url(self):\n return f\"http://localhost:{self.server_port}\"\n\n def wait_for_code(self, attempts: int = 3, timeout_per_attempt=10) -> Optional[int]:\n \"\"\"\n Wait for the server to open the callback page containing the code query parameter.\n\n It tries for #attempts with a timeout of #timeout_per_attempts for each attempt.\n This prevents the CLI from getting stuck by unsolved callback URls\n\n :param attempts: Amount of attempts\n :param timeout_per_attempt: Timeout for each attempt to be successful\n :return: Code from callback page or None if the callback page is not called successfully\n \"\"\"\n for i in range(0, attempts):\n try:\n _method_with_timeout(self.handle_request, timeout_seconds=timeout_per_attempt)\n except TimeoutException:\n continue\n if self.get_code() is not None:\n return self.get_code()\n\n return None"
},
{
"identifier": "OAuth2ClientInfo",
"path": "oauth2_cli_auth/code_grant.py",
"snippet": "class OAuth2ClientInfo:\n \"\"\"\n Metadata for Oauth2 client\n \"\"\"\n authorization_url: str\n \"\"\"Authorization URL to redirect the user to\"\"\"\n token_url: str\n \"\"\"Token URL for fetching the access token\"\"\"\n client_id: str\n \"\"\"Id of the client to request for\"\"\"\n scopes: list[str]\n \"\"\"List of scopes to request\"\"\"\n\n @staticmethod\n def from_oidc_endpoint(oidc_config_endpoint: str, client_id: str, scopes: list[str]):\n config = load_oidc_config(oidc_config_endpoint)\n return OAuth2ClientInfo(\n authorization_url=config.get(\"authorization_endpoint\"),\n token_url=config.get(\"token_endpoint\"),\n client_id=client_id,\n scopes=scopes,\n )"
},
{
"identifier": "exchange_code_for_access_token",
"path": "oauth2_cli_auth/code_grant.py",
"snippet": "def exchange_code_for_access_token(client_info: OAuth2ClientInfo, redirect_uri: str, code: str,\n access_token_field: str = \"access_token\") -> str:\n \"\"\"\n Exchange a code for an access token using the endpoints from client info\n\n :param client_info: Info about oauth2 client\n :param redirect_uri: Callback URL\n :param code: Code to redeem\n :param access_token_field: Name of the field containing the access token to use. This might differ depending on\n the provider you are using. For example for Auth0 you have to set this to id_token\n :return: Extracted access token from response\n \"\"\"\n headers = {\n \"Content-Type\": \"application/x-www-form-urlencoded\",\n \"Authorization\": \"Basic \" + base64.b64encode(f\"{client_info.client_id}:\".encode()).decode(),\n }\n\n data = {\n \"code\": code,\n \"redirect_uri\": redirect_uri,\n \"grant_type\": \"authorization_code\",\n }\n encoded_data = urllib.parse.urlencode(data).encode('utf-8')\n\n request = urllib.request.Request(client_info.token_url, data=encoded_data, headers=headers)\n json_response = _load_json(request)\n\n return json_response.get(access_token_field)"
},
{
"identifier": "get_auth_url",
"path": "oauth2_cli_auth/code_grant.py",
"snippet": "def get_auth_url(client_info: OAuth2ClientInfo, redirect_uri: str) -> str:\n \"\"\"\n Build authorization url for browser\n\n :param client_info: Info about oauth2 client\n :param redirect_uri: Callback URL\n :return: Ready to use URL\n \"\"\"\n return (f\"{client_info.authorization_url}\"\n f\"?client_id={client_info.client_id}\"\n f\"&redirect_uri={redirect_uri}\"\n f\"&scope={' '.join(client_info.scopes)}\"\n f\"&response_type=code\")"
},
{
"identifier": "open_browser",
"path": "oauth2_cli_auth/code_grant.py",
"snippet": "def open_browser(url: str) -> None:\n \"\"\"\n Open browser using webbrowser module and show message about URL open\n :param url: URL to open and display\n :return: None\n \"\"\"\n print(f\"Open your browser at\\n{url}\")\n webbrowser.open(url)"
}
] | from oauth2_cli_auth import OAuthCallbackHttpServer, get_auth_url, exchange_code_for_access_token, OAuth2ClientInfo, \
open_browser | 1,227 |
def get_access_token_with_browser_open(client_info: OAuth2ClientInfo, server_port: int = 8080) -> str:
"""
Provides a simplified API to:
- Spin up the callback server
- Open the browser with the authorization URL
- Wait for the code to arrive
- Get access token from code
:param client_info: Client Info for Oauth2 Interaction
:param server_port: Port of the local web server to spin up
:return: Access Token
"""
callback_server = OAuthCallbackHttpServer(server_port)
auth_url = get_auth_url(client_info, callback_server.callback_url)
|
def get_access_token_with_browser_open(client_info: OAuth2ClientInfo, server_port: int = 8080) -> str:
"""
Provides a simplified API to:
- Spin up the callback server
- Open the browser with the authorization URL
- Wait for the code to arrive
- Get access token from code
:param client_info: Client Info for Oauth2 Interaction
:param server_port: Port of the local web server to spin up
:return: Access Token
"""
callback_server = OAuthCallbackHttpServer(server_port)
auth_url = get_auth_url(client_info, callback_server.callback_url) | open_browser(auth_url) | 4 | 2023-12-09 12:14:33+00:00 | 2k |
solanav/phishflood | phishflood/__main__.py | [
{
"identifier": "extract_inputs",
"path": "credfind/utils.py",
"snippet": "def extract_inputs(html: str) -> InputList:\n \"\"\"Given an HTML page, returns a list of inputs or None if nothing was found\"\"\"\n soup = BeautifulSoup(html, \"html.parser\")\n\n print(\"Finding all forms in the page\")\n forms = soup.find_all(\"form\")\n print(f\"Found {len(forms)} forms\")\n\n if len(forms) == 0:\n return []\n\n inputs = []\n for fmid, f in enumerate(forms):\n form = Form.from_tag(f, fmid)\n form_inputs = [\n Input.from_tag(tag, imid) for imid, tag in enumerate(f.find_all(\"input\"))\n ]\n fi = count_fillable_inputs(form_inputs)\n inputs.append((fi, form, form_inputs))\n print(f\"Found {len(form_inputs)} inputs inside form\")\n\n if len(forms) == 0:\n return []\n elif len(inputs) > 1:\n inputs.sort(key=lambda x: x[0], reverse=True)\n\n return inputs"
},
{
"identifier": "Input",
"path": "credfind/objects.py",
"snippet": "class InputType(Enum):\nclass Method(Enum):\nclass Form:\nclass Input:\n BUTTON = \"button\"\n CHECKBOX = \"checkbox\"\n COLOR = \"color\"\n DATE = \"date\"\n DATETIMELOCAL = \"datetime-local\"\n EMAIL = \"email\"\n FILE = \"file\"\n HIDDEN = \"hidden\"\n IMAGE = \"image\"\n MONTH = \"month\"\n NUMBER = \"number\"\n PASSWORD = \"password\"\n RADIO = \"radio\"\n RANGE = \"range\"\n RESET = \"reset\"\n SEARCH = \"search\"\n SUBMIT = \"submit\"\n TEL = \"tel\"\n TEXT = \"text\"\n TIME = \"time\"\n URL = \"url\"\n WEEK = \"week\"\n GET = \"get\"\n POST = \"post\"\n NONE = \"none\"\n def from_str(cls, s: str) -> Self:\n def from_str(cls, s: str) -> Self:\n def from_tag(cls, tag: Tag, meta_id: int) -> Self:\n def __str__(self) -> str:\n def to_dict(self) -> Dict[str, Any]:\n def from_tag(cls, tag: Tag, meta_id: int) -> Self:\n def __str__(self) -> str:\n def to_dict(self) -> Dict[str, Any]:"
},
{
"identifier": "creds_from_input",
"path": "credgen/utils.py",
"snippet": "def creds_from_input(inp: Input) -> str:\n # Check by keywords\n text_fields = [inp.name, inp.id_, inp.placeholder]\n\n if in_any([\"email\"], text_fields):\n return fake_email()\n elif in_any([\"code\", \"key\", \"pin\"], text_fields):\n return fake_number(6)\n elif in_any([\"password\"], text_fields):\n return fake_password()\n elif in_any([\"user\", \"uid\"], text_fields):\n return fake_username()\n elif in_any([\"document\", \"dni\"], text_fields):\n return fake_dni()\n\n # Check by basic type\n match inp.type_:\n case InputType.EMAIL:\n return fake_email()\n case InputType.PASSWORD:\n return fake_password()\n case InputType.TEL:\n return fake_number(12)\n\n return fake_letters(10)"
},
{
"identifier": "RabbitConsumer",
"path": "phishflood/rabbit.py",
"snippet": "class RabbitConsumer(object):\n def __init__(self, callback):\n self._callback = callback\n self._reconnect_delay = 0\n self._consumer = RawConsumer(self._callback)\n\n def run(self):\n while True:\n try:\n self._consumer.run()\n except KeyboardInterrupt:\n self._consumer.stop()\n break\n self._maybe_reconnect()\n\n def _maybe_reconnect(self):\n if self._consumer.should_reconnect:\n self._consumer.stop()\n reconnect_delay = self._get_reconnect_delay()\n logger.info(\"Reconnecting after %d seconds\", reconnect_delay)\n time.sleep(reconnect_delay)\n self._consumer = RawConsumer(self._callback)\n\n def _get_reconnect_delay(self):\n if self._consumer.was_consuming:\n self._reconnect_delay = 0\n else:\n self._reconnect_delay += 1\n if self._reconnect_delay > 30:\n self._reconnect_delay = 30\n return self._reconnect_delay"
},
{
"identifier": "general_conf",
"path": "config/general_conf.py",
"snippet": "API_URL = \"http://localhost:8000/api/v1/\"\nTOKEN = \"dff78ca834d84f829bd912662ee5ce86ca771939\""
}
] | import json
import os
import sys
import time
import requests
from hashlib import sha256
from typing import Any, Dict, List, Optional, Tuple
from credfind.utils import extract_inputs
from credfind.objects import Input, InputList, InputType
from playwright.sync_api import sync_playwright, TimeoutError, Page
from credgen.utils import creds_from_input
from phishflood.rabbit import RabbitConsumer
from config import general_conf
from pprint import pprint; pprint(forms) | 1,591 |
SCREENSHOT_I = 0
Actions = List[Dict[str, Any]]
def screenshot(page: Page):
global SCREENSHOT_I
SCREENSHOT_I += 1
page.screenshot(path=f"samples/{SCREENSHOT_I}.png")
def hash_inputs(inputs: List[Input]) -> str:
"""Returns a unique string identifying the inputs in the website"""
return sha256("".join([str(i) for i in inputs]).encode()).hexdigest()
def flood_page(
page: Page, last_hash: str = "", page_num: int = 0
) -> Optional[Tuple[str, InputList, Actions]]:
"""Returns a unique string identifying the inputs in the website"""
# Get a first screenshot
page.wait_for_timeout(3000)
screenshot(page)
# Get html and extract the inputs
try:
html = page.content()
except:
return None
res = extract_inputs(html)
if len(res) > 0:
fi, form, inputs = res[0]
else:
print("No inputs found")
return None
# Calculate the hash of the inputs
input_hash = hash_inputs(inputs)
print(f"Input hash: {input_hash}")
if input_hash == last_hash:
print("Already flooded this page")
return None
form_locator = page.locator(f"form >> nth = {form.meta_id}")
actions = []
# Generate the fake credentials for each form and each input
for inp in inputs:
FILLABLE_INPUTS = [
InputType.TEXT,
InputType.EMAIL,
InputType.PASSWORD,
InputType.NUMBER,
InputType.TEL,
InputType.SEARCH,
InputType.URL,
]
if inp.type_ in FILLABLE_INPUTS:
|
SCREENSHOT_I = 0
Actions = List[Dict[str, Any]]
def screenshot(page: Page):
global SCREENSHOT_I
SCREENSHOT_I += 1
page.screenshot(path=f"samples/{SCREENSHOT_I}.png")
def hash_inputs(inputs: List[Input]) -> str:
"""Returns a unique string identifying the inputs in the website"""
return sha256("".join([str(i) for i in inputs]).encode()).hexdigest()
def flood_page(
page: Page, last_hash: str = "", page_num: int = 0
) -> Optional[Tuple[str, InputList, Actions]]:
"""Returns a unique string identifying the inputs in the website"""
# Get a first screenshot
page.wait_for_timeout(3000)
screenshot(page)
# Get html and extract the inputs
try:
html = page.content()
except:
return None
res = extract_inputs(html)
if len(res) > 0:
fi, form, inputs = res[0]
else:
print("No inputs found")
return None
# Calculate the hash of the inputs
input_hash = hash_inputs(inputs)
print(f"Input hash: {input_hash}")
if input_hash == last_hash:
print("Already flooded this page")
return None
form_locator = page.locator(f"form >> nth = {form.meta_id}")
actions = []
# Generate the fake credentials for each form and each input
for inp in inputs:
FILLABLE_INPUTS = [
InputType.TEXT,
InputType.EMAIL,
InputType.PASSWORD,
InputType.NUMBER,
InputType.TEL,
InputType.SEARCH,
InputType.URL,
]
if inp.type_ in FILLABLE_INPUTS: | text = creds_from_input(inp) | 2 | 2023-12-11 16:38:36+00:00 | 2k |
abing7k/redroid-script | stuffs/ndk.py | [
{
"identifier": "General",
"path": "stuffs/general.py",
"snippet": "class General:\n def download(self):\n loc_md5 = \"\"\n if os.path.isfile(self.dl_file_name):\n with open(self.dl_file_name,\"rb\") as f:\n bytes = f.read()\n loc_md5 = hashlib.md5(bytes).hexdigest()\n while not os.path.isfile(self.dl_file_name) or loc_md5 != self.act_md5:\n if os.path.isfile(self.dl_file_name):\n os.remove(self.dl_file_name)\n print_color(\"md5 mismatches, redownloading now ....\",bcolors.YELLOW)\n loc_md5 = download_file(self.dl_link, self.dl_file_name)\n \n def extract(self):\n print_color(\"Extracting archive...\", bcolors.GREEN)\n print(self.dl_file_name)\n print(self.extract_to)\n with zipfile.ZipFile(self.dl_file_name) as z:\n z.extractall(self.extract_to)\n def copy(self):\n pass\n def install(self):\n # pass\n self.download()\n self.extract()\n self.copy()"
},
{
"identifier": "bcolors",
"path": "tools/helper.py",
"snippet": "class bcolors:\n RED = '\\033[31m'\n YELLOW = '\\033[33m'\n GREEN = '\\033[32m'\n ENDC = '\\033[0m'"
},
{
"identifier": "get_download_dir",
"path": "tools/helper.py",
"snippet": "def get_download_dir():\n download_loc = \"\"\n if os.environ.get(\"XDG_CACHE_HOME\", None) is None:\n download_loc = os.path.join('/', \"home\", os.environ.get(\"SUDO_USER\", os.environ[\"USER\"]), \".cache\", \"redroid\", \"downloads\")\n else:\n download_loc = os.path.join(os.environ[\"XDG_CACHE_HOME\"], \"redroid\", \"downloads\")\n if not os.path.exists(download_loc):\n os.makedirs(download_loc)\n return download_loc"
},
{
"identifier": "print_color",
"path": "tools/helper.py",
"snippet": "def print_color(str, color):\n print(color+str+bcolors.ENDC)"
},
{
"identifier": "run",
"path": "tools/helper.py",
"snippet": "def run(args):\n result = subprocess.run(args=args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n if result.stderr:\n print(result.stderr.decode(\"utf-8\"))\n raise subprocess.CalledProcessError(\n returncode = result.returncode,\n cmd = result.args,\n stderr = result.stderr\n )\n return result"
}
] | import os
import shutil
from stuffs.general import General
from tools.helper import bcolors, get_download_dir, print_color, run | 845 |
class Ndk(General):
download_loc = get_download_dir()
copy_dir = "./ndk"
dl_link = "https://github.com/supremegamers/vendor_google_proprietary_ndk_translation-prebuilt/archive/181d9290a69309511185c4417ba3d890b3caaaa8.zip"
dl_file_name = os.path.join(download_loc, "libndktranslation.zip")
extract_to = "/tmp/libndkunpack"
act_md5 = "0beff55f312492f24d539569d84f5bfb"
# init_rc_component = """
# # Enable native bridge for target executables
# on early-init
# mount binfmt_misc binfmt_misc /proc/sys/fs/binfmt_misc
# on property:ro.enable.native.bridge.exec=1
# copy /system/etc/binfmt_misc/arm_exe /proc/sys/fs/binfmt_misc/register
# copy /system/etc/binfmt_misc/arm_dyn /proc/sys/fs/binfmt_misc/register
# copy /system/etc/binfmt_misc/arm64_exe /proc/sys/fs/binfmt_misc/register
# copy /system/etc/binfmt_misc/arm64_dyn /proc/sys/fs/binfmt_misc/register
# """
def download(self):
|
class Ndk(General):
download_loc = get_download_dir()
copy_dir = "./ndk"
dl_link = "https://github.com/supremegamers/vendor_google_proprietary_ndk_translation-prebuilt/archive/181d9290a69309511185c4417ba3d890b3caaaa8.zip"
dl_file_name = os.path.join(download_loc, "libndktranslation.zip")
extract_to = "/tmp/libndkunpack"
act_md5 = "0beff55f312492f24d539569d84f5bfb"
# init_rc_component = """
# # Enable native bridge for target executables
# on early-init
# mount binfmt_misc binfmt_misc /proc/sys/fs/binfmt_misc
# on property:ro.enable.native.bridge.exec=1
# copy /system/etc/binfmt_misc/arm_exe /proc/sys/fs/binfmt_misc/register
# copy /system/etc/binfmt_misc/arm_dyn /proc/sys/fs/binfmt_misc/register
# copy /system/etc/binfmt_misc/arm64_exe /proc/sys/fs/binfmt_misc/register
# copy /system/etc/binfmt_misc/arm64_dyn /proc/sys/fs/binfmt_misc/register
# """
def download(self): | print_color("Downloading libndk now .....", bcolors.GREEN) | 3 | 2023-12-06 09:03:05+00:00 | 2k |
zvict/papr | dataset/dataset.py | [
{
"identifier": "load_meta_data",
"path": "dataset/utils.py",
"snippet": "def load_meta_data(args, mode=\"train\"):\n \"\"\"\n 0 -----------> W\n |\n |\n |\n ⬇\n H\n [H, W, 4]\n \"\"\"\n image_paths = None\n\n if args.type == \"synthetic\":\n images, poses, hwf, image_paths = load_blender_data(\n args.path, split=mode, factor=args.factor, read_offline=args.read_offline)\n print('Loaded blender', images.shape, hwf, args.path)\n\n H, W, focal = hwf\n hwf = [H, W, focal, focal]\n\n if args.white_bg:\n images = images[..., :3] * \\\n images[..., -1:] + (1. - images[..., -1:])\n else:\n images = images[..., :3]\n\n elif args.type == \"t2\":\n images, poses, hwf, image_paths = load_t2_data(\n args.path, factor=args.factor, split=mode, read_offline=args.read_offline)\n print('Loaded t2', images.shape, hwf, args.path,\n images.min(), images.max(), images[0, 10, 10, :])\n\n if args.white_bg and images.shape[-1] == 4:\n images = images[..., :3] * \\\n images[..., -1:] + (1. - images[..., -1:])\n elif not args.white_bg:\n images = images[..., :3]\n mask = images.sum(-1) == 3.0\n images[mask] = 0.\n\n else:\n raise ValueError(\"Unknown dataset type: {}\".format(args.type))\n\n H, W, focal_x, focal_y = hwf\n\n images = torch.from_numpy(images).float()\n poses = torch.from_numpy(poses).float()\n\n return images, poses, H, W, focal_x, focal_y, image_paths"
},
{
"identifier": "get_rays",
"path": "dataset/utils.py",
"snippet": "def get_rays(H, W, focal_x, focal_y, c2w, fineness=1):\n N = c2w.shape[0]\n width = torch.linspace(\n 0, W / focal_x, steps=int(W / fineness) + 1, dtype=torch.float32)\n height = torch.linspace(\n 0, H / focal_y, steps=int(H / fineness) + 1, dtype=torch.float32)\n y, x = torch.meshgrid(height, width)\n pixel_size_x = width[1] - width[0]\n pixel_size_y = height[1] - height[0]\n x = (x - W / focal_x / 2 + pixel_size_x / 2)[:-1, :-1]\n y = -(y - H / focal_y / 2 + pixel_size_y / 2)[:-1, :-1]\n # [H, W, 3], vectors, since the camera is at the origin\n dirs_d = torch.stack([x, y, -torch.ones_like(x)], -1)\n rays_d = cam_to_world(dirs_d.unsqueeze(0), c2w) # [N, H, W, 3]\n rays_o = c2w[:, :3, -1] # [N, 3]\n return rays_o, rays_d / torch.norm(rays_d, dim=-1, keepdim=True)"
},
{
"identifier": "extract_patches",
"path": "dataset/utils.py",
"snippet": "def extract_patches(imgs, rays_o, rays_d, args):\n patch_opt = args.patches\n N, H, W, C = imgs.shape\n\n if patch_opt.type == \"continuous\":\n num_patches_H = math.ceil(\n (H - patch_opt.overlap) / (patch_opt.height - patch_opt.overlap))\n num_patches_W = math.ceil(\n (W - patch_opt.overlap) / (patch_opt.width - patch_opt.overlap))\n num_patches = num_patches_H * num_patches_W\n rayd_patches = np.zeros(\n (N, num_patches, patch_opt.height, patch_opt.width, 3), dtype=np.float32)\n rayo_patches = np.zeros((N, num_patches, 3), dtype=np.float32)\n img_patches = np.zeros(\n (N, num_patches, patch_opt.height, patch_opt.width, C), dtype=np.float32)\n\n for i in range(N):\n n_patch = 0\n for start_height in range(0, H - patch_opt.overlap, patch_opt.height - patch_opt.overlap):\n for start_width in range(0, W - patch_opt.overlap, patch_opt.width - patch_opt.overlap):\n end_height = min(start_height + patch_opt.height, H)\n end_width = min(start_width + patch_opt.width, W)\n start_height = end_height - patch_opt.height\n start_width = end_width - patch_opt.width\n rayd_patches[i, n_patch, :, :] = rays_d[i,\n start_height:end_height, start_width:end_width]\n rayo_patches[i, n_patch, :] = rays_o[i, :]\n img_patches[i, n_patch, :, :] = imgs[i,\n start_height:end_height, start_width:end_width]\n n_patch += 1\n\n elif patch_opt.type == \"random\":\n num_patches = patch_opt.max_patches\n rayd_patches = np.zeros(\n (N, num_patches, patch_opt.height, patch_opt.width, 3), dtype=np.float32)\n rayo_patches = np.zeros((N, num_patches, 3), dtype=np.float32)\n img_patches = np.zeros(\n (N, num_patches, patch_opt.height, patch_opt.width, C), dtype=np.float32)\n\n for i in range(N):\n for n_patch in range(num_patches):\n start_height = np.random.randint(0, H - patch_opt.height)\n start_width = np.random.randint(0, W - patch_opt.width)\n end_height = start_height + patch_opt.height\n end_width = start_width + patch_opt.width\n rayd_patches[i, n_patch, :, :] = rays_d[i,\n start_height:end_height, start_width:end_width]\n rayo_patches[i, n_patch, :] = rays_o[i, :]\n img_patches[i, n_patch, :, :] = imgs[i,\n start_height:end_height, start_width:end_width]\n\n return img_patches, rayd_patches, rayo_patches, num_patches"
}
] | import torch
import numpy as np
import imageio
from torch.utils.data import Dataset
from PIL import Image
from .utils import load_meta_data, get_rays, extract_patches | 1,572 |
class RINDataset(Dataset):
""" Ray Image Normal Dataset """
def __init__(self, args, mode='train'):
self.args = args
|
class RINDataset(Dataset):
""" Ray Image Normal Dataset """
def __init__(self, args, mode='train'):
self.args = args | images, c2w, H, W, focal_x, focal_y, image_paths = load_meta_data( | 0 | 2023-12-08 19:51:42+00:00 | 2k |
rinnakk/nue-asr | nue_asr/cli.py | [
{
"identifier": "transcribe",
"path": "nue_asr/transcribe.py",
"snippet": "@torch.inference_mode()\ndef transcribe(\n model: NueASRModel,\n tokenizer: PreTrainedTokenizer,\n audio: Union[str, np.ndarray, torch.Tensor],\n **decode_options,\n) -> ASRResult:\n device = model.device\n sr = 16000\n\n decode_options.setdefault(\"do_sample\", False)\n decode_options.setdefault(\"num_beams\", 1)\n decode_options.setdefault(\"temperature\", 1.0)\n decode_options.setdefault(\"top_p\", 1.0)\n decode_options.setdefault(\"min_new_tokens\", 2)\n decode_options.setdefault(\"max_new_tokens\", None)\n\n if isinstance(audio, str):\n from librosa import load\n\n audio = load(audio, sr=sr)[0]\n\n if not torch.is_tensor(audio):\n audio = torch.from_numpy(audio)\n\n if audio.dim() != 1:\n assert audio.dim() == 2 and audio.shape[0] == 1, \"Only mono audio is supported.\"\n\n audio = audio.to(model.dtype).reshape(1, -1)\n audio_len_sec = audio.shape[-1] / sr\n if decode_options[\"max_new_tokens\"] is None:\n decode_options[\"max_new_tokens\"] = int(4 * audio_len_sec + 20 + 0.5)\n\n if audio_len_sec > WARN_TOO_LONG_THRESHOLD:\n logger.warning(\n f\"The input audio is {audio_len_sec:.1f} sec, \"\n \"but such long audio inputs may degrade recognition accuracy. \"\n \"It is recommended to split the audio into shorter segments.\"\n )\n\n prefix_token = tokenizer.encode(\n \"<s>\",\n add_special_tokens=False,\n return_tensors=\"pt\",\n )\n postfix_token = tokenizer.encode(\n \"[SEP]\",\n add_special_tokens=False,\n return_tensors=\"pt\",\n )\n outputs = model(\n prefix_token.to(device),\n audio.to(device),\n postfix_token.to(device),\n pad_token_id=tokenizer.pad_token_id,\n bos_token_id=tokenizer.bos_token_id,\n eos_token_id=tokenizer.eos_token_id,\n **decode_options,\n )\n output_text = tokenizer.decode(outputs[0], skip_special_tokens=True)\n\n return ASRResult(text=output_text)"
},
{
"identifier": "load_model",
"path": "nue_asr/utils.py",
"snippet": "def load_model(\n model_name_or_path: Optional[str] = None,\n device: Optional[Union[str, torch.device]] = \"cuda\",\n fp16: bool = True,\n use_deepspeed: bool = False,\n) -> NueASRModel:\n if model_name_or_path is None:\n model_name_or_path = DEFAULT_MODEL_NAME\n\n device = torch.device(device)\n if device.type == \"cpu\":\n if torch.cuda.is_available():\n logging.warning(\n \"CUDA is available but using CPU. \"\n \"If you want to use CUDA, set `device` to `cuda`.\"\n )\n if fp16:\n logging.warning(\"FP16 is not supported on CPU. Using FP32 instead.\")\n fp16 = False\n if use_deepspeed:\n logging.warning(\"DeepSpeed is not supported on CPU. Disabling it.\")\n use_deepspeed = False\n\n dtype = torch.float16 if fp16 else torch.float32\n\n model = NueASRModel.from_pretrained(model_name_or_path)\n model.to(dtype)\n\n if use_deepspeed:\n try:\n import deepspeed\n except ImportError:\n raise ImportError(\n \"DeepSpeed is not installed. Please install it with `pip install deepspeed`.\"\n )\n\n ds_engine = deepspeed.init_inference(\n model.llm,\n replace_with_kernel_inject=True,\n dtype=dtype,\n )\n for m in ds_engine.modules():\n if (\n getattr(m, \"config\", None)\n and getattr(m.config, \"mlp_after_attn\", None) is not None\n ):\n m.config.mlp_after_attn = not model.llm.config.use_parallel_residual\n model.llm = ds_engine.module\n\n if device is not None:\n model.to(device)\n\n logger.info(f\"Finished loading model from {model_name_or_path}\")\n\n return model"
},
{
"identifier": "load_tokenizer",
"path": "nue_asr/utils.py",
"snippet": "def load_tokenizer(model_name_or_path: Optional[str] = None):\n if model_name_or_path is None:\n model_name_or_path = DEFAULT_MODEL_NAME\n\n tokenizer = AutoTokenizer.from_pretrained(\n model_name_or_path, use_fast=False, legacy=True\n )\n return tokenizer"
},
{
"identifier": "set_seed",
"path": "nue_asr/utils.py",
"snippet": "def set_seed(seed: int):\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)"
},
{
"identifier": "str2bool",
"path": "nue_asr/utils.py",
"snippet": "def str2bool(v: str):\n if v.lower() in (\"true\", \"t\", \"yes\", \"y\", \"1\"):\n return True\n if v.lower() in (\"false\", \"f\", \"no\", \"n\", \"0\"):\n return False\n raise ValueError(f\"Invalid boolean value: {v}\")"
}
] | import argparse
import os
import torch
from .transcribe import transcribe
from .utils import load_model, load_tokenizer, set_seed, str2bool | 1,542 | #!/usr/bin/env python3
# Copyright 2023 rinna Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def cli_main():
default_device = "cuda" if torch.cuda.is_available() else "cpu"
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
"audio_files",
nargs="+",
type=str,
help="Audio file paths",
)
parser.add_argument(
"--model",
type=str,
default=None,
help="Model name or path",
)
parser.add_argument(
"--device",
type=str,
default=default_device,
help="Device to use for inference.",
)
parser.add_argument(
| #!/usr/bin/env python3
# Copyright 2023 rinna Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def cli_main():
default_device = "cuda" if torch.cuda.is_available() else "cpu"
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
"audio_files",
nargs="+",
type=str,
help="Audio file paths",
)
parser.add_argument(
"--model",
type=str,
default=None,
help="Model name or path",
)
parser.add_argument(
"--device",
type=str,
default=default_device,
help="Device to use for inference.",
)
parser.add_argument( | "--fp16", type=str2bool, default=True, help="Whether to fp16 inference." | 4 | 2023-12-07 01:37:23+00:00 | 2k |
AdaCheng/EgoThink | models/instruct_blip/processors/blip_processors.py | [
{
"identifier": "registry",
"path": "models/instruct_blip/common/registry.py",
"snippet": "class Registry:\n def register_model(cls, name):\n def wrap(model_cls):\n def register_processor(cls, name):\n def wrap(processor_cls):\n def register_lr_scheduler(cls, name):\n def wrap(lr_sched_cls):\n def register_runner(cls, name):\n def wrap(runner_cls):\n def register_path(cls, name, path):\n def register(cls, name, obj):\n def get_builder_class(cls, name):\n def get_model_class(cls, name):\n def get_task_class(cls, name):\n def get_processor_class(cls, name):\n def get_lr_scheduler_class(cls, name):\n def get_runner_class(cls, name):\n def list_runners(cls):\n def list_models(cls):\n def list_tasks(cls):\n def list_processors(cls):\n def list_lr_schedulers(cls):\n def list_datasets(cls):\n def get_path(cls, name):\n def get(cls, name, default=None, no_warning=False):\n def unregister(cls, name):"
},
{
"identifier": "BaseProcessor",
"path": "models/instruct_blip/processors/base_processor.py",
"snippet": "class BaseProcessor:\n def __init__(self):\n self.transform = lambda x: x\n return\n\n def __call__(self, item):\n return self.transform(item)\n\n @classmethod\n def from_config(cls, cfg=None):\n return cls()\n\n def build(self, **kwargs):\n cfg = OmegaConf.create(kwargs)\n\n return self.from_config(cfg)"
},
{
"identifier": "RandomAugment",
"path": "models/instruct_blip/processors/randaugment.py",
"snippet": "class RandomAugment(object):\n def __init__(self, N=2, M=10, isPIL=False, augs=[]):\n self.N = N\n self.M = M\n self.isPIL = isPIL\n if augs:\n self.augs = augs\n else:\n self.augs = list(arg_dict.keys())\n\n def get_random_ops(self):\n sampled_ops = np.random.choice(self.augs, self.N)\n return [(op, 0.5, self.M) for op in sampled_ops]\n\n def __call__(self, img):\n if self.isPIL:\n img = np.array(img)\n ops = self.get_random_ops()\n for name, prob, level in ops:\n if np.random.random() > prob:\n continue\n args = arg_dict[name](level)\n img = func_dict[name](img, *args)\n return img"
}
] | import re
from ..common.registry import registry
from .base_processor import BaseProcessor
from .randaugment import RandomAugment
from omegaconf import OmegaConf
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode | 805 | """
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
class BlipImageBaseProcessor(BaseProcessor):
def __init__(self, mean=None, std=None):
if mean is None:
mean = (0.48145466, 0.4578275, 0.40821073)
if std is None:
std = (0.26862954, 0.26130258, 0.27577711)
self.normalize = transforms.Normalize(mean, std)
| """
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
class BlipImageBaseProcessor(BaseProcessor):
def __init__(self, mean=None, std=None):
if mean is None:
mean = (0.48145466, 0.4578275, 0.40821073)
if std is None:
std = (0.26862954, 0.26130258, 0.27577711)
self.normalize = transforms.Normalize(mean, std)
| @registry.register_processor("blip_caption") | 0 | 2023-12-05 14:17:17+00:00 | 2k |
TristanBilot/mlx-GCN | main_torch.py | [
{
"identifier": "download_cora",
"path": "datasets.py",
"snippet": "def download_cora():\n \"\"\"Downloads the cora dataset into a local cora folder.\"\"\"\n\n url = \"https://linqs-data.soe.ucsc.edu/public/lbc/cora.tgz\"\n extract_to = \".\"\n\n if os.path.exists(os.path.join(extract_to, \"cora\")):\n return\n\n response = requests.get(url, stream=True)\n if response.status_code == 200:\n file_path = os.path.join(extract_to, url.split(\"/\")[-1])\n\n # Write the file to local disk\n with open(file_path, \"wb\") as file:\n file.write(response.raw.read())\n\n # Extract the .tgz file\n with tarfile.open(file_path, \"r:gz\") as tar:\n tar.extractall(path=extract_to)\n print(f\"Cora dataset extracted to {extract_to}\")\n\n os.remove(file_path)"
},
{
"identifier": "load_data",
"path": "datasets.py",
"snippet": "def load_data(config):\n \"\"\"Loads the Cora graph data into MLX array format.\"\"\"\n print(\"Loading Cora dataset...\")\n\n # Graph nodes\n raw_nodes_data = np.genfromtxt(config.nodes_path, dtype=\"str\")\n raw_node_ids = raw_nodes_data[:, 0].astype(\n \"int32\"\n ) # unique identifier of each node\n raw_node_labels = raw_nodes_data[:, -1]\n labels_enumerated = enumerate_labels(raw_node_labels) # target labels as integers\n node_features = sparse.csr_matrix(raw_nodes_data[:, 1:-1], dtype=\"float32\")\n\n # Edges\n ids_ordered = {raw_id: order for order, raw_id in enumerate(raw_node_ids)}\n raw_edges_data = np.genfromtxt(config.edges_path, dtype=\"int32\")\n edges_ordered = np.array(\n list(map(ids_ordered.get, raw_edges_data.flatten())), dtype=\"int32\"\n ).reshape(raw_edges_data.shape)\n\n # Adjacency matrix\n adj = sparse.coo_matrix(\n (np.ones(edges_ordered.shape[0]), (edges_ordered[:, 0], edges_ordered[:, 1])),\n shape=(labels_enumerated.shape[0], labels_enumerated.shape[0]),\n dtype=np.float32,\n )\n\n # Make the adjacency matrix symmetric\n adj = adj + adj.T.multiply(adj.T > adj)\n adj = normalize_adjacency(adj)\n\n print(\"Dataset loaded.\")\n return node_features.toarray(), labels_enumerated, adj.toarray()"
},
{
"identifier": "train_val_test_mask",
"path": "datasets.py",
"snippet": "def train_val_test_mask(labels, num_classes):\n \"\"\"Splits the loaded dataset into train/validation/test sets.\"\"\"\n\n train_set = list(range(140))\n validation_set = list(range(200, 500))\n test_set = list(range(500, 1500))\n\n return train_set, validation_set, test_set"
}
] | from argparse import ArgumentParser
from time import time
from datasets import download_cora, load_data, train_val_test_mask
import torch
import torch.nn as nn | 1,238 |
class GCNLayer(nn.Module):
def __init__(self, x_dim, h_dim, bias=True):
super(GCNLayer, self).__init__()
self.weight = nn.Parameter(torch.FloatTensor(torch.zeros(size=(x_dim, h_dim))))
if bias:
self.bias = nn.Parameter(torch.FloatTensor(torch.zeros(size=(h_dim,))))
else:
self.register_parameter('bias', None)
self.initialize_weights()
def initialize_weights(self):
nn.init.xavier_uniform_(self.weight)
if self.bias is not None:
nn.init.zeros_(self.bias)
def forward(self, x, adj):
x = x @ self.weight
if self.bias is not None:
x += self.bias
return torch.mm(adj, x)
class GCN(nn.Module):
def __init__(self, x_dim, h_dim, out_dim, nb_layers=2, dropout=0.5, bias=True):
super(GCN, self).__init__()
layer_sizes = [x_dim] + [h_dim] * nb_layers + [out_dim]
self.gcn_layers = nn.Sequential(*[
GCNLayer(in_dim, out_dim, bias)
for in_dim, out_dim in zip(layer_sizes[:-1], layer_sizes[1:])
])
self.dropout = nn.Dropout(p=dropout)
def initialize_weights(self):
self.gcn_1.initialize_weights()
self.gcn_2.initialize_weights()
def forward(self, x, adj):
for layer in self.gcn_layers[:-1]:
x = torch.relu(layer(x, adj))
x = self.dropout(x)
x = self.gcn_layers[-1](x, adj)
return x
def to_torch(device, x, y, adj, train_mask, val_mask, test_mask):
x = torch.tensor(x, dtype=torch.float32, device=device)
y = torch.tensor(y, dtype=torch.long, device=device)
adj = torch.tensor(adj, dtype=torch.float32, device=device)
train_mask = torch.tensor(train_mask, device=device)
val_mask = torch.tensor(val_mask, device=device)
test_mask = torch.tensor(test_mask, device=device)
return x, y, adj, train_mask, val_mask, test_mask
def eval_fn(x, y):
return torch.mean((torch.argmax(x, axis=1) == y).float())
def main(args, device):
# Data loading
download_cora()
x, y, adj = load_data(args)
|
class GCNLayer(nn.Module):
def __init__(self, x_dim, h_dim, bias=True):
super(GCNLayer, self).__init__()
self.weight = nn.Parameter(torch.FloatTensor(torch.zeros(size=(x_dim, h_dim))))
if bias:
self.bias = nn.Parameter(torch.FloatTensor(torch.zeros(size=(h_dim,))))
else:
self.register_parameter('bias', None)
self.initialize_weights()
def initialize_weights(self):
nn.init.xavier_uniform_(self.weight)
if self.bias is not None:
nn.init.zeros_(self.bias)
def forward(self, x, adj):
x = x @ self.weight
if self.bias is not None:
x += self.bias
return torch.mm(adj, x)
class GCN(nn.Module):
def __init__(self, x_dim, h_dim, out_dim, nb_layers=2, dropout=0.5, bias=True):
super(GCN, self).__init__()
layer_sizes = [x_dim] + [h_dim] * nb_layers + [out_dim]
self.gcn_layers = nn.Sequential(*[
GCNLayer(in_dim, out_dim, bias)
for in_dim, out_dim in zip(layer_sizes[:-1], layer_sizes[1:])
])
self.dropout = nn.Dropout(p=dropout)
def initialize_weights(self):
self.gcn_1.initialize_weights()
self.gcn_2.initialize_weights()
def forward(self, x, adj):
for layer in self.gcn_layers[:-1]:
x = torch.relu(layer(x, adj))
x = self.dropout(x)
x = self.gcn_layers[-1](x, adj)
return x
def to_torch(device, x, y, adj, train_mask, val_mask, test_mask):
x = torch.tensor(x, dtype=torch.float32, device=device)
y = torch.tensor(y, dtype=torch.long, device=device)
adj = torch.tensor(adj, dtype=torch.float32, device=device)
train_mask = torch.tensor(train_mask, device=device)
val_mask = torch.tensor(val_mask, device=device)
test_mask = torch.tensor(test_mask, device=device)
return x, y, adj, train_mask, val_mask, test_mask
def eval_fn(x, y):
return torch.mean((torch.argmax(x, axis=1) == y).float())
def main(args, device):
# Data loading
download_cora()
x, y, adj = load_data(args) | train_mask, val_mask, test_mask = train_val_test_mask(y, args.nb_classes) | 2 | 2023-12-11 09:40:09+00:00 | 2k |
3dlg-hcvc/cage | models/denoiser.py | [
{
"identifier": "FinalLayer",
"path": "models/utils.py",
"snippet": "class FinalLayer(nn.Module):\n def __init__(self, in_ch, out_ch=None, dropout=0.):\n super().__init__()\n out_ch = in_ch if out_ch is None else out_ch\n self.linear = nn.Linear(in_ch, out_ch)\n self.norm = AdaLayerNormTC(in_ch, 2*in_ch, dropout)\n \n def forward(self, x, t, cond=None):\n assert cond is not None\n x = self.norm(x, t, cond)\n x = self.linear(x)\n return x"
},
{
"identifier": "PEmbeder",
"path": "models/utils.py",
"snippet": "class PEmbeder(nn.Module):\n def __init__(self, vocab_size, d_model):\n super().__init__()\n self.embed = nn.Embedding(vocab_size, d_model)\n self._init_embeddings()\n\n def _init_embeddings(self):\n nn.init.kaiming_normal_(self.embed.weight, mode=\"fan_in\")\n\n def forward(self, x, idx=None):\n if idx is None:\n idx = torch.arange(x.shape[1], device=x.device).long()\n return x + self.embed(idx)"
},
{
"identifier": "AAB",
"path": "models/utils.py",
"snippet": "class AAB(nn.Module):\n def __init__(self, \n dim: int,\n num_attention_heads: int,\n attention_head_dim: int,\n dropout=0.0,\n activation_fn: str = \"geglu\",\n num_embeds_ada_norm: int = None,\n attention_bias: bool = False,\n norm_elementwise_affine: bool = True,\n final_dropout: bool = False,\n class_dropout_prob: float = 0.0 # for classifier-free\n ):\n super().__init__()\n\n self.norm1 = MyAdaLayerNormZero(dim, num_embeds_ada_norm, class_dropout_prob) \n \n self.global_attn = Attention(\n query_dim=dim,\n heads=num_attention_heads,\n dim_head=attention_head_dim,\n dropout=dropout,\n bias=attention_bias,\n )\n\n self.norm2 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)\n \n self.attr_attn = Attention(\n query_dim=dim,\n heads=num_attention_heads,\n dim_head=attention_head_dim,\n dropout=dropout,\n bias=attention_bias,\n )\n\n self.graph_attn = Attention(\n query_dim=dim,\n heads=num_attention_heads,\n dim_head=attention_head_dim,\n dropout=dropout,\n bias=attention_bias,\n )\n\n self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)\n self.norm4 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)\n\n self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)\n \n def forward(self, hidden_states, pad_mask, attr_mask, graph_mask, timestep, class_labels):\n norm_hidden_states, gate_1, shift_mlp, scale_mlp, gate_mlp, gate_2, gate_3 = self.norm1(\n hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype\n )\n attr_out = self.attr_attn(norm_hidden_states, attention_mask=attr_mask)\n attr_out = gate_1.unsqueeze(1) * attr_out\n hidden_states = hidden_states + attr_out\n\n norm_hidden_states = self.norm2(hidden_states)\n global_out = self.global_attn(norm_hidden_states, attention_mask=pad_mask)\n global_out = gate_2.unsqueeze(1) * global_out\n hidden_states = hidden_states + global_out \n\n norm_hidden_states = self.norm3(hidden_states)\n graph_out = self.graph_attn(norm_hidden_states, attention_mask=graph_mask)\n graph_out = gate_3.unsqueeze(1) * graph_out\n hidden_states = hidden_states + graph_out\n\n norm_hidden_states = self.norm4(hidden_states)\n norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]\n ff_output = self.ff(norm_hidden_states)\n ff_output = gate_mlp.unsqueeze(1) * ff_output\n \n hidden_states = ff_output + hidden_states\n return hidden_states"
}
] | import torch
import models
from torch import nn
from models.utils import FinalLayer, PEmbeder, AAB | 1,352 |
@models.register('denoiser')
class AABModel(nn.Module):
'''
Denoiser based on Attribute Attention Block (AAB)
3 sequential attentions: local -> global -> graph
'''
def __init__(self, hparams):
super(AABModel, self).__init__()
self.hparams = hparams
in_ch = hparams.in_ch
attn_dim = hparams.attn_dim
dropout = hparams.dropout
n_head = hparams.n_head
head_dim = attn_dim // n_head
num_embeds_ada_norm = 6*attn_dim
self.K = self.hparams.get('K', 32)
self.x_embedding = nn.Linear(in_ch, attn_dim)
self.pe_node = PEmbeder(self.K, attn_dim)
self.pe_attr = PEmbeder(5, attn_dim)
self.attn_layers = nn.ModuleList(
[ # to do: refactor this block, customize the eps of layernorm if train with fp16
AAB(dim=attn_dim,
num_attention_heads=n_head,
attention_head_dim=head_dim,
dropout=dropout,
activation_fn="geglu",
num_embeds_ada_norm=num_embeds_ada_norm,
attention_bias=False,
norm_elementwise_affine=True,
final_dropout=False,
)
for d in range(hparams.n_layers)
]
)
|
@models.register('denoiser')
class AABModel(nn.Module):
'''
Denoiser based on Attribute Attention Block (AAB)
3 sequential attentions: local -> global -> graph
'''
def __init__(self, hparams):
super(AABModel, self).__init__()
self.hparams = hparams
in_ch = hparams.in_ch
attn_dim = hparams.attn_dim
dropout = hparams.dropout
n_head = hparams.n_head
head_dim = attn_dim // n_head
num_embeds_ada_norm = 6*attn_dim
self.K = self.hparams.get('K', 32)
self.x_embedding = nn.Linear(in_ch, attn_dim)
self.pe_node = PEmbeder(self.K, attn_dim)
self.pe_attr = PEmbeder(5, attn_dim)
self.attn_layers = nn.ModuleList(
[ # to do: refactor this block, customize the eps of layernorm if train with fp16
AAB(dim=attn_dim,
num_attention_heads=n_head,
attention_head_dim=head_dim,
dropout=dropout,
activation_fn="geglu",
num_embeds_ada_norm=num_embeds_ada_norm,
attention_bias=False,
norm_elementwise_affine=True,
final_dropout=False,
)
for d in range(hparams.n_layers)
]
)
| self.final_layer = FinalLayer(attn_dim, in_ch, dropout=dropout) | 0 | 2023-12-06 23:08:41+00:00 | 2k |
modelscope/llmuses | llmuses/benchmarks/data_adapter.py | [
{
"identifier": "Benchmark",
"path": "llmuses/benchmarks/benchmark.py",
"snippet": "class Benchmark(object):\n \"\"\"\n Wrapper for loading datasets from ModelScope or HuggingFace.\n \"\"\"\n\n def __init__(self):\n ...\n\n @staticmethod\n def load(dataset_name: str,\n subset: str = None,\n split: str = None,\n token: str = None,\n hub: str = 'ModelScope',\n work_dir: Optional[str] = DEFAULT_ROOT_CACHE_DIR,\n **kwargs):\n \"\"\"\n Load a dataset from ModelScope or HuggingFace.\n\n Args:\n dataset_name (str): The dataset id or path.\n If it is dataset id, should be in the format of `organization/name` for ModelScope and HuggingFace hub.\n If it is dataset path, should be the path on local disk.\n subset (str):\n split:\n token: sdk token for ModelScope, optional, default None\n hub: `ModelScope` or `HuggingFace`\n work_dir: the work directory for caching, optional\n\n Returns:\n A dict.\n \"\"\"\n work_dir = os.path.join(work_dir, 'benchmarks', dataset_name.replace('/', '_'))\n if hub == 'ModelScope':\n from modelscope.msdatasets import MsDataset\n dataset = MsDataset.load(dataset_name=dataset_name, subset_name=subset, split=split, token=token,\n cache_dir=work_dir, **kwargs)\n\n dataset.dataset_name = dataset_name.split('/')[-1]\n dataset.subset_name = subset\n dataset.split = split\n return dataset\n elif hub == 'HuggingFace':\n # TODO: implement this by [email protected]\n ...\n else:\n raise ValueError(f'hub must be `ModelScope` or `HuggingFace`, but got {hub}')"
},
{
"identifier": "DEFAULT_ROOT_CACHE_DIR",
"path": "llmuses/constants.py",
"snippet": "DEFAULT_ROOT_CACHE_DIR = '~/.cache/llmuses'"
},
{
"identifier": "AnswerKeys",
"path": "llmuses/constants.py",
"snippet": "class AnswerKeys:\n\n ANSWER_ID = 'answer_id'\n\n RAW_INPUT = 'raw_input'\n\n ORIGIN_PROMPT = 'origin_prompt'\n\n MODEL_SPEC = 'model_spec'\n\n SUBSET_NAME = 'subset_name'\n\n CHOICES = 'choices'"
},
{
"identifier": "get_logger",
"path": "llmuses/utils/logger.py",
"snippet": "def get_logger(log_file: Optional[str] = None,\n log_level: int = logging.INFO,\n file_mode: str = 'w'):\n \"\"\" Get logging logger\n\n Args:\n log_file: Log filename, if specified, file handler will be added to\n logger\n log_level: Logging level.\n file_mode: Specifies the mode to open the file, if filename is\n specified (if filemode is unspecified, it defaults to 'w').\n \"\"\"\n\n logger_name = __name__.split('.')[0]\n logger = logging.getLogger(logger_name)\n\n if logger_name in init_loggers:\n add_file_handler_if_needed(logger, log_file, file_mode, log_level)\n return logger\n\n for handler in logger.root.handlers:\n if type(handler) is logging.StreamHandler:\n handler.setLevel(logging.ERROR)\n\n stream_handler = logging.StreamHandler()\n handlers = [stream_handler]\n\n if log_file is not None:\n file_handler = logging.FileHandler(log_file, file_mode)\n handlers.append(file_handler)\n\n for handler in handlers:\n handler.setFormatter(formatter)\n handler.setLevel(log_level)\n logger.addHandler(handler)\n\n logger.setLevel(log_level)\n\n init_loggers[logger_name] = True\n\n return logger"
}
] | from abc import ABC, abstractmethod
from typing import Any, Optional
from llmuses.benchmarks import Benchmark
from llmuses.constants import DEFAULT_ROOT_CACHE_DIR, AnswerKeys
from llmuses.utils.logger import get_logger
import random | 1,377 | # Copyright (c) Alibaba, Inc. and its affiliates.
logger = get_logger()
class DataAdapter(ABC):
def __init__(self,
subset_list: list,
metric_list: list,
few_shot_num: Optional[int] = 0,
train_split: Optional[str] = None,
eval_split: Optional[str] = None,
**kwargs):
"""
Args:
subset_list: list of subset names for the dataset.
metric_list: list, the metric list to evaluate the model on specific benchmark.
few_shot_num: int, number of few-shot examples. Default: 0
train_split: str, usually for few-shot examples. e.g. 'train'
eval_split: str, the target eval split name. e.g. 'test'
"""
self.subset_list = subset_list
self.metric_list = metric_list
self.few_shot_num = few_shot_num
self.train_split = train_split
self.eval_split = eval_split
def load(self,
dataset_name_or_path: str,
subset_list: list = None,
work_dir: Optional[str] = DEFAULT_ROOT_CACHE_DIR,
**kwargs) -> dict:
"""
Load the dataset. Remote and local datasets are supported.
You can rewrite this method to support your own local dataset, just follow the format of the output.
Returns: {'subset_name': {'train': train_dataset, 'test': test_dataset}}
train_dataset, test_dataset: Iterable dataset, object each item of which is a dict.
TODO: local data path to be supported.
"""
data_dict = {}
split_list = [split for split in [self.train_split, self.eval_split] if split is not None]
if len(split_list) == 0:
logger.error(f'Got empty split list: {split_list}')
subset_list = subset_list if subset_list is not None else self.subset_list
for sub_name in subset_list:
data_dict[sub_name] = {}
# e.g. train: few-shot, test: target dataset to evaluate
for split in split_list:
| # Copyright (c) Alibaba, Inc. and its affiliates.
logger = get_logger()
class DataAdapter(ABC):
def __init__(self,
subset_list: list,
metric_list: list,
few_shot_num: Optional[int] = 0,
train_split: Optional[str] = None,
eval_split: Optional[str] = None,
**kwargs):
"""
Args:
subset_list: list of subset names for the dataset.
metric_list: list, the metric list to evaluate the model on specific benchmark.
few_shot_num: int, number of few-shot examples. Default: 0
train_split: str, usually for few-shot examples. e.g. 'train'
eval_split: str, the target eval split name. e.g. 'test'
"""
self.subset_list = subset_list
self.metric_list = metric_list
self.few_shot_num = few_shot_num
self.train_split = train_split
self.eval_split = eval_split
def load(self,
dataset_name_or_path: str,
subset_list: list = None,
work_dir: Optional[str] = DEFAULT_ROOT_CACHE_DIR,
**kwargs) -> dict:
"""
Load the dataset. Remote and local datasets are supported.
You can rewrite this method to support your own local dataset, just follow the format of the output.
Returns: {'subset_name': {'train': train_dataset, 'test': test_dataset}}
train_dataset, test_dataset: Iterable dataset, object each item of which is a dict.
TODO: local data path to be supported.
"""
data_dict = {}
split_list = [split for split in [self.train_split, self.eval_split] if split is not None]
if len(split_list) == 0:
logger.error(f'Got empty split list: {split_list}')
subset_list = subset_list if subset_list is not None else self.subset_list
for sub_name in subset_list:
data_dict[sub_name] = {}
# e.g. train: few-shot, test: target dataset to evaluate
for split in split_list: | dataset = Benchmark.load(dataset_name=dataset_name_or_path, | 0 | 2023-12-07 06:10:49+00:00 | 2k |