repo_name (stringlengths 7-71) | file_path (stringlengths 5-118) | context (list) | import_statement (stringlengths 45-12.5k) | token_num (int64 641-99.4k) | cropped_code (stringlengths 44-17k) | all_code (stringlengths 43-754k) | next_line (stringlengths 2-330) | gold_snippet_index (int64 0-68) | created_at (stringlengths 25-25) | level (stringclasses, 9 values) |
---|---|---|---|---|---|---|---|---|---|---|
google-research/semivl | model/vlm.py | [
{
"identifier": "aggregate_concept_predictions",
"path": "model/text_embeddings.py",
"snippet": "def aggregate_concept_predictions(pred, class_to_concept_idxs):\n B, _, H, W = pred.shape\n agg_pred = torch.zeros(B, len(class_to_concept_idxs), H, W, device=pred.device)\n for cls_i, conc_i in class_to_concept_idxs.items():\n agg_pred[:, cls_i] = pred[:, conc_i].max(dim=1).values\n return agg_pred"
},
{
"identifier": "get_class_to_concept_idxs",
"path": "model/text_embeddings.py",
"snippet": "def get_class_to_concept_idxs(save_path):\n if save_path == 'configs/_base_/datasets/text_embedding/voc12_wbg_concept4_single.npy':\n _, _, class_to_concept_idxs = flatten_class_concepts(VOC12_wbg_classes_w_concepts4)\n elif save_path == 'configs/_base_/datasets/text_embedding/cityscapes_concept3_single.npy':\n _, _, class_to_concept_idxs = flatten_class_concepts(Cityscapes_classes_w_concepts3)\n else:\n raise ValueError(save_path)\n return class_to_concept_idxs"
}
] | import numpy as np
import torch
import torch.nn.functional as F
from mmseg.models import builder
from mmseg.models.builder import SEGMENTORS
from mmseg.models.segmentors.encoder_decoder import EncoderDecoder
from model.text_embeddings import (aggregate_concept_predictions,
get_class_to_concept_idxs) | 1,186 | # Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
@SEGMENTORS.register_module()
class VLM(EncoderDecoder):
def __init__(self,
freeze_backbone=False,
exclude_keys=None,
load_text_embedding=None,
load_mcc_text_embedding=None,
load_pl_text_embedding=None,
clip_encoder=None,
conv_encoder=None,
maskclip_class_filter=None,
maskclip_trust_head=None,
renorm_clip_img=False,
**args):
super(VLM, self).__init__(**args)
assert load_text_embedding == load_pl_text_embedding
assert maskclip_class_filter is None
assert maskclip_trust_head is None
self.local_iter = 0
self.clip_encoder = None
if clip_encoder is not None:
self.clip_encoder = builder.build_backbone(clip_encoder)
self.conv_encoder = None
if conv_encoder is not None:
self.conv_encoder = builder.build_backbone(conv_encoder)
self.load_text_embedding = load_text_embedding
self.decode_head.load_text_embedding = load_text_embedding
self.load_mcc_text_embedding = load_mcc_text_embedding
self.renorm_clip_img = renorm_clip_img
if renorm_clip_img:
print('Renormalize clip image.')
if self.load_mcc_text_embedding:
self.loaded_mcc_text_feat = np.load(self.load_mcc_text_embedding)
self.loaded_mcc_text_feat = torch.from_numpy(self.loaded_mcc_text_feat).float()
else:
raise NotImplementedError
if freeze_backbone:
self.freeze(self.backbone, exclude_keys=exclude_keys)
def renormalize_img_for_clip(self, img):
if not self.renorm_clip_img:
return img
loader_mean, loader_std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
clip_mean, clip_std = [0.48145466, 0.4578275, 0.40821073], [0.26862954, 0.26130258, 0.27577711]
loader_mean = torch.tensor(loader_mean, device=img.device).view(1, -1, 1, 1)
loader_std = torch.tensor(loader_std, device=img.device).view(1, -1, 1, 1)
clip_mean = torch.tensor(clip_mean, device=img.device).view(1, -1, 1, 1)
clip_std = torch.tensor(clip_std, device=img.device).view(1, -1, 1, 1)
return (img * loader_std + loader_mean - clip_mean) / clip_std
def freeze(self, model, exclude_keys=None):
for n, m in model.named_parameters():
m.requires_grad = False
if exclude_keys is not None:
assert isinstance(exclude_keys, list)
for k in exclude_keys:
if str(k) in n:
m.requires_grad = True
print(f'Finetune {n}')
def forward_maskclip(self, img, conf_tresh):
img = self.renormalize_img_for_clip(img)
self.clip_encoder.eval()
with torch.no_grad():
text_feat = self.loaded_mcc_text_feat.detach().to(img.device)
visual_feat, _ = self.clip_encoder(img)
visual_feat = visual_feat[-1]
dense_pred = F.conv2d(visual_feat, text_feat[:, :, None, None])
if dense_pred.shape[1] != self.num_classes:
| # Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
@SEGMENTORS.register_module()
class VLM(EncoderDecoder):
def __init__(self,
freeze_backbone=False,
exclude_keys=None,
load_text_embedding=None,
load_mcc_text_embedding=None,
load_pl_text_embedding=None,
clip_encoder=None,
conv_encoder=None,
maskclip_class_filter=None,
maskclip_trust_head=None,
renorm_clip_img=False,
**args):
super(VLM, self).__init__(**args)
assert load_text_embedding == load_pl_text_embedding
assert maskclip_class_filter is None
assert maskclip_trust_head is None
self.local_iter = 0
self.clip_encoder = None
if clip_encoder is not None:
self.clip_encoder = builder.build_backbone(clip_encoder)
self.conv_encoder = None
if conv_encoder is not None:
self.conv_encoder = builder.build_backbone(conv_encoder)
self.load_text_embedding = load_text_embedding
self.decode_head.load_text_embedding = load_text_embedding
self.load_mcc_text_embedding = load_mcc_text_embedding
self.renorm_clip_img = renorm_clip_img
if renorm_clip_img:
print('Renormalize clip image.')
if self.load_mcc_text_embedding:
self.loaded_mcc_text_feat = np.load(self.load_mcc_text_embedding)
self.loaded_mcc_text_feat = torch.from_numpy(self.loaded_mcc_text_feat).float()
else:
raise NotImplementedError
if freeze_backbone:
self.freeze(self.backbone, exclude_keys=exclude_keys)
def renormalize_img_for_clip(self, img):
if not self.renorm_clip_img:
return img
loader_mean, loader_std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
clip_mean, clip_std = [0.48145466, 0.4578275, 0.40821073], [0.26862954, 0.26130258, 0.27577711]
loader_mean = torch.tensor(loader_mean, device=img.device).view(1, -1, 1, 1)
loader_std = torch.tensor(loader_std, device=img.device).view(1, -1, 1, 1)
clip_mean = torch.tensor(clip_mean, device=img.device).view(1, -1, 1, 1)
clip_std = torch.tensor(clip_std, device=img.device).view(1, -1, 1, 1)
return (img * loader_std + loader_mean - clip_mean) / clip_std
def freeze(self, model, exclude_keys=None):
for n, m in model.named_parameters():
m.requires_grad = False
if exclude_keys is not None:
assert isinstance(exclude_keys, list)
for k in exclude_keys:
if str(k) in n:
m.requires_grad = True
print(f'Finetune {n}')
def forward_maskclip(self, img, conf_tresh):
img = self.renormalize_img_for_clip(img)
self.clip_encoder.eval()
with torch.no_grad():
text_feat = self.loaded_mcc_text_feat.detach().to(img.device)
visual_feat, _ = self.clip_encoder(img)
visual_feat = visual_feat[-1]
dense_pred = F.conv2d(visual_feat, text_feat[:, :, None, None])
if dense_pred.shape[1] != self.num_classes: | cls2con = get_class_to_concept_idxs(self.load_mcc_text_embedding) | 1 | 2023-11-02 14:49:38+00:00 | 2k |
ej52/hass-ollama-conversation | custom_components/ollama_conversation/api.py | [
{
"identifier": "TIMEOUT",
"path": "custom_components/ollama_conversation/const.py",
"snippet": "TIMEOUT = 60"
},
{
"identifier": "ApiClientError",
"path": "custom_components/ollama_conversation/exceptions.py",
"snippet": "class ApiClientError(HomeAssistantError):\n \"\"\"Exception to indicate a general API error.\"\"\""
},
{
"identifier": "ApiCommError",
"path": "custom_components/ollama_conversation/exceptions.py",
"snippet": "class ApiCommError(ApiClientError):\n \"\"\"Exception to indicate a communication error.\"\"\""
},
{
"identifier": "ApiJsonError",
"path": "custom_components/ollama_conversation/exceptions.py",
"snippet": "class ApiJsonError(ApiClientError):\n \"\"\"Exception to indicate an error with json response.\"\"\""
},
{
"identifier": "ApiTimeoutError",
"path": "custom_components/ollama_conversation/exceptions.py",
"snippet": "class ApiTimeoutError(ApiClientError):\n \"\"\"Exception to indicate a timeout error.\"\"\""
}
] | import asyncio
import socket
import aiohttp
import async_timeout
from .const import TIMEOUT
from .exceptions import (
ApiClientError,
ApiCommError,
ApiJsonError,
ApiTimeoutError
) | 692 | """Ollama API Client."""
from __future__ import annotations
class OllamaApiClient:
"""Ollama API Client."""
def __init__(
self,
base_url: str,
session: aiohttp.ClientSession,
) -> None:
"""Sample API Client."""
self._base_url = base_url.rstrip("/")
self._session = session
async def async_get_heartbeat(self) -> bool:
"""Get heartbeat from the API."""
response: str = await self._api_wrapper(
method="get", url=self._base_url, decode_json=False
)
return response.strip() == "Ollama is running"
async def async_get_models(self) -> any:
"""Get models from the API."""
return await self._api_wrapper(
method="get",
url=f"{self._base_url}/api/tags",
headers={"Content-type": "application/json; charset=UTF-8"},
)
async def async_generate(self, data: dict | None = None,) -> any:
"""Generate a completion from the API."""
return await self._api_wrapper(
method="post",
url=f"{self._base_url}/api/generate",
data=data,
headers={"Content-type": "application/json; charset=UTF-8"},
)
async def _api_wrapper(
self,
method: str,
url: str,
data: dict | None = None,
headers: dict | None = None,
decode_json: bool = True,
) -> any:
"""Get information from the API."""
try:
async with async_timeout.timeout(TIMEOUT):
response = await self._session.request(
method=method,
url=url,
headers=headers,
json=data,
)
if response.status == 404 and decode_json:
json = await response.json()
raise ApiJsonError(json["error"])
response.raise_for_status()
if decode_json:
return await response.json()
return await response.text()
except ApiJsonError as e:
raise e
except asyncio.TimeoutError as e:
raise ApiTimeoutError("timeout while talking to the server") from e
except (aiohttp.ClientError, socket.gaierror) as e:
| """Ollama API Client."""
from __future__ import annotations
class OllamaApiClient:
"""Ollama API Client."""
def __init__(
self,
base_url: str,
session: aiohttp.ClientSession,
) -> None:
"""Sample API Client."""
self._base_url = base_url.rstrip("/")
self._session = session
async def async_get_heartbeat(self) -> bool:
"""Get heartbeat from the API."""
response: str = await self._api_wrapper(
method="get", url=self._base_url, decode_json=False
)
return response.strip() == "Ollama is running"
async def async_get_models(self) -> any:
"""Get models from the API."""
return await self._api_wrapper(
method="get",
url=f"{self._base_url}/api/tags",
headers={"Content-type": "application/json; charset=UTF-8"},
)
async def async_generate(self, data: dict | None = None,) -> any:
"""Generate a completion from the API."""
return await self._api_wrapper(
method="post",
url=f"{self._base_url}/api/generate",
data=data,
headers={"Content-type": "application/json; charset=UTF-8"},
)
async def _api_wrapper(
self,
method: str,
url: str,
data: dict | None = None,
headers: dict | None = None,
decode_json: bool = True,
) -> any:
"""Get information from the API."""
try:
async with async_timeout.timeout(TIMEOUT):
response = await self._session.request(
method=method,
url=url,
headers=headers,
json=data,
)
if response.status == 404 and decode_json:
json = await response.json()
raise ApiJsonError(json["error"])
response.raise_for_status()
if decode_json:
return await response.json()
return await response.text()
except ApiJsonError as e:
raise e
except asyncio.TimeoutError as e:
raise ApiTimeoutError("timeout while talking to the server") from e
except (aiohttp.ClientError, socket.gaierror) as e: | raise ApiCommError("unknown error while talking to the server") from e | 2 | 2023-11-03 14:48:45+00:00 | 2k |
Zaczero/openstreetmap-ng | src/repositories/message_repository.py | [
{
"identifier": "DB",
"path": "src/db.py",
"snippet": "DB = async_sessionmaker(\n DB_ENGINE,\n expire_on_commit=False,\n)"
},
{
"identifier": "Message",
"path": "src/models/db/message.py",
"snippet": "class Message(Base.Sequential, CreatedAtMixin, RichTextMixin):\n __tablename__ = 'message'\n __rich_text_fields__ = (('body', TextFormat.markdown),)\n\n from_user_id: Mapped[int] = mapped_column(ForeignKey(User.id), nullable=False)\n from_user: Mapped[User] = relationship(foreign_keys=[from_user_id], lazy='raise')\n to_user_id: Mapped[int] = mapped_column(ForeignKey(User.id), nullable=False)\n to_user: Mapped[User] = relationship(foreign_keys=[to_user_id], lazy='raise')\n subject: Mapped[str] = mapped_column(UnicodeText, nullable=False)\n body: Mapped[str] = mapped_column(UnicodeText, nullable=False)\n body_rich_hash: Mapped[bytes | None] = mapped_column(LargeBinary(HASH_SIZE), nullable=True, default=None)\n body_rich: Mapped[CacheEntry | None] = relationship(\n CacheEntry,\n primaryjoin=CacheEntry.id == body_rich_hash,\n viewonly=True,\n default=None,\n lazy='raise',\n )\n\n # defaults\n is_read: Mapped[bool] = mapped_column(Boolean, nullable=False, default=False)\n from_hidden: Mapped[bool] = mapped_column(Boolean, nullable=False, default=False)\n to_hidden: Mapped[bool] = mapped_column(Boolean, nullable=False, default=False)\n\n @validates('body')\n def validate_body(self, _: str, value: str) -> str:\n if len(value) > MESSAGE_BODY_MAX_LENGTH:\n raise ValueError('Message is too long')\n return value\n\n @classmethod\n def from_email(cls, mail: EmailMessage, from_user_id: int, to_user_id: int) -> Self:\n \"\"\"\n Create a message instance from an email message.\n \"\"\"\n\n subject = mail.get('Subject')\n\n if not subject:\n raise ValueError('Message has no subject')\n\n def get_body(part: EmailMessage) -> str | None:\n content_type = part.get_content_type()\n if content_type == 'text/plain':\n payload: str = part.get_payload(decode=True).decode()\n return payload.strip()\n elif content_type == 'text/html':\n payload: str = part.get_payload(decode=True).decode()\n return BeautifulSoup(payload, 'html.parser').get_text(separator=' ').strip()\n else:\n return None\n\n if mail.is_multipart():\n body = None\n for part in mail.iter_parts():\n if body := get_body(part):\n break\n else:\n body = get_body(mail)\n\n if not body:\n raise ValueError('Message has no body')\n\n return cls(\n from_user_id=from_user_id,\n to_user_id=to_user_id,\n subject=subject,\n body=body, # TODO: body check etc.\n )"
}
] | from sqlalchemy import false, func, select
from src.db import DB
from src.models.db.message import Message | 808 |
class MessageRepository:
@staticmethod
async def count_received_by_user_id(user_id: int) -> tuple[int, int]:
"""
Count received messages by user id.
Returns a tuple of (total, unread).
"""
|
class MessageRepository:
@staticmethod
async def count_received_by_user_id(user_id: int) -> tuple[int, int]:
"""
Count received messages by user id.
Returns a tuple of (total, unread).
"""
| async with DB() as session: | 0 | 2023-11-04 01:12:13+00:00 | 2k |
codefuse-ai/Collinear-Constrained-Attention | data/multi_task_dataset.py | [
{
"identifier": "print_rank_0",
"path": "utils/common_utils.py",
"snippet": "def print_rank_0(*message):\n \"\"\"If distributed is initialized print only on rank 0.\"\"\"\n if torch.distributed.is_initialized():\n if torch.distributed.get_rank() == 0:\n print(*message, flush=True)\n else:\n print(*message, flush=True)"
},
{
"identifier": "TASK2ID",
"path": "utils/common_utils.py",
"snippet": "TASK2ID = {}"
},
{
"identifier": "ID2TASK",
"path": "utils/common_utils.py",
"snippet": "ID2TASK = {}"
},
{
"identifier": "get_local_rank",
"path": "utils/common_utils.py",
"snippet": "def get_local_rank():\n return atorch.local_rank()"
}
] | import os
import math
import json
import random
import time
import numpy as np
import torch
from functools import partial
from utils.common_utils import print_rank_0, TASK2ID, ID2TASK, get_local_rank
from data import helpers | 864 |
class SingleTaskDataset(torch.utils.data.Dataset):
def __init__(
self,
name,
data_prefix,
input_dataset,
# loss_mask_dataset,
# num_samples,
seq_length,
weighted_loss_mode=None,
ds_weight=1.0,
):
self.name = name
self.input_dataset = input_dataset
self.num_samples = len(self.input_dataset['input_ids'])
# self.loss_mask_dataset = loss_mask_dataset
self.seq_length = seq_length
self.weighted_loss_mode = weighted_loss_mode
self.ds_weight = ds_weight
self.task_name = data_prefix.split('/')[-1]
self.task_id = TASK2ID[self.task_name]
# Checks
def update_ds_weight(self, weight):
self.ds_weight = weight
def __len__(self):
return self.num_samples
def __getitem__(self, idx):
try:
# Get the shuffled index.
idx = idx % self.num_samples
idx_data = {key: self.input_dataset[key][idx]
for key in self.input_dataset}
if self.weighted_loss_mode:
idx_data["weight"] = np.array([self.ds_weight], dtype=np.float32)
idx_data["task_id"] = np.array([self.task_id], dtype=np.int)
return idx_data
else:
idx_data["task_id"] = np.array([self.task_id], dtype=np.int)
return idx_data
except IndexError:
new_idx = idx % len(self)
print(
f"WARNING: Got index out of bounds error with index {idx} - taking modulo of index instead ({new_idx})"
)
return self[new_idx]
class MultiTaskDataset(torch.utils.data.Dataset):
def __init__(self,
args,
dataset_type,
data_paths,
tokenizer,
max_input_length=550,
max_output_length=550,
max_length=1024,
no_append_glm_mask=False,
gpt_data=False,
world_size=1,
global_rank=0,
left_truncate=False,
shard_data=False,
**kwargs):
super().__init__()
self.args = args
self.dataset_type = dataset_type
self.mode = args.tokenize_mode
# self.seq_length = max_length
# self.max_seq_length = args.seq_length + 1
self.data_paths = data_paths
self.tokenizer = tokenizer
self.gpt_data = gpt_data
# if self.gpt_data:
# self.tokenizer.sop_token = "<|endoftext|>"
# self.tokenizer.eop_token = "<|endoftext|>"
# self.tokenizer.pad_token = "<|extratoken_1|>"
self.sop_id = self.tokenizer.convert_tokens_to_ids(
self.tokenizer.sop_token)
# self.eop_id = self.tokenizer.convert_tokens_to_ids(
# self.tokenizer.eop_token)
self.eop_id = self.tokenizer.convert_tokens_to_ids(
self.tokenizer.eos_token)
self.pad_id = self.tokenizer.convert_tokens_to_ids(
self.tokenizer.pad_token)
|
class SingleTaskDataset(torch.utils.data.Dataset):
def __init__(
self,
name,
data_prefix,
input_dataset,
# loss_mask_dataset,
# num_samples,
seq_length,
weighted_loss_mode=None,
ds_weight=1.0,
):
self.name = name
self.input_dataset = input_dataset
self.num_samples = len(self.input_dataset['input_ids'])
# self.loss_mask_dataset = loss_mask_dataset
self.seq_length = seq_length
self.weighted_loss_mode = weighted_loss_mode
self.ds_weight = ds_weight
self.task_name = data_prefix.split('/')[-1]
self.task_id = TASK2ID[self.task_name]
# Checks
def update_ds_weight(self, weight):
self.ds_weight = weight
def __len__(self):
return self.num_samples
def __getitem__(self, idx):
try:
# Get the shuffled index.
idx = idx % self.num_samples
idx_data = {key: self.input_dataset[key][idx]
for key in self.input_dataset}
if self.weighted_loss_mode:
idx_data["weight"] = np.array([self.ds_weight], dtype=np.float32)
idx_data["task_id"] = np.array([self.task_id], dtype=np.int)
return idx_data
else:
idx_data["task_id"] = np.array([self.task_id], dtype=np.int)
return idx_data
except IndexError:
new_idx = idx % len(self)
print(
f"WARNING: Got index out of bounds error with index {idx} - taking modulo of index instead ({new_idx})"
)
return self[new_idx]
class MultiTaskDataset(torch.utils.data.Dataset):
def __init__(self,
args,
dataset_type,
data_paths,
tokenizer,
max_input_length=550,
max_output_length=550,
max_length=1024,
no_append_glm_mask=False,
gpt_data=False,
world_size=1,
global_rank=0,
left_truncate=False,
shard_data=False,
**kwargs):
super().__init__()
self.args = args
self.dataset_type = dataset_type
self.mode = args.tokenize_mode
# self.seq_length = max_length
# self.max_seq_length = args.seq_length + 1
self.data_paths = data_paths
self.tokenizer = tokenizer
self.gpt_data = gpt_data
# if self.gpt_data:
# self.tokenizer.sop_token = "<|endoftext|>"
# self.tokenizer.eop_token = "<|endoftext|>"
# self.tokenizer.pad_token = "<|extratoken_1|>"
self.sop_id = self.tokenizer.convert_tokens_to_ids(
self.tokenizer.sop_token)
# self.eop_id = self.tokenizer.convert_tokens_to_ids(
# self.tokenizer.eop_token)
self.eop_id = self.tokenizer.convert_tokens_to_ids(
self.tokenizer.eos_token)
self.pad_id = self.tokenizer.convert_tokens_to_ids(
self.tokenizer.pad_token) | print_rank_0(f'self.tokenizer.sop_token {self.tokenizer.sop_token} id: {self.sop_id}') | 0 | 2023-11-02 01:37:01+00:00 | 2k |
rezaakb/pinns-tf2 | pinnstf2/models/pinn_module.py | [
{
"identifier": "gradient",
"path": "pinnstf2/utils/gradient.py",
"snippet": "def gradient(dy, dx, grad_ys=None):\n if grad_ys is None:\n dy_dx = tf.gradients(dy, dx)\n else:\n dy_dx = tf.gradients(dy, dx, grad_ys=grad_ys)\n if len(dy_dx)==1:\n dy_dx = dy_dx[0]\n return dy_dx"
},
{
"identifier": "fwd_gradient",
"path": "pinnstf2/utils/gradient.py",
"snippet": "def fwd_gradient(dy, dx):\n dummy = tf.ones_like(dy)\n G = tf.gradients(dy, dx, grad_ys=dummy)[0]\n Y_x = tf.gradients(G, dummy)[0]\n return Y_x"
},
{
"identifier": "fix_extra_variables",
"path": "pinnstf2/utils/module_fn.py",
"snippet": "def fix_extra_variables(trainable_variables, extra_variables, dtype):\n \"\"\"Convert extra variables to tf tensors with gradient tracking. These variables are\n trainables in inverse problems.\n\n :param extra_variables: Dictionary of extra variables to be converted.\n :return: Dictionary of converted extra variables as tf tensors with gradients.\n \"\"\"\n \n if extra_variables is None:\n return trainable_variables, None\n extra_variables_dict = {}\n for key in extra_variables:\n variable = tf.Variable(extra_variables[key], dtype=tf.float32, trainable=True)\n extra_variables_dict[key] = variable\n trainable_variables.append(variable)\n return trainable_variables, extra_variables_dict"
},
{
"identifier": "sse",
"path": "pinnstf2/utils/module_fn.py",
"snippet": "def sse(loss: tf.Tensor,\n preds: Dict[str, tf.Tensor],\n target: Union[Dict[str, tf.Tensor], None] = None,\n keys: Union[List[str], None] = None,\n mid: Union[int, None] = None) -> tf.Tensor:\n \"\"\"Calculate the sum of squared errors (SSE) loss for given predictions and optional targets.\n\n :param loss: Loss variable.\n :param preds: Dictionary containing prediction tensors for different keys.\n :param target: Dictionary containing target tensors (optional).\n :param keys: List of keys for which to calculate SSE loss (optional).\n :param mid: Index to separate predictions for mid-point calculation (optional).\n :return: Calculated SSE loss.\n \"\"\"\n \n if keys is None:\n return loss\n\n for key in keys:\n if target is None and mid is None:\n loss = loss + tf.reduce_sum(tf.square(preds[key]))\n elif target is None and mid is not None:\n loss = loss + tf.reduce_sum(tf.square(preds[key][:mid] - preds[key][mid:]))\n elif target is not None:\n loss = loss + tf.reduce_sum(tf.square(preds[key] - target[key]))\n\n return loss"
},
{
"identifier": "mse",
"path": "pinnstf2/utils/module_fn.py",
"snippet": "def mse(loss: tf.Tensor,\n preds: Dict[str, tf.Tensor],\n target: Union[Dict[str, tf.Tensor], None] = None,\n keys: Union[List[str], None] = None,\n mid: Union[int, None] = None) -> tf.Tensor:\n \"\"\"Calculate the mean squared error (MSE) loss for given predictions and optional targets.\n\n :param loss: Loss variable.\n :param preds: Dictionary containing prediction tensors for different keys.\n :param target: Dictionary containing target tensors (optional).\n :param keys: List of keys for which to calculate SSE loss (optional).\n :param mid: Index to separate predictions for mid-point calculation (optional).\n :return: Calculated MSE loss.\n \"\"\"\n \n if keys is None:\n return loss\n\n for key in keys:\n if target is None:\n loss = loss + tf.reduce_mean(tf.square(preds[key]))\n elif target is None and mid is not None:\n loss = loss + tf.reduce_mean(tf.square(preds[key][:mid] - preds[key][mid:]))\n elif target is not None:\n loss = loss + tf.reduce_mean(tf.square(preds[key] - target[key]))\n\n return loss"
},
{
"identifier": "relative_l2_error",
"path": "pinnstf2/utils/module_fn.py",
"snippet": "def relative_l2_error(preds, target):\n \"\"\"Calculate the relative L2 error between predictions and target tensors.\n\n :param preds: Predicted tensors.\n :param target: Target tensors.\n :return: Relative L2 error value.\n \"\"\"\n \n #return tf.sqrt(tf.reduce_mean(tf.square(preds - target))/tf.reduce_mean(tf.square(target)))\n return tf.sqrt(tf.reduce_mean(tf.square(preds - target))/tf.reduce_mean(tf.square(target - tf.reduce_mean(target))))"
}
] | from typing import List, Dict, Callable, Any, Tuple, Union
from pinnstf2.utils import fwd_gradient, gradient
from pinnstf2.utils import (
fix_extra_variables,
mse,
relative_l2_error,
sse
)
import tensorflow as tf
import sys, os, logging, time | 1,554 |
class PINNModule:
def __init__(
self,
net,
pde_fn: Callable[[Any, ...], tf.Tensor],
optimizer: tf.keras.optimizers.Adam = tf.keras.optimizers.Adam,
loss_fn: str = "sse",
extra_variables: Dict[str, Any] = None,
output_fn: Callable[[Any, ...], tf.Tensor] = None,
runge_kutta=None,
jit_compile: bool = True,
amp: bool = False,
dtype: str = 'float32'
) -> None:
"""
Initialize a `PINNModule`.
:param net: The neural network model to be used for approximating solutions.
:param pde_fn: The partial differential equation (PDE) function defining the PDE to solve.
:param optimizer: The optimizer used for training the neural network.
:param loss_fn: The name of the loss function to be used. Default is 'sse' (sum of squared errors).
:param extra_variables: Additional variables used in the model, provided as a dictionary. Default is None.
:param output_fn: A function applied to the output of the network, for post-processing or transformations.
:param runge_kutta: An optional Runge-Kutta method implementation for solving discrete problems. Default is None.
:param jit_compile: If True, TensorFlow's JIT compiler will be used for optimizing computations. Default is True.
:param amp: Automatic mixed precision (amp) for optimizing training performance. Default is False.
:param dtype: Data type to be used for the computations. Default is 'float32'.
"""
super().__init__()
self.net = net
self.tf_dtype = tf.as_dtype(dtype)
if hasattr(self.net, 'model'):
self.trainable_variables = self.net.model.trainable_variables
else:
self.trainable_variables = self.net.trainable_variables
(self.trainable_variables,
|
class PINNModule:
def __init__(
self,
net,
pde_fn: Callable[[Any, ...], tf.Tensor],
optimizer: tf.keras.optimizers.Adam = tf.keras.optimizers.Adam,
loss_fn: str = "sse",
extra_variables: Dict[str, Any] = None,
output_fn: Callable[[Any, ...], tf.Tensor] = None,
runge_kutta=None,
jit_compile: bool = True,
amp: bool = False,
dtype: str = 'float32'
) -> None:
"""
Initialize a `PINNModule`.
:param net: The neural network model to be used for approximating solutions.
:param pde_fn: The partial differential equation (PDE) function defining the PDE to solve.
:param optimizer: The optimizer used for training the neural network.
:param loss_fn: The name of the loss function to be used. Default is 'sse' (sum of squared errors).
:param extra_variables: Additional variables used in the model, provided as a dictionary. Default is None.
:param output_fn: A function applied to the output of the network, for post-processing or transformations.
:param runge_kutta: An optional Runge-Kutta method implementation for solving discrete problems. Default is None.
:param jit_compile: If True, TensorFlow's JIT compiler will be used for optimizing computations. Default is True.
:param amp: Automatic mixed precision (amp) for optimizing training performance. Default is False.
:param dtype: Data type to be used for the computations. Default is 'float32'.
"""
super().__init__()
self.net = net
self.tf_dtype = tf.as_dtype(dtype)
if hasattr(self.net, 'model'):
self.trainable_variables = self.net.model.trainable_variables
else:
self.trainable_variables = self.net.trainable_variables
(self.trainable_variables, | self.extra_variables) = fix_extra_variables(self.trainable_variables, extra_variables, self.tf_dtype) | 2 | 2023-11-01 03:25:51+00:00 | 2k |
djinni-co/djinni-inbox-test | app/sandbox/views.py | [
{
"identifier": "Recruiter",
"path": "app/sandbox/models.py",
"snippet": "class Recruiter(models.Model):\n USERTYPE = \"recruiter\"\n\n name = models.CharField(max_length=255, blank=True, default='')\n email = models.EmailField(blank=False, db_index=True, unique=True)\n picture_url = models.CharField(max_length=255, blank=True, default='', null=True)\n\n # Profile settings\n lang = models.CharField(max_length=10, blank=True, default='EN')\n\n # Meta fields\n last_updated = models.DateTimeField(blank=True, null=True)\n last_seen = models.DateTimeField(blank=True, null=True, db_index=True)\n signup_date = models.DateTimeField(auto_now_add=True)"
},
{
"identifier": "MessageThread",
"path": "app/sandbox/models.py",
"snippet": "class MessageThread(models.Model):\n class MatchReason(models.TextChoices):\n MATCHED = \"matched_v1\"\n RECRUITER_POKED = \"recruiter_poked\"\n RECRUITER_SHORTLISTED = \"recruiter_shortlisted\"\n RECRUITER_ANSWERED = \"recruiter_answered\"\n RECRUITER_ARCHIVED = \"recruiter_archived\"\n NO_RELATED_JOB = \"no_related_job\"\n ACCEPTED = \"accepted\"\n DECLINED = \"declined\"\n\n is_anonymous = models.BooleanField(default=True)\n iou_bonus = models.IntegerField(blank=True, default=0)\n last_sender = models.CharField(\n max_length=40, blank=False, choices=Message.Sender.choices\n )\n first_message = models.CharField(max_length=16, blank=False, choices=Action.choices)\n bucket = models.CharField(max_length=40, default=Bucket.INBOX, db_index=True)\n\n candidate_archived = models.BooleanField(blank=True, default=False)\n candidate_favorite = models.BooleanField(blank=True, null=True, db_index=True)\n feedback_candidate = models.CharField(max_length=20, blank=True, default=\"\")\n\n recruiter_favorite = models.BooleanField(blank=True, null=True, db_index=True)\n feedback_recruiter = models.CharField(max_length=20, blank=True, default=\"\")\n notified_notinterested = models.DateTimeField(blank=True, null=True, db_index=True)\n\n job = models.ForeignKey(\"JobPosting\", on_delete=models.SET_NULL, null=True, blank=True)\n candidate = models.ForeignKey(\"Candidate\", on_delete=models.CASCADE)\n recruiter = models.ForeignKey(\"Recruiter\", on_delete=models.CASCADE)\n\n last_updated = models.DateTimeField(blank=False, db_index=True)\n last_seen_recruiter = models.DateTimeField(null=True)\n last_seen_candidate = models.DateTimeField(null=True)\n created = models.DateTimeField(auto_now_add=True)\n\n @property\n def last_message(self):\n return self.message_set.last()\n\n class Meta:\n ordering = (\"-last_updated\",)\n unique_together = (Message.Sender.CANDIDATE, Message.Sender.RECRUITER)"
}
] | from django.http import HttpResponse
from django.db.models import Count, Q
from django.shortcuts import render
from .models import Recruiter, MessageThread | 736 |
# Hardcode for logged in as recruiter
RECRUITER_ID = 125528
def inbox(request):
recruiter = Recruiter.objects.get(id = RECRUITER_ID)
|
# Hardcode for logged in as recruiter
RECRUITER_ID = 125528
def inbox(request):
recruiter = Recruiter.objects.get(id = RECRUITER_ID) | threads = MessageThread.objects.filter(recruiter = recruiter).select_related('candidate', 'job') | 1 | 2023-11-02 15:12:54+00:00 | 2k |
XinyuanWangCS/PromptAgent | src/prompt_optim_agent/world_model/beam_world_model.py | [
{
"identifier": "eval_instruction_with_loader",
"path": "src/prompt_optim_agent/test_helper.py",
"snippet": "def eval_instruction_with_loader(task, eval_prompt, dataloader, model='gpt-3.5-turbo', temperature=0, record_outputs=True):\n '''\n evaluate cur_prompt on task testing dataset\n '''\n \n build_forward_prompts_func = task.build_forward_prompts_completion\n if model in COMPLETION_MODELS:\n batch_forward_func = batch_forward_completion\n elif model in CHAT_COMPLETION_MODELS:\n batch_forward_func = batch_forward_chatcompletion\n elif model in PALM_MODELS:\n batch_forward_func = batch_forward_chatcompletion_palm\n else:\n raise ValueError(f\"Model {model} not supported.\")\n \n all_questions = []\n all_labels = []\n all_preds = []\n all_prompts = []\n all_responses = []\n eval_output = {}\n \n pbar = tqdm(dataloader, leave=False)\n for batch in pbar:\n batch_prompts = build_forward_prompts_func(batch['question'], eval_prompt)\n responses = batch_forward_func(batch_prompts, model=model, temperature=temperature)\n preds = task.batch_clean_responses(responses)\n labels = task.clean_labels(batch['answer'])\n all_preds.extend(preds)\n all_labels.extend(labels)\n all_questions.extend(batch['question'])\n if record_outputs:\n all_prompts.extend(batch_prompts)\n all_responses.extend(responses)\n metric = task.cal_metric(all_preds, all_labels, all_questions)\n if not isinstance(metric, tuple):\n pbar.set_postfix_str(f\"Test Metric: {metric:.4f}\")\n else:\n pbar.set_postfix_str(f\"Test Metrics: {metric}\")\n \n if record_outputs:\n eval_output['model_inputs'] = all_prompts\n eval_output['model_responses'] = all_responses\n eval_output['preds'] = all_preds\n eval_output['labels'] = all_labels\n eval_output['correct'] = task.cal_correct(all_preds, all_labels) \n metric = task.cal_metric(all_preds, all_labels, all_questions)\n return metric, eval_output"
},
{
"identifier": "State",
"path": "src/prompt_optim_agent/search_algo/base_algo.py",
"snippet": "class SearchAlgo(ABC):\n def __init__(self, \n task,\n world_model, \n action_agent,\n logger=None, \n seed=0, \n print_log=True,\n test_every_step=True,\n depth_limit = None,\n ) -> None:\n def search(self):\n def get_states(self):\n def process_all_correct_batch(self):"
},
{
"identifier": "BeamNode",
"path": "src/prompt_optim_agent/search_algo/beam_search.py",
"snippet": "class BeamNode(Generic[State, Action]):\n id_iter = itertools.count()\n\n @classmethod\n def reset_id(cls):\n cls.id_iter = itertools.count()\n\n def __init__(self, \n prompt: str, \n action: str = None,\n parent: \"Optional[BeamNode]\" = None,\n ):\n\n self.id = next(BeamNode.id_iter)\n self.prompt = prompt\n self.test_metric = -1.0\n self.eval_metric = 0. \n self.action = action\n self.parent = parent\n self.children: 'Optional[list[BeamNode]]' = []\n\n if parent is None:\n self.depth = 0\n else:\n self.depth = parent.depth + 1\n \n def to_dict(self):\n if self.parent is None:\n p_id = -1\n else:\n p_id = self.parent.id\n \n return {\n 'id': self.id,\n 'depth':self.depth,\n 'parent':p_id,\n 'eval_metric': self.eval_metric,\n 'test_metric': self.test_metric,\n 'prompt':self.prompt,\n }"
},
{
"identifier": "gpt_chat_completion",
"path": "src/prompt_optim_agent/utils.py",
"snippet": "def gpt_chat_completion(**kwargs):\n backoff_time = 1\n while True:\n try:\n return openai.ChatCompletion.create(**kwargs)\n except openai.error.OpenAIError:\n print(openai.error.OpenAIError, f' Sleeping {backoff_time} seconds...')\n time.sleep(backoff_time)\n backoff_time *= 1.5"
}
] | from .gradient_descent import *
from typing import NamedTuple
from ..test_helper import eval_instruction_with_loader
from typing import Generic
from ..search_algo.base_algo import State, Action
from ..search_algo.beam_search import BeamNode
from ..utils import gpt_chat_completion | 1,462 |
class BeamSearchWorldModel(Generic[State, Action]):
def __init__(
self,
task,
logger,
# model
pred_model: str,
optim_model: str,
pred_temperature: float,
optim_temperature: float,
prompt_length_limit:int,
num_new_prompts = 3,
train_shuffle = True,
train_batch_size: int = 5,
test_batch_size: int = 1,
eval_batch_size: int = 1,
**kwargs
) -> None:
self.task = task
self.logger = logger
self.pred_model = pred_model
self.optim_model = optim_model
self.pred_temperature=pred_temperature
self.optim_temperature = optim_temperature
self.train_dataloader = self.task.get_dataloader('train',
batch_size=train_batch_size,
shuffle=train_shuffle)
self.train_data_iterator = self._infinite_data_loader(self.train_dataloader)
self.test_dataloader = self.task.get_dataloader('test',
batch_size=test_batch_size,
shuffle=False)
self.eval_dataloader = self.task.get_dataloader('eval',
batch_size=eval_batch_size,
shuffle=False)
self.gradient_descent = GradientDescent(task=self.task,
logger=self.logger,
pred_model=pred_model,
optim_model=optim_model,
num_new_prompts = num_new_prompts,
forward_temperature=pred_temperature,
optim_temperature = optim_temperature,
prompt_length_limit=prompt_length_limit)
def _infinite_data_loader(self, data_loader):
while True:
for batch in data_loader:
yield batch
def get_train_batch(self):
return next(self.train_data_iterator)
|
class BeamSearchWorldModel(Generic[State, Action]):
def __init__(
self,
task,
logger,
# model
pred_model: str,
optim_model: str,
pred_temperature: float,
optim_temperature: float,
prompt_length_limit:int,
num_new_prompts = 3,
train_shuffle = True,
train_batch_size: int = 5,
test_batch_size: int = 1,
eval_batch_size: int = 1,
**kwargs
) -> None:
self.task = task
self.logger = logger
self.pred_model = pred_model
self.optim_model = optim_model
self.pred_temperature=pred_temperature
self.optim_temperature = optim_temperature
self.train_dataloader = self.task.get_dataloader('train',
batch_size=train_batch_size,
shuffle=train_shuffle)
self.train_data_iterator = self._infinite_data_loader(self.train_dataloader)
self.test_dataloader = self.task.get_dataloader('test',
batch_size=test_batch_size,
shuffle=False)
self.eval_dataloader = self.task.get_dataloader('eval',
batch_size=eval_batch_size,
shuffle=False)
self.gradient_descent = GradientDescent(task=self.task,
logger=self.logger,
pred_model=pred_model,
optim_model=optim_model,
num_new_prompts = num_new_prompts,
forward_temperature=pred_temperature,
optim_temperature = optim_temperature,
prompt_length_limit=prompt_length_limit)
def _infinite_data_loader(self, data_loader):
while True:
for batch in data_loader:
yield batch
def get_train_batch(self):
return next(self.train_data_iterator)
| def _get_trajectory_prompts(self, node: BeamNode): | 2 | 2023-11-03 19:14:00+00:00 | 2k |
evaluable-ai/auto-eval | evaluableai/models/candidate_models/null_model.py | [
{
"identifier": "InputRow",
"path": "evaluableai/data_model/input_row_object.py",
"snippet": "class InputRow:\n def __init__(self, input_text, context, input_id=None):\n self._input_id = input_id if input_id is not None else uuid.uuid4()\n self._input_text = input_text\n self._context = context\n\n def __repr__(self):\n return (f\"InputObject(input_text={repr(self._input_text)}, \"\n f\"context={repr(self._context)}, input_id={repr(self._input_id)})\")\n\n @property\n def input_id(self):\n return self._input_id\n\n @input_id.setter\n def input_id(self, value):\n raise ValueError(\"input_id cannot be changed once set.\")\n\n @property\n def input_text(self):\n return self._input_text\n\n @input_text.setter\n def input_text(self, value):\n self._input_text = value\n\n @property\n def context(self):\n return self._context\n\n @context.setter\n def context(self, value):\n self._context = value\n\n @classmethod\n def from_csv(cls, csv_file_path, text_column, context_column, id_column=None):\n df = pd.read_csv(csv_file_path)\n return cls.from_dataframe(df, text_column, context_column, id_column)\n\n @classmethod\n def from_dataframe(cls, dataframe, text_column, context_column, id_column=None):\n input_objects = []\n for index, row in dataframe.iterrows():\n # Use the id_column if it's provided and not null, otherwise generate a new UUID\n input_id = row[id_column] if id_column and not pd.isnull(row[id_column]) else None\n input_object = cls(input_text=row[text_column], context=row[context_column], input_id=input_id)\n input_objects.append(input_object)\n return input_objects\n\n def __str__(self):\n # Convert the dictionary to a JSON string\n return self.to_dict()\n\n def to_dict(self):\n \"\"\"Converts the object properties to a dictionary.\"\"\"\n return {\n 'input_id': str(self._input_id), # Convert UUID to string\n 'input_text': self._input_text,\n 'context': self._context\n }"
},
{
"identifier": "ModelResponseObject",
"path": "evaluableai/data_model/model_response_object.py",
"snippet": "class ModelResponseObject:\n def __init__(self, response_id, response_text, input_row, model):\n self._response_id = response_id\n self._response_text = response_text\n self._input_row = input_row\n self._model = model\n\n @property\n def response_text(self):\n return self._response_text\n\n def get_candidate_model_name(self):\n return self._model.model_name\n\n def get_candidate_model_version(self):\n return self._model.model_version\n\n def get_input_text(self):\n return self._input_row.input_text\n\n def get_input_context(self):\n return self._input_row.context\n\n def get_input_id(self):\n return self._input_row.input_id\n\n def __repr__(self):\n return (f\"ModelResponseObject(response_id={repr(self._response_id)}, \"\n f\"response_text={repr(self._response_text)}, \"\n f\"input_object={repr(self._input_row)}, \"\n f\"model={repr(self._model)})\")\n\n def __str__(self):\n return self.to_dict()\n\n def to_dict(self):\n \"\"\"Converts the object properties to a dictionary.\"\"\"\n return {\n 'response_id': str(self._response_id),\n 'response_text': self._response_text,\n 'input_row': self._input_row.to_dict(),\n 'model_name': self.get_candidate_model_name(),\n 'model_version': self.get_candidate_model_version()\n }"
}
] | import json
import logging
import uuid
from evaluableai.data_model.input_row_object import InputRow
from evaluableai.data_model.model_response_object import ModelResponseObject | 1,108 |
# Make sure to import InputRow if it's a separate class
class NullModel:
def __init__(self, model_name, model_version):
self._model_name = model_name
self._model_version = model_version
@property
def model_name(self):
return self._model_name
@property
def model_version(self):
return self._model_version
@classmethod
def from_json_array(cls, json_array):
data_array = [cls.parse_json(item) for item in json_array]
response_objects = []
for data in data_array:
input_text = data.get('input', '')
context = data.get('context', '')
|
# Make sure to import InputRow if it's a separate class
class NullModel:
def __init__(self, model_name, model_version):
self._model_name = model_name
self._model_version = model_version
@property
def model_name(self):
return self._model_name
@property
def model_version(self):
return self._model_version
@classmethod
def from_json_array(cls, json_array):
data_array = [cls.parse_json(item) for item in json_array]
response_objects = []
for data in data_array:
input_text = data.get('input', '')
context = data.get('context', '') | input_row = InputRow(input_text, context) # Assuming InputRow is imported | 0 | 2023-11-06 01:26:17+00:00 | 2k |
allenai/wimbd | wimbd/contamination/promptsource_parse.py | [
{
"identifier": "INCLUDED_USERS",
"path": "wimbd/contamination/templates.py",
"snippet": "INCLUDED_USERS = {\"Zaid\", \"craffel\"}"
},
{
"identifier": "TemplateCollection",
"path": "wimbd/contamination/templates.py",
"snippet": "class TemplateCollection:\n \"\"\"\n This helper class wraps the DatasetTemplates class\n - Initialized the DatasetTemplates for all existing template folder\n - Give access to each DatasetTemplates\n - Provides aggregated counts over all DatasetTemplates\n \"\"\"\n\n def __init__(self):\n\n # Dict of all the DatasetTemplates, key is the tuple (dataset_name, subset_name)\n self.datasets_templates: Dict[(str, Optional[str]), DatasetTemplates] = self._collect_datasets()\n\n @property\n def keys(self):\n return list(self.datasets_templates.keys())\n\n def __len__(self) -> int:\n return len(self.datasets_templates)\n\n def remove(self, dataset_name: str, subset_name: Optional[str] = None) -> None:\n del self.datasets_templates[dataset_name, subset_name]\n\n def _collect_datasets(self) -> Dict[Tuple[str, str], \"DatasetTemplates\"]:\n \"\"\"\n Initialize a DatasetTemplates object for each templates.yaml detected in the templates folder\n\n Returns: a dict with key=(dataset_name, subset_name)\n \"\"\"\n dataset_folders = os.listdir(TEMPLATES_FOLDER_PATH)\n dataset_folders = [folder for folder in dataset_folders if not folder.startswith(\".\")]\n\n output = {} # format is {(dataset_name, subset_name): DatasetsTemplates}\n for dataset in dataset_folders:\n if dataset in INCLUDED_USERS:\n for filename in os.listdir(os.path.join(TEMPLATES_FOLDER_PATH, dataset)):\n output = {**output, **self._collect_dataset(dataset + \"/\" + filename)}\n else:\n output = {**output, **self._collect_dataset(dataset)}\n return output\n\n def _collect_dataset(self, dataset):\n output = {} # format is {(dataset_name, subset_name): DatasetsTemplates}\n for filename in os.listdir(os.path.join(TEMPLATES_FOLDER_PATH, dataset)):\n if filename.endswith(\".yaml\"):\n # If there is no sub-folder, there is no subset for this dataset\n output[(dataset, None)] = DatasetTemplates(dataset)\n else:\n # This is a subfolder, and its name corresponds to the subset name\n output[(dataset, filename)] = DatasetTemplates(dataset_name=dataset, subset_name=filename)\n return output\n\n def get_dataset(self, dataset_name: str, subset_name: Optional[str] = None) -> \"DatasetTemplates\":\n \"\"\"\n Return the DatasetTemplates object corresponding to the dataset name\n\n :param dataset_name: name of the dataset to get\n :param subset_name: name of the subset\n \"\"\"\n # if the dataset does not exist, we add it\n if dataset_name not in self.keys:\n self.datasets_templates[(dataset_name, subset_name)] = DatasetTemplates(dataset_name, subset_name)\n\n return self.datasets_templates[(dataset_name, subset_name)]\n\n def get_templates_count(self) -> Dict:\n \"\"\"\n Return the overall number count over all datasets\n\n NB: we don't breakdown datasets into subsets for the count, i.e subsets count are included\n into the dataset count\n \"\"\"\n\n count_dict = defaultdict(int)\n for k, v in self.datasets_templates.items():\n # Subsets count towards dataset count\n count_dict[k[0]] += len(v)\n # converting to regular dict\n return dict(count_dict)"
},
{
"identifier": "get_dataset",
"path": "wimbd/contamination/utils.py",
"snippet": "def get_dataset(path, conf=None):\n \"Get a dataset from name and conf.\"\n try:\n return datasets.load_dataset(path, conf)\n except datasets.builder.ManualDownloadError:\n cache_root_dir = (\n os.environ[\"PROMPTSOURCE_MANUAL_DATASET_DIR\"]\n if \"PROMPTSOURCE_MANUAL_DATASET_DIR\" in os.environ\n else \"DEFAULT_PROMPTSOURCE_CACHE_HOME\"\n )\n data_dir = f\"{cache_root_dir}/{path}\" if conf is None else f\"{cache_root_dir}/{path}/{conf}\"\n try:\n return datasets.load_dataset(\n path,\n conf,\n data_dir=data_dir,\n )\n except Exception as err:\n raise err\n except Exception as err:\n raise err"
}
] | import argparse
import csv
import re
from glob import glob
from wimbd.contamination.templates import INCLUDED_USERS, TemplateCollection
from wimbd.contamination.utils import get_dataset | 1,194 |
def main():
parse = argparse.ArgumentParser("")
parse.add_argument("--path", type=str)
parse.add_argument("--out_file", type=str)
args = parse.parse_args()
datasets = []
for path in glob(args.path + '/**/templates.yaml', recursive=True):
datasets.append(path)
with open(args.out_file, 'w', newline='') as f:
writer = csv.writer(f, delimiter='\t', lineterminator='\n')
for dataset in datasets:
path = dataset.split('/')
dataset_name = path[8]
subset_name = path[9] if len(path) == 11 else ''
|
def main():
parse = argparse.ArgumentParser("")
parse.add_argument("--path", type=str)
parse.add_argument("--out_file", type=str)
args = parse.parse_args()
datasets = []
for path in glob(args.path + '/**/templates.yaml', recursive=True):
datasets.append(path)
with open(args.out_file, 'w', newline='') as f:
writer = csv.writer(f, delimiter='\t', lineterminator='\n')
for dataset in datasets:
path = dataset.split('/')
dataset_name = path[8]
subset_name = path[9] if len(path) == 11 else ''
| template_collection = TemplateCollection() | 1 | 2023-11-08 18:18:41+00:00 | 2k |
kakaobrain/cxr-clip | cxrclip/data/datasets/imagetext_eval.py | [
{
"identifier": "load_transform",
"path": "cxrclip/data/data_utils.py",
"snippet": "def load_transform(split: str = \"train\", transform_config: Dict = None):\n assert split in {\"train\", \"valid\", \"test\", \"aug\"}\n\n config = []\n if transform_config:\n if split in transform_config:\n config = transform_config[split]\n image_transforms = []\n\n for name in config:\n if hasattr(transforms, name):\n tr_ = getattr(transforms, name)\n else:\n tr_ = getattr(albumentations, name)\n tr = tr_(**config[name])\n image_transforms.append(tr)\n\n return image_transforms"
},
{
"identifier": "transform_image",
"path": "cxrclip/data/data_utils.py",
"snippet": "def transform_image(image_transforms, image: Union[Image.Image, np.ndarray], normalize=\"huggingface\"):\n for tr in image_transforms:\n if isinstance(tr, albumentations.BasicTransform):\n image = np.array(image) if not isinstance(image, np.ndarray) else image\n image = tr(image=image)[\"image\"]\n else:\n image = transforms.ToPILImage()(image) if not isinstance(image, Image.Image) else image\n image = tr(image)\n\n if normalize == \"huggingface\":\n image = transforms.ToTensor()(image)\n image = transforms.Normalize(mean=[0.5] * 3, std=[0.5] * 3)(image)\n\n elif normalize == \"imagenet\":\n image = transforms.ToTensor()(image)\n image = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])(image)\n\n else:\n raise KeyError(f\"Not supported Normalize: {normalize}\")\n\n return image"
},
{
"identifier": "CHEXPERT_CLASS_PROMPTS",
"path": "cxrclip/prompt/constants.py",
"snippet": "CHEXPERT_CLASS_PROMPTS = {\n \"Atelectasis\": {\n \"severity\": [\"\", \"mild\", \"minimal\"],\n \"subtype\": [\n \"subsegmental atelectasis\",\n \"linear atelectasis\",\n \"trace atelectasis\",\n \"bibasilar atelectasis\",\n \"retrocardiac atelectasis\",\n \"bandlike atelectasis\",\n \"residual atelectasis\",\n ],\n \"location\": [\n \"at the mid lung zone\",\n \"at the upper lung zone\",\n \"at the right lung zone\",\n \"at the left lung zone\",\n \"at the lung bases\",\n \"at the right lung base\",\n \"at the left lung base\",\n \"at the bilateral lung bases\",\n \"at the left lower lobe\",\n \"at the right lower lobe\",\n ],\n },\n \"Cardiomegaly\": {\n \"severity\": [\"\"],\n \"subtype\": [\n \"cardiac silhouette size is upper limits of normal\",\n \"cardiomegaly which is unchanged\",\n \"mildly prominent cardiac silhouette\",\n \"portable view of the chest demonstrates stable cardiomegaly\",\n \"portable view of the chest demonstrates mild cardiomegaly\",\n \"persistent severe cardiomegaly\",\n \"heart size is borderline enlarged\",\n \"cardiomegaly unchanged\",\n \"heart size is at the upper limits of normal\",\n \"redemonstration of cardiomegaly\",\n \"ap erect chest radiograph demonstrates the heart size is the upper limits of normal\",\n \"cardiac silhouette size is mildly enlarged\",\n \"mildly enlarged cardiac silhouette, likely left ventricular enlargement. other chambers are less prominent\",\n \"heart size remains at mildly enlarged\",\n \"persistent cardiomegaly with prominent upper lobe vessels\",\n ],\n \"location\": [\"\"],\n },\n \"Consolidation\": {\n \"severity\": [\"\", \"increased\", \"improved\", \"appearance of\"],\n \"subtype\": [\n \"bilateral consolidation\",\n \"reticular consolidation\",\n \"retrocardiac consolidation\",\n \"patchy consolidation\",\n \"airspace consolidation\",\n \"partial consolidation\",\n ],\n \"location\": [\n \"at the lower lung zone\",\n \"at the upper lung zone\",\n \"at the left lower lobe\",\n \"at the right lower lobe\",\n \"at the left upper lobe\",\n \"at the right uppper lobe\",\n \"at the right lung base\",\n \"at the left lung base\",\n ],\n },\n \"Edema\": {\n \"severity\": [\"\", \"mild\", \"improvement in\", \"persistent\", \"moderate\", \"decreased\"],\n \"subtype\": [\"pulmonary edema\", \"trace interstitial edema\", \"pulmonary interstitial edema\"],\n \"location\": [\"\"],\n },\n \"Pleural Effusion\": {\n \"severity\": [\"\", \"small\", \"stable\", \"large\", \"decreased\", \"increased\"],\n \"location\": [\"left\", \"right\", \"tiny\"],\n \"subtype\": [\"bilateral pleural effusion\", \"subpulmonic pleural effusion\", \"bilateral pleural effusion\"],\n },\n}"
}
] | import ast
import pandas as pd
from typing import Dict, List
from PIL import Image
from torch.utils.data import default_collate
from torch.utils.data.dataset import Dataset
from cxrclip.data.data_utils import load_transform, transform_image
from cxrclip.prompt.constants import CHEXPERT_CLASS_PROMPTS | 1,417 |
class ImageTextEvalDataset(Dataset):
def __init__(
self,
name: str,
data_path: str,
split: str,
data_frac: float = 1.0,
tokenizer=None,
text_max_length: int = 256,
transform_config: Dict = None,
normalize: str = "huggingface",
**kwargs
):
super().__init__()
self.name = name
self.split = split
self.tokenizer = tokenizer
self.text_max_length = text_max_length
self.data_frac = data_frac
self.normalize = normalize
if self.name == "chexpert5x200":
|
class ImageTextEvalDataset(Dataset):
def __init__(
self,
name: str,
data_path: str,
split: str,
data_frac: float = 1.0,
tokenizer=None,
text_max_length: int = 256,
transform_config: Dict = None,
normalize: str = "huggingface",
**kwargs
):
super().__init__()
self.name = name
self.split = split
self.tokenizer = tokenizer
self.text_max_length = text_max_length
self.data_frac = data_frac
self.normalize = normalize
if self.name == "chexpert5x200": | self.label_list = list(CHEXPERT_CLASS_PROMPTS.keys()) | 2 | 2023-11-01 07:24:52+00:00 | 2k |
mihirp1998/Diffusion-TTA | diff_tta/models/build.py | [
{
"identifier": "get_obj_from_str",
"path": "diff_tta/utils.py",
"snippet": "def get_obj_from_str(string, reload=False):\n \"\"\"A helper function to instantiate a class from a config object.\n See https://github.com/CompVis/stable-diffusion/blob/main/ldm/util.py\n \"\"\"\n module, cls = string.rsplit(\".\", 1)\n if reload:\n module_imp = importlib.import_module(module)\n importlib.reload(module_imp)\n return getattr(importlib.import_module(module, package=None), cls)"
},
{
"identifier": "ClipClassifier",
"path": "diff_tta/models/clip_classifier.py",
"snippet": "class ClipClassifier(nn.Module):\n def __init__(self, classes, class_arch):\n super().__init__()\n\n imagenet_templates = [\n 'a photo of a {}.',\n ]\n if class_arch == \"clipr50\":\n model, _ = clip.load(\"RN50\",jit=False)\n elif class_arch == \"clipr101\":\n model, _ = clip.load(\"RN101\",jit=False)\n elif class_arch == \"clipb32\":\n model, _ = clip.load(\"ViT-B/32\",jit=False)\n elif class_arch == \"clipb16\":\n model, _ = clip.load(\"ViT-B/16\",jit=False)\n elif class_arch == \"clipl14\":\n model, _ = clip.load(\"ViT-L/14\",jit=False)\n\n\n self.final_fc = nn.Linear(768,1000,bias=False)\n with torch.no_grad():\n zeroshot_weights = zeroshot_classifier(classes, imagenet_templates, model)\n self.final_fc.weight.data = zeroshot_weights.T\n self.model = model\n\n def forward(self, images):\n image_features = self.model.encode_image(images)\n logits = 100. * self.final_fc(image_features)\n return logits"
},
{
"identifier": "utils",
"path": "diff_tta/utils.py",
"snippet": "class UnNormalize(object):\nclass VQVAEUnNormalize(UnNormalize):\n def __init__(self, mean, std):\n def __call__(self, tensor):\n def __call__(self, tensor):\ndef mean_list(l):\ndef segment_mean(x, index):\ndef get_class_sd_features(tokenizer, text_encoder, input, device):\ndef prepare_class_text_embeddings(device,\n tokenizer=None,\n text_encoder=None,\n class_names=None):\ndef initiate_time_steps(step, total_timestep, batch_size, config):\ndef instantiate_from_config(config):\ndef get_obj_from_str(string, reload=False):"
}
] | import torch
import torch.nn as nn
import torchvision
from diffusers import (
AutoencoderKL,
UNet2DConditionModel,
DDPMScheduler,
StableDiffusionPipeline,
EulerDiscreteScheduler
)
from transformers import CLIPTextModel, CLIPTokenizer
from diff_tta.utils import get_obj_from_str
from diff_tta.models.DiT.models import DiT_XL_2
from diff_tta.models.DiT.download import find_model
from diff_tta.models.clip_classifier import ClipClassifier
from diff_tta.models.DiT.diffusion import create_diffusion
from diff_tta import utils | 893 |
def load_dit_model(config, device):
"""Load DiT model"""
#@param ["stabilityai/sd-vae-ft-mse", "stabilityai/sd-vae-ft-ema"]
vae_model = "stabilityai/sd-vae-ft-ema"
image_size = config.input.sd_img_res
latent_size = int(image_size) // 8
model = DiT_XL_2(input_size=latent_size).to(device)
state_dict = find_model(f"DiT-XL-2-{image_size}x{image_size}.pt")
model.load_state_dict(state_dict)
model.eval() # important!
vae = AutoencoderKL.from_pretrained(vae_model).to(device)
vae.eval()
# default: 1000 steps, linear noise schedule
diffusion = create_diffusion(timestep_respacing="")
|
def load_dit_model(config, device):
"""Load DiT model"""
#@param ["stabilityai/sd-vae-ft-mse", "stabilityai/sd-vae-ft-ema"]
vae_model = "stabilityai/sd-vae-ft-ema"
image_size = config.input.sd_img_res
latent_size = int(image_size) // 8
model = DiT_XL_2(input_size=latent_size).to(device)
state_dict = find_model(f"DiT-XL-2-{image_size}x{image_size}.pt")
model.load_state_dict(state_dict)
model.eval() # important!
vae = AutoencoderKL.from_pretrained(vae_model).to(device)
vae.eval()
# default: 1000 steps, linear noise schedule
diffusion = create_diffusion(timestep_respacing="") | image_renormalizer = utils.VQVAEUnNormalize( | 2 | 2023-11-07 21:09:50+00:00 | 2k |
pofey/MemAI-Flow | memflow/main.py | [
{
"identifier": "CuboxErrorException",
"path": "memflow/exceptions.py",
"snippet": "class CuboxErrorException(RuntimeError):\n def __init__(self, message):\n self.message = message"
},
{
"identifier": "LOGGING_CONFIG",
"path": "memflow/common/logging.py",
"snippet": "LOGGING_CONFIG = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'default': {\n 'format': '%(asctime)s - %(name)s - %(levelname)s - [%(threadName)s] - %(message)s',\n },\n },\n 'handlers': {\n 'console': {\n 'class': 'logging.StreamHandler',\n 'level': 'INFO',\n 'formatter': 'default',\n },\n 'file': {\n 'class': 'logging.handlers.TimedRotatingFileHandler',\n 'level': 'INFO',\n 'formatter': 'default',\n 'filename': f\"{os.environ.get('WORKDIR')}/logs/app.log\",\n 'when': 'D',\n 'interval': 1,\n 'backupCount': 7,\n },\n },\n 'loggers': {\n '': { # root logger\n 'handlers': ['console', 'file'],\n 'level': 'INFO',\n 'propagate': True,\n },\n 'apscheduler': { # Specific logger for apscheduler\n 'handlers': ['console', 'file'],\n 'level': 'ERROR', # Set to WARNING to suppress INFO and DEBUG messages\n 'propagate': False, # Do not propagate to root logger\n },\n 'httpx': { # Specific logger for httpx\n 'handlers': ['console', 'file'],\n 'level': 'ERROR', # Set to WARNING to suppress INFO and DEBUG messages\n 'propagate': False, # Do not propagate to root logger\n },\n }\n}"
},
{
"identifier": "MemApi",
"path": "memflow/memapi.py",
"snippet": "class MemApi:\n def __init__(self, api_key: str):\n self.api_key = api_key\n self.headers = {\n \"Authorization\": \"ApiAccessToken \" + self.api_key,\n }\n\n @retry(wait=wait_random_exponential(min=1, max=20), stop=stop_after_attempt(3))\n def create_mem(self, content: str):\n params = {\n \"content\": content\n }\n r = httpx.post(CREATE_MEM_API, json=params, headers=self.headers)\n r.raise_for_status()\n return r.json()"
},
{
"identifier": "create_all",
"path": "memflow/databases.py",
"snippet": "def create_all():\n \"\"\"\n 自动初始化数据库引擎和ORM框架\n 会自动生成模型定义的结构为数据表\n :return:\n \"\"\"\n Base.metadata.create_all(engine)"
},
{
"identifier": "json_200",
"path": "memflow/common/response.py",
"snippet": "def json_200(data: Union[bool, list, dict, str, None] = None, message: Union[str, None] = None) -> Response:\n \"\"\"\n 返回http_status=200的结果\n :param data: 返回结果\n :param message: 消息\n :return:\n \"\"\"\n if not message:\n message = \"success\"\n if data:\n if isinstance(data, list):\n if len(data) > 0 and 'to_dict' in dir(data[0]):\n data = [i.to_dict() for i in data]\n elif 'to_dict' in dir(data):\n data = data.to_dict()\n return PlainTextResponse(\n media_type=\"application/json\",\n status_code=status.HTTP_200_OK,\n content=json.dumps({\n 'success': True,\n 'errorCode': 0,\n 'message': message,\n 'data': data,\n }, cls=CustomJSONEncoder),\n )"
},
{
"identifier": "json_500",
"path": "memflow/common/response.py",
"snippet": "def json_500(data: Union[bool, list, dict, str, None] = None, message: Union[str, None] = None) -> Response:\n \"\"\"\n 返回http_status=500的结果\n :param data: 返回结果\n :param message: 消息\n :return:\n \"\"\"\n if not message:\n message = \"success\"\n return JSONResponse(\n status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,\n content={\n 'success': False,\n 'errorCode': 1,\n 'message': message,\n 'data': data,\n }\n )"
},
{
"identifier": "json_with_status",
"path": "memflow/common/response.py",
"snippet": "def json_with_status(status_code: int, data: Union[bool, list, dict, str, None] = None,\n message: Union[str, None] = None) -> Response:\n \"\"\"\n 返回自定义statuscode的结果\n :param data: 返回结果\n :param message: 消息\n :return:\n \"\"\"\n if not message:\n message = \"success\"\n return JSONResponse(\n status_code=status_code,\n content={\n 'success': False,\n 'errorCode': 1,\n 'message': message,\n 'data': data,\n }\n )"
}
] | import os
import logging.config
import inject
import httpx
import uvicorn
from memflow.exceptions import CuboxErrorException
from apscheduler.schedulers.background import BackgroundScheduler
from fastapi.exceptions import RequestValidationError
from memflow.common.logging import LOGGING_CONFIG
from memflow.memapi import MemApi
from starlette.exceptions import HTTPException
from fastapi import FastAPI
from memflow.databases import create_all
from memflow.common.response import json_200, json_500, json_with_status
from memflow.models import *
from memflow.tasks.cuboxsynctask import CuboxSyncTask | 1,424 | """
程序启动入口类
"""
if not os.environ.get("WORKDIR"):
workdir = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'data')
else:
workdir = os.environ.get("WORKDIR")
if not os.path.exists(workdir):
os.makedirs(workdir)
log_dir = os.path.join(workdir, 'logs')
if not os.path.exists(log_dir):
os.makedirs(log_dir)
os.environ["WORKDIR"] = workdir
| """
程序启动入口类
"""
if not os.environ.get("WORKDIR"):
workdir = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'data')
else:
workdir = os.environ.get("WORKDIR")
if not os.path.exists(workdir):
os.makedirs(workdir)
log_dir = os.path.join(workdir, 'logs')
if not os.path.exists(log_dir):
os.makedirs(log_dir)
os.environ["WORKDIR"] = workdir
| logging.config.dictConfig(LOGGING_CONFIG) | 1 | 2023-11-08 10:02:00+00:00 | 2k |
sdebruyn/dbt-timescaledb | dbt/adapters/timescaledb/timescaledb_adapter.py | [
{
"identifier": "NO_TRANSACTION_MARKER",
"path": "dbt/adapters/timescaledb/timescaledb_connection_manager.py",
"snippet": "NO_TRANSACTION_MARKER = \"/* MARKER SHOULD RUN OUTSIDE TRANSACTION */\""
},
{
"identifier": "TimescaleDBConnectionManager",
"path": "dbt/adapters/timescaledb/timescaledb_connection_manager.py",
"snippet": "class TimescaleDBConnectionManager(PostgresConnectionManager):\n TYPE = \"timescaledb\"\n\n def add_query(\n self,\n sql: str,\n auto_begin: bool = True,\n bindings: Optional[Any] = None,\n abridge_sql_log: bool = False,\n ) -> Tuple[Connection, Any]:\n restore_isolation_level = ISOLATION_LEVEL_AUTOCOMMIT\n connection = None\n\n if NO_TRANSACTION_MARKER in sql:\n logger.debug(\"Found marker to run SQL outside transaction\")\n auto_begin = False\n connection = self.get_thread_connection()\n restore_isolation_level = connection.handle.isolation_level\n logger.debug(f\"Current isolation level: {restore_isolation_level}\")\n connection.handle.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)\n logger.debug(f\"Set isolation level to {ISOLATION_LEVEL_AUTOCOMMIT} and autocommit to False\")\n\n try:\n res1, res2 = super().add_query(sql, auto_begin, bindings, abridge_sql_log)\n finally:\n if restore_isolation_level != ISOLATION_LEVEL_AUTOCOMMIT:\n logger.debug(f\"Restoring isolation level to {restore_isolation_level}\")\n connection.handle.set_isolation_level(restore_isolation_level)\n\n return res1, res2"
},
{
"identifier": "TimescaleDBIndexConfig",
"path": "dbt/adapters/timescaledb/timescaledb_index_config.py",
"snippet": "class TimescaleDBIndexConfig(PostgresIndexConfig):\n transaction_per_chunk: bool = False\n\n def render(self, relation) -> str: # noqa: ANN001\n # We append the current timestamp to the index name because otherwise\n # the index will only be created on every other run. See\n # https://github.com/dbt-labs/dbt-core/issues/1945#issuecomment-576714925\n # for an explanation.\n inputs = self.columns + [\n relation.render(),\n str(self.unique),\n str(self.type),\n str(self.transaction_per_chunk),\n ]\n string = \"_\".join(inputs)\n return md5(string)"
}
] | from typing import Any, Optional
from dbt.adapters.base.meta import available
from dbt.adapters.postgres import PostgresAdapter
from dbt.adapters.timescaledb.timescaledb_connection_manager import (
NO_TRANSACTION_MARKER,
TimescaleDBConnectionManager,
)
from dbt.adapters.timescaledb.timescaledb_index_config import TimescaleDBIndexConfig | 699 |
class TimescaleDBAdapter(PostgresAdapter):
ConnectionManager = TimescaleDBConnectionManager
@available
def parse_index(self, raw_index: Any) -> Optional[TimescaleDBIndexConfig]:
return TimescaleDBIndexConfig.parse(raw_index)
@available
def marker_run_outside_transaction(self) -> str:
|
class TimescaleDBAdapter(PostgresAdapter):
ConnectionManager = TimescaleDBConnectionManager
@available
def parse_index(self, raw_index: Any) -> Optional[TimescaleDBIndexConfig]:
return TimescaleDBIndexConfig.parse(raw_index)
@available
def marker_run_outside_transaction(self) -> str: | return NO_TRANSACTION_MARKER | 0 | 2023-11-07 21:54:46+00:00 | 2k |
jax-ml/bayeux | bayeux/_src/shared.py | [
{
"identifier": "debug",
"path": "bayeux/_src/debug.py",
"snippet": "def debug(model, seed, verbosity, printer, kwargs, catch_exceptions: bool):\n \"\"\"Debugger that includes the inverse log det jacobian.\"\"\"\n checkers = [\n check_shapes,\n check_test_point_log_density,\n check_kwargs,\n check_init,\n check_init_nan,\n check_init_log_density,\n check_transform,\n check_transformed_log_density,\n compute_gradients]\n if kwargs is None:\n kwargs = {}\n return ModelDebug(model, seed, checkers, kwargs)(\n verbosity=verbosity, catch_exceptions=catch_exceptions, printer=printer)"
},
{
"identifier": "initialization",
"path": "bayeux/_src/initialization.py",
"snippet": "def uniform(\n *,\n test_point: Point,\n inverse_transform_fn: Callable[[Point], Point],\n transform_fn: Callable[[Point], Point],\n num_points: int,\n key: jax.Array\n) -> Point:"
},
{
"identifier": "types",
"path": "bayeux/_src/types.py",
"snippet": ""
}
] | import dataclasses
import functools
import inspect
import jax
import jax.numpy as jnp
import oryx
from typing import Callable, Optional
from bayeux._src import debug
from bayeux._src import initialization
from bayeux._src import types | 1,108 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shared functionality for MCMC sampling."""
def map_fn(chain_method, fn):
if chain_method == "parallel":
return jax.pmap(fn)
elif chain_method == "vectorized":
return jax.vmap(fn)
elif chain_method == "sequential":
return functools.partial(jax.tree_map, fn)
raise ValueError(f"Chain method {chain_method} not supported.")
def _default_init(
*,
initial_state,
test_point,
inverse_transform_fn,
transform_fn,
num_points,
key
):
"""Initialization in case there is no explicit init provided."""
if initial_state is None:
return initialization.uniform(
test_point=test_point,
inverse_transform_fn=inverse_transform_fn,
transform_fn=transform_fn,
num_points=num_points,
key=key)
else:
return initial_state
def constrain(
transform_fn: types.JAXFn,
inverse_log_det_jacobian: Optional[types.JAXFn] = None,
) -> Callable[[types.LogDensityFn], types.LogDensityFn]:
"""Returns a log density function that operates in an unconstrained space.
Adapted from oryx (https://github.com/jax-ml/oryx)
Args:
transform_fn: Constraining bijector, mapping from R^n to the support of the
target log density.
inverse_log_det_jacobian: Optional inverse log det jacobian, if known.
"""
if inverse_log_det_jacobian is None:
inverse_log_det_jacobian = oryx.core.ildj(transform_fn)
def wrap_log_density(target_log_density):
def wrapped(args):
mapped_args = transform_fn(args)
ildjs = inverse_log_det_jacobian(mapped_args)
return target_log_density(mapped_args) - jnp.sum(
jnp.array(jax.tree_util.tree_leaves(ildjs)))
return wrapped
return wrap_log_density
def get_default_signature(fn):
defaults = {}
required = set()
for key, val in inspect.signature(fn).parameters.items():
if val.default is inspect.Signature.empty:
required.add(key)
else:
defaults[key] = val.default
return defaults, required
def _nothing(x: types.Point) -> types.Point:
return x
@dataclasses.dataclass
class Base:
"""Base class for MCMC sampling."""
log_density: types.LogDensityFn
test_point: types.Point
transform_fn: types.JAXFn = _nothing
inverse_transform_fn: Optional[types.JAXFn] = None
inverse_log_det_jacobian: Optional[types.JAXFn] = None
initial_state: Optional[types.Point] = None
def get_initial_state(self, key, num_chains=8):
return _default_init(
initial_state=self.initial_state,
test_point=self.test_point,
inverse_transform_fn=self.inverse_transform_fn,
transform_fn=self.transform_fn,
num_points=num_chains,
key=key)
def get_kwargs(self, **kwargs):
raise NotImplementedError()
def constrained_log_density(self):
return constrain(
self.transform_fn,
inverse_log_det_jacobian=self.inverse_log_det_jacobian,
)(self.log_density)
| # Copyright 2023 The bayeux Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shared functionality for MCMC sampling."""
def map_fn(chain_method, fn):
if chain_method == "parallel":
return jax.pmap(fn)
elif chain_method == "vectorized":
return jax.vmap(fn)
elif chain_method == "sequential":
return functools.partial(jax.tree_map, fn)
raise ValueError(f"Chain method {chain_method} not supported.")
def _default_init(
*,
initial_state,
test_point,
inverse_transform_fn,
transform_fn,
num_points,
key
):
"""Initialization in case there is no explicit init provided."""
if initial_state is None:
return initialization.uniform(
test_point=test_point,
inverse_transform_fn=inverse_transform_fn,
transform_fn=transform_fn,
num_points=num_points,
key=key)
else:
return initial_state
def constrain(
transform_fn: types.JAXFn,
inverse_log_det_jacobian: Optional[types.JAXFn] = None,
) -> Callable[[types.LogDensityFn], types.LogDensityFn]:
"""Returns a log density function that operates in an unconstrained space.
Adapted from oryx (https://github.com/jax-ml/oryx)
Args:
transform_fn: Constraining bijector, mapping from R^n to the support of the
target log density.
inverse_log_det_jacobian: Optional inverse log det jacobian, if known.
"""
if inverse_log_det_jacobian is None:
inverse_log_det_jacobian = oryx.core.ildj(transform_fn)
def wrap_log_density(target_log_density):
def wrapped(args):
mapped_args = transform_fn(args)
ildjs = inverse_log_det_jacobian(mapped_args)
return target_log_density(mapped_args) - jnp.sum(
jnp.array(jax.tree_util.tree_leaves(ildjs)))
return wrapped
return wrap_log_density
def get_default_signature(fn):
defaults = {}
required = set()
for key, val in inspect.signature(fn).parameters.items():
if val.default is inspect.Signature.empty:
required.add(key)
else:
defaults[key] = val.default
return defaults, required
def _nothing(x: types.Point) -> types.Point:
return x
@dataclasses.dataclass
class Base:
"""Base class for MCMC sampling."""
log_density: types.LogDensityFn
test_point: types.Point
transform_fn: types.JAXFn = _nothing
inverse_transform_fn: Optional[types.JAXFn] = None
inverse_log_det_jacobian: Optional[types.JAXFn] = None
initial_state: Optional[types.Point] = None
def get_initial_state(self, key, num_chains=8):
return _default_init(
initial_state=self.initial_state,
test_point=self.test_point,
inverse_transform_fn=self.inverse_transform_fn,
transform_fn=self.transform_fn,
num_points=num_chains,
key=key)
def get_kwargs(self, **kwargs):
raise NotImplementedError()
def constrained_log_density(self):
return constrain(
self.transform_fn,
inverse_log_det_jacobian=self.inverse_log_det_jacobian,
)(self.log_density)
| def debug( | 0 | 2023-11-02 16:52:57+00:00 | 2k |
zamaniamin/fastapi-shop | apps/main.py | [
{
"identifier": "DatabaseManager",
"path": "config/database.py",
"snippet": "class DatabaseManager:\n \"\"\"\n A utility class for managing database operations using SQLAlchemy.\n\n The DatabaseManager simplifies the process of initializing and managing database connections, creating database\n tables based on SQLAlchemy models, and providing a session for performing database operations.\n\n Attributes:\n engine (Engine): The SQLAlchemy engine for the configured database.\n session (Session): The SQLAlchemy session for database interactions.\n\n Methods:\n __init__():\n Initializes the DatabaseManager by creating an SQLAlchemy engine and a session based on the\n specified database configuration from the 'settings' module.\n\n create_database_tables():\n Detects 'models.py' files in subdirectories of the 'apps' directory and creates corresponding\n database tables based on SQLAlchemy models.\n\n Example Usage:\n db_manager = DatabaseManager()\n\n # Create database tables for all detected models\n db_manager.create_database_tables()\n\n Example Usage2:\n DatabaseManager().create_database_tables()\n \"\"\"\n engine: create_engine = None\n session: Session = None\n\n @classmethod\n def __init__(cls):\n \"\"\"\n Initializes the DatabaseManager.\n\n This method creates an SQLAlchemy engine and a session based on the specified database configuration\n from the 'settings' module.\n \"\"\"\n global testing # Access the global testing flag\n db_config = settings.DATABASES.copy()\n if testing:\n db_config[\"database\"] = \"test_\" + db_config[\"database\"]\n\n if db_config[\"drivername\"] == \"sqlite\":\n project_root = Path(__file__).parent.parent # Assuming this is where your models are located\n db_config[\"database\"] = os.path.join(project_root, db_config[\"database\"])\n\n url = URL.create(**db_config)\n cls.engine = create_engine(url, connect_args={\"check_same_thread\": False})\n else:\n # for postgres\n cls.engine = create_engine(URL.create(**db_config))\n\n session = sessionmaker(autocommit=False, autoflush=False, bind=cls.engine)\n cls.session = session()\n\n @classmethod\n def create_test_database(cls):\n \"\"\"\n Create and configure a test database for use in tests.\n \"\"\"\n\n # Set the testing flag to True\n global testing\n testing = True\n\n # Reinitialize the DatabaseManager for testing\n cls.__init__()\n DatabaseManager.create_database_tables()\n\n @classmethod\n def drop_all_tables(cls):\n \"\"\"\n Drop all tables in the current database.\n \"\"\"\n # TODO drop tables for postgres too\n if cls.engine:\n metadata = MetaData()\n metadata.reflect(bind=cls.engine)\n for table_name, table in metadata.tables.items():\n table.drop(cls.engine)\n\n @classmethod\n def create_database_tables(cls):\n \"\"\"\n Create database tables based on SQLAlchemy models.\n\n This method detects 'models.py' files in subdirectories of the 'apps'\n directory and creates corresponding database tables based on SQLAlchemy\n models defined within those files.\n\n Returns:\n None\n \"\"\"\n script_directory = os.path.dirname(os.path.abspath(__file__))\n project_root = Path(script_directory).parent\n apps_directory = project_root / \"apps\"\n\n for app_dir in apps_directory.iterdir():\n if app_dir.is_dir():\n models_file = app_dir / \"models.py\"\n if models_file.exists():\n module_name = f\"apps.{app_dir.name}.models\"\n try:\n module = importlib.import_module(module_name)\n if hasattr(module, \"FastModel\") and hasattr(module.FastModel, \"metadata\"):\n module.FastModel.metadata.create_all(bind=cls.engine)\n except ImportError:\n pass\n\n @classmethod\n def 
get_testing_mode(cls):\n return testing"
},
{
"identifier": "RouterManager",
"path": "config/routers.py",
"snippet": "class RouterManager:\n \"\"\"\n A utility class for managing FastAPI routers.\n\n This class detects and imports FastAPI routers from 'routers.py' files in\n the subdirectories of the 'apps' directory. It allows you to easily include\n routers in your FastAPI application.\n\n Attributes:\n None\n\n Methods:\n import_routers():\n Detects 'routers.py' files in subdirectories of the 'apps' directory\n and imports the 'router' variable from each file.\n\n Example Usage:\n router_manager = RouterManager()\n\n # Import routers from detected 'routers.py' files\n router_manager.import_routers()\n \"\"\"\n\n def __init__(self, app):\n self.script_directory = os.path.dirname(os.path.abspath(__file__))\n self.project_root = Path(self.script_directory).parent\n self.app = app\n\n def import_routers(self):\n apps_directory = self.project_root / \"apps\"\n\n for app_dir in apps_directory.iterdir():\n if app_dir.is_dir():\n routers_file = app_dir / \"routers.py\"\n if routers_file.exists():\n module_name = f\"apps.{app_dir.name}.routers\"\n try:\n module = importlib.import_module(module_name)\n if hasattr(module, \"router\"):\n # Add the imported router to your FastAPI application\n self.app.include_router(module.router)\n except ImportError as e:\n # Log the ImportError message for debugging purposes\n logging.error(f\"Error importing module {module_name}: {e}\")"
},
{
"identifier": "MEDIA_DIR",
"path": "config/settings.py",
"snippet": "MEDIA_DIR = BASE_DIR / \"media\""
}
] | from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from fastapi.staticfiles import StaticFiles
from config.database import DatabaseManager
from config.routers import RouterManager
from config.settings import MEDIA_DIR | 1,496 |
# -------------------
# --- Init Models ---
# -------------------
DatabaseManager().create_database_tables()
# --------------------
# --- Init FastAPI ---
# --------------------
app = FastAPI()
# ------------------
# --- Middleware ---
# ------------------
app.add_middleware(
CORSMiddleware,
allow_origins=["http://localhost:3000"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"])
# -------------------
# --- Static File ---
# -------------------
# add static-file support, for see images by URL
app.mount("/media", StaticFiles(directory=MEDIA_DIR), name="media")
# --------------------
# --- Init Routers ---
# --------------------
|
# -------------------
# --- Init Models ---
# -------------------
DatabaseManager().create_database_tables()
# --------------------
# --- Init FastAPI ---
# --------------------
app = FastAPI()
# ------------------
# --- Middleware ---
# ------------------
app.add_middleware(
CORSMiddleware,
allow_origins=["http://localhost:3000"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"])
# -------------------
# --- Static File ---
# -------------------
# add static-file support, for see images by URL
app.mount("/media", StaticFiles(directory=MEDIA_DIR), name="media")
# --------------------
# --- Init Routers ---
# --------------------
| RouterManager(app).import_routers() | 1 | 2023-11-06 04:46:03+00:00 | 2k |
jkulhanek/nerfbaselines | tests/test_utils.py | [
{
"identifier": "Indices",
"path": "nerfbaselines/utils.py",
"snippet": "class Indices:\n def __init__(self, steps):\n self._steps = steps\n self.total: Optional[int] = None\n\n def __contains__(self, x):\n if isinstance(self._steps, list):\n steps = self._steps\n if any(x < 0 for x in self._steps):\n assert self.total is not None, \"total must be specified for negative steps\"\n steps = set(x if x >= 0 else self.total + x for x in self._steps)\n return x in steps\n elif isinstance(self._steps, slice):\n start: int = self._steps.start or 0\n if start < 0:\n assert self.total is not None, \"total must be specified for negative start\"\n start = self.total - start\n stop: Optional[int] = self._steps.stop or self.total\n if stop is not None and stop < 0:\n assert self.total is not None, \"total must be specified for negative stop\"\n stop = self.total - stop\n step: int = self._steps.step or 1\n return x >= start and (stop is None or x < stop) and (x - start) % step == 0\n\n @classmethod\n def every_iters(cls, iters: int, zero: bool = False):\n start = iters if zero else 0\n return cls(slice(start, None, iters))\n\n def __repr__(self):\n if isinstance(self._steps, list):\n return \",\".join(map(str, self._steps))\n elif isinstance(self._steps, slice):\n out = f\"{self._steps.start or ''}:{self._steps.stop or ''}\"\n if self._steps.step is not None:\n out += f\":{self._steps.step}\"\n return out\n else:\n return repr(self._steps)\n\n def __str__(self):\n return repr(self)"
},
{
"identifier": "cancellable",
"path": "nerfbaselines/utils.py",
"snippet": "def cancellable(fn=None, mark_only=False):\n def wrap(fn):\n if getattr(fn, \"__cancellable__\", False):\n return fn\n if mark_only:\n fn.__cancellable__ = True\n return fn\n\n if inspect.isgeneratorfunction(fn):\n\n @wraps(fn)\n def wrapped(*args, cancellation_token: Optional[CancellationToken] = None, **kwargs):\n if cancellation_token is not None:\n yield from cancellation_token.invoke(fn, *args, **kwargs)\n else:\n yield from fn(*args, **kwargs)\n\n else:\n\n @wraps(fn)\n def wrapped(*args, cancellation_token: Optional[CancellationToken] = None, **kwargs):\n if cancellation_token is not None:\n return cancellation_token.invoke(fn, *args, **kwargs)\n else:\n return fn(*args, **kwargs)\n\n wrapped.__cancellable__ = True\n return wrapped\n\n return wrap if fn is None else wrap(fn)"
},
{
"identifier": "CancellationToken",
"path": "nerfbaselines/utils.py",
"snippet": "class CancellationToken:\n def __init__(self):\n self._cancelled = False\n\n def cancel(self):\n self._cancelled = True\n\n @property\n def cancelled(self):\n return self._cancelled\n\n def _trace(self, frame, event, arg):\n if event == \"line\":\n if self.cancelled:\n raise CancelledException\n return self._trace\n\n def _invoke_generator(self, fn, *args, **kwargs):\n try:\n sys.settrace(self._trace)\n for r in fn(*args, **kwargs):\n yield r\n finally:\n sys.settrace(None)\n\n def invoke(self, fn, *args, **kwargs):\n if inspect.isgeneratorfunction(fn):\n return self._invoke_generator(fn, *args, **kwargs)\n\n try:\n sys.settrace(self._trace)\n return fn(*args, **kwargs)\n finally:\n sys.settrace(None)"
},
{
"identifier": "CancelledException",
"path": "nerfbaselines/utils.py",
"snippet": "class CancelledException(Exception):\n pass"
}
] | import pytest
from time import sleep, perf_counter
from nerfbaselines.utils import Indices
from nerfbaselines.utils import cancellable, CancellationToken, CancelledException
from nerfbaselines.utils import get_resources_utilization_info | 1,160 |
def test_indices_last():
indices = Indices([-1])
indices.total = 12
for i in range(12):
if i == indices.total - 1:
assert i in indices
else:
assert i not in indices
class TimeLimitCancellationToken(CancellationToken):
def __init__(self, timeout=0.003):
super().__init__()
self.timeout = timeout
self.start = perf_counter()
@property
def cancelled(self):
return super().cancelled or perf_counter() - self.start > self.timeout
def test_cancellable():
was_called = False
|
def test_indices_last():
indices = Indices([-1])
indices.total = 12
for i in range(12):
if i == indices.total - 1:
assert i in indices
else:
assert i not in indices
class TimeLimitCancellationToken(CancellationToken):
def __init__(self, timeout=0.003):
super().__init__()
self.timeout = timeout
self.start = perf_counter()
@property
def cancelled(self):
return super().cancelled or perf_counter() - self.start > self.timeout
def test_cancellable():
was_called = False
| @cancellable | 1 | 2023-11-07 20:22:35+00:00 | 2k |
microsoft/Everything-of-Thoughts-XoT | xot_all_in_one/xot/prompter/prompter_cube.py | [
{
"identifier": "doAlgStr",
"path": "xot_all_in_one/xot/prompter/utils/py222.py",
"snippet": "def doAlgStr(s, alg):\n # print('',alg)\n moves = alg.split(\" \")\n # print('moves',moves)\n for m in moves:\n if m in moveInds:\n s = doMove(s, moveInds[m])\n return s"
},
{
"identifier": "getCube",
"path": "xot_all_in_one/xot/prompter/utils/py222.py",
"snippet": "def getCube(s):\n cube_string = \"\"\n cube_string += \"Upper:\\n\" \n cube_string += \"{} {}\\n\".format(s[0], s[1]) \n cube_string += \"{} {}\\n\".format(s[2], s[3]) \n\n cube_string += \"Front:\\n\" \n cube_string += \"{} {}\\n\".format(s[8], s[9]) \n cube_string += \"{} {}\\n\".format(s[10], s[11]) \n\n cube_string += \"Down:\\n\" \n cube_string += \"{} {}\\n\".format(s[12], s[13]) \n cube_string += \"{} {}\\n\".format(s[14], s[15]) \n\n cube_string += \"Left:\\n\" \n cube_string += \"{} {}\\n\".format(s[16], s[17]) \n cube_string += \"{} {}\\n\".format(s[18], s[19]) \n\n cube_string += \"Right:\\n\" \n cube_string += \"{} {}\\n\".format(s[4], s[5]) \n cube_string += \"{} {}\\n\".format(s[6], s[7]) \n\n cube_string += \"Back:\\n\" \n cube_string += \"{} {}\\n\".format(s[20], s[21]) \n cube_string += \"{} {}\\n\".format(s[22], s[23]) \n\n return cube_string"
}
] | import re
import os
import sympy
import numpy as np
import pandas as pd
from .prompts.prompts_cube import *
from .utils.py222 import doAlgStr, getCube | 755 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
class CubePrompter():
"""
CubePrompter provides the generation of prompts specific to the cube
example for the language models.
"""
def __init__(self, last_step=True):
self.last_step = int(last_step)
self.value_cache = {}
def get_current_state(self, x, y: str) -> str:
moves = y.strip().replace('\n', ' ')
s = doAlgStr(np.array(x), moves)
return s.tolist()
def count_inconsistencies(self, input_):
color_key = ['Upper', 'Right', 'Front', 'Down', 'Left', 'Back']
color_dict = dict()
for i in range(6):
order = i*4
color = input_[order:order+4]
color_dict[color_key[i]] = len(set(color))
return color_dict
def format_thoughts(self, x: str, y: str) -> str:
current_s = x
moves = ''
thoughts = y.strip().split('\n')
for i, m in enumerate(thoughts):
s2 = doAlgStr(np.array(current_s), m)
| # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
class CubePrompter():
"""
CubePrompter provides the generation of prompts specific to the cube
example for the language models.
"""
def __init__(self, last_step=True):
self.last_step = int(last_step)
self.value_cache = {}
def get_current_state(self, x, y: str) -> str:
moves = y.strip().replace('\n', ' ')
s = doAlgStr(np.array(x), moves)
return s.tolist()
def count_inconsistencies(self, input_):
color_key = ['Upper', 'Right', 'Front', 'Down', 'Left', 'Back']
color_dict = dict()
for i in range(6):
order = i*4
color = input_[order:order+4]
color_dict[color_key[i]] = len(set(color))
return color_dict
def format_thoughts(self, x: str, y: str) -> str:
current_s = x
moves = ''
thoughts = y.strip().split('\n')
for i, m in enumerate(thoughts):
s2 = doAlgStr(np.array(current_s), m) | state2 = getCube(s2) | 1 | 2023-11-08 09:48:34+00:00 | 2k |
ultraleap/leapc-python-bindings | leapc-python-api/src/leap/device.py | [
{
"identifier": "LeapCStruct",
"path": "leapc-python-api/src/leap/datatypes.py",
"snippet": "class FrameData:\nclass FrameHeader(LeapCStruct):\nclass Vector(LeapCStruct):\nclass Quaternion(LeapCStruct):\nclass Palm(LeapCStruct):\nclass Bone(LeapCStruct):\nclass Digit(LeapCStruct):\nclass Hand(LeapCStruct):\nclass Image(LeapCStruct):\n def __init__(self, size):\n def __getattr__(self, name):\n def __getitem__(self, key):\n def frame_ptr(self):\n def frame_id(self):\n def timestamp(self):\n def __getitem__(self, idx):\n def __iter__(self):\n def x(self):\n def y(self):\n def z(self):\n def __getitem__(self, idx):\n def __iter__(self):\n def x(self):\n def y(self):\n def z(self):\n def w(self):\n def position(self):\n def stabilized_position(self):\n def velocity(self):\n def normal(self):\n def width(self):\n def direction(self):\n def orientation(self):\n def prev_joint(self):\n def next_joint(self):\n def width(self):\n def rotation(self):\n def finger_id(self):\n def bones(self):\n def metacarpal(self):\n def proximal(self):\n def intermediate(self):\n def distal(self):\n def is_extended(self):\n def id(self):\n def flags(self):\n def type(self):\n def confidence(self):\n def visible_time(self):\n def pinch_distance(self):\n def grab_angle(self):\n def pinch_strength(self):\n def grab_strength(self):\n def palm(self):\n def thumb(self):\n def index(self):\n def middle(self):\n def ring(self):\n def pinky(self):\n def digits(self):\n def arm(self):\n def matrix_version(self):"
},
{
"identifier": "get_enum_entries",
"path": "leapc-python-api/src/leap/enums.py",
"snippet": "def get_enum_entries(enum_type, flags):\n \"\"\"Interpret the flags as a bitwise combination of enum values\n\n Returns a list of enum entries which are present in the 'flags'.\n \"\"\"\n return list(filter(lambda entry: entry.value & flags != 0, enum_type))"
},
{
"identifier": "DevicePID",
"path": "leapc-python-api/src/leap/enums.py",
"snippet": "class DevicePID(metaclass=LeapEnum):\n pass"
},
{
"identifier": "DeviceStatus",
"path": "leapc-python-api/src/leap/enums.py",
"snippet": "class DeviceStatus(metaclass=LeapEnum):\n pass"
},
{
"identifier": "success_or_raise",
"path": "leapc-python-api/src/leap/exceptions.py",
"snippet": "def success_or_raise(func, *args):\n \"\"\"Call the function with the args, and raise an exception if the result is not success\n\n The function must be a LeapC cffi function which returns a LeapRS object.\n \"\"\"\n result = LeapRS(func(*args))\n if result != LeapRS.Success:\n raise create_exception(result)"
},
{
"identifier": "LeapError",
"path": "leapc-python-api/src/leap/exceptions.py",
"snippet": "class LeapError(Exception):\n pass"
},
{
"identifier": "LeapCannotOpenDeviceError",
"path": "leapc-python-api/src/leap/exceptions.py",
"snippet": "class LeapCannotOpenDeviceError(LeapError):\n pass"
}
] | from contextlib import contextmanager
from leapc_cffi import ffi, libleapc
from .datatypes import LeapCStruct
from .enums import get_enum_entries, DevicePID, DeviceStatus
from .exceptions import success_or_raise, LeapError, LeapCannotOpenDeviceError | 863 |
class DeviceNotOpenException(LeapError):
pass
class DeviceStatusInfo:
def __init__(self, status: ffi.CData):
"""Create the DeviceStatusInfo
:param status: The CData defining the status
"""
|
class DeviceNotOpenException(LeapError):
pass
class DeviceStatusInfo:
def __init__(self, status: ffi.CData):
"""Create the DeviceStatusInfo
:param status: The CData defining the status
""" | self._status_flags = get_enum_entries(DeviceStatus, status) | 3 | 2023-11-08 13:35:40+00:00 | 2k |
UMass-Foundation-Model/CoVLM | open_flamingo/src/flamingo_lm.py | [
{
"identifier": "getattr_recursive",
"path": "open_flamingo/src/utils.py",
"snippet": "def getattr_recursive(obj, att):\n \"\"\"\n Return nested attribute of obj\n Example: getattr_recursive(obj, 'a.b.c') is equivalent to obj.a.b.c\n \"\"\"\n if att == \"\":\n return obj\n i = att.find(\".\")\n if i < 0:\n return getattr(obj, att)\n else:\n return getattr_recursive(getattr(obj, att[:i]), att[i + 1 :])"
},
{
"identifier": "setattr_recursive",
"path": "open_flamingo/src/utils.py",
"snippet": "def setattr_recursive(obj, att, val):\n \"\"\"\n Set nested attribute of obj\n Example: setattr_recursive(obj, 'a.b.c', val) is equivalent to obj.a.b.c = val\n \"\"\"\n if \".\" in att:\n obj = getattr_recursive(obj, \".\".join(att.split(\".\")[:-1]))\n setattr(obj, att.split(\".\")[-1], val)"
}
] | import random
import torch
import torch.nn as nn
import numpy as np
from .utils import getattr_recursive, setattr_recursive | 1,155 |
class FlamingoLayer(nn.Module):
def __init__(self, decoder_layer):
super().__init__()
self.decoder_layer = decoder_layer
self.vis_x = None
self.image_nums = None
self.image_start_index_list = None
def is_conditioned(self) -> bool:
"""Check whether the layer is conditioned."""
return self.vis_x is not None
# Used this great idea from this implementation of Flamingo (https://github.com/dhansmair/flamingo-mini/)
def condition_vis_x(self, vis_x, image_nums=None, image_start_index_list=None, num_beams=None, visual_tokens=None, data_list=None):
self.vis_x = vis_x
self.image_nums = image_nums
self.image_start_index_list = image_start_index_list
self.num_beams = num_beams
self.visual_tokens = visual_tokens
self.data_list = data_list
def forward(
self,
hidden_states, # alignment with hugging face name
attention_mask=None,
**decoder_layer_kwargs,
):
if self.vis_x is not None:
if self.training:
single_length = self.vis_x.shape[-2]
image_nums = self.image_nums
image_start_index_list = self.image_start_index_list
image_nums = [0] + np.cumsum(image_nums).tolist()
for i, (image_num_begin, image_num_end, start_indices) in enumerate(zip(image_nums[:-1], image_nums[1:], image_start_index_list)):
for index in start_indices:
if image_num_begin < image_num_end:
hidden_states[i, index:index+single_length] = self.vis_x[image_num_begin]
image_num_begin += 1
if self.visual_tokens is not None and len(self.visual_tokens) != 0:
for i, (x, y) in enumerate(self.data_list):
if len(self.visual_tokens[i].shape) > 1:
# print(self.visual_tokens[i].shape[0], "embedding")
hidden_states[x, y+1-self.visual_tokens[i].shape[0]:y+1] = self.visual_tokens[i]
else:
# print(self.visual_tokens[i].shape[0], "embedding")
hidden_states[x, y] = self.visual_tokens[i]
elif not self.training:
if (
("past_key_value" in decoder_layer_kwargs and decoder_layer_kwargs["past_key_value"] is None) or
("layer_past" in decoder_layer_kwargs and decoder_layer_kwargs["layer_past"] is None)
):
single_length = self.vis_x.shape[-2]
image_nums = self.image_nums
image_start_index_list = self.image_start_index_list
image_nums = [0] + np.cumsum(image_nums).tolist()
for i, (image_num_begin, image_num_end, start_indices) in enumerate(zip(image_nums[:-1], image_nums[1:], image_start_index_list)):
for index in start_indices:
if image_num_begin < image_num_end:
hidden_states[i, index:index+single_length] = self.vis_x[image_num_begin]
image_num_begin += 1
if self.visual_tokens is not None and len(self.visual_tokens) != 0:
for i, (x, y) in enumerate(self.data_list):
# import pdb; pdb.set_trace()
# print(x, y, self.visual_tokens[i].shape)
if len(self.visual_tokens[i].shape) > 1:
# print(self.visual_tokens[i].shape[0], "embedding")
hidden_states[x, y+1-self.visual_tokens[i].shape[0]:y+1] = self.visual_tokens[i]
else:
# print(self.visual_tokens[i].shape[0], "embedding")
hidden_states[x, y] = self.visual_tokens[i]
hidden_states = self.decoder_layer(
hidden_states, attention_mask=attention_mask, **decoder_layer_kwargs
)
return hidden_states
class FlamingoLMMixin(nn.Module):
"""
Mixin to add cross-attention layers to a language model.
"""
def set_decoder_layers_attr_name(self, decoder_layers_attr_name):
self.decoder_layers_attr_name = decoder_layers_attr_name
def _get_decoder_layers(self):
|
class FlamingoLayer(nn.Module):
def __init__(self, decoder_layer):
super().__init__()
self.decoder_layer = decoder_layer
self.vis_x = None
self.image_nums = None
self.image_start_index_list = None
def is_conditioned(self) -> bool:
"""Check whether the layer is conditioned."""
return self.vis_x is not None
# Used this great idea from this implementation of Flamingo (https://github.com/dhansmair/flamingo-mini/)
def condition_vis_x(self, vis_x, image_nums=None, image_start_index_list=None, num_beams=None, visual_tokens=None, data_list=None):
self.vis_x = vis_x
self.image_nums = image_nums
self.image_start_index_list = image_start_index_list
self.num_beams = num_beams
self.visual_tokens = visual_tokens
self.data_list = data_list
def forward(
self,
hidden_states, # alignment with hugging face name
attention_mask=None,
**decoder_layer_kwargs,
):
if self.vis_x is not None:
if self.training:
single_length = self.vis_x.shape[-2]
image_nums = self.image_nums
image_start_index_list = self.image_start_index_list
image_nums = [0] + np.cumsum(image_nums).tolist()
for i, (image_num_begin, image_num_end, start_indices) in enumerate(zip(image_nums[:-1], image_nums[1:], image_start_index_list)):
for index in start_indices:
if image_num_begin < image_num_end:
hidden_states[i, index:index+single_length] = self.vis_x[image_num_begin]
image_num_begin += 1
if self.visual_tokens is not None and len(self.visual_tokens) != 0:
for i, (x, y) in enumerate(self.data_list):
if len(self.visual_tokens[i].shape) > 1:
# print(self.visual_tokens[i].shape[0], "embedding")
hidden_states[x, y+1-self.visual_tokens[i].shape[0]:y+1] = self.visual_tokens[i]
else:
# print(self.visual_tokens[i].shape[0], "embedding")
hidden_states[x, y] = self.visual_tokens[i]
elif not self.training:
if (
("past_key_value" in decoder_layer_kwargs and decoder_layer_kwargs["past_key_value"] is None) or
("layer_past" in decoder_layer_kwargs and decoder_layer_kwargs["layer_past"] is None)
):
single_length = self.vis_x.shape[-2]
image_nums = self.image_nums
image_start_index_list = self.image_start_index_list
image_nums = [0] + np.cumsum(image_nums).tolist()
for i, (image_num_begin, image_num_end, start_indices) in enumerate(zip(image_nums[:-1], image_nums[1:], image_start_index_list)):
for index in start_indices:
if image_num_begin < image_num_end:
hidden_states[i, index:index+single_length] = self.vis_x[image_num_begin]
image_num_begin += 1
if self.visual_tokens is not None and len(self.visual_tokens) != 0:
for i, (x, y) in enumerate(self.data_list):
# import pdb; pdb.set_trace()
# print(x, y, self.visual_tokens[i].shape)
if len(self.visual_tokens[i].shape) > 1:
# print(self.visual_tokens[i].shape[0], "embedding")
hidden_states[x, y+1-self.visual_tokens[i].shape[0]:y+1] = self.visual_tokens[i]
else:
# print(self.visual_tokens[i].shape[0], "embedding")
hidden_states[x, y] = self.visual_tokens[i]
hidden_states = self.decoder_layer(
hidden_states, attention_mask=attention_mask, **decoder_layer_kwargs
)
return hidden_states
class FlamingoLMMixin(nn.Module):
"""
Mixin to add cross-attention layers to a language model.
"""
def set_decoder_layers_attr_name(self, decoder_layers_attr_name):
self.decoder_layers_attr_name = decoder_layers_attr_name
def _get_decoder_layers(self): | return getattr_recursive(self, self.decoder_layers_attr_name) | 0 | 2023-11-07 04:23:57+00:00 | 2k |
nouu-me/document_vector_search_benchmark | tools/run_benchmark.py | [
{
"identifier": "DATASET_REGISTRY",
"path": "dvsb/data/dataset.py",
"snippet": "DATASET_REGISTRY = Registry[Dataset]()"
},
{
"identifier": "Dataset",
"path": "dvsb/data/dataset.py",
"snippet": "class Dataset(ABC):\n @abstractmethod\n def get_name(self) -> str:\n \"\"\"Returns name of the dataset.\"\"\"\n ...\n\n @abstractmethod\n def get_queries(self) -> list[str]:\n \"\"\"Returns a list of all query strings.\"\"\"\n ...\n\n @abstractmethod\n def get_contexts(self) -> list[str]:\n \"\"\"Returns a list of all context strings.\"\"\"\n ...\n\n @abstractmethod\n def get_related_context_locations(self) -> list[list[int]]:\n \"\"\"Returns a list of all related context locations.\n The i-th returning value (say ans[i], a list of integers) is the related context locations for i-th query.\"\"\"\n ...\n\n def get_stats(self) -> dict:\n locations = self.get_related_context_locations()\n return {\n \"name\": self.get_name(),\n \"num_queries\": len(self.get_queries()),\n \"num_contexts\": len(self.get_contexts()),\n \"avg_num_related_contexts\": sum([len(locs) for locs in locations], 0) / len(locations),\n }"
},
{
"identifier": "EMBEDDING_REGISTRY",
"path": "dvsb/embedding/embedding.py",
"snippet": "EMBEDDING_REGISTRY = Registry[Embedding]()"
},
{
"identifier": "Embedding",
"path": "dvsb/embedding/embedding.py",
"snippet": "class Embedding(ABC):\n @abstractmethod\n def load(self) -> None:\n ...\n\n @abstractmethod\n def get_name(self) -> str:\n ...\n\n @abstractmethod\n def get_embeddings(self, texts: list[str], mode: str) -> npt.NDArray[np.float64]:\n ..."
},
{
"identifier": "METRIC_REGISTRY",
"path": "dvsb/metric/metric.py",
"snippet": "METRIC_REGISTRY = Registry[Metric]()"
},
{
"identifier": "Metric",
"path": "dvsb/metric/metric.py",
"snippet": "class Metric(ABC):\n @abstractmethod\n def get_name(self) -> str:\n ...\n\n @abstractmethod\n def compute(self, y_true: list[list[int]], scores: npt.NDArray[np.float64]) -> float:\n ..."
},
{
"identifier": "RELEVANCE_REGISTRY",
"path": "dvsb/relevance/relevance.py",
"snippet": "RELEVANCE_REGISTRY = Registry[Relevance]()"
},
{
"identifier": "Relevance",
"path": "dvsb/relevance/relevance.py",
"snippet": "class Relevance(ABC):\n @abstractmethod\n def get_name(self) -> str:\n ...\n\n @abstractmethod\n def compute(\n self, query_vectors: npt.NDArray[np.float64], context_vectors: npt.NDArray[np.float64]\n ) -> npt.NDArray[np.float64]:\n ..."
}
] | import argparse
import json
import numpy as np
import numpy.typing as npt
import pandas as pd
import yaml
from pathlib import Path
from typing import Iterable
from dvsb.data import DATASET_REGISTRY, Dataset
from dvsb.embedding import EMBEDDING_REGISTRY, Embedding
from dvsb.metric import METRIC_REGISTRY, Metric
from dvsb.relevance import RELEVANCE_REGISTRY, Relevance
from loguru import logger
from tqdm import tqdm | 776 |
def get_parser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser("run_benchmark")
parser.add_argument("-n", "--name", help="config name", required=False, default="default")
parser.add_argument("--no-cache", action="store_true")
return parser
|
def get_parser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser("run_benchmark")
parser.add_argument("-n", "--name", help="config name", required=False, default="default")
parser.add_argument("--no-cache", action="store_true")
return parser
| def load_dataset(dataset_config: dict, cache: bool) -> Dataset: | 1 | 2023-11-09 00:04:51+00:00 | 2k |
HKU-BAL/ClairS-TO | src/nonsomatic_tagging.py | [
{
"identifier": "VcfReader",
"path": "shared/vcf.py",
"snippet": "class TruthStdout(object):\nclass VcfWriter(object):\nclass VcfReader(object):\n def __init__(self, handle):\n def __del__(self):\n def __init__(self,\n vcf_fn,\n ctg_name=None,\n ref_fn=None,\n sample_name=\"SAMPLE\",\n write_header=True,\n header=None,\n cmdline=None,\n show_ref_calls=False):\n def close(self):\n def write_header(self, ctg_name=None, ref_fn=None, header=None, cmdline=None):\n def write_row(self,\n POS=None,\n REF=None,\n ALT=None,\n QUAL=0,\n GT='0/0',\n DP=0,\n AF=0,\n AD=None,\n CHROM=None,\n GQ=None,\n ID='.',\n FILTER=\".\",\n INFO='.',\n TAF=None,\n VT=None,\n TDP=None,\n AU=None,\n CU=None,\n GU=None,\n TU=None,\n row_str=None):\n def __init__(self, vcf_fn,\n ctg_name=None,\n ctg_start=None,\n ctg_end=None,\n is_var_format=False,\n is_happy_format=False,\n is_fp=None,\n show_ref=True,\n direct_open=False,\n keep_row_str=False,\n skip_genotype=False,\n filter_tag=None,\n taf_filter=None,\n save_header=False,\n min_qual=None,\n max_qual=None,\n discard_indel=False,\n keep_af=False):\n def read_vcf(self):\n def get_alt_info(self, pos, extra_info=\"\"):\n GQ = GQ if GQ else QUAL\n CHROM = CHROM if CHROM else self.ctg_name\n FORMAT = \"GT:GQ:DP:AF\"\n FORMAT_V = \"%s:%.4f:%d:%.4f\" % (GT, GQ, DP, AF)\n FILTER = columns[6] if len(columns) >= 7 else None"
},
{
"identifier": "str2bool",
"path": "shared/utils.py",
"snippet": "def str2bool(v):\n if v is None:\n return v\n if isinstance(v, bool):\n return v\n if v.lower() in ('yes', 'ture', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'flase', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')"
},
{
"identifier": "str_none",
"path": "shared/utils.py",
"snippet": "def str_none(v):\n if v is None:\n return None\n if v.upper() == \"NONE\":\n return None\n if isinstance(v, str):\n return v"
},
{
"identifier": "reference_sequence_from",
"path": "shared/utils.py",
"snippet": "def reference_sequence_from(samtools_execute_command, fasta_file_path, regions):\n refernce_sequences = []\n region_value_for_faidx = \" \".join(regions)\n\n samtools_faidx_process = subprocess_popen(\n shlex.split(\"{} faidx {} {}\".format(samtools_execute_command, fasta_file_path, region_value_for_faidx))\n )\n while True:\n row = samtools_faidx_process.stdout.readline()\n is_finish_reading_output = row == '' and samtools_faidx_process.poll() is not None\n if is_finish_reading_output:\n break\n if row:\n refernce_sequences.append(row.rstrip())\n\n # first line is reference name \">xxxx\", need to be ignored\n reference_sequence = \"\".join(refernce_sequences[1:])\n\n # uppercase for masked sequences\n reference_sequence = reference_sequence.upper()\n\n samtools_faidx_process.stdout.close()\n samtools_faidx_process.wait()\n if samtools_faidx_process.returncode != 0:\n return None\n\n return reference_sequence"
},
{
"identifier": "subprocess_popen",
"path": "shared/utils.py",
"snippet": "def subprocess_popen(args, stdin=None, stdout=PIPE, stderr=stderr, bufsize=8388608):\n return Popen(args, stdin=stdin, stdout=stdout, stderr=stderr, bufsize=bufsize, universal_newlines=True)"
}
] | import os
import shlex
from argparse import ArgumentParser, SUPPRESS
from collections import defaultdict
from shared.vcf import VcfReader, VcfWriter, Position
from shared.utils import str2bool, str_none, reference_sequence_from, subprocess_popen | 1,163 |
major_contigs_order = ["chr" + str(a) for a in list(range(1, 23)) + ["X", "Y"]] + [str(a) for a in
list(range(1, 23)) + ["X", "Y"]]
class VcfReader_Database(object):
def __init__(self, vcf_fn,
ctg_name=None,
direct_open=False,
keep_row_str=False,
save_header=False):
self.vcf_fn = vcf_fn
self.ctg_name = ctg_name
|
major_contigs_order = ["chr" + str(a) for a in list(range(1, 23)) + ["X", "Y"]] + [str(a) for a in
list(range(1, 23)) + ["X", "Y"]]
class VcfReader_Database(object):
def __init__(self, vcf_fn,
ctg_name=None,
direct_open=False,
keep_row_str=False,
save_header=False):
self.vcf_fn = vcf_fn
self.ctg_name = ctg_name | self.variant_dict = defaultdict(Position) | 0 | 2023-11-07 04:39:16+00:00 | 2k |
the-siesta-group/edfio | tests/test_utils.py | [
{
"identifier": "decode_edfplus_date",
"path": "edfio/_utils.py",
"snippet": "def decode_edfplus_date(date: str) -> datetime.date:\n day, month, year = date.split(\"-\")\n try:\n month_int = _MONTH_NAMES.index(month.upper()) + 1\n except ValueError:\n raise ValueError(f\"Invalid month: {month}, options: {_MONTH_NAMES}\") from None\n return datetime.date(int(year), month_int, int(day))"
},
{
"identifier": "encode_annotation_duration",
"path": "edfio/_utils.py",
"snippet": "def encode_annotation_duration(duration: float) -> str:\n if duration < 0:\n raise ValueError(f\"Annotation duration must be positive, is {duration}\")\n string = f\"{duration:.12f}\".rstrip(\"0\")\n if string[-1] == \".\":\n return string[:-1]\n return string"
},
{
"identifier": "encode_annotation_onset",
"path": "edfio/_utils.py",
"snippet": "def encode_annotation_onset(onset: float) -> str:\n string = f\"{onset:+.12f}\".rstrip(\"0\")\n if string[-1] == \".\":\n return string[:-1]\n return string"
},
{
"identifier": "encode_edfplus_date",
"path": "edfio/_utils.py",
"snippet": "def encode_edfplus_date(date: datetime.date) -> str:\n return f\"{date.day:02}-{_MONTH_NAMES[date.month - 1]}-{date.year:02}\""
},
{
"identifier": "round_float_to_8_characters",
"path": "edfio/_utils.py",
"snippet": "def round_float_to_8_characters(\n value: float,\n round_func: Callable[[float], int],\n) -> float:\n if isinstance(value, int) or value.is_integer():\n return value\n length = 8\n integer_part_length = str(value).find(\".\")\n if integer_part_length == length:\n return round_func(value)\n factor = 10 ** (length - 1 - integer_part_length)\n return round_func(value * factor) / factor"
}
] | import datetime
import math
import pytest
from edfio._utils import (
decode_edfplus_date,
encode_annotation_duration,
encode_annotation_onset,
encode_edfplus_date,
round_float_to_8_characters,
) | 1,114 |
VALID_EDFPLUS_DATE_PAIRS = (
("02-MAY-1951", datetime.date(1951, 5, 2)),
("02-DEC-1951", datetime.date(1951, 12, 2)),
("02-AUG-1951", datetime.date(1951, 8, 2)),
("02-MAY-2051", datetime.date(2051, 5, 2)),
)
@pytest.mark.parametrize(("string", "datetime_"), VALID_EDFPLUS_DATE_PAIRS)
def test_decode_edfplus_date(string: str, datetime_: datetime.date):
assert decode_edfplus_date(string) == datetime_
@pytest.mark.parametrize(("string", "datetime_"), VALID_EDFPLUS_DATE_PAIRS)
def test_encode_edfplus_date(string: str, datetime_: datetime.date):
assert encode_edfplus_date(datetime_) == string
def test_decode_edfplus_date_invalid_month_name():
with pytest.raises(ValueError, match="Invalid month"):
decode_edfplus_date("02-MAI-1951")
@pytest.mark.parametrize(
("onset", "expected"),
[
(0, "+0"),
(0.0, "+0"),
(0.1, "+0.1"),
(0.01, "+0.01"),
(0.001, "+0.001"),
(0.0001, "+0.0001"),
(0.00001, "+0.00001"),
(0.000001, "+0.000001"),
(0.0000001, "+0.0000001"),
(0.00000001, "+0.00000001"),
(0.00000000001, "+0.00000000001"),
(100000000000.0, "+100000000000"),
(-0.1, "-0.1"),
(-0.0000001, "-0.0000001"),
(-0.0000000001, "-0.0000000001"),
(-100000000000.0, "-100000000000"),
],
)
def test_encode_annotation_onset(onset: float, expected: str):
assert encode_annotation_onset(onset) == expected
@pytest.mark.parametrize(
("duration", "expected"),
[
(0, "0"),
(0.0, "0"),
(0.1, "0.1"),
(0.01, "0.01"),
(0.001, "0.001"),
(0.0001, "0.0001"),
(0.00001, "0.00001"),
(0.000001, "0.000001"),
(0.0000001, "0.0000001"),
(0.00000000001, "0.00000000001"),
(100000000000.0, "100000000000"),
],
)
def test_encode_annotation_duration(duration: float, expected: str):
|
VALID_EDFPLUS_DATE_PAIRS = (
("02-MAY-1951", datetime.date(1951, 5, 2)),
("02-DEC-1951", datetime.date(1951, 12, 2)),
("02-AUG-1951", datetime.date(1951, 8, 2)),
("02-MAY-2051", datetime.date(2051, 5, 2)),
)
@pytest.mark.parametrize(("string", "datetime_"), VALID_EDFPLUS_DATE_PAIRS)
def test_decode_edfplus_date(string: str, datetime_: datetime.date):
assert decode_edfplus_date(string) == datetime_
@pytest.mark.parametrize(("string", "datetime_"), VALID_EDFPLUS_DATE_PAIRS)
def test_encode_edfplus_date(string: str, datetime_: datetime.date):
assert encode_edfplus_date(datetime_) == string
def test_decode_edfplus_date_invalid_month_name():
with pytest.raises(ValueError, match="Invalid month"):
decode_edfplus_date("02-MAI-1951")
@pytest.mark.parametrize(
("onset", "expected"),
[
(0, "+0"),
(0.0, "+0"),
(0.1, "+0.1"),
(0.01, "+0.01"),
(0.001, "+0.001"),
(0.0001, "+0.0001"),
(0.00001, "+0.00001"),
(0.000001, "+0.000001"),
(0.0000001, "+0.0000001"),
(0.00000001, "+0.00000001"),
(0.00000000001, "+0.00000000001"),
(100000000000.0, "+100000000000"),
(-0.1, "-0.1"),
(-0.0000001, "-0.0000001"),
(-0.0000000001, "-0.0000000001"),
(-100000000000.0, "-100000000000"),
],
)
def test_encode_annotation_onset(onset: float, expected: str):
assert encode_annotation_onset(onset) == expected
@pytest.mark.parametrize(
("duration", "expected"),
[
(0, "0"),
(0.0, "0"),
(0.1, "0.1"),
(0.01, "0.01"),
(0.001, "0.001"),
(0.0001, "0.0001"),
(0.00001, "0.00001"),
(0.000001, "0.000001"),
(0.0000001, "0.0000001"),
(0.00000000001, "0.00000000001"),
(100000000000.0, "100000000000"),
],
)
def test_encode_annotation_duration(duration: float, expected: str): | assert encode_annotation_duration(duration) == expected | 1 | 2023-11-09 09:53:27+00:00 | 2k |
microsoft/folx | folx/operators.py | [
{
"identifier": "Array",
"path": "folx/api.py",
"snippet": "T = TypeVar(\"T\", bound=PyTree[Array])\nR = TypeVar(\"R\", bound=PyTree[Array])\nJAC_DIM = 0 # should be either 0 or -1. TODO: switching is not support.\n GENERAL = 0\n LINEAR_IN_FIRST = 1\n LINEAR_IN_ONE = 2 | LINEAR_IN_FIRST\n LINEAR = 4 | LINEAR_IN_ONE\n REDUCTION = 8\n MULTIPLICATION = 16 | LINEAR_IN_ONE\n DOT_PRODUCT = 32 | REDUCTION | MULTIPLICATION\n INDEXING = 64 | LINEAR\n SCATTER = 128\n JOIN_JVP = 256\nclass FwdJacobian(NamedTuple):\nclass FwdLaplArray(NamedTuple):\nclass FwdLaplArgs(NamedTuple):\nclass MergeFn(Protocol):\nclass ForwardLaplacianFns(NamedTuple):\nclass JvpFn(Protocol):\nclass CustomTraceJacHessianJac(Protocol):\nclass ForwardLaplacian(Protocol):\nclass FunctionFlags(IntFlag):\n def weak(self) -> bool:\n def unique_idx(self):\n def materialize_for_idx(self, idx, max_idx: int | None = None):\n def aggregate(x, indices):\n def get_index_mask(self, outputs):\n def get_indices(mask, out_mask):\n def data_shape(self):\n def construct_jac_for(self, idx):\n def dense_array(self) -> Array:\n def max_n(self) -> int:\n def as_dense(self):\n def dense_or_sparse(self) -> Array:\n def sparse(self) -> Array:\n def mask(self) -> np.ndarray:\n def ndim(self) -> int:\n def from_dense(cls, array):\n def __add__(self, other):\n def astype(self, dtype):\n def shape(self):\n def ndim(self):\n def dense_jacobian(self):\n def is_jacobian_weak(self):\n def sparse_jacobian(self):\n def jacobian_mask(self):\n def dense(self):\n def astype(self, dtype):\ndef IS_LPL_ARR(x):\ndef IS_LEAF(x):\n def x(self) -> Arrays:\n def jacobian(self) -> tuple[FwdJacobian, ...]:\n def dense_jacobian(self) -> Arrays:\n def sparse_jacobian(self) -> Arrays:\n def jacobian_mask(self):\n def all_jacobian_weak(self) -> bool:\n def any_jacobian_weak(self) -> bool:\n def dense(self):\n def laplacian(self) -> Arrays:\n def one_hot_sparse_jacobian(self):\n def __len__(self) -> int:\n def __call__(self, args: Arrays, extra: ExtraArgs) -> Arrays:\n def __call__(self, primals: Arrays, tangents: Arrays) -> tuple[Array, Array]:\n def __call__(self, args: FwdLaplArgs, extra_args: ExtraArgs, merge: MergeFn, materialize_idx: Array) -> PyTree[Array]:\n def __call__(self, *args: ArrayOrFwdLaplArray, sparsity_threshold: int , **kwargs) -> PyTree[ArrayOrFwdLaplArray]:"
},
{
"identifier": "forward_laplacian",
"path": "folx/interpreter.py",
"snippet": "def forward_laplacian(\n fn: Callable[P, PyTree[Array]],\n sparsity_threshold: int | float = 0,\n disable_jit: bool = False\n) -> Callable[P, PyTree[FwdLaplArray]]:\n \"\"\"\n This function takes a function and returns a function that computes the Laplacian of the function.\n The returned function will be jitted by default as running it in eager execution will typically be a lot slower.\n\n Args:\n - fn: function to compute the Laplacian of\n - sparsity_threshold: threshold for sparsity propagation.\n If the number of non-zero elements in the input is larger than this threshold,we will not propagate sparsity.\n If the value is between 0 and 1, it will be interpreted as a fraction of the total number of elements.\n If the value is larger than 1, it will be interpreted as an absolute number of elements.\n If enabling sparsity, we recommend relatively large values like 0.6 as frequent materializations are slow.\n \"\"\"\n def wrapped(*args: P.args, **kwargs: P.kwargs):\n closed_jaxpr = jax.make_jaxpr(fn)(*args, **kwargs)\n flat_args = jtu.tree_leaves(args)\n if 0 < sparsity_threshold < 1:\n threshold = int(sparsity_threshold * sum(x.size for x in flat_args))\n else:\n threshold = int(sparsity_threshold)\n lapl_args = init_forward_laplacian_state(*flat_args, sparsity=threshold > 0)\n out = eval_jaxpr_with_forward_laplacian(\n closed_jaxpr.jaxpr, closed_jaxpr.literals, *lapl_args, sparsity_threshold=threshold\n )\n out_structure = jtu.tree_structure(jax.eval_shape(fn, *args, **kwargs))\n return out_structure.unflatten(out)\n \n if disable_jit:\n return wrapped\n\n return jax.jit(wrapped) # type: ignore"
}
] | from dataclasses import dataclass
from typing import Callable, Protocol
from .api import Array
from .interpreter import forward_laplacian
import jax
import jax.numpy as jnp | 1,332 |
__all__ = [
"Laplacian",
"LaplacianOperator",
"ForwardLaplacianOperator",
"LoopLaplacianOperator",
"ParallelLaplacianOperator",
]
class Laplacian(Protocol):
|
__all__ = [
"Laplacian",
"LaplacianOperator",
"ForwardLaplacianOperator",
"LoopLaplacianOperator",
"ParallelLaplacianOperator",
]
class Laplacian(Protocol): | def __call__(self, x: Array) -> tuple[Array, Array]: | 0 | 2023-11-07 16:32:46+00:00 | 2k |
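The Laplacian protocol above maps an input array to a pair of arrays. A dense reference version of that contract, the gradient alongside the trace of the Hessian, can be written in plain JAX; whether folx returns exactly this pair is an assumption here, the point is only what the operator computes.
import jax
import jax.numpy as jnp

def dense_laplacian(f):
    # Reference operator: gradient plus trace of the Hessian of a scalar function.
    def wrapped(x):
        grad = jax.grad(f)(x)
        lapl = jnp.trace(jax.hessian(f)(x))
        return grad, lapl
    return wrapped

grad, lapl = dense_laplacian(lambda x: jnp.sum(jnp.sin(x)))(jnp.ones(3))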
shuttworth/NICE-SLAM-Easyread | visualizer.py | [
{
"identifier": "config",
"path": "src/config.py",
"snippet": "def load_config(path, default_path=None):\ndef update_recursive(dict1, dict2):\ndef get_model(cfg, nice=True):"
},
{
"identifier": "SLAMFrontend",
"path": "src/tools/viz.py",
"snippet": "class SLAMFrontend:\n def __init__(self, output, init_pose, cam_scale=1, save_rendering=False,\n near=0, estimate_c2w_list=None, gt_c2w_list=None):\n self.queue = Queue()\n self.p = Process(target=draw_trajectory, args=(\n self.queue, output, init_pose, cam_scale, save_rendering,\n near, estimate_c2w_list, gt_c2w_list))\n\n def update_pose(self, index, pose, gt=False):\n if isinstance(pose, torch.Tensor):\n pose = pose.cpu().numpy()\n\n pose[:3, 2] *= -1\n self.queue.put_nowait(('pose', index, pose, gt))\n \n def update_mesh(self, path):\n self.queue.put_nowait(('mesh', path))\n\n def update_cam_trajectory(self, c2w_list, gt):\n self.queue.put_nowait(('traj', c2w_list, gt))\n\n def reset(self):\n self.queue.put_nowait(('reset', ))\n\n def start(self):\n self.p.start()\n return self\n\n def join(self):\n self.p.join()"
},
{
"identifier": "get_dataset",
"path": "src/utils/datasets.py",
"snippet": "def get_dataset(cfg, args, scale, device='cuda:0'):\n return dataset_dict[cfg['dataset']](cfg, args, scale, device=device)"
}
] | import argparse
import os
import time
import numpy as np
import torch
import cv2
from tqdm import tqdm
from torch.utils.data import DataLoader
from src import config
from src.tools.viz import SLAMFrontend
from src.utils.datasets import get_dataset | 985 |
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Arguments to visualize the SLAM process.'
)
parser.add_argument('config', type=str, help='Path to config file.')
parser.add_argument('--input_folder', type=str,
                        help='input folder, this has higher priority, can overwrite the one in config file')
parser.add_argument('--output', type=str,
                        help='output folder, this has higher priority, can overwrite the one in config file')
nice_parser = parser.add_mutually_exclusive_group(required=False)
nice_parser.add_argument('--nice', dest='nice', action='store_true')
nice_parser.add_argument('--imap', dest='nice', action='store_false')
parser.set_defaults(nice=True)
parser.add_argument('--save_rendering',
action='store_true', help='save rendering video to `vis.mp4` in output folder ')
parser.add_argument('--vis_input_frame',
action='store_true', help='visualize input frames')
parser.add_argument('--no_gt_traj',
action='store_true', help='not visualize gt trajectory')
args = parser.parse_args()
cfg = config.load_config(
args.config, 'configs/nice_slam.yaml' if args.nice else 'configs/imap.yaml')
scale = cfg['scale']
output = cfg['data']['output'] if args.output is None else args.output
if args.vis_input_frame:
frame_reader = get_dataset(cfg, args, scale, device='cpu')
frame_loader = DataLoader(
frame_reader, batch_size=1, shuffle=False, num_workers=4)
ckptsdir = f'{output}/ckpts'
if os.path.exists(ckptsdir):
ckpts = [os.path.join(ckptsdir, f)
for f in sorted(os.listdir(ckptsdir)) if 'tar' in f]
if len(ckpts) > 0:
ckpt_path = ckpts[-1]
print('Get ckpt :', ckpt_path)
ckpt = torch.load(ckpt_path, map_location=torch.device('cpu'))
estimate_c2w_list = ckpt['estimate_c2w_list']
gt_c2w_list = ckpt['gt_c2w_list']
N = ckpt['idx']
estimate_c2w_list[:, :3, 3] /= scale
gt_c2w_list[:, :3, 3] /= scale
estimate_c2w_list = estimate_c2w_list.cpu().numpy()
gt_c2w_list = gt_c2w_list.cpu().numpy()
|
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Arguments to visualize the SLAM process.'
)
parser.add_argument('config', type=str, help='Path to config file.')
parser.add_argument('--input_folder', type=str,
                        help='input folder, this has higher priority, can overwrite the one in config file')
parser.add_argument('--output', type=str,
                        help='output folder, this has higher priority, can overwrite the one in config file')
nice_parser = parser.add_mutually_exclusive_group(required=False)
nice_parser.add_argument('--nice', dest='nice', action='store_true')
nice_parser.add_argument('--imap', dest='nice', action='store_false')
parser.set_defaults(nice=True)
parser.add_argument('--save_rendering',
action='store_true', help='save rendering video to `vis.mp4` in output folder ')
parser.add_argument('--vis_input_frame',
action='store_true', help='visualize input frames')
parser.add_argument('--no_gt_traj',
action='store_true', help='not visualize gt trajectory')
args = parser.parse_args()
cfg = config.load_config(
args.config, 'configs/nice_slam.yaml' if args.nice else 'configs/imap.yaml')
scale = cfg['scale']
output = cfg['data']['output'] if args.output is None else args.output
if args.vis_input_frame:
frame_reader = get_dataset(cfg, args, scale, device='cpu')
frame_loader = DataLoader(
frame_reader, batch_size=1, shuffle=False, num_workers=4)
ckptsdir = f'{output}/ckpts'
if os.path.exists(ckptsdir):
ckpts = [os.path.join(ckptsdir, f)
for f in sorted(os.listdir(ckptsdir)) if 'tar' in f]
if len(ckpts) > 0:
ckpt_path = ckpts[-1]
print('Get ckpt :', ckpt_path)
ckpt = torch.load(ckpt_path, map_location=torch.device('cpu'))
estimate_c2w_list = ckpt['estimate_c2w_list']
gt_c2w_list = ckpt['gt_c2w_list']
N = ckpt['idx']
estimate_c2w_list[:, :3, 3] /= scale
gt_c2w_list[:, :3, 3] /= scale
estimate_c2w_list = estimate_c2w_list.cpu().numpy()
gt_c2w_list = gt_c2w_list.cpu().numpy()
| frontend = SLAMFrontend(output, init_pose=estimate_c2w_list[0], cam_scale=0.3, | 1 | 2023-11-07 05:09:36+00:00 | 2k |
mileswyn/SAMIHS | models/segment_anything/modeling/image_encoder.py | [
{
"identifier": "LayerNorm2d",
"path": "models/segment_anything/modeling/common.py",
"snippet": "class LayerNorm2d(nn.Module):\n def __init__(self, num_channels: int, eps: float = 1e-6) -> None:\n super().__init__()\n self.weight = nn.Parameter(torch.ones(num_channels))\n self.bias = nn.Parameter(torch.zeros(num_channels))\n self.eps = eps\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n u = x.mean(1, keepdim=True)\n s = (x - u).pow(2).mean(1, keepdim=True)\n x = (x - u) / torch.sqrt(s + self.eps)\n x = self.weight[:, None, None] * x + self.bias[:, None, None]\n return x"
},
{
"identifier": "MLPBlock",
"path": "models/segment_anything/modeling/common.py",
"snippet": "class MLPBlock(nn.Module):\n def __init__(\n self,\n embedding_dim: int,\n mlp_dim: int,\n act: Type[nn.Module] = nn.GELU,\n ) -> None:\n super().__init__()\n self.lin1 = nn.Linear(embedding_dim, mlp_dim)\n self.lin2 = nn.Linear(mlp_dim, embedding_dim)\n self.act = act()\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n return self.lin2(self.act(self.lin1(x)))"
}
] | import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Optional, Tuple, Type
from .common import LayerNorm2d, MLPBlock | 1,138 | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# This class and its supporting functions below lightly adapted from the ViTDet backbone available at: https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/backbone/vit.py # noqa
class ImageEncoderViT(nn.Module):
def __init__(
self,
img_size: int = 1024,
patch_size: int = 16,
in_chans: int = 3,
embed_dim: int = 768,
depth: int = 12,
num_heads: int = 12,
mlp_ratio: float = 4.0,
out_chans: int = 256,
qkv_bias: bool = True,
norm_layer: Type[nn.Module] = nn.LayerNorm,
act_layer: Type[nn.Module] = nn.GELU,
use_abs_pos: bool = True,
use_rel_pos: bool = False,
rel_pos_zero_init: bool = True,
window_size: int = 0,
global_attn_indexes: Tuple[int, ...] = (),
) -> None:
"""
Args:
img_size (int): Input image size.
patch_size (int): Patch size.
in_chans (int): Number of input image channels.
embed_dim (int): Patch embedding dimension.
depth (int): Depth of ViT.
num_heads (int): Number of attention heads in each ViT block.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool): If True, add a learnable bias to query, key, value.
norm_layer (nn.Module): Normalization layer.
act_layer (nn.Module): Activation layer.
use_abs_pos (bool): If True, use absolute positional embeddings.
use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
window_size (int): Window size for window attention blocks.
global_attn_indexes (list): Indexes for blocks using global attention.
"""
super().__init__()
self.img_size = img_size
self.patch_embed = PatchEmbed(
kernel_size=(patch_size, patch_size),
stride=(patch_size, patch_size),
in_chans=in_chans,
embed_dim=embed_dim,
)
self.pos_embed: Optional[nn.Parameter] = None
if use_abs_pos:
# Initialize absolute positional embedding with pretrain image size.
self.pos_embed = nn.Parameter(
torch.zeros(1, img_size // patch_size, img_size // patch_size, embed_dim)
)
self.blocks = nn.ModuleList()
for i in range(depth):
block = Block(
dim=embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
norm_layer=norm_layer,
act_layer=act_layer,
use_rel_pos=use_rel_pos,
rel_pos_zero_init=rel_pos_zero_init,
window_size=window_size if i not in global_attn_indexes else 0,
input_size=(img_size // patch_size, img_size // patch_size),
)
self.blocks.append(block)
self.neck = nn.Sequential(
nn.Conv2d(
embed_dim,
out_chans,
kernel_size=1,
bias=False,
),
| # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# This class and its supporting functions below lightly adapted from the ViTDet backbone available at: https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/backbone/vit.py # noqa
class ImageEncoderViT(nn.Module):
def __init__(
self,
img_size: int = 1024,
patch_size: int = 16,
in_chans: int = 3,
embed_dim: int = 768,
depth: int = 12,
num_heads: int = 12,
mlp_ratio: float = 4.0,
out_chans: int = 256,
qkv_bias: bool = True,
norm_layer: Type[nn.Module] = nn.LayerNorm,
act_layer: Type[nn.Module] = nn.GELU,
use_abs_pos: bool = True,
use_rel_pos: bool = False,
rel_pos_zero_init: bool = True,
window_size: int = 0,
global_attn_indexes: Tuple[int, ...] = (),
) -> None:
"""
Args:
img_size (int): Input image size.
patch_size (int): Patch size.
in_chans (int): Number of input image channels.
embed_dim (int): Patch embedding dimension.
depth (int): Depth of ViT.
num_heads (int): Number of attention heads in each ViT block.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool): If True, add a learnable bias to query, key, value.
norm_layer (nn.Module): Normalization layer.
act_layer (nn.Module): Activation layer.
use_abs_pos (bool): If True, use absolute positional embeddings.
use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
window_size (int): Window size for window attention blocks.
global_attn_indexes (list): Indexes for blocks using global attention.
"""
super().__init__()
self.img_size = img_size
self.patch_embed = PatchEmbed(
kernel_size=(patch_size, patch_size),
stride=(patch_size, patch_size),
in_chans=in_chans,
embed_dim=embed_dim,
)
self.pos_embed: Optional[nn.Parameter] = None
if use_abs_pos:
# Initialize absolute positional embedding with pretrain image size.
self.pos_embed = nn.Parameter(
torch.zeros(1, img_size // patch_size, img_size // patch_size, embed_dim)
)
self.blocks = nn.ModuleList()
for i in range(depth):
block = Block(
dim=embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
norm_layer=norm_layer,
act_layer=act_layer,
use_rel_pos=use_rel_pos,
rel_pos_zero_init=rel_pos_zero_init,
window_size=window_size if i not in global_attn_indexes else 0,
input_size=(img_size // patch_size, img_size // patch_size),
)
self.blocks.append(block)
self.neck = nn.Sequential(
nn.Conv2d(
embed_dim,
out_chans,
kernel_size=1,
bias=False,
), | LayerNorm2d(out_chans), | 0 | 2023-11-09 07:26:33+00:00 | 2k |
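A quick shape check for the defaults in this constructor (img_size=1024, patch_size=16, embed_dim=768): the absolute positional embedding is allocated on a 64x64 token grid. The snippet below only reproduces that arithmetic, it does not instantiate the class.
import torch

img_size, patch_size, embed_dim = 1024, 16, 768
tokens_per_side = img_size // patch_size                      # 64 patches per side
pos_embed = torch.zeros(1, tokens_per_side, tokens_per_side, embed_dim)
print(pos_embed.shape)                                        # torch.Size([1, 64, 64, 768])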
AlexandrErohin/home-assistant-tplink-router | custom_components/tplink_router/switch.py | [
{
"identifier": "DOMAIN",
"path": "custom_components/tplink_router/const.py",
"snippet": "DOMAIN = \"tplink_router\""
},
{
"identifier": "TPLinkRouterCoordinator",
"path": "custom_components/tplink_router/coordinator.py",
"snippet": "class TPLinkRouterCoordinator(DataUpdateCoordinator):\n def __init__(\n self,\n hass: HomeAssistant,\n router: TplinkRouter,\n update_interval: int,\n info: tuple[Firmware, Status],\n logger,\n ) -> None:\n self.router = router\n self.firmware = info[0]\n self.status = info[1]\n self.device_info = DeviceInfo(\n configuration_url=router.host,\n connections={(CONNECTION_NETWORK_MAC, self.status.macaddr)},\n identifiers={(DOMAIN, self.status.macaddr)},\n manufacturer=\"TPLink\",\n model=self.firmware.model,\n name=\"TPLinkRouter\",\n sw_version=self.firmware.firmware_version,\n hw_version=self.firmware.hardware_version,\n )\n\n super().__init__(\n hass,\n logger,\n name=DOMAIN,\n update_interval=timedelta(seconds=update_interval),\n )\n\n async def reboot(self) -> None:\n await self.hass.async_add_executor_job(self.router.reboot)\n\n async def set_wifi(self, wifi: Wifi, enable: bool) -> None:\n await self.hass.async_add_executor_job(self.router.set_wifi, wifi, enable)\n\n async def _async_update_data(self):\n \"\"\"Asynchronous update of all data.\"\"\"\n self.status = await self.hass.async_add_executor_job(self.router.get_status)"
}
] | from collections.abc import Callable
from dataclasses import dataclass
from typing import Any
from homeassistant.components.switch import SwitchEntity, SwitchEntityDescription
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import EntityCategory
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .const import DOMAIN
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .coordinator import TPLinkRouterCoordinator
from tplinkrouterc6u import Wifi, Status
from homeassistant.helpers.device_registry import DeviceInfo | 1,050 | from __future__ import annotations
@dataclass
class TPLinkRouterSwitchEntityDescriptionMixin:
method: Callable[[TPLinkRouterCoordinator, bool], Any]
property: str
@dataclass
class TPLinkRouterSwitchEntityDescription(SwitchEntityDescription, TPLinkRouterSwitchEntityDescriptionMixin):
"""A class that describes sensor entities."""
SWITCH_TYPES = (
TPLinkRouterSwitchEntityDescription(
key="wifi_guest_24g",
name="Guest WIFI 2.4G",
icon="mdi:wifi",
entity_category=EntityCategory.CONFIG,
property='guest_2g_enable',
method=lambda coordinator, value: coordinator.set_wifi(Wifi.WIFI_GUEST_2G, value),
),
TPLinkRouterSwitchEntityDescription(
key="wifi_guest_5g",
name="Guest WIFI 5G",
icon="mdi:wifi",
entity_category=EntityCategory.CONFIG,
property='guest_5g_enable',
method=lambda coordinator, value: coordinator.set_wifi(Wifi.WIFI_GUEST_5G, value),
),
TPLinkRouterSwitchEntityDescription(
key="wifi_24g",
name="WIFI 2.4G",
icon="mdi:wifi",
entity_category=EntityCategory.CONFIG,
property='wifi_2g_enable',
method=lambda coordinator, value: coordinator.set_wifi(Wifi.WIFI_2G, value),
),
TPLinkRouterSwitchEntityDescription(
key="wifi_5g",
name="WIFI 5G",
icon="mdi:wifi",
entity_category=EntityCategory.CONFIG,
property='wifi_5g_enable',
method=lambda coordinator, value: coordinator.set_wifi(Wifi.WIFI_5G, value),
),
TPLinkRouterSwitchEntityDescription(
key="iot_24g",
name="IoT WIFI 2.4G",
icon="mdi:wifi",
entity_category=EntityCategory.CONFIG,
property='iot_2g_enable',
method=lambda coordinator, value: coordinator.set_wifi(Wifi.WIFI_IOT_2G, value),
),
TPLinkRouterSwitchEntityDescription(
key="iot_5g",
name="IoT WIFI 5G",
icon="mdi:wifi",
entity_category=EntityCategory.CONFIG,
property='iot_5g_enable',
method=lambda coordinator, value: coordinator.set_wifi(Wifi.WIFI_IOT_5G, value),
),
)
async def async_setup_entry(
hass: HomeAssistant,
entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
| from __future__ import annotations
@dataclass
class TPLinkRouterSwitchEntityDescriptionMixin:
method: Callable[[TPLinkRouterCoordinator, bool], Any]
property: str
@dataclass
class TPLinkRouterSwitchEntityDescription(SwitchEntityDescription, TPLinkRouterSwitchEntityDescriptionMixin):
"""A class that describes sensor entities."""
SWITCH_TYPES = (
TPLinkRouterSwitchEntityDescription(
key="wifi_guest_24g",
name="Guest WIFI 2.4G",
icon="mdi:wifi",
entity_category=EntityCategory.CONFIG,
property='guest_2g_enable',
method=lambda coordinator, value: coordinator.set_wifi(Wifi.WIFI_GUEST_2G, value),
),
TPLinkRouterSwitchEntityDescription(
key="wifi_guest_5g",
name="Guest WIFI 5G",
icon="mdi:wifi",
entity_category=EntityCategory.CONFIG,
property='guest_5g_enable',
method=lambda coordinator, value: coordinator.set_wifi(Wifi.WIFI_GUEST_5G, value),
),
TPLinkRouterSwitchEntityDescription(
key="wifi_24g",
name="WIFI 2.4G",
icon="mdi:wifi",
entity_category=EntityCategory.CONFIG,
property='wifi_2g_enable',
method=lambda coordinator, value: coordinator.set_wifi(Wifi.WIFI_2G, value),
),
TPLinkRouterSwitchEntityDescription(
key="wifi_5g",
name="WIFI 5G",
icon="mdi:wifi",
entity_category=EntityCategory.CONFIG,
property='wifi_5g_enable',
method=lambda coordinator, value: coordinator.set_wifi(Wifi.WIFI_5G, value),
),
TPLinkRouterSwitchEntityDescription(
key="iot_24g",
name="IoT WIFI 2.4G",
icon="mdi:wifi",
entity_category=EntityCategory.CONFIG,
property='iot_2g_enable',
method=lambda coordinator, value: coordinator.set_wifi(Wifi.WIFI_IOT_2G, value),
),
TPLinkRouterSwitchEntityDescription(
key="iot_5g",
name="IoT WIFI 5G",
icon="mdi:wifi",
entity_category=EntityCategory.CONFIG,
property='iot_5g_enable',
method=lambda coordinator, value: coordinator.set_wifi(Wifi.WIFI_IOT_5G, value),
),
)
async def async_setup_entry(
hass: HomeAssistant,
entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None: | coordinator = hass.data[DOMAIN][entry.entry_id] | 0 | 2023-11-09 17:38:33+00:00 | 2k |
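The switch descriptions above combine a dataclass mixin (method/property) with Home Assistant's SwitchEntityDescription so every required field stays mandatory. A stripped-down, Home-Assistant-free version of the same pattern, with placeholder class names:
from dataclasses import dataclass
from typing import Any, Callable

@dataclass
class SwitchDescriptionMixin:
    method: Callable[[Any, bool], Any]
    property: str

@dataclass
class SwitchDescription:
    key: str
    name: str

@dataclass
class RouterSwitchDescription(SwitchDescription, SwitchDescriptionMixin):
    """Mixin fields come first in the generated signature; all of them stay required."""

desc = RouterSwitchDescription(
    key="wifi_24g",
    name="WIFI 2.4G",
    property="wifi_2g_enable",
    method=lambda coordinator, value: coordinator.set_wifi("2g", value),
)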
DaveParr/starpilot | tests/test_utils.py | [
{
"identifier": "get_repo_contents",
"path": "starpilot/utils/utils.py",
"snippet": "def get_repo_contents(\n repos: List[Repository], g: Github, include_readmes: bool = False\n) -> List[Dict]:\n repo_infos = []\n for repo in track(repos, description=\"Reading the stars...\"):\n repo_info = {}\n repo_slug = repo.full_name\n repo_info[\"id\"] = repo_slug\n repo_info[\"name\"] = repo.name\n repo_info[\"url\"] = repo.html_url\n\n if (owner := repo.owner.name) is not None:\n repo_info[\"owner\"] = owner\n\n if (repo.organization) is not None:\n if (organization := repo.organization.name) is not None:\n repo_info[\"organization\"] = organization\n else:\n logger.info(\"No organization name\", repo=repo_slug)\n\n # get the repo languages\n repo_info[\"languages\"] = []\n for language in repo.get_languages():\n repo_info[\"languages\"].append(language)\n\n if len(repo_info[\"languages\"]) == 0:\n logger.info(\"No languages\", repo=repo_slug)\n\n if (description := repo.description) is not None:\n repo_info[\"description\"] = description\n else:\n logger.info(\"No description\", repo=repo_slug)\n\n if not (topics := repo.get_topics()) == []:\n repo_info[\"topics\"] = topics\n\n if include_readmes:\n repo_info[\"readme\"] = {}\n try:\n readme = repo.get_contents(\"README.md\")\n repo_info[\"readme\"][\"type\"] = \"md\"\n repo_info[\"readme\"][\"content\"] = readme.decoded_content.decode(\"utf-8\")\n except UnknownObjectException:\n try:\n readme = repo.get_contents(\"README.rst\")\n repo_info[\"readme\"][\"type\"] = \"rst\"\n repo_info[\"readme\"][\"content\"] = readme.decoded_content.decode(\n \"utf-8\"\n )\n except UnknownObjectException:\n continue\n\n repo_info[\"vectorstore_document\"] = []\n\n # use description as main content, and include topics and languages only if present\n\n if repo_info.get(\"description\"):\n content = {\n \"name\": repo_info.get(\"name\"),\n \"description\": repo_info.get(\"description\"),\n }\n if repo_info.get(\"topics\"):\n content[\"topics\"] = repo_info.get(\"topics\")\n if repo_info.get(\"languages\"):\n content[\"languages\"] = repo_info.get(\"languages\")\n\n repo_info[\"vectorstore_document\"].append(\n {\n # use description as content, and topics and languages if present\n \"content\": content,\n \"url\": repo_info.get(\"url\"),\n \"description\": repo_info.get(\"description\"),\n \"name\": repo_info.get(\"name\"),\n \"topics\": repo_info.get(\"topics\"),\n \"languages\": repo_info.get(\"languages\"),\n }\n )\n\n repo_infos.append(repo_info)\n logger.debug(\"Using repo\", repo=repo_slug)\n else:\n logger.warning(\"Repo has no relevant information to use\", repo=repo_slug)\n\n return repo_infos"
},
{
"identifier": "get_user_starred_repos",
"path": "starpilot/utils/utils.py",
"snippet": "def get_user_starred_repos(\n user: str, g: Github, num_repos: Optional[int] = None\n) -> List[Repository]:\n \"\"\"\n Get the starred repos for a user\n\n If there is no github api key set, this will start to work, but will be rapidly rate limited.\n \"\"\"\n starred_repos = []\n for repo in track(\n g.get_user(user).get_starred(), description=\"Spotting the stars...\"\n ):\n starred_repos.append(repo)\n\n # IDEA: there could be a threshold for star count below which repos are removed\n starred_repos.sort(key=lambda repo: repo.stargazers_count, reverse=True)\n\n if num_repos is not None:\n starred_repos = starred_repos[:num_repos]\n\n return starred_repos"
}
] | from unittest.mock import Mock
from starpilot.utils.utils import get_repo_contents, get_user_starred_repos
import pytest
import os
import github | 1,103 |
def test_get_user_starred_repos_mocked():
# Mock the necessary objects
class MockRepo:
def __init__(self, stargazers_count):
self.stargazers_count = stargazers_count
class MockUser:
def get_starred(self):
return [MockRepo(10), MockRepo(5), MockRepo(8), MockRepo(3), MockRepo(7)]
class MockGithub:
def get_user(self, user):
return MockUser()
# Call the function under test
|
def test_get_user_starred_repos_mocked():
# Mock the necessary objects
class MockRepo:
def __init__(self, stargazers_count):
self.stargazers_count = stargazers_count
class MockUser:
def get_starred(self):
return [MockRepo(10), MockRepo(5), MockRepo(8), MockRepo(3), MockRepo(7)]
class MockGithub:
def get_user(self, user):
return MockUser()
# Call the function under test | result = get_user_starred_repos("testuser", MockGithub(), num_repos=3) | 1 | 2023-11-07 20:03:08+00:00 | 2k |
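Per the get_user_starred_repos snippet in the context, the mock above should yield the three most-starred repos. The selection logic reduces to a sort-and-slice, shown standalone:
star_counts = [10, 5, 8, 3, 7]            # stargazers_count of the five mock repos
top_three = sorted(star_counts, reverse=True)[:3]
assert top_three == [10, 8, 7]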
xarray-contrib/xdggs | xdggs/h3.py | [
{
"identifier": "DGGSIndex",
"path": "xdggs/index.py",
"snippet": "class DGGSIndex(Index):\n _dim: str\n _pd_index: PandasIndex\n\n def __init__(self, cell_ids: Any | PandasIndex, dim: str):\n self._dim = dim\n\n if isinstance(cell_ids, PandasIndex):\n self._pd_index = cell_ids\n else:\n self._pd_index = PandasIndex(cell_ids, dim)\n\n @classmethod\n def from_variables(\n cls: type[\"DGGSIndex\"],\n variables: Mapping[Any, xr.Variable],\n *,\n options: Mapping[str, Any],\n ) -> \"DGGSIndex\":\n _, var, _ = _extract_cell_id_variable(variables)\n\n grid_name = var.attrs[\"grid_name\"]\n cls = GRID_REGISTRY[grid_name]\n\n return cls.from_variables(variables, options=options)\n\n def create_variables(\n self, variables: Mapping[Any, xr.Variable] | None = None\n ) -> dict[Hashable, xr.Variable]:\n return self._pd_index.create_variables(variables)\n\n def isel(\n self: \"DGGSIndex\", indexers: Mapping[Any, int | np.ndarray | xr.Variable]\n ) -> Union[\"DGGSIndex\", None]:\n new_pd_index = self._pd_index.isel(indexers)\n if new_pd_index is not None:\n return self._replace(new_pd_index)\n else:\n return None\n\n def sel(self, labels, method=None, tolerance=None):\n if method == \"nearest\":\n raise ValueError(\"finding nearest grid cell has no meaning\")\n return self._pd_index.sel(labels, method=method, tolerance=tolerance)\n\n def _replace(self, new_pd_index: PandasIndex):\n raise NotImplementedError()\n\n def _latlon2cellid(self, lat: Any, lon: Any) -> np.ndarray:\n \"\"\"convert latitude / longitude points to cell ids.\"\"\"\n raise NotImplementedError()\n\n def _cellid2latlon(self, cell_ids: Any) -> tuple[np.ndarray, np.ndarray]:\n \"\"\"convert cell ids to latitude / longitude (cell centers).\"\"\"\n raise NotImplementedError()\n\n @property\n def cell_centers(self) -> tuple[np.ndarray, np.ndarray]:\n return self._cellid2latlon(self._pd_index.index.values)"
},
{
"identifier": "_extract_cell_id_variable",
"path": "xdggs/utils.py",
"snippet": "def _extract_cell_id_variable(variables):\n # TODO: only one variable supported (raise otherwise)\n name, var = next(iter(variables.items()))\n\n # TODO: only 1-d variable supported (raise otherwise)\n dim = next(iter(var.dims))\n\n return name, var, dim"
},
{
"identifier": "register_dggs",
"path": "xdggs/utils.py",
"snippet": "def register_dggs(name):\n def inner(cls):\n GRID_REGISTRY[name] = cls\n return cls\n\n return inner"
}
] | from collections.abc import Mapping
from typing import Any
from h3ronpy.arrow.vector import cells_to_coordinates, coordinates_to_cells
from xarray.indexes import PandasIndex
from xdggs.index import DGGSIndex
from xdggs.utils import _extract_cell_id_variable, register_dggs
import numpy as np
import xarray as xr | 883 |
@register_dggs("h3")
class H3Index(DGGSIndex):
_resolution: int
def __init__(
self,
cell_ids: Any | PandasIndex,
dim: str,
resolution: int,
):
super().__init__(cell_ids, dim)
self._resolution = int(resolution)
@classmethod
def from_variables(
cls: type["H3Index"],
variables: Mapping[Any, xr.Variable],
*,
options: Mapping[str, Any],
) -> "H3Index":
|
@register_dggs("h3")
class H3Index(DGGSIndex):
_resolution: int
def __init__(
self,
cell_ids: Any | PandasIndex,
dim: str,
resolution: int,
):
super().__init__(cell_ids, dim)
self._resolution = int(resolution)
@classmethod
def from_variables(
cls: type["H3Index"],
variables: Mapping[Any, xr.Variable],
*,
options: Mapping[str, Any],
) -> "H3Index": | _, var, dim = _extract_cell_id_variable(variables) | 1 | 2023-11-06 16:11:15+00:00 | 2k |
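from_variables above delegates to _extract_cell_id_variable, which expects a mapping with a single 1-D variable and returns its name, the variable, and its dimension. A tiny xarray illustration of that contract; the cell ids are arbitrary placeholder integers, not validated H3 values.
import xarray as xr

variables = {"cell_ids": xr.Variable("cells", [613196570331971583, 613196570339311615])}
name, var = next(iter(variables.items()))
dim = next(iter(var.dims))
print(name, dim)   # cell_ids cells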
ApolloAuto/apollo-model-centerpoint | paddle3d/utils/checkpoint.py | [
{
"identifier": "PRETRAINED_HOME",
"path": "paddle3d/env.py",
"snippet": "PRETRAINED_HOME = get_sub_home('pretrained')"
},
{
"identifier": "TMP_HOME",
"path": "paddle3d/env.py",
"snippet": "TMP_HOME = get_sub_home('tmp')"
},
{
"identifier": "download_with_progress",
"path": "paddle3d/utils/download.py",
"snippet": "def download_with_progress(url: str,\n path: str = None) -> Generator[str, int, int]:\n '''Download a file and return the downloading progress -> Generator[filename, download_size, total_size]\n\n Args:\n url (str) : url to be downloaded\n path (str, optional) : path to store downloaded products, default is current work directory\n\n Examples:\n .. code-block:: python\n url = 'https://xxxxx.xx/xx.tar.gz'\n for filename, download_size, total_szie in download_with_progress(url, path='./output'):\n print(filename, download_size, total_size)\n '''\n path = os.getcwd() if not path else path\n if not os.path.exists(path):\n os.makedirs(path)\n\n parse_result = urlparse(url)\n savename = parse_result.path.split('/')[-1]\n savename = os.path.join(path, savename)\n\n res = requests.get(url, stream=True)\n download_size = 0\n total_size = int(res.headers.get('content-length'))\n with open(savename, 'wb') as _file:\n for data in res.iter_content(chunk_size=4096):\n _file.write(data)\n download_size += len(data)\n yield savename, download_size, total_size"
},
{
"identifier": "logger",
"path": "paddle3d/utils/logger.py",
"snippet": "class Logger(object):\nclass ProgressBar(object):\n def __init__(self, name: str = None):\n def format(self):\n def disable(self):\n def enable(self):\n def enabled(self) -> bool:\n def __call__(self, log_level: str, msg: str):\n def use_terminator(self, terminator: str):\n def processing(self, msg: str, flush_interval: float = 0.1):\n def _printer():\n def progressbar(self, msg: str, flush_interval: float = 0.1):\n def range(self, stop: int, msg: str):\n def enumerate(self, iterable: Iterable, msg: str):\n def __init__(self, logger: Logger, flush_interval: float = 0.1):\n def update(self, progress: float):"
},
{
"identifier": "unarchive_with_progress",
"path": "paddle3d/utils/xarfile.py",
"snippet": "def unarchive_with_progress(name: str, path: str) -> Generator[str, int, int]:\n '''\n Unarchive a file and return the unarchiving progress -> Generator[filename, extrace_size, total_size]\n Args:\n name(str) : file or directory name to be unarchived\n path(str) : storage name of archive file\n Examples:\n .. code-block:: python\n unarchive_path = 'test.tar.gz'\n for filename, extract_size, total_szie in unarchive_with_progress(unarchive_path, path='./output'):\n print(filename, extract_size, total_size)\n '''\n with open(name, mode='r') as file:\n total_size = extract_size = 0\n for filename in file.getnames():\n total_size += file.getxarinfo(filename).size\n\n for filename in file.getnames():\n file.extract(filename, path)\n extract_size += file.getxarinfo(filename).size\n yield filename, extract_size, total_size"
}
] | import os
import filelock
import paddle
from typing import Union
from urllib.parse import unquote, urlparse
from paddle3d.env import PRETRAINED_HOME, TMP_HOME
from paddle3d.utils.download import download_with_progress
from paddle3d.utils.logger import logger
from paddle3d.utils.xarfile import unarchive_with_progress | 1,120 | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def load_pretrained_model_from_url(model: paddle.nn.Layer,
url: str,
overwrite: bool = False):
"""
"""
pretrained_model = unquote(url)
savename = pretrained_model.split('/')[-1]
savedir = os.path.join(PRETRAINED_HOME, savename.split('.')[0])
os.makedirs(savedir, exist_ok=True)
savepath = os.path.join(savedir, savename)
if os.path.exists(savepath) and not overwrite:
| # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def load_pretrained_model_from_url(model: paddle.nn.Layer,
url: str,
overwrite: bool = False):
"""
"""
pretrained_model = unquote(url)
savename = pretrained_model.split('/')[-1]
savedir = os.path.join(PRETRAINED_HOME, savename.split('.')[0])
os.makedirs(savedir, exist_ok=True)
savepath = os.path.join(savedir, savename)
if os.path.exists(savepath) and not overwrite: | logger.warning( | 3 | 2023-11-08 07:08:03+00:00 | 2k |
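The cache layout built above is plain string manipulation on the URL. Reproducing it in isolation; the home directory and URL below are stand-ins, not paddle3d's real PRETRAINED_HOME or a real model link.
import os
from urllib.parse import unquote

PRETRAINED_HOME = os.path.expanduser("~/.paddle3d/pretrained")    # assumption
url = "https://example.com/models/pointpillars.tar.gz"            # placeholder URL
savename = unquote(url).split('/')[-1]                            # pointpillars.tar.gz
savedir = os.path.join(PRETRAINED_HOME, savename.split('.')[0])   # .../pointpillars
savepath = os.path.join(savedir, savename)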
camlsys/fl-project-template | project/task/default/train_test.py | [
{
"identifier": "ClientConfig",
"path": "project/client/client.py",
"snippet": "class ClientConfig(BaseModel):\n \"\"\"Fit/eval config, allows '.' member acces and static checking.\n\n Used to check weather each component has its own independent config present. Each\n component should then use its own Pydantic model to validate its config. For\n anything extra, use the extra field as a simple dict.\n \"\"\"\n\n # Instantiate model\n net_config: dict\n # Instantiate dataloader\n dataloader_config: dict\n # For train/test\n run_config: dict\n # Additional params used like a Dict\n extra: dict\n\n class Config:\n \"\"\"Setting to allow any types, including library ones like torch.device.\"\"\"\n\n arbitrary_types_allowed = True"
},
{
"identifier": "generic_set_parameters",
"path": "project/fed/utils/utils.py",
"snippet": "def generic_set_parameters(\n net: nn.Module,\n parameters: NDArrays,\n to_copy: bool = True,\n) -> None:\n \"\"\"Set the parameters of a network.\n\n Parameters\n ----------\n net : nn.Module\n The network whose parameters should be set.\n parameters : NDArrays\n The parameters to set.\n to_copy : bool (default=False)\n Whether to copy the parameters or use them directly.\n\n Returns\n -------\n None\n \"\"\"\n params_dict = zip(\n net.state_dict().keys(),\n parameters,\n strict=True,\n )\n state_dict = OrderedDict(\n {k: torch.Tensor(v if not to_copy else v.copy()) for k, v in params_dict},\n )\n net.load_state_dict(state_dict, strict=True)"
},
{
"identifier": "FedDataloaderGen",
"path": "project/types/common.py",
"snippet": ""
},
{
"identifier": "obtain_device",
"path": "project/utils/utils.py",
"snippet": "def obtain_device() -> torch.device:\n \"\"\"Get the device (CPU or GPU) for torch.\n\n Returns\n -------\n torch.device\n The device.\n \"\"\"\n return torch.device(\n \"cuda:0\" if torch.cuda.is_available() else \"cpu\",\n )"
}
] | from collections.abc import Sized
from pathlib import Path
from typing import cast
from flwr.common import NDArrays
from pydantic import BaseModel
from torch import nn
from torch.utils.data import DataLoader
from project.client.client import ClientConfig
from project.fed.utils.utils import generic_set_parameters
from project.types.common import (
FedDataloaderGen,
FedEvalFN,
NetGen,
OnFitConfigFN,
TestFunc,
)
from project.utils.utils import obtain_device
import torch | 1,246 |
class TrainConfig(BaseModel):
    """Training configuration, allows '.' member access and static checking.
Guarantees that all necessary components are present, fails early if config is
mismatched to client.
"""
device: torch.device
# epochs: int
# learning_rate: float
class Config:
"""Setting to allow any types, including library ones like torch.device."""
arbitrary_types_allowed = True
def train(
net: nn.Module,
trainloader: DataLoader,
_config: dict,
_working_dir: Path,
) -> tuple[int, dict]:
"""Train the network on the training set.
Parameters
----------
net : nn.Module
The neural network to train.
trainloader : DataLoader
The DataLoader containing the data to train the network on.
_config : Dict
The configuration for the training.
Contains the device, number of epochs and learning rate.
Static type checking is done by the TrainConfig class.
Returns
-------
Tuple[int, Dict]
The number of samples used for training,
the loss, and the accuracy of the input model on the given data.
"""
if len(cast(Sized, trainloader.dataset)) == 0:
raise ValueError(
"Trainloader can't be 0, exiting...",
)
config: TrainConfig = TrainConfig(**_config)
del _config
net.to(config.device)
net.train()
return len(cast(Sized, trainloader.dataset)), {}
class TestConfig(BaseModel):
    """Testing configuration, allows '.' member access and static checking.
Guarantees that all necessary components are present, fails early if config is
mismatched to client.
"""
device: torch.device
class Config:
"""Setting to allow any types, including library ones like torch.device."""
arbitrary_types_allowed = True
def test(
net: nn.Module,
testloader: DataLoader,
_config: dict,
_working_dir: Path,
) -> tuple[float, int, dict]:
"""Evaluate the network on the test set.
Parameters
----------
net : nn.Module
The neural network to test.
testloader : DataLoader
The DataLoader containing the data to test the network on.
config : Dict
The configuration for the testing.
Contains the device.
Static type checking is done by the TestConfig class.
Returns
-------
Tuple[float, int, float]
The loss, number of test samples,
and the accuracy of the input model on the given data.
"""
if len(cast(Sized, testloader.dataset)) == 0:
raise ValueError(
"Testloader can't be 0, exiting...",
)
config: TestConfig = TestConfig(**_config)
del _config
net.to(config.device)
net.eval()
return (
0.0,
len(cast(Sized, testloader.dataset)),
{},
)
def get_fed_eval_fn(
net_generator: NetGen,
| """Default training and testing functions, local and federated."""
class TrainConfig(BaseModel):
    """Training configuration, allows '.' member access and static checking.
Guarantees that all necessary components are present, fails early if config is
mismatched to client.
"""
device: torch.device
# epochs: int
# learning_rate: float
class Config:
"""Setting to allow any types, including library ones like torch.device."""
arbitrary_types_allowed = True
def train(
net: nn.Module,
trainloader: DataLoader,
_config: dict,
_working_dir: Path,
) -> tuple[int, dict]:
"""Train the network on the training set.
Parameters
----------
net : nn.Module
The neural network to train.
trainloader : DataLoader
The DataLoader containing the data to train the network on.
_config : Dict
The configuration for the training.
Contains the device, number of epochs and learning rate.
Static type checking is done by the TrainConfig class.
Returns
-------
Tuple[int, Dict]
The number of samples used for training,
the loss, and the accuracy of the input model on the given data.
"""
if len(cast(Sized, trainloader.dataset)) == 0:
raise ValueError(
"Trainloader can't be 0, exiting...",
)
config: TrainConfig = TrainConfig(**_config)
del _config
net.to(config.device)
net.train()
return len(cast(Sized, trainloader.dataset)), {}
class TestConfig(BaseModel):
    """Testing configuration, allows '.' member access and static checking.
Guarantees that all necessary components are present, fails early if config is
mismatched to client.
"""
device: torch.device
class Config:
"""Setting to allow any types, including library ones like torch.device."""
arbitrary_types_allowed = True
def test(
net: nn.Module,
testloader: DataLoader,
_config: dict,
_working_dir: Path,
) -> tuple[float, int, dict]:
"""Evaluate the network on the test set.
Parameters
----------
net : nn.Module
The neural network to test.
testloader : DataLoader
The DataLoader containing the data to test the network on.
config : Dict
The configuration for the testing.
Contains the device.
Static type checking is done by the TestConfig class.
Returns
-------
Tuple[float, int, float]
The loss, number of test samples,
and the accuracy of the input model on the given data.
"""
if len(cast(Sized, testloader.dataset)) == 0:
raise ValueError(
"Testloader can't be 0, exiting...",
)
config: TestConfig = TestConfig(**_config)
del _config
net.to(config.device)
net.eval()
return (
0.0,
len(cast(Sized, testloader.dataset)),
{},
)
def get_fed_eval_fn(
net_generator: NetGen, | fed_dataloater_generator: FedDataloaderGen, | 2 | 2023-11-08 15:31:44+00:00 | 2k |
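The TrainConfig/TestConfig pattern above exists purely to fail fast on a malformed _config dict. A minimal round trip, assuming the pydantic v1-style class Config used in the record:
import torch
from pydantic import BaseModel

class DeviceOnlyConfig(BaseModel):
    device: torch.device

    class Config:
        arbitrary_types_allowed = True

cfg = DeviceOnlyConfig(**{"device": torch.device("cpu")})  # ok
# DeviceOnlyConfig(**{}) would raise a ValidationError instead of failing later in train().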
alibaba/CloudEval-YAML | evaluate.py | [
{
"identifier": "bleu",
"path": "metrics/bleu.py",
"snippet": "def test(result_str=\"\", reference_str=\"\"):"
},
{
"identifier": "edit_distance",
"path": "metrics/edit_distance.py",
"snippet": "def test(result_str=\"\", reference_str=\"\"):"
},
{
"identifier": "exact_match",
"path": "metrics/exact_match.py",
"snippet": "def test(result_str=\"\", reference_str=\"\"):"
},
{
"identifier": "kv_match",
"path": "metrics/kv_match.py",
"snippet": "def test(result_str=\"\", reference_str=\"\"):"
},
{
"identifier": "kv_wildcard",
"path": "metrics/kv_wildcard.py",
"snippet": "def append_labels_to_keys(kv_labeled_str):\ndef match_dict(target_dict, kv_labeled_dict):\ndef get_leaf_nodes(target_dict):\ndef calc_intersection(target_leaf_nodes, reference_leaf_nodes):\ndef test(result_str=\"\", kv_labeled_str=\"\"):"
},
{
"identifier": "unit_test",
"path": "metrics/unit_test.py",
"snippet": "def run_bash(bash_script, cwd=None):\ndef clean_dir(dir_path):\ndef test(file_content, bash_script, problem_key, config):"
},
{
"identifier": "unit_test_pred",
"path": "metrics/unit_test_pred.py",
"snippet": "def test(result_str=\"\", reference_str=\"\"):"
}
] | import loader
import prompt
import query
import json
import ray
import os
import openai
import time
import importlib
import sys
import random
from tqdm import tqdm
from metrics import bleu, edit_distance, exact_match, kv_match
from metrics import kv_wildcard, unit_test, unit_test_pred | 663 |
metric_map = {
'bleu': bleu,
'edit_distance': edit_distance,
'exact_match': exact_match,
'kv_match': kv_match,
}
def import_module_from_string(module_name, module_code):
module_spec = importlib.util.spec_from_loader(module_name, loader=None)
module = importlib.util.module_from_spec(module_spec)
sys.modules[module_name] = module
exec(module_code, module.__dict__)
return module
def evaluate(config):
problem_keys = json.loads(loader.read_memory(config, 'problem_keys'))
if 'Kubernetes' in config['libs'] and config['metrics']['unit_test']:
# restart minikube
print("Preparing minikube for unit tests ...")
unit_test.run_bash('minikube delete && minikube start && sleep 20')
for problem_key in tqdm(problem_keys, desc='Evaluating'):
reference_code = loader.read_memory_problem(config, problem_key, f'reference_code')
unit_test_code = loader.read_memory_problem(config, problem_key, f'unit_test_code')
if unit_test_code is not None:
unit_test_code = unit_test_code
scores_dict = {}
for metric_name in config['metrics'].keys():
scores_dict[metric_name] = []
for sample_id in range(config['num_samples']):
generated_code = loader.read_memory_problem(config, problem_key, f'generated_code_{sample_id}')
for metric_name in config['metrics'].keys():
if config['metrics'][metric_name]:
# if the metric is enabled
if metric_name in metric_map.keys():
# general scoring
score = metric_map[metric_name].test(generated_code, reference_code)
# problem-specific scoring
elif metric_name == 'kv_wildcard':
try:
|
metric_map = {
'bleu': bleu,
'edit_distance': edit_distance,
'exact_match': exact_match,
'kv_match': kv_match,
}
def import_module_from_string(module_name, module_code):
module_spec = importlib.util.spec_from_loader(module_name, loader=None)
module = importlib.util.module_from_spec(module_spec)
sys.modules[module_name] = module
exec(module_code, module.__dict__)
return module
def evaluate(config):
problem_keys = json.loads(loader.read_memory(config, 'problem_keys'))
if 'Kubernetes' in config['libs'] and config['metrics']['unit_test']:
# restart minikube
print("Preparing minikube for unit tests ...")
unit_test.run_bash('minikube delete && minikube start && sleep 20')
for problem_key in tqdm(problem_keys, desc='Evaluating'):
reference_code = loader.read_memory_problem(config, problem_key, f'reference_code')
unit_test_code = loader.read_memory_problem(config, problem_key, f'unit_test_code')
if unit_test_code is not None:
unit_test_code = unit_test_code
scores_dict = {}
for metric_name in config['metrics'].keys():
scores_dict[metric_name] = []
for sample_id in range(config['num_samples']):
generated_code = loader.read_memory_problem(config, problem_key, f'generated_code_{sample_id}')
for metric_name in config['metrics'].keys():
if config['metrics'][metric_name]:
# if the metric is enabled
if metric_name in metric_map.keys():
# general scoring
score = metric_map[metric_name].test(generated_code, reference_code)
# problem-specific scoring
elif metric_name == 'kv_wildcard':
try: | score = kv_wildcard.test(generated_code, reference_code) | 4 | 2023-11-08 08:13:39+00:00 | 2k |
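import_module_from_string above builds a module object straight from source text. The helper is repeated here so the usage example is self-contained:
import importlib.util
import sys

def import_module_from_string(module_name, module_code):
    module_spec = importlib.util.spec_from_loader(module_name, loader=None)
    module = importlib.util.module_from_spec(module_spec)
    sys.modules[module_name] = module
    exec(module_code, module.__dict__)
    return module

scorer = import_module_from_string("dynamic_scorer", "def score(x):\n    return 2 * x\n")
print(scorer.score(21))   # 42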
KAIST-AILab/palr | rlkit/torch/sac/sac.py | [
{
"identifier": "LossFunction",
"path": "rlkit/core/loss.py",
"snippet": "class LossFunction(object, metaclass=abc.ABCMeta):\n def compute_loss(self, batch, skip_statistics=False, **kwargs):"
},
{
"identifier": "create_stats_ordered_dict",
"path": "rlkit/core/eval_util.py",
"snippet": "def create_stats_ordered_dict(\n name,\n data,\n stat_prefix=None,\n always_show_all_stats=True,\n exclude_max_min=False,\n):\n if stat_prefix is not None:\n name = \"{}{}\".format(stat_prefix, name)\n if isinstance(data, Number):\n return OrderedDict({name: data})\n\n if len(data) == 0:\n return OrderedDict()\n\n if isinstance(data, tuple):\n ordered_dict = OrderedDict()\n for number, d in enumerate(data):\n sub_dict = create_stats_ordered_dict(\n \"{0}_{1}\".format(name, number),\n d,\n )\n ordered_dict.update(sub_dict)\n return ordered_dict\n\n if isinstance(data, list):\n try:\n iter(data[0])\n except TypeError:\n pass\n else:\n data = np.concatenate(data)\n\n if (isinstance(data, np.ndarray) and data.size == 1\n and not always_show_all_stats):\n return OrderedDict({name: float(data)})\n\n stats = OrderedDict([\n (name + ' Mean', np.mean(data)),\n (name + ' Std', np.std(data)),\n ])\n if not exclude_max_min:\n stats[name + ' Max'] = np.max(data)\n stats[name + ' Min'] = np.min(data)\n return stats"
},
{
"identifier": "TorchTrainer",
"path": "rlkit/torch/torch_rl_algorithm.py",
"snippet": "class TorchTrainer(Trainer, metaclass=abc.ABCMeta):\n def __init__(self):\n self._num_train_steps = 0\n\n def train(self, np_batch):\n self._num_train_steps += 1\n batch = np_to_pytorch_batch(np_batch)\n self.train_from_torch(batch)\n\n def get_diagnostics(self):\n return OrderedDict([\n ('num train calls', self._num_train_steps),\n ])\n\n @abc.abstractmethod\n def train_from_torch(self, batch):\n pass\n\n @property\n @abc.abstractmethod\n def networks(self) -> Iterable[nn.Module]:\n pass"
},
{
"identifier": "add_prefix",
"path": "rlkit/core/logging.py",
"snippet": "def add_prefix(log_dict: OrderedDict, prefix: str, divider=''):\n with_prefix = OrderedDict()\n for key, val in log_dict.items():\n with_prefix[prefix + divider + key] = val\n return with_prefix"
},
{
"identifier": "MINE_DV",
"path": "mine/mine.py",
"snippet": "class MINE_DV(MINE_Base):\n _ANNEAL_PERIOD = 0\n _EMA_ANNEAL_PERIOD = 0\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._decay = 0.994 # decay for ema (not tuned)\n self._ema = None\n\n def _update_ema(self, t_margin):\n with torch.no_grad():\n exp_t = t_margin.exp().mean(dim=0)\n if self._ema is not None:\n self._ema = self._decay * self._ema + (1-self._decay) * exp_t\n else:\n self._ema = exp_t\n\n def get_mi_bound(self, x, z, z_margin=None, update_ema=False):\n t_joint = self._T(x, z).mean(dim=0)\n if z_margin is not None:\n t_margin = self._T(x, z_margin)\n else:\n t_margin = self._T(x, z[torch.randperm(x.shape[0])])\n # maintain an exponential moving average of exp_t under the marginal distribution\n # done to reduce bias in the estimator\n if ((self.variant == 'unbiased' and update_ema) and\n self._current_epoch > self._EMA_ANNEAL_PERIOD):\n self._update_ema(t_margin)\n # Calculate biased or unbiased estimate\n if self.variant == 'unbiased' and self._current_epoch > self._ANNEAL_PERIOD:\n log_exp_t = UnbiasedLogMeanExp.apply(t_margin, self._ema)\n else:\n log_exp_t = t_margin.logsumexp(\n dim=0).subtract(math.log(x.shape[0]))\n # mi lower bound\n return t_joint - log_exp_t"
}
] | from collections import OrderedDict, namedtuple
from typing import Tuple
from rlkit.core.loss import LossFunction, LossStatistics
from torch import nn as nn
from rlkit.core.eval_util import create_stats_ordered_dict
from rlkit.torch.torch_rl_algorithm import TorchTrainer
from rlkit.core.logging import add_prefix
from mine.mine import MINE_DV
import numpy as np
import torch
import torch.optim as optim
import rlkit.torch.pytorch_util as ptu
import gtimer as gt | 1,192 |
SACLosses = namedtuple(
'SACLosses',
'policy_loss qf1_loss qf2_loss alpha_loss',
)
|
SACLosses = namedtuple(
'SACLosses',
'policy_loss qf1_loss qf2_loss alpha_loss',
)
| class SACTrainer(TorchTrainer, LossFunction): | 0 | 2023-11-06 08:35:34+00:00 | 2k |
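SACLosses above is just a namedtuple, so the trainer can log the four losses by field name while still unpacking them positionally; a minimal illustration with dummy tensors:
from collections import namedtuple
import torch

SACLosses = namedtuple('SACLosses', 'policy_loss qf1_loss qf2_loss alpha_loss')
losses = SACLosses(
    policy_loss=torch.tensor(0.5),
    qf1_loss=torch.tensor(0.1),
    qf2_loss=torch.tensor(0.2),
    alpha_loss=torch.tensor(0.0),
)
policy_loss, qf1_loss, qf2_loss, alpha_loss = losses   # positional unpacking still works
print(losses.policy_loss.item())                       # 0.5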
JustlfC03/SCUNet-plusplus | trainer.py | [
{
"identifier": "DiceLoss",
"path": "utils.py",
"snippet": "class DiceLoss(nn.Module):\n def __init__(self, n_classes):\n super(DiceLoss, self).__init__()\n self.n_classes = n_classes\n\n def _one_hot_encoder(self, input_tensor):\n tensor_list = []\n for i in range(self.n_classes):\n temp_prob = input_tensor == i\n tensor_list.append(temp_prob.unsqueeze(1))\n output_tensor = torch.cat(tensor_list, dim=1)\n return output_tensor.float()\n\n def _dice_loss(self, score, target):\n target = target.float()\n smooth = 1e-5\n intersect = torch.sum(score * target)\n y_sum = torch.sum(target * target)\n z_sum = torch.sum(score * score)\n loss = (2 * intersect + smooth) / (z_sum + y_sum + smooth)\n loss = 1 - loss\n return loss\n\n def forward(self, inputs, target, weight=None, softmax=False):\n if softmax:\n inputs = torch.softmax(inputs, dim=1)\n target = self._one_hot_encoder(target)\n if weight is None:\n weight = [1] * self.n_classes\n assert inputs.size() == target.size(), 'predict {} & target {} shape do not match'.format(inputs.size(),\n target.size())\n class_wise_dice = []\n loss = 0.0\n for i in range(0, self.n_classes):\n dice = self._dice_loss(inputs[:, i], target[:, i])\n class_wise_dice.append(1.0 - dice.item())\n loss += dice * weight[i]\n return loss / self.n_classes"
},
{
"identifier": "test_single_volume",
"path": "utils.py",
"snippet": "def test_single_volume(image, label, net, classes, patch_size=[256, 256], test_save_path=None, case=None, z_spacing=1):\n image, label = image.squeeze(0).cpu().detach().numpy(), label.squeeze(0).cpu().detach().numpy()\n _, x, y = image.shape\n\n if x != patch_size[0] or y != patch_size[1]:\n image = zoom(image, (1, patch_size[0] / x, patch_size[1] / y), order=3)\n input = torch.from_numpy(image).unsqueeze(0).float().cuda()\n net.eval()\n with torch.no_grad():\n out = torch.argmax(torch.softmax(net(input), dim=1), dim=1).squeeze(0)\n out = out.cpu().detach().numpy()\n if x != patch_size[0] or y != patch_size[1]:\n prediction = zoom(out, (x / patch_size[0], y / patch_size[1]), order=0)\n else:\n prediction = out\n metric_list = []\n for i in range(1, classes):\n metric_list.append(calculate_metric_percase(prediction == i, label == i))\n\n # if test_save_path is not None:\n # prediction = Image.fromarray(np.uint8(prediction)).convert('L')\n # prediction.save(test_save_path + '/' + case + '.png')\n\n if test_save_path is not None:\n a1 = copy.deepcopy(prediction)\n a2 = copy.deepcopy(prediction)\n a3 = copy.deepcopy(prediction)\n a1[a1 == 1] = 0\n a2[a2 == 1] = 255\n a3[a3 == 1] = 0\n a1 = Image.fromarray(np.uint8(a1)).convert('L')\n a2 = Image.fromarray(np.uint8(a2)).convert('L')\n a3 = Image.fromarray(np.uint8(a3)).convert('L')\n prediction = Image.merge('RGB', [a1, a2, a3])\n prediction.save(test_save_path + '/' + case + '.png')\n\n return metric_list"
}
] | import argparse
import logging
import os
import random
import sys
import time
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from tensorboardX import SummaryWriter
from torch.nn.modules.loss import CrossEntropyLoss
from torch.utils.data import DataLoader
from tqdm import tqdm
from utils import DiceLoss
from torchvision import transforms
from utils import test_single_volume
from datasets.dataset_synapse import Synapse_dataset, RandomGenerator | 1,250 |
def trainer_synapse(args, model, snapshot_path):
logging.basicConfig(filename=snapshot_path + "/log.txt", level=logging.INFO,
format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
logging.info(str(args))
base_lr = args.base_lr
num_classes = args.num_classes
batch_size = args.batch_size * args.n_gpu
max_iterations = args.max_iterations
db_train = Synapse_dataset(base_dir=args.root_path, list_dir=args.list_dir, split="train",
transform=transforms.Compose(
[RandomGenerator(output_size=[args.img_size, args.img_size])]))
print("The length of train set is: {}".format(len(db_train)))
def worker_init_fn(worker_id):
random.seed(args.seed + worker_id)
trainloader = DataLoader(db_train, batch_size=batch_size, shuffle=True, num_workers=0, pin_memory=True,
worker_init_fn=worker_init_fn)
if args.n_gpu > 1:
model = nn.DataParallel(model)
model.train()
ce_loss = CrossEntropyLoss()
|
def trainer_synapse(args, model, snapshot_path):
logging.basicConfig(filename=snapshot_path + "/log.txt", level=logging.INFO,
format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
logging.info(str(args))
base_lr = args.base_lr
num_classes = args.num_classes
batch_size = args.batch_size * args.n_gpu
max_iterations = args.max_iterations
db_train = Synapse_dataset(base_dir=args.root_path, list_dir=args.list_dir, split="train",
transform=transforms.Compose(
[RandomGenerator(output_size=[args.img_size, args.img_size])]))
print("The length of train set is: {}".format(len(db_train)))
def worker_init_fn(worker_id):
random.seed(args.seed + worker_id)
trainloader = DataLoader(db_train, batch_size=batch_size, shuffle=True, num_workers=0, pin_memory=True,
worker_init_fn=worker_init_fn)
if args.n_gpu > 1:
model = nn.DataParallel(model)
model.train()
ce_loss = CrossEntropyLoss() | dice_loss = DiceLoss(num_classes) | 0 | 2023-11-04 11:42:02+00:00 | 2k |
corcel-api/cortex.t | validators/image_validator.py | [
{
"identifier": "get_question",
"path": "template/utils.py",
"snippet": "async def get_question(category, num_questions_needed):\n if category not in [\"text\", \"images\"]:\n raise ValueError(\"Invalid category. Must be 'text' or 'images'.\")\n\n question = await update_counters_and_get_new_list(category, \"questions\", num_questions_needed)\n return question"
},
{
"identifier": "ImageResponse",
"path": "template/protocol.py",
"snippet": "class ImageResponse(bt.Synapse):\n \"\"\" A class to represent the response for an image-related request. \"\"\"\n # https://platform.stability.ai/docs/api-reference#tag/v1generation/operation/textToImage\n\n completion: Optional[Dict] = pydantic.Field(\n None,\n title=\"Completion\",\n description=\"The completion data of the image response.\"\n )\n\n messages: str = pydantic.Field(\n ...,\n title=\"Messages\",\n description=\"Messages related to the image response.\"\n )\n\n provider: str = pydantic.Field(\n default=\"OpenAI\",\n title=\"Provider\",\n description=\"The provider to use when calling for your response.\"\n )\n\n seed: int = pydantic.Field(\n default=1234,\n title=\"Seed\",\n description=\"The seed that which to generate the image with\"\n )\n\n samples: int = pydantic.Field(\n default=1,\n title=\"Samples\",\n description=\"The number of samples to generate\"\n )\n\n cfg_scale: float = pydantic.Field(\n default=8.0,\n title=\"cfg_scale\",\n description=\"The cfg_scale to use for image generation\"\n )\n\n # (Available Samplers: ddim, plms, k_euler, k_euler_ancestral, k_heun, k_dpm_2, k_dpm_2_ancestral, k_dpmpp_2s_ancestral, k_lms, k_dpmpp_2m, k_dpmpp_sde)\n sampler: str = pydantic.Field(\n default=\"\",\n title=\"Sampler\",\n description=\"The sampler to use for image generation\"\n )\n\n steps: int = pydantic.Field(\n default=30,\n title=\"Seed\",\n description=\"The steps to take in generating the image\"\n )\n\n model: str = pydantic.Field(\n default=\"dall-e-2\",\n title=\"Model\",\n description=\"The model used for generating the image.\"\n )\n\n style: str = pydantic.Field(\n default=\"vivid\",\n title=\"Style\",\n description=\"The style of the image.\"\n )\n\n size: str = pydantic.Field(\n default=\"1024x1024\",\n title=\"The size of the image, used for Openai generation. Options are 1024x1024, 1792x1024, 1024x1792 for dalle3\",\n description=\"The size of the image.\"\n )\n\n height: int = pydantic.Field(\n default=1024,\n title=\"Height used for non Openai images\",\n description=\"height\"\n )\n\n width: int = pydantic.Field(\n default=1024,\n title=\"Width used for non Openai images\",\n description=\"width\"\n )\n\n quality: str = pydantic.Field(\n default=\"standard\",\n title=\"Quality\",\n description=\"The quality of the image.\"\n )\n\n required_hash_fields: List[str] = pydantic.Field(\n [\"messages\"],\n title=\"Required Hash Fields\",\n description=\"A list of fields required for the hash.\"\n )\n\n def deserialize(self) -> Optional[Dict]:\n \"\"\" Deserialize the completion data of the image response. \"\"\"\n return self.completion"
}
] | import io
import torch
import wandb
import random
import asyncio
import aiohttp
import base64
import traceback
import template.reward
import bittensor as bt
from PIL import Image
from io import BytesIO
from template.utils import get_question
from base_validator import BaseValidator
from template.protocol import ImageResponse | 1,332 |
class ImageValidator(BaseValidator):
def __init__(self, dendrite, config, subtensor, wallet):
super().__init__(dendrite, config, subtensor, wallet, timeout=25)
self.streaming = False
self.query_type = "images"
self.model = "dall-e-2"
self.weight = .5
self.provider = "OpenAI"
self.size = "1024x1024"
self.width = 1024
self.height = 1024
self.quality = "standard"
self.style = "vivid"
self.steps = 30
self.seed = 123456
self.wandb_data = {
"modality": "images",
"prompts": {},
"responses": {},
"images": {},
"scores": {},
"timestamps": {},
}
async def start_query(self, available_uids, metagraph):
try:
query_tasks = []
uid_to_question = {}
# Randomly choose the provider based on specified probabilities
providers = ["OpenAI"] * 6 + ["Stability"] * 4
self.provider = random.choice(providers)
if self.provider == "Stability":
self.seed = random.randint(1000, 1000000)
self.model = "stable-diffusion-xl-1024-v1-0"
elif self.provider == "OpenAI":
self.model = "dall-e-2"
# Query all images concurrently
for uid in available_uids:
messages = await get_question("images", len(available_uids))
uid_to_question[uid] = messages # Store messages for each UID
|
class ImageValidator(BaseValidator):
def __init__(self, dendrite, config, subtensor, wallet):
super().__init__(dendrite, config, subtensor, wallet, timeout=25)
self.streaming = False
self.query_type = "images"
self.model = "dall-e-2"
self.weight = .5
self.provider = "OpenAI"
self.size = "1024x1024"
self.width = 1024
self.height = 1024
self.quality = "standard"
self.style = "vivid"
self.steps = 30
self.seed = 123456
self.wandb_data = {
"modality": "images",
"prompts": {},
"responses": {},
"images": {},
"scores": {},
"timestamps": {},
}
async def start_query(self, available_uids, metagraph):
try:
query_tasks = []
uid_to_question = {}
# Randomly choose the provider based on specified probabilities
providers = ["OpenAI"] * 6 + ["Stability"] * 4
self.provider = random.choice(providers)
if self.provider == "Stability":
self.seed = random.randint(1000, 1000000)
self.model = "stable-diffusion-xl-1024-v1-0"
elif self.provider == "OpenAI":
self.model = "dall-e-2"
# Query all images concurrently
for uid in available_uids:
messages = await get_question("images", len(available_uids))
uid_to_question[uid] = messages # Store messages for each UID
| syn = ImageResponse(messages=messages, model=self.model, size=self.size, quality=self.quality, style=self.style, provider=self.provider, seed=self.seed, steps=self.steps) | 1 | 2023-11-06 10:35:34+00:00 | 2k |
flatypus/flowchat | flowchat/chain.py | [
{
"identifier": "autodedent",
"path": "flowchat/autodedent.py",
"snippet": "def autodedent(*text_lines) -> str:\n \"\"\"Format multiline strings, including with multiple levels of indentation, to align with the first line.\n\n Example:\n\n code = '''\n def add(a, b):\n return a + b\n '''\n\n autodedent(\n \"What does this code do?\",\n code,\n \"Suggest a comment that describes what this code does.\"\n )\n \"\"\"\n text_lines = [i if isinstance(i, str) else str(i) for i in text_lines]\n return dedent('\\n'.join(text_lines)).strip(\"\\n\")"
},
{
"identifier": "_encode_image",
"path": "flowchat/private/_private_helpers.py",
"snippet": "def _encode_image(image, format_type=\"PNG\"):\n buffered = BytesIO()\n image.save(buffered, format=format_type)\n img_str = base64.b64encode(buffered.getvalue())\n return f\"data:image/png;base64,{img_str.decode('utf-8')}\""
}
] | from .autodedent import autodedent
from .private._private_helpers import _encode_image
from retry import retry
from typing import List, TypedDict, Union, Callable, Dict, Literal, Any
from wrapt_timeout_decorator import timeout
import json
import openai
import os
import logging | 1,205 |
logging.basicConfig(level=logging.WARNING,
format='[%(asctime)s] %(levelname)s: %(message)s')
Message = TypedDict('Message', {'role': str, 'content': str | List[Any]})
ResponseFormat = TypedDict(
'ResponseFormat', {'type': Literal['text', 'json_object']})
ImageFormat = TypedDict('ImageFormat', {
'url': str,
'format_type': str,
'detail': Literal['low', 'high']
})
class Chain:
def __init__(self, model: str, api_key: str = None, environ_key="OPENAI_API_KEY"):
super().__init__()
if type(model) is not str:
raise TypeError(
f"Model argument must be a string, not {type(model)}"
)
if api_key is not None and type(api_key) is not str:
raise TypeError(
f"API key argument must be a string, not {type(api_key)}"
)
if type(environ_key) is not str:
raise TypeError(
f"Environment key argument must be a string, not {type(environ_key)}"
)
if api_key is None:
api_key = os.environ.get(environ_key)
if not api_key:
raise ValueError(
"OpenAI API key not found. Please set the OPENAI_API_KEY environment variable, "
"pass in an api_key parameter, or set the environ_key parameter to the environment "
"variable that contains your API key."
)
openai.api_key = api_key
self.model = model
self.system = None
self.user_prompt = []
self.model_response = None
self.prompt_tokens = 0
self.completion_tokens = 0
def _query_api(self, function: callable, *args, max_query_time=None, **kwargs):
"""Call the API for max_query_time seconds, and if it times out, it will retry."""
timeouted_function = timeout(
dec_timeout=max_query_time, use_signals=False)(function)
return timeouted_function(*args, **kwargs)
def _try_query_and_parse(self, function: callable, json_schema, *args, max_query_time=None, **kwargs):
"""Query and try to parse the response, and if it fails, it will retry."""
completion = self._query_api(
function, *args, max_query_time=max_query_time, **kwargs)
if completion is None:
return None
if kwargs.get('stream', False):
return completion
message = completion.choices[0].message.content
if not json_schema is None:
open_bracket = message.find('{')
close_bracket = message.rfind('}')
message = message[open_bracket:close_bracket+1]
try:
message = json.loads(message)
except json.JSONDecodeError:
raise Exception(
"Response was not in the expected JSON format. Please try again. Check that you haven't accidentally lowered the max_tokens parameter so that the response is truncated."
)
self.prompt_tokens += completion.usage.prompt_tokens
self.completion_tokens += completion.usage.completion_tokens
return message
def _ask(
self,
system: Message,
user_messages: List[Message],
json_schema: Any = None,
max_query_time=None,
tries=-1,
**params
):
"""Ask a question to the chatbot with a system prompt and return the response."""
if not user_messages:
return None
messages = [
system,
*user_messages
] if system else user_messages
message = retry(delay=1, logger=logging, tries=tries)(self._try_query_and_parse)(
openai.chat.completions.create,
json_schema=json_schema,
messages=messages,
max_query_time=max_query_time,
**params
)
return message
def _format_images(self, image: str | ImageFormat | Any):
"""Format whatever image format we receive into the specific format that OpenAI's API expects."""
if isinstance(image, str):
return {"url": image}
elif not isinstance(image, dict):
# not string or dict so assume PIL image
# no specific file format, so default to PNG
|
logging.basicConfig(level=logging.WARNING,
format='[%(asctime)s] %(levelname)s: %(message)s')
Message = TypedDict('Message', {'role': str, 'content': str | List[Any]})
ResponseFormat = TypedDict(
'ResponseFormat', {'type': Literal['text', 'json_object']})
ImageFormat = TypedDict('ImageFormat', {
'url': str,
'format_type': str,
'detail': Literal['low', 'high']
})
class Chain:
def __init__(self, model: str, api_key: str = None, environ_key="OPENAI_API_KEY"):
super().__init__()
if type(model) is not str:
raise TypeError(
f"Model argument must be a string, not {type(model)}"
)
if api_key is not None and type(api_key) is not str:
raise TypeError(
f"API key argument must be a string, not {type(api_key)}"
)
if type(environ_key) is not str:
raise TypeError(
f"Environment key argument must be a string, not {type(environ_key)}"
)
if api_key is None:
api_key = os.environ.get(environ_key)
if not api_key:
raise ValueError(
"OpenAI API key not found. Please set the OPENAI_API_KEY environment variable, "
"pass in an api_key parameter, or set the environ_key parameter to the environment "
"variable that contains your API key."
)
openai.api_key = api_key
self.model = model
self.system = None
self.user_prompt = []
self.model_response = None
self.prompt_tokens = 0
self.completion_tokens = 0
def _query_api(self, function: callable, *args, max_query_time=None, **kwargs):
"""Call the API for max_query_time seconds, and if it times out, it will retry."""
timeouted_function = timeout(
dec_timeout=max_query_time, use_signals=False)(function)
return timeouted_function(*args, **kwargs)
def _try_query_and_parse(self, function: callable, json_schema, *args, max_query_time=None, **kwargs):
"""Query and try to parse the response, and if it fails, it will retry."""
completion = self._query_api(
function, *args, max_query_time=max_query_time, **kwargs)
if completion is None:
return None
if kwargs.get('stream', False):
return completion
message = completion.choices[0].message.content
if not json_schema is None:
open_bracket = message.find('{')
close_bracket = message.rfind('}')
message = message[open_bracket:close_bracket+1]
try:
message = json.loads(message)
except json.JSONDecodeError:
raise Exception(
"Response was not in the expected JSON format. Please try again. Check that you haven't accidentally lowered the max_tokens parameter so that the response is truncated."
)
self.prompt_tokens += completion.usage.prompt_tokens
self.completion_tokens += completion.usage.completion_tokens
return message
def _ask(
self,
system: Message,
user_messages: List[Message],
json_schema: Any = None,
max_query_time=None,
tries=-1,
**params
):
"""Ask a question to the chatbot with a system prompt and return the response."""
if not user_messages:
return None
messages = [
system,
*user_messages
] if system else user_messages
message = retry(delay=1, logger=logging, tries=tries)(self._try_query_and_parse)(
openai.chat.completions.create,
json_schema=json_schema,
messages=messages,
max_query_time=max_query_time,
**params
)
return message
def _format_images(self, image: str | ImageFormat | Any):
"""Format whatever image format we receive into the specific format that OpenAI's API expects."""
if isinstance(image, str):
return {"url": image}
elif not isinstance(image, dict):
# not string or dict so assume PIL image
# no specific file format, so default to PNG | return {"url": _encode_image(image, "PNG")} | 1 | 2023-11-08 00:45:21+00:00 | 2k |
WHU-USI3DV/PatchAugNet | place_recognition/Minkloc3D_V2/misc/utils.py | [
{
"identifier": "PolarQuantizer",
"path": "place_recognition/Minkloc3D_V2/misc/quantization.py",
"snippet": "class PolarQuantizer(Quantizer):\n def __init__(self, quant_step: List[float]):\n assert len(quant_step) == 3, '3 quantization steps expected: for sector (in degrees), ring and z-coordinate (in meters)'\n self.quant_step = torch.tensor(quant_step, dtype=torch.float)\n self.theta_range = int(360. // self.quant_step[0])\n self.quant_step = torch.tensor(quant_step, dtype=torch.float)\n\n def __call__(self, pc):\n # Convert to polar coordinates and quantize with different step size for each coordinate\n # pc: (N, 3) point cloud with Cartesian coordinates (X, Y, Z)\n assert pc.shape[1] == 3\n\n # theta is an angle in degrees in 0..360 range\n theta = 180. + torch.atan2(pc[:, 1], pc[:, 0]) * 180./np.pi\n # dist is a distance from a coordinate origin\n dist = torch.sqrt(pc[:, 0]**2 + pc[:, 1]**2)\n z = pc[:, 2]\n polar_pc = torch.stack([theta, dist, z], dim=1)\n # Scale each coordinate so after quantization with step 1. we got the required quantization step in each dim\n polar_pc = polar_pc / self.quant_step\n quantized_polar_pc, ndx = ME.utils.sparse_quantize(polar_pc, quantization_size=1., return_index=True)\n # Return quantized coordinates and indices of selected elements\n return quantized_polar_pc, ndx"
},
{
"identifier": "CartesianQuantizer",
"path": "place_recognition/Minkloc3D_V2/misc/quantization.py",
"snippet": "class CartesianQuantizer(Quantizer):\n def __init__(self, quant_step: float):\n self.quant_step = quant_step\n\n def __call__(self, pc):\n # Converts to polar coordinates and quantizes with different step size for each coordinate\n # pc: (N, 3) point cloud with Cartesian coordinates (X, Y, Z)\n assert pc.shape[1] == 3\n quantized_pc, ndx = ME.utils.sparse_quantize(pc, quantization_size=self.quant_step, return_index=True)\n # Return quantized coordinates and index of selected elements\n return quantized_pc, ndx"
}
] | import os
import configparser
import time
import numpy as np
from place_recognition.Minkloc3D_V2.misc.quantization import PolarQuantizer, CartesianQuantizer | 838 | # Warsaw University of Technology
class ModelParams:
def __init__(self, model_params_path):
config = configparser.ConfigParser()
config.read(model_params_path)
params = config['MODEL']
self.model_params_path = model_params_path
self.model = params.get('model')
self.output_dim = params.getint('output_dim', 256) # Size of the final descriptor
#######################################################################
# Model dependent
#######################################################################
self.coordinates = params.get('coordinates', 'polar')
assert self.coordinates in ['polar', 'cartesian'], f'Unsupported coordinates: {self.coordinates}'
if 'polar' in self.coordinates:
# 3 quantization steps for polar coordinates: for sectors (in degrees), rings (in meters) and z coordinate (in meters)
self.quantization_step = tuple([float(e) for e in params['quantization_step'].split(',')])
assert len(self.quantization_step) == 3, f'Expected 3 quantization steps: for sectors (degrees), rings (meters) and z coordinate (meters)'
| # Warsaw University of Technology
class ModelParams:
def __init__(self, model_params_path):
config = configparser.ConfigParser()
config.read(model_params_path)
params = config['MODEL']
self.model_params_path = model_params_path
self.model = params.get('model')
self.output_dim = params.getint('output_dim', 256) # Size of the final descriptor
#######################################################################
# Model dependent
#######################################################################
self.coordinates = params.get('coordinates', 'polar')
assert self.coordinates in ['polar', 'cartesian'], f'Unsupported coordinates: {self.coordinates}'
if 'polar' in self.coordinates:
# 3 quantization steps for polar coordinates: for sectors (in degrees), rings (in meters) and z coordinate (in meters)
self.quantization_step = tuple([float(e) for e in params['quantization_step'].split(',')])
assert len(self.quantization_step) == 3, f'Expected 3 quantization steps: for sectors (degrees), rings (meters) and z coordinate (meters)' | self.quantizer = PolarQuantizer(quant_step=self.quantization_step) | 0 | 2023-11-02 13:52:20+00:00 | 2k |
WeiLab-Biology/DeepProSite | DeepProSite-main/edge_features.py | [
{
"identifier": "gather_edges",
"path": "self_attention.py",
"snippet": "def gather_edges(edges, neighbor_idx):\n # Features [B,N,N,C] at Neighbor indices [B,N,K] => Neighbor features [B,N,K,C]\n neighbors = neighbor_idx.unsqueeze(-1).expand(-1, -1, -1, edges.size(-1))\n edge_features = torch.gather(edges, 2, neighbors)\n return edge_features"
},
{
"identifier": "gather_nodes",
"path": "self_attention.py",
"snippet": "def gather_nodes(nodes, neighbor_idx):\n # Features [B,N,C] at Neighbor indices [B,N,K] => [B,N,K,C]\n # Flatten and expand indices per batch [B,N,K] => [B,NK] => [B,NK,C]\n neighbors_flat = neighbor_idx.view((neighbor_idx.shape[0], -1))\n neighbors_flat = neighbors_flat.unsqueeze(-1).expand(-1, -1, nodes.size(2))\n # Gather and re-pack\n neighbor_features = torch.gather(nodes, 1, neighbors_flat)\n neighbor_features = neighbor_features.view(list(neighbor_idx.shape)[:3] + [-1])\n return neighbor_features"
},
{
"identifier": "Normalize",
"path": "self_attention.py",
"snippet": "class Normalize(nn.Module): \n def __init__(self, features, epsilon=1e-6):\n super(Normalize, self).__init__()\n self.gain = nn.Parameter(torch.ones(features))\n self.bias = nn.Parameter(torch.zeros(features))\n self.epsilon = epsilon\n\n def forward(self, x, dim=-1):\n mu = x.mean(dim, keepdim=True)\n sigma = torch.sqrt(x.var(dim, keepdim=True) + self.epsilon)\n gain = self.gain\n bias = self.bias\n # Reshape\n if dim != -1:\n shape = [1] * len(mu.size())\n shape[dim] = self.gain.size()[0]\n gain = gain.view(shape)\n bias = bias.view(shape)\n return gain * (x - mu) / (sigma + self.epsilon) + bias"
}
] | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from self_attention import gather_edges, gather_nodes, Normalize | 850 |
class PositionalEncodings(nn.Module):
def __init__(self, num_embeddings):
super(PositionalEncodings, self).__init__()
self.num_embeddings = num_embeddings
def forward(self, E_idx):
# i-j
N_batch = E_idx.size(0)
N_nodes = E_idx.size(1)
N_neighbors = E_idx.size(2)
ii = torch.arange(N_nodes, dtype=torch.float32).view((1, -1, 1)).cuda()
d = (E_idx.float() - ii).unsqueeze(-1)
# Original Transformer frequencies
frequency = torch.exp(
torch.arange(0, self.num_embeddings, 2, dtype=torch.float32)
* -(np.log(10000.0) / self.num_embeddings)).cuda()
angles = d * frequency.view((1,1,1,-1))
E = torch.cat((torch.cos(angles), torch.sin(angles)), -1)
return E # [N_batch, N_nodes, N_neighbors, num_embeddings]
class EdgeFeatures(nn.Module):
def __init__(self, edge_features, num_positional_embeddings=16,
num_rbf=16, top_k=30, augment_eps=0.):
super(EdgeFeatures, self).__init__()
self.top_k = top_k
self.augment_eps = augment_eps
self.num_rbf = num_rbf
# Positional encoding
self.PE = PositionalEncodings(num_positional_embeddings)
# Embedding and normalization
self.edge_embedding = nn.Linear(num_positional_embeddings + num_rbf + 7, edge_features, bias=True)
|
class PositionalEncodings(nn.Module):
def __init__(self, num_embeddings):
super(PositionalEncodings, self).__init__()
self.num_embeddings = num_embeddings
def forward(self, E_idx):
# i-j
N_batch = E_idx.size(0)
N_nodes = E_idx.size(1)
N_neighbors = E_idx.size(2)
ii = torch.arange(N_nodes, dtype=torch.float32).view((1, -1, 1)).cuda()
d = (E_idx.float() - ii).unsqueeze(-1)
# Original Transformer frequencies
frequency = torch.exp(
torch.arange(0, self.num_embeddings, 2, dtype=torch.float32)
* -(np.log(10000.0) / self.num_embeddings)).cuda()
angles = d * frequency.view((1,1,1,-1))
E = torch.cat((torch.cos(angles), torch.sin(angles)), -1)
return E # [N_batch, N_nodes, N_neighbors, num_embeddings]
class EdgeFeatures(nn.Module):
def __init__(self, edge_features, num_positional_embeddings=16,
num_rbf=16, top_k=30, augment_eps=0.):
super(EdgeFeatures, self).__init__()
self.top_k = top_k
self.augment_eps = augment_eps
self.num_rbf = num_rbf
# Positional encoding
self.PE = PositionalEncodings(num_positional_embeddings)
# Embedding and normalization
self.edge_embedding = nn.Linear(num_positional_embeddings + num_rbf + 7, edge_features, bias=True) | self.norm_edges = Normalize(edge_features) | 2 | 2023-11-04 15:32:31+00:00 | 2k |
gchada/ROAM | real/rail_real_walker/robots/go1_remote.py | [
{
"identifier": "Go1RemoteActionMsg",
"path": "real/rail_real_walker/robots/go1_remote_runner.py",
"snippet": "class Go1RemoteActionMsg:\n target_action : np.ndarray"
},
{
"identifier": "Go1RemoteObservation",
"path": "real/rail_real_walker/robots/go1_remote_runner.py",
"snippet": "class Go1RemoteObservation:\n angular_velocity: np.ndarray\n framequat_wijk: np.ndarray\n roll_pitch_yaw: np.ndarray\n acceleration_local: np.ndarray\n joint_pos: np.ndarray\n joint_vel: np.ndarray\n joint_torques: np.ndarray\n foot_contacts: np.ndarray\n left_joystick : np.ndarray\n right_joystick : np.ndarray\n motor_temperatures : np.ndarray"
},
{
"identifier": "Go1RemoteConfigMsg",
"path": "real/rail_real_walker/robots/go1_remote_runner.py",
"snippet": "class Go1RemoteConfigMsg:\n action_interpolation : bool\n Kp : float\n Kd : float\n ppf : float\n control_timestep : float"
},
{
"identifier": "empty_obs",
"path": "real/rail_real_walker/robots/go1_remote_runner.py",
"snippet": "def empty_obs():\n return Go1RemoteObservation(\n angular_velocity=np.zeros(3),\n framequat_wijk=np.zeros(4),\n roll_pitch_yaw=np.zeros(3),\n acceleration_local=np.zeros(3),\n joint_pos=np.zeros(12),\n joint_vel=np.zeros(12),\n joint_torques=np.zeros(12),\n foot_contacts=np.zeros(4),\n left_joystick=np.zeros(2),\n right_joystick=np.zeros(2),\n motor_temperatures=np.zeros(12)\n )"
},
{
"identifier": "DataPack",
"path": "real/rail_real_walker/robots/go1_remote_runner.py",
"snippet": "class DataPack:\n def __init__(self, deal_command_callback) -> None:\n self.remaining_data = b\"\"\n self.deal_command_callback = deal_command_callback\n \n def feed_data(self, dat: bytes, custom : Any) -> Any:\n self.remaining_data += dat\n return self.stream_decode(custom)\n \n def clear_data(self) -> None:\n self.remaining_data = b\"\"\n\n def stream_decode(self, custom : Any) -> Any:\n decoded = True\n ret = None\n while decoded:\n decoded, ret = self.try_decode(custom)\n return ret\n\n def try_decode(self, custom: Any) -> Tuple[bool, Any]:\n if len(self.remaining_data) < 3:\n return False, None\n command = self.remaining_data[0:1]\n length = int.from_bytes(self.remaining_data[1:3], 'big')\n\n if len(self.remaining_data) < length + 3:\n return False, None\n \n if len(self.remaining_data) < length + 3 + 3:\n last_data = True\n else:\n next_length = int.from_bytes(self.remaining_data[length+3+1:length+3+3], 'big')\n last_data = not len(self.remaining_data) >= (length+3)+3+next_length\n\n data = self.remaining_data[3:length + 3]\n ret = self.deal_command_callback(command, data, last_data, custom)\n self.remaining_data = self.remaining_data[length + 3:]\n return True, ret\n \n @staticmethod\n def encode(command : bytes, data : bytes) -> bytes:\n length = len(data)\n enc = command + length.to_bytes(2, 'big') + data\n return enc"
},
{
"identifier": "REAL_CONTROL_TIMESTEP",
"path": "real/rail_real_walker/robots/go1_remote_runner.py",
"snippet": "REAL_CONTROL_TIMESTEP = 0.005"
}
] | from .go1_remote_runner import Go1RemoteActionMsg, Go1RemoteObservation, Go1RemoteConfigMsg, empty_obs, DataPack, REAL_CONTROL_TIMESTEP
from typing import Optional
from rail_walker_interface import BaseWalker, BaseWalkerWithFootContact, BaseWalkerWithJoystick, BaseWalkerWithJointTemperatureSensor, Walker3DVelocityEstimator
from serde.msgpack import from_msgpack, to_msgpack
from serde import SerdeError
from functools import cached_property
import numpy as np
import time
import socket
import msgpack.exceptions
import unitree_go1_wrapper.go1_constants as go1_constants
import transforms3d as tr3d
import errno
import traceback | 1,273 |
class Go1RealWalkerRemote(BaseWalker[Go1RemoteObservation], BaseWalkerWithFootContact, BaseWalkerWithJoystick, BaseWalkerWithJointTemperatureSensor):
def __init__(
self,
velocity_estimator: Walker3DVelocityEstimator,
power_protect_factor : float = 0.5,
foot_contact_threshold: np.ndarray = np.array([80, 170, 170, 170]),
action_interpolation: bool = True,
name: Optional[str] = "robot",
Kp: float = 40,
Kd: float = 5,
control_timestep : float = 0.05,
force_real_control_timestep : bool = True,
limit_action_range : float = 1.0,
server_addr = ("192.168.123.161",6001)
):
assert control_timestep >= 0.02
assert power_protect_factor > 0.0 and power_protect_factor <= 1.0
# Init communication with the robot
self.socket_client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket_client.setblocking(True)
self.socket_client.settimeout(0.5)
self.socket_client.connect(server_addr)
self.socket_client.setblocking(False)
self._server_addr = server_addr
|
class Go1RealWalkerRemote(BaseWalker[Go1RemoteObservation], BaseWalkerWithFootContact, BaseWalkerWithJoystick, BaseWalkerWithJointTemperatureSensor):
def __init__(
self,
velocity_estimator: Walker3DVelocityEstimator,
power_protect_factor : float = 0.5,
foot_contact_threshold: np.ndarray = np.array([80, 170, 170, 170]),
action_interpolation: bool = True,
name: Optional[str] = "robot",
Kp: float = 40,
Kd: float = 5,
control_timestep : float = 0.05,
force_real_control_timestep : bool = True,
limit_action_range : float = 1.0,
server_addr = ("192.168.123.161",6001)
):
assert control_timestep >= 0.02
assert power_protect_factor > 0.0 and power_protect_factor <= 1.0
# Init communication with the robot
self.socket_client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket_client.setblocking(True)
self.socket_client.settimeout(0.5)
self.socket_client.connect(server_addr)
self.socket_client.setblocking(False)
self._server_addr = server_addr | self.data_pack = DataPack(self.deal_with_data) | 4 | 2023-11-02 23:21:38+00:00 | 2k |
NUCCASJNR/PaystackPyAPI | paystackpyAPI/transaction.py | [
{
"identifier": "PaystackAPI",
"path": "paystackpyAPI/base.py",
"snippet": "class PaystackAPI:\n \n def __init__(self, api_key: str) -> None:\n self.api_key = api_key"
},
{
"identifier": "APIError",
"path": "errors.py",
"snippet": "class APIError(PaystackError):\n \"\"\"Exception raised for errors in the Paystack API.\n\n Attributes:\n status_code -- the HTTP status code indicating the error\n error_message -- a description of the error\n \"\"\"\n\n def __init__(self, status_code, error_message):\n self.status_code = status_code\n self.error_message = error_message\n super().__init__(self.error_message)"
}
] | import requests
import datetime
import webbrowser
from .base import PaystackAPI
from typing import Dict, Union
from errors import APIError
from decimal import Decimal | 681 | #!/usr/bin/env python3
"""Handles All Paystack related tasks"""
class Transaction(PaystackAPI):
INITIALIZATION_OPTIONAL_PARAMS = [
"currency",
"reference",
"callback_url",
"plan",
"invoice_limit",
"metadata",
"channels",
"split_code",
"subaccount",
"transaction_charge",
"bearer"
]
TRANSACTION_LIST_OPTIONAL_PARAMS = [
"customer",
"terminalid",
"status",
"from",
"to",
"amount"
]
CHARGE_AUTHORIZATION_OPTIONAL_PARAMS = [
"reference",
"currency",
"metadata",
"channels",
"subaccount",
"transaction_charge",
"bearer",
"queue"
]
EXPORT_OPTIONAL_PARAMS = [
'from',
'to',
'customer',
'status',
'currency',
'amount',
'settled',
'settlement',
'payment_page'
]
def __init__(self, api_key: str):
super().__init__(api_key)
self.paystack_initialization_url = "https://api.paystack.co/transaction/initialize"
self.paystack_verification_url = "https://api.paystack.co/transaction/verify"
self.list_transaction_url = "https://api.paystack.co/transaction"
self.fetch_transaction_url = "https://api.paystack.co/transaction"
self.charge_authorization_url = "https://api.paystack.co/transaction/charge_authorization"
self.transaction_timeline_url = "https://api.paystack.co/transaction/timeline"
self.transaction_totals_url = "https://api.paystack.co/transaction/totals"
self.export_transactions_url = "https://api.paystack.co/transaction/export"
def initialize_transaction(self, email: str, amount: int, **kwargs):
"""
Initialize a Paystack transaction.
:param email: Customer's email address.
:param amount: Transaction amount.
:param kwargs: Optional parameters for the transaction.
Example: `currency`, `callback_url`, etc.
:return: JSON response from Paystack API.
:raises APIError: If required parameters are missing or the API key is invalid.
"""
if not email or not amount:
| #!/usr/bin/env python3
"""Handles All Paystack related tasks"""
class Transaction(PaystackAPI):
INITIALIZATION_OPTIONAL_PARAMS = [
"currency",
"reference",
"callback_url",
"plan",
"invoice_limit",
"metadata",
"channels",
"split_code",
"subaccount",
"transaction_charge",
"bearer"
]
TRANSACTION_LIST_OPTIONAL_PARAMS = [
"customer",
"terminalid",
"status",
"from",
"to",
"amount"
]
CHARGE_AUTHORIZATION_OPTIONAL_PARAMS = [
"reference",
"currency",
"metadata",
"channels",
"subaccount",
"transaction_charge",
"bearer",
"queue"
]
EXPORT_OPTIONAL_PARAMS = [
'from',
'to',
'customer',
'status',
'currency',
'amount',
'settled',
'settlement',
'payment_page'
]
def __init__(self, api_key: str):
super().__init__(api_key)
self.paystack_initialization_url = "https://api.paystack.co/transaction/initialize"
self.paystack_verification_url = "https://api.paystack.co/transaction/verify"
self.list_transaction_url = "https://api.paystack.co/transaction"
self.fetch_transaction_url = "https://api.paystack.co/transaction"
self.charge_authorization_url = "https://api.paystack.co/transaction/charge_authorization"
self.transaction_timeline_url = "https://api.paystack.co/transaction/timeline"
self.transaction_totals_url = "https://api.paystack.co/transaction/totals"
self.export_transactions_url = "https://api.paystack.co/transaction/export"
def initialize_transaction(self, email: str, amount: int, **kwargs):
"""
Initialize a Paystack transaction.
:param email: Customer's email address.
:param amount: Transaction amount.
:param kwargs: Optional parameters for the transaction.
Example: `currency`, `callback_url`, etc.
:return: JSON response from Paystack API.
:raises APIError: If required parameters are missing or the API key is invalid.
"""
if not email or not amount: | raise APIError(400, "Missing required parameters: email and/or amount") | 1 | 2023-11-07 18:00:39+00:00 | 2k |
Dataherald/Assistant | assistant.py | [
{
"identifier": "Function",
"path": "function.py",
"snippet": "class Function(BaseModel, ABC):\n name: str\n description: Optional[str] = None\n parameters: Optional[List[Property]] = None\n\n def to_dict(self):\n if self.parameters is None:\n return {\n \"name\": self.name,\n \"description\": self.description,\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {},\n \"required\": [],\n },\n }\n return {\n \"name\": self.name,\n \"description\": self.description,\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n p.name: {\"type\": p.type, \"description\": p.description}\n for p in self.parameters\n },\n \"required\": [p.name for p in self.parameters if p.required],\n },\n }\n\n def run(self, function_call: FunctionCall = None):\n if function_call.arguments == {} and self.parameters is not None:\n raise Exception(\"Missing parameters\")\n if function_call.arguments == {} and self.parameters is None:\n return self.function()\n if function_call.arguments != {} and self.parameters is None:\n raise Exception(\"Unexpected parameters\")\n if function_call is not None and self.parameters is not None:\n for p in self.parameters:\n if p.name not in function_call.arguments and p.required:\n raise Exception(f\"Missing parameter {p.name}\")\n return self.function(**function_call.arguments)\n\n def run_catch_exceptions(self, function_call: FunctionCall = None):\n try:\n return self.run(function_call=function_call)\n except Exception as e:\n return str(e)\n\n @abstractmethod\n def function(self, **kwargs):\n pass"
},
{
"identifier": "FunctionCall",
"path": "function.py",
"snippet": "class FunctionCall(BaseModel):\n call_id: str\n name: str\n arguments: Optional[Dict] = None"
}
] | from openai import OpenAI
from openai import Client
from function import Function, FunctionCall
from openai.types.beta import Thread, Assistant
from openai.types.beta.threads import Run, ThreadMessage
from yaspin import yaspin
import json
import random
import time | 1,288 |
PRINT_COLORS = [
'\033[31m',
'\033[32m',
'\033[33m',
'\033[34m',
'\033[35m',
'\033[36m',
]
class Message:
thread_id: str
role: str
content: str
file_ids: list[str]
def __init__(
self, thread_id: str, role: str, content: str, file_ids: list[str] = None
):
self.thread_id = thread_id
self.role = role
self.content = content
self.file_ids = file_ids
class Conversation:
messages: list[Message]
def __init__(self, messages: list[Message]):
self.messages = messages
def print_conversation(self):
for message in self.messages:
print(f"{message.role}: {message.content}")
class AIAssistant:
assistant: Assistant
client: OpenAI
assistant_name: str
assistant_description: str
instruction: str
model: str
use_retrieval: bool
use_code_interpreter: bool
functions: list[Function]
threads: list[Thread]
tools: list[dict]
file_ids: list[str]
conversation: Conversation
verbose: bool
auto_delete: bool = True
def __init__(
self,
instruction: str,
model: str,
use_retrieval: bool = False,
use_code_interpreter: bool = False,
file_ids: list[str] = None,
functions: list[Function] = None,
assistant_name: str = "AI Assistant",
assistant_description: str = "An AI Assistant",
verbose: bool = False,
auto_delete: bool = True,
):
self.client = Client()
self.instruction = instruction
self.model = model
self.use_retrieval = use_retrieval
self.use_code_interpreter = use_code_interpreter
self.file_ids = file_ids
self.functions = functions
self.assistant_name = assistant_name
self.assistant_description = assistant_description
self.tools = [
{"type": "function", "function": f.to_dict()} for f in self.functions
] if self.functions else []
if self.use_retrieval:
self.tools.append({"type": "retrieval"})
if self.use_code_interpreter:
self.tools.append({"type": "code_interpreter"})
self.assistant = self.client.beta.assistants.create(
name=self.assistant_name,
description=self.assistant_description,
instructions=self.instruction,
model=self.model,
tools=self.tools,
file_ids=self.file_ids if self.file_ids else [],
)
self.threads = []
self.conversation = Conversation(messages=[])
self.verbose = verbose
self.auto_delete = auto_delete
def delete_assistant_file_by_id(self, file_id: str):
file_deletion_status = self.client.beta.assistants.files.delete(
assistant_id=self.assistant.id, file_id=file_id
)
return file_deletion_status
def create_thread(self) -> Thread:
thread = self.client.beta.threads.create()
self.threads.append(thread)
return thread
def create_tool_outputs(self, run: Run) -> list[dict]:
tool_outputs = []
for tool in run.required_action.submit_tool_outputs.tool_calls:
tool_found = False
function_name = tool.function.name
if tool.function.arguments:
function_arguments = json.loads(tool.function.arguments)
else:
function_arguments = {}
call_id = tool.id
|
PRINT_COLORS = [
'\033[31m',
'\033[32m',
'\033[33m',
'\033[34m',
'\033[35m',
'\033[36m',
]
class Message:
thread_id: str
role: str
content: str
file_ids: list[str]
def __init__(
self, thread_id: str, role: str, content: str, file_ids: list[str] = None
):
self.thread_id = thread_id
self.role = role
self.content = content
self.file_ids = file_ids
class Conversation:
messages: list[Message]
def __init__(self, messages: list[Message]):
self.messages = messages
def print_conversation(self):
for message in self.messages:
print(f"{message.role}: {message.content}")
class AIAssistant:
assistant: Assistant
client: OpenAI
assistant_name: str
assistant_description: str
instruction: str
model: str
use_retrieval: bool
use_code_interpreter: bool
functions: list[Function]
threads: list[Thread]
tools: list[dict]
file_ids: list[str]
conversation: Conversation
verbose: bool
auto_delete: bool = True
def __init__(
self,
instruction: str,
model: str,
use_retrieval: bool = False,
use_code_interpreter: bool = False,
file_ids: list[str] = None,
functions: list[Function] = None,
assistant_name: str = "AI Assistant",
assistant_description: str = "An AI Assistant",
verbose: bool = False,
auto_delete: bool = True,
):
self.client = Client()
self.instruction = instruction
self.model = model
self.use_retrieval = use_retrieval
self.use_code_interpreter = use_code_interpreter
self.file_ids = file_ids
self.functions = functions
self.assistant_name = assistant_name
self.assistant_description = assistant_description
self.tools = [
{"type": "function", "function": f.to_dict()} for f in self.functions
] if self.functions else []
if self.use_retrieval:
self.tools.append({"type": "retrieval"})
if self.use_code_interpreter:
self.tools.append({"type": "code_interpreter"})
self.assistant = self.client.beta.assistants.create(
name=self.assistant_name,
description=self.assistant_description,
instructions=self.instruction,
model=self.model,
tools=self.tools,
file_ids=self.file_ids if self.file_ids else [],
)
self.threads = []
self.conversation = Conversation(messages=[])
self.verbose = verbose
self.auto_delete = auto_delete
def delete_assistant_file_by_id(self, file_id: str):
file_deletion_status = self.client.beta.assistants.files.delete(
assistant_id=self.assistant.id, file_id=file_id
)
return file_deletion_status
def create_thread(self) -> Thread:
thread = self.client.beta.threads.create()
self.threads.append(thread)
return thread
def create_tool_outputs(self, run: Run) -> list[dict]:
tool_outputs = []
for tool in run.required_action.submit_tool_outputs.tool_calls:
tool_found = False
function_name = tool.function.name
if tool.function.arguments:
function_arguments = json.loads(tool.function.arguments)
else:
function_arguments = {}
call_id = tool.id | function_call = FunctionCall( | 1 | 2023-11-09 01:58:07+00:00 | 2k |
Skytliang/SpyGame | utils/agent.py | [
{
"identifier": "OutOfQuotaException",
"path": "utils/openai_utils.py",
"snippet": "class OutOfQuotaException(Exception):\n \"Raised when the key exceeded the current quota\"\n def __init__(self, key, cause=None):\n super().__init__(f\"No quota for key: {key}\")\n self.key = key\n self.cause = cause\n\n def __str__(self):\n if self.cause:\n return f\"{super().__str__()}. Caused by {self.cause}\"\n else:\n return super().__str__()"
},
{
"identifier": "AccessTerminatedException",
"path": "utils/openai_utils.py",
"snippet": "class AccessTerminatedException(Exception):\n \"Raised when the key has been terminated\"\n def __init__(self, key, cause=None):\n super().__init__(f\"Access terminated key: {key}\")\n self.key = key\n self.cause = cause\n\n def __str__(self):\n if self.cause:\n return f\"{super().__str__()}. Caused by {self.cause}\"\n else:\n return super().__str__()"
},
{
"identifier": "TimeOutException",
"path": "utils/openai_utils.py",
"snippet": "class TimeOutException(Exception):\n \"Raised when time out error\"\n def __init__(self, cause=None):\n super().__init__(f\"Time Out Error\")\n self.cause = cause\n\n def __str__(self):\n if self.cause:\n return f\"{super().__str__()}. Caused by {self.cause}\"\n else:\n return super().__str__()"
},
{
"identifier": "num_tokens_from_string",
"path": "utils/openai_utils.py",
"snippet": "def valid_location():\n def __init__(self, key, cause=None):\n def __str__(self):\n def __init__(self, key, cause=None):\n def __str__(self):\n def __init__(self, cause=None):\n def __str__(self):\ndef num_tokens_from_string(string: str, model_name: str) -> int:\nclass OutOfQuotaException(Exception):\nclass AccessTerminatedException(Exception):\nclass TimeOutException(Exception):"
}
] | import os
import openai
import backoff
import time
import random
import json
import copy
import numpy as np
from datetime import datetime
from openai.error import RateLimitError, APIError, ServiceUnavailableError, APIConnectionError, AuthenticationError
from utils.openai_utils import OutOfQuotaException, AccessTerminatedException, TimeOutException
from utils.openai_utils import num_tokens_from_string, model2max_context | 1,371 |
# from bardapi import Bard
# import requests
# import torch
# from transformers import AutoTokenizer, AutoModelForCausalLM
# from FastChat.fastchat.model.model_adapter import load_model, get_conversation_template, add_model_args
cycle_all_keys = True
current_path = os.path.abspath(__file__).rsplit('/', 1)[0]
gpt3_api_keys = json.load(open(f'{current_path}/gpt3_apikeys.json'))
gpt4_api_keys = json.load(open(f'{current_path}/gpt4_apikeys.json'))
random.shuffle(gpt3_api_keys)
random.shuffle(gpt4_api_keys)
def cycle_keys(openai_api_keys):
while True:
for key in openai_api_keys:
yield key
gpt3_key_generator = cycle_keys(gpt3_api_keys)
gpt4_key_generator = cycle_keys(gpt4_api_keys)
def key_generator(model_name):
if model_name in ["gpt-3.5-turbo", "gpt-3.5-turbo-0301", "gpt-3.5-turbo-0613", "text-davinci-003", "text-davinci-002", "curie", "text-curie-001", "text-babbage-001", "text-ada-001"]:
return gpt3_key_generator
elif model_name in ["gpt-4", "gpt-4-0314", "gpt-4-0613"]:
return gpt4_key_generator
class Agent:
def __init__(self, model_name: str, temperature: float, sleep_time: float = 0) -> None:
"""Create an agent (gpt series by default)
Args:
model_name(str): model name
temperature (float): higher values make the output more random, while lower values make it more focused and deterministic
sleep_time (float): sleep against rate limiting
"""
self.model_name = model_name
self.temperature = temperature
self.sleep_time = sleep_time
self.memory_lst = []
self.memory_lst_idx = np.array([])
@backoff.on_exception(backoff.expo, (RateLimitError, APIError, ServiceUnavailableError, APIConnectionError, AuthenticationError), max_tries=5)
def query(self, messages: "list[dict]", max_tokens: int, api_key: str, temperature: float) -> str:
"""make a query
Args:
messages (list[dict]): chat history in turbo format
max_tokens (int): max token in api call
api_key (str): openai api key
temperature (float): sampling temperature
Raises:
            OutOfQuotaException: the apikey has run out of quota
            AccessTerminatedException: the apikey has been banned
Returns:
str: the return msg
"""
# assert self.model_name in support_models, f"Not support {self.model_name}. Choices: {support_models}"
try:
openai.api_base = self.api_base
response = openai.ChatCompletion.create(
model=self.model_name,
messages=messages,
temperature=temperature,
max_tokens=max_tokens,
api_key=api_key,
presence_penalty=0.75
)
gen = response['choices'][0]['message']['content']
return gen
except RateLimitError as e:
if "You exceeded your current quota, please check your plan and billing details" in e.user_message:
self.openai_api_keys.remove(api_key)
print(f'Out Of Quota: {api_key}')
raise OutOfQuotaException(api_key)
elif "Your access was terminated due to violation of our policies" in e.user_message:
self.openai_api_keys.remove(api_key)
print(f'Access Terminated: {api_key}')
|
# from bardapi import Bard
# import requests
# import torch
# from transformers import AutoTokenizer, AutoModelForCausalLM
# from FastChat.fastchat.model.model_adapter import load_model, get_conversation_template, add_model_args
cycle_all_keys = True
current_path = os.path.abspath(__file__).rsplit('/', 1)[0]
gpt3_api_keys = json.load(open(f'{current_path}/gpt3_apikeys.json'))
gpt4_api_keys = json.load(open(f'{current_path}/gpt4_apikeys.json'))
random.shuffle(gpt3_api_keys)
random.shuffle(gpt4_api_keys)
def cycle_keys(openai_api_keys):
while True:
for key in openai_api_keys:
yield key
gpt3_key_generator = cycle_keys(gpt3_api_keys)
gpt4_key_generator = cycle_keys(gpt4_api_keys)
def key_generator(model_name):
if model_name in ["gpt-3.5-turbo", "gpt-3.5-turbo-0301", "gpt-3.5-turbo-0613", "text-davinci-003", "text-davinci-002", "curie", "text-curie-001", "text-babbage-001", "text-ada-001"]:
return gpt3_key_generator
elif model_name in ["gpt-4", "gpt-4-0314", "gpt-4-0613"]:
return gpt4_key_generator
class Agent:
def __init__(self, model_name: str, temperature: float, sleep_time: float = 0) -> None:
"""Create an agent (gpt series by default)
Args:
model_name(str): model name
temperature (float): higher values make the output more random, while lower values make it more focused and deterministic
sleep_time (float): sleep against rate limiting
"""
self.model_name = model_name
self.temperature = temperature
self.sleep_time = sleep_time
self.memory_lst = []
self.memory_lst_idx = np.array([])
@backoff.on_exception(backoff.expo, (RateLimitError, APIError, ServiceUnavailableError, APIConnectionError, AuthenticationError), max_tries=5)
def query(self, messages: "list[dict]", max_tokens: int, api_key: str, temperature: float) -> str:
"""make a query
Args:
messages (list[dict]): chat history in turbo format
max_tokens (int): max token in api call
api_key (str): openai api key
temperature (float): sampling temperature
Raises:
            OutOfQuotaException: the apikey has run out of quota
            AccessTerminatedException: the apikey has been banned
Returns:
str: the return msg
"""
# assert self.model_name in support_models, f"Not support {self.model_name}. Choices: {support_models}"
try:
openai.api_base = self.api_base
response = openai.ChatCompletion.create(
model=self.model_name,
messages=messages,
temperature=temperature,
max_tokens=max_tokens,
api_key=api_key,
presence_penalty=0.75
)
gen = response['choices'][0]['message']['content']
return gen
except RateLimitError as e:
if "You exceeded your current quota, please check your plan and billing details" in e.user_message:
self.openai_api_keys.remove(api_key)
print(f'Out Of Quota: {api_key}')
raise OutOfQuotaException(api_key)
elif "Your access was terminated due to violation of our policies" in e.user_message:
self.openai_api_keys.remove(api_key)
print(f'Access Terminated: {api_key}') | raise AccessTerminatedException(api_key) | 1 | 2023-11-01 03:42:10+00:00 | 2k |
SpectacularAI/point-cloud-tools | formats/auto.py | [
{
"identifier": "load_ply_to_dataframe",
"path": "formats/ply.py",
"snippet": "def load_ply_to_dataframe(ply_file):\n with open(ply_file, 'rb') as f:\n return load_ply_stream_to_dataframe(f)\n load_ply_to_dataframe"
},
{
"identifier": "dataframe_to_ply",
"path": "formats/ply.py",
"snippet": "def dataframe_to_ply(df, output_file):\n header = generate_ply_header(df)\n\n with open(output_file, 'wb') as f:\n f.write(header.encode())\n for _, row in df.iterrows():\n for column in df.columns:\n f.write(struct.pack('f', row[column]))"
},
{
"identifier": "splat_file_to_data_frame",
"path": "formats/splat.py",
"snippet": "def splat_file_to_data_frame(input_file):\n with open(input_file, 'rb') as f:\n return splat_stream_to_data_frame(f)"
},
{
"identifier": "dataframe_to_splat_file",
"path": "formats/splat.py",
"snippet": "def dataframe_to_splat_file(df, fn, **kwargs):\n with open(fn, 'wb') as f:\n dataframe_to_flat_array(df, **kwargs).tofile(f)"
},
{
"identifier": "dataframe_to_pcd",
"path": "formats/pcd.py",
"snippet": "def dataframe_to_pcd(df, out_file, **kwargs):\n with open(out_file, 'wt') as f:\n dataframe_to_pcd_stream(df, f, **kwargs)"
},
{
"identifier": "dataframe_to_gsplat_html",
"path": "formats/html.py",
"snippet": "def dataframe_to_gsplat_html(df, fn, html_template='', **kwargs):\n if len(html_template) == 0:\n html_template = os.path.join(os.path.dirname(__file__), DEFAULT_HTML_TEMPLATE)\n with open(html_template, 'rt') as f:\n html = f.read()\n\n splat_data = dataframe_to_flat_array(df, **kwargs)\n\n html = html.replace('CORS_SUCKS', base64.b64encode(splat_data.tobytes()).decode('utf-8'))\n\n with open(fn, 'wt') as f:\n f.write(html)"
}
] | import pandas as pd
from .ply import load_ply_to_dataframe, dataframe_to_ply
from .splat import splat_file_to_data_frame, dataframe_to_splat_file
from .pcd import dataframe_to_pcd
from .html import dataframe_to_gsplat_html | 673 |
def load_to_dataframe(fn):
ext = fn.split('.')[-1]
if ext == 'ply':
return load_ply_to_dataframe(fn)
elif ext == 'csv':
return pd.read_csv(fn)
elif ext == 'txt':
# assuming COLMAP CSV format
return pd.read_csv(fn, sep=' ', header=None, usecols=list(range(7)), names=['id'] + list('xyzrgb')).set_index('id')
elif ext == 'parquet':
return pd.read_parquet(fn)
elif ext == 'splat':
return splat_file_to_data_frame(fn)
elif ext == 'pcd':
raise RuntimeError("PCD import not implemented")
else:
raise RuntimeError("unrecognized extension ." + ext)
def save_to_dataframe(df, fn, args):
ext = fn.split('.')[-1]
if ext == 'ply':
|
def load_to_dataframe(fn):
ext = fn.split('.')[-1]
if ext == 'ply':
return load_ply_to_dataframe(fn)
elif ext == 'csv':
return pd.read_csv(fn)
elif ext == 'txt':
# assuming COLMAP CSV format
return pd.read_csv(fn, sep=' ', header=None, usecols=list(range(7)), names=['id'] + list('xyzrgb')).set_index('id')
elif ext == 'parquet':
return pd.read_parquet(fn)
elif ext == 'splat':
return splat_file_to_data_frame(fn)
elif ext == 'pcd':
raise RuntimeError("PCD import not implemented")
else:
raise RuntimeError("unrecognized extension ." + ext)
def save_to_dataframe(df, fn, args):
ext = fn.split('.')[-1]
if ext == 'ply': | dataframe_to_ply(df, fn) | 1 | 2023-11-02 14:16:49+00:00 | 2k |
jdelahayes/ha-voltalis | custom_components/voltalis/climate.py | [
{
"identifier": "DEFAULT_MAX_TEMP",
"path": "custom_components/voltalis/const.py",
"snippet": "DEFAULT_MAX_TEMP = 24"
},
{
"identifier": "DEFAULT_MIN_TEMP",
"path": "custom_components/voltalis/const.py",
"snippet": "DEFAULT_MIN_TEMP = 7"
},
{
"identifier": "DOMAIN",
"path": "custom_components/voltalis/const.py",
"snippet": "DOMAIN = \"voltalis\""
},
{
"identifier": "HA_PRESET_MODES",
"path": "custom_components/voltalis/const.py",
"snippet": "HA_PRESET_MODES = {\n \"ECO\": PRESET_ECO,\n \"CONFORT\": PRESET_COMFORT,\n \"TEMPERATURE\": PRESET_HOME,\n \"HORS_GEL\": PRESET_AWAY,\n}"
},
{
"identifier": "VOLTALIS_CONTROLLER",
"path": "custom_components/voltalis/const.py",
"snippet": "VOLTALIS_CONTROLLER = \"voltalis_controller\""
},
{
"identifier": "VOLTALIS_PRESET_MODES",
"path": "custom_components/voltalis/const.py",
"snippet": "VOLTALIS_PRESET_MODES = {\n PRESET_ECO: \"ECO\",\n PRESET_COMFORT: \"CONFORT\",\n PRESET_HOME: \"TEMPERATURE\",\n PRESET_AWAY: \"HORS_GEL\",\n}"
},
{
"identifier": "VOLTALIS_HEATER_TYPE",
"path": "custom_components/voltalis/const.py",
"snippet": "VOLTALIS_HEATER_TYPE = \"HEATER\""
},
{
"identifier": "VoltalisEntity",
"path": "custom_components/voltalis/entity.py",
"snippet": "class VoltalisEntity(CoordinatorEntity):\n \"\"\"Base class for Voltalis entities.\"\"\"\n\n def __init__(\n self,\n coordinator: DataUpdateCoordinator,\n appliance: VoltalisAppliance,\n entity_name,\n ) -> None:\n \"\"\"Initialize the entity.\n\n Given a appliance id and a short name for the entity, we provide basic device\n info, name, unique id, etc. for all derived entities.\n \"\"\"\n super().__init__(coordinator)\n self.appliance = appliance\n self._attr_unique_id = str(appliance.id)\n self._attr_device_info = DeviceInfo(\n identifiers={(DOMAIN, str(appliance.id))},\n name=appliance.name.capitalize(),\n manufacturer=appliance.modulatorType,\n model=appliance.applianceType,\n )"
}
] | import logging
from typing import Any
from homeassistant.components.climate import (
ClimateEntity,
ClimateEntityFeature,
HVACAction,
HVACMode,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_TEMPERATURE, UnitOfTemperature
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.util.unit_conversion import TemperatureConverter
from .const import (
DEFAULT_MAX_TEMP,
DEFAULT_MIN_TEMP,
DOMAIN,
HA_PRESET_MODES,
VOLTALIS_CONTROLLER,
VOLTALIS_PRESET_MODES,
VOLTALIS_HEATER_TYPE,
)
from .entity import VoltalisEntity | 670 | """Platform for climate integration."""
from __future__ import annotations
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Set up climate entity for Voltalis Appliance."""
| """Platform for climate integration."""
from __future__ import annotations
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Set up climate entity for Voltalis Appliance.""" | controller = hass.data[DOMAIN][entry.entry_id][VOLTALIS_CONTROLLER] | 4 | 2023-11-01 09:05:17+00:00 | 2k |
r-three/licensed-pile | ubuntu/to-dolma.py | [
{
"identifier": "PermissiveLicenses",
"path": "licensed_pile/licenses.py",
"snippet": "class PermissiveLicenses(StringEnum):\n PD = \"Public Domain\"\n CC0 = \"Creative Commons Zero - Public Domain - https://creativecommons.org/publicdomain/zero/1.0/\"\n CC_BY = (\n \"Creative Commons - Attribution - https://creativecommons.org/licenses/by/4.0/\"\n )\n CC_BY_3 = (\n \"Creative Commons - Attribution - https://creativecommons.org/licenses/by/3.0/\"\n )\n CC_BY_SA = \"Creative Commons - Attribution Share-Alike - https://creativecommons.org/licenses/by-sa/4.0/\"\n CC_BY_SA_3 = \"Creative Commons - Attribution Share-Alike - https://creativecommons.org/licenses/by-sa/3.0/\"\n CC_BY_SA_2_5 = \"Creative Commons - Attribution Share-Alike - https://creativecommons.org/licenses/by-sa/2.5/\"\n GFDL = \"GNU Free Documentation License\"\n APACHE_2 = \"Apache 2 License - https://www.apache.org/licenses/LICENSE-2.0\"\n MIT = \"MIT License\"\n BSD = \"BSD License\"\n\n # TODO: Fill out this function to match in more cases.\n # Note: This kind of function will always be messy and probably require\n # multiple checks that are common across branches. Instead of trying to\n # clean on the implementation, which would get complex (like the compositional\n # solution to fizzbuzz https://themonadreader.files.wordpress.com/2014/04/fizzbuzz.pdf)\n # we should just have a bit of a mess and lots of unittests.\n @classmethod\n def from_string(cls, s: str) -> \"PermissiveLicenses\":\n s = s.lower().strip()\n if re.match(r\".*/publicdomain/zero/1.0/?$\", s):\n return cls.CC0\n if m := re.match(r\".*/licenses/by(?P<share>-sa)?/(?P<version>\\d).0/?$\", s):\n if m.group(\"version\") == \"4\":\n if m.group(\"share\") is None:\n return cls.CC_BY_SA\n return cls.CC_BY\n elif m.group(1) == \"3\":\n if m.group(\"share\") is None:\n return cls.CC_BY_SA_3\n return cls.CC_BY_3\n else:\n raise ValueError(f\"Unable to understand license {s}\")\n raise ValueError(f\"Unable to understand license {s}\")"
},
{
"identifier": "to_dolma",
"path": "licensed_pile/write.py",
"snippet": "def to_dolma(\n examples: Sequence[Dict],\n path: str,\n filename: str,\n shard_size: int = 1,\n quiet: bool = False,\n):\n \"\"\"Write `examples` to `path` in the dolma format with `shard_size`GB shards.\"\"\"\n os.makedirs(path, exist_ok=True)\n shard_idx = 0\n size = 0\n # Gigabytes, not Gibibytes\n max_bytes = shard_size * 1000 * 1000 * 1000\n with ExitStack() as stack:\n wf = stack.enter_context(\n smart_open.open(os.path.join(path, shard_name(filename, shard_idx)), \"w\")\n )\n for example in tqdm.tqdm(examples, disable=quiet):\n data = json.dumps(example)\n # Assume one character is about 1 bytes, good enough as we use utf-8\n size += len(data)\n if size >= max_bytes:\n wf.close()\n shard_idx += 1\n wf = stack.enter_context(\n smart_open.open(\n os.path.join(path, shard_name(filename, shard_idx)), \"w\"\n )\n )\n size = 0\n wf.write(data + \"\\n\")"
}
] | import argparse
import datetime
import glob
import os
import urllib.parse
from charset_normalizer import from_bytes
from licensed_pile.licenses import PermissiveLicenses
from licensed_pile.write import to_dolma | 1,425 | """Convert the raw ubuntu data to the dolma format."""
SOURCE_NAME = "ubuntu-chat"
BASE_URL = "https://irclogs.ubuntu.com"
parser = argparse.ArgumentParser(description="Convert data to dolma.")
parser.add_argument(
"--data",
default="data/irclogs.ubuntu.com/",
help="Path to the directory containing ubuntu chat data.",
)
parser.add_argument(
"--output_dir",
default=f"data/{SOURCE_NAME}/raw/documents/",
help="Where the dolma formatted data goes.",
)
parser.add_argument(
"--filename", default="ubuntu.jsonl.gz", help="The base filename for our chat data."
)
parser.add_argument(
"--shard_size", type=int, default=1, help="Size, in GB, for each shard."
)
def format_dolma(chat: str, source_name: str = SOURCE_NAME, base_url: str = BASE_URL):
    # Manually split because os.path.split only gives (head, tail)
*_, year, month, day, channel = os.path.splitext(chat)[0].split(os.path.sep)
created = datetime.date(int(year), int(month), int(day))
with open(chat, "rb") as f:
# There is some encoding weirdness that this seems to fix.
text = str(from_bytes(f.read()).best())
return {
        # We don't want each channel to be its own data source, so add the date
# to the channel to make a unique string id.
"id": f"{created.isoformat()}-{channel}",
"text": text,
"source": source_name,
"added": datetime.datetime.utcnow().isoformat(),
"created": created.isoformat(),
"metadata": {
"license": str(PermissiveLicenses.PD),
# This will be added in the next phase of preprocessing.
"authors": [],
"url": urllib.parse.quote(
f"{base_url}/{year}/{month}/{day}/{channel}.txt", safe=":/"
),
"channel": channel,
},
}
def main(args):
# Use iterators so we don't have to load the whole dataset in memory.
# year month day channel
chats = map(
format_dolma, glob.iglob(os.path.join(args.data, "**", "**", "**", "*.txt"))
)
| """Convert the raw ubuntu data to the dolma format."""
SOURCE_NAME = "ubuntu-chat"
BASE_URL = "https://irclogs.ubuntu.com"
parser = argparse.ArgumentParser(description="Convert data to dolma.")
parser.add_argument(
"--data",
default="data/irclogs.ubuntu.com/",
help="Path to the directory containing ubuntu chat data.",
)
parser.add_argument(
"--output_dir",
default=f"data/{SOURCE_NAME}/raw/documents/",
help="Where the dolma formatted data goes.",
)
parser.add_argument(
"--filename", default="ubuntu.jsonl.gz", help="The base filename for our chat data."
)
parser.add_argument(
"--shard_size", type=int, default=1, help="Size, in GB, for each shard."
)
def format_dolma(chat: str, source_name: str = SOURCE_NAME, base_url: str = BASE_URL):
    # Manually split because os.path.split only gives (head, tail)
*_, year, month, day, channel = os.path.splitext(chat)[0].split(os.path.sep)
created = datetime.date(int(year), int(month), int(day))
with open(chat, "rb") as f:
# There is some encoding weirdness that this seems to fix.
text = str(from_bytes(f.read()).best())
return {
        # We don't want each channel to be its own data source, so add the date
# to the channel to make a unique string id.
"id": f"{created.isoformat()}-{channel}",
"text": text,
"source": source_name,
"added": datetime.datetime.utcnow().isoformat(),
"created": created.isoformat(),
"metadata": {
"license": str(PermissiveLicenses.PD),
# This will be added in the next phase of preprocessing.
"authors": [],
"url": urllib.parse.quote(
f"{base_url}/{year}/{month}/{day}/{channel}.txt", safe=":/"
),
"channel": channel,
},
}
def main(args):
# Use iterators so we don't have to load the whole dataset in memory.
# year month day channel
chats = map(
format_dolma, glob.iglob(os.path.join(args.data, "**", "**", "**", "*.txt"))
) | to_dolma(chats, args.output_dir, args.filename, args.shard_size) | 1 | 2023-11-06 16:04:10+00:00 | 2k |
UMass-Foundation-Model/genome | engine/util.py | [
{
"identifier": "Wizardlm",
"path": "engine/llm.py",
"snippet": "class Wizardlm():\n @classmethod\n def init(cls, base_model=\"WizardLM/WizardCoder-Python-34B-V1.0\", n_gpus=4, max_input_tokens=16384):\n cls.llm = LLM(model=base_model, tensor_parallel_size=n_gpus, max_num_batched_tokens=max_input_tokens)\n\n @classmethod\n def generate(cls, prompt, stop_token=None, temperature=0, top_p=1, max_new_tokens=2048):\n problem_instruction = [prompt]\n stop_tokens = ['</s>']\n if stop_token:\n stop_tokens.append(stop_token)\n sampling_params = SamplingParams(temperature=temperature, top_p=top_p, max_tokens=max_new_tokens, stop=stop_tokens)\n completions = cls.llm.generate(problem_instruction, sampling_params)\n return completions[0].outputs[0].text"
},
{
"identifier": "Codellama",
"path": "engine/llm.py",
"snippet": "class Codellama():\n @classmethod\n def init(cls, base_model=\"codellama/CodeLlama-34b-Python-hf\", n_gpus=4, max_input_tokens=8192):\n cls.llm = LLM(\n model=base_model,\n dtype=\"float16\",\n trust_remote_code=True,\n tensor_parallel_size=n_gpus,\n tokenizer=\"hf-internal-testing/llama-tokenizer\",\n max_num_batched_tokens=max_input_tokens)\n\n @classmethod\n def generate(cls, prompt, stop_token=None, temperature=0, top_p=1, max_new_tokens=2048):\n problem_instruction = [prompt]\n stop_tokens = ['</s>']\n if stop_token:\n stop_tokens.append(stop_token)\n sampling_params = SamplingParams(temperature=temperature, top_p=top_p, max_tokens=max_new_tokens, stop=stop_tokens)\n completions = cls.llm.generate(problem_instruction, sampling_params)\n return completions[0].outputs[0].text"
}
] | import os
import json
import openai
import pdb
from engine.llm import Wizardlm
from engine.llm import Codellama
from engine.datasets import get_dataset | 1,203 |
def strip_dict(dict):
for k, v in dict.items():
if isinstance(v, str):
dict[k] = v.strip()
return dict
def get_module_list(args):
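    # Scan the saved module directory, keep the highest-accuracy definition per module name,
    # and return modules that either lack a recorded test accuracy or meet args.threshold.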
if not args.use_new_module:
return []
module_save_dir = args.module_save_dir
if os.path.isdir(module_save_dir):
file_list = os.listdir(module_save_dir)
module_name_dict = {}
for filename in file_list:
            # relax the constraint on the module file name
if 'MODULE' in filename and 'json' in filename or 'json' in filename:
file_path = os.path.join(module_save_dir, filename)
try:
module_dict = json.load(open(file_path))
module_dict = strip_dict(module_dict)
name_key = "module_name" if "module_name" in module_dict else "name"
module_name = module_dict[name_key]
module_dict['module_name'] = module_name
if 'module_program' not in module_dict:
module_dict['module_program'] = module_dict['module']
#if 'annotations' not in module_dict:
# module_dict['annotations'] = module_dict['program']
if module_name not in module_name_dict or \
module_dict['test_accuracy'] > module_name_dict[module_name]['test_accuracy']:
module_name_dict[module_name] = module_dict
except:
pdb.set_trace()
module_list = []
for module_dict in module_name_dict.values():
if 'test_accuracy' not in module_dict:
module_list.append(module_dict)
elif module_dict['test_accuracy'] >= args.threshold:
module_list.append(module_dict)
else:
print("There is no available module directory: %s"%(module_save_dir))
module_list = []
return module_list
def save_output(args, output_dict, filename=None):
output_dir = args.output_dir
if args.stage == 1:
output_path = os.path.join(output_dir, output_dict['annotations'][0]['id'] + '.json')
json.dump(output_dict, open(output_path, 'w'), indent=2)
elif args.stage == 1.5:
if args.split_cases:
module_head_list = output_dict.pop('module_head_list')
for index, module_head in enumerate(module_head_list):
output_dict['module_head'] = module_head
output_path = os.path.join(output_dir, output_dict['annotations'][index]['id'] + '.json')
json.dump(output_dict, open(output_path, 'w'), indent=2)
else:
output_path = os.path.join(output_dir, output_dict['annotations'][0]['id'] + '.json')
json.dump(output_dict, open(output_path, 'w'), indent=2)
elif args.stage == 2:
if filename is None:
filename = 'MODULE_' + output_dict['module_name'] + '.json'
output_path = os.path.join(output_dir, filename)
json.dump(output_dict, open(output_path, 'w'), indent=2)
elif args.stage == 3:
output_path = os.path.join(output_dir, 'result_' + output_dict['id'] + '.json')
json.dump(output_dict, open(output_path, 'w'), indent=2)
pass
def init_gpt(args):
if args.model == 'wizardlm':
|
def strip_dict(dict):
for k, v in dict.items():
if isinstance(v, str):
dict[k] = v.strip()
return dict
def get_module_list(args):
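    # Scan the saved module directory, keep the highest-accuracy definition per module name,
    # and return modules that either lack a recorded test accuracy or meet args.threshold.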
if not args.use_new_module:
return []
module_save_dir = args.module_save_dir
if os.path.isdir(module_save_dir):
file_list = os.listdir(module_save_dir)
module_name_dict = {}
for filename in file_list:
            # relax the constraint on the module file name
if 'MODULE' in filename and 'json' in filename or 'json' in filename:
file_path = os.path.join(module_save_dir, filename)
try:
module_dict = json.load(open(file_path))
module_dict = strip_dict(module_dict)
name_key = "module_name" if "module_name" in module_dict else "name"
module_name = module_dict[name_key]
module_dict['module_name'] = module_name
if 'module_program' not in module_dict:
module_dict['module_program'] = module_dict['module']
#if 'annotations' not in module_dict:
# module_dict['annotations'] = module_dict['program']
if module_name not in module_name_dict or \
module_dict['test_accuracy'] > module_name_dict[module_name]['test_accuracy']:
module_name_dict[module_name] = module_dict
except:
pdb.set_trace()
module_list = []
for module_dict in module_name_dict.values():
if 'test_accuracy' not in module_dict:
module_list.append(module_dict)
elif module_dict['test_accuracy'] >= args.threshold:
module_list.append(module_dict)
else:
print("There is no available module directory: %s"%(module_save_dir))
module_list = []
return module_list
def save_output(args, output_dict, filename=None):
output_dir = args.output_dir
if args.stage == 1:
output_path = os.path.join(output_dir, output_dict['annotations'][0]['id'] + '.json')
json.dump(output_dict, open(output_path, 'w'), indent=2)
elif args.stage == 1.5:
if args.split_cases:
module_head_list = output_dict.pop('module_head_list')
for index, module_head in enumerate(module_head_list):
output_dict['module_head'] = module_head
output_path = os.path.join(output_dir, output_dict['annotations'][index]['id'] + '.json')
json.dump(output_dict, open(output_path, 'w'), indent=2)
else:
output_path = os.path.join(output_dir, output_dict['annotations'][0]['id'] + '.json')
json.dump(output_dict, open(output_path, 'w'), indent=2)
elif args.stage == 2:
if filename is None:
filename = 'MODULE_' + output_dict['module_name'] + '.json'
output_path = os.path.join(output_dir, filename)
json.dump(output_dict, open(output_path, 'w'), indent=2)
elif args.stage == 3:
output_path = os.path.join(output_dir, 'result_' + output_dict['id'] + '.json')
json.dump(output_dict, open(output_path, 'w'), indent=2)
pass
def init_gpt(args):
if args.model == 'wizardlm': | Wizardlm.init() | 0 | 2023-11-01 16:39:33+00:00 | 2k |
ml4bio/RhoFold | rhofold/model/primitives.py | [
{
"identifier": "permute_final_dims",
"path": "rhofold/utils/tensor_utils.py",
"snippet": "def permute_final_dims(tensor: torch.Tensor, inds: List[int]):\n zero_index = -1 * len(inds)\n first_inds = list(range(len(tensor.shape[:zero_index])))\n return tensor.permute(first_inds + [zero_index + i for i in inds])"
},
{
"identifier": "flatten_final_dims",
"path": "rhofold/utils/tensor_utils.py",
"snippet": "def flatten_final_dims(t: torch.Tensor, no_dims: int):\n return t.reshape(t.shape[:-no_dims] + (-1,))"
}
] | import math
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
import numpy as np
from typing import Optional, List, Tuple
from rhofold.utils.tensor_utils import (
permute_final_dims,
flatten_final_dims,
) | 757 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def _prod(nums):
out = 1
for n in nums:
out = out * n
return out
class Linear(nn.Linear):
"""
A Linear layer with built-in nonstandard initializations. Called just
like torch.nn.Linear.
Implements the initializers in 1.11.4, plus some additional ones found
in the code.
"""
def __init__(
self,
in_dim: int,
out_dim: int,
bias: bool = True,
):
"""
Args:
in_dim:
The final dimension of inputs to the layer
out_dim:
The final dimension of layer outputs
bias:
Whether to learn an additive bias. True by default
"""
super(Linear, self).__init__(in_dim, out_dim, bias=bias)
if bias:
with torch.no_grad():
self.bias.fill_(0)
class LayerNorm(nn.Module):
def __init__(self, c_in, eps=1e-5):
super(LayerNorm, self).__init__()
self.c_in = (c_in,)
self.eps = eps
self.weight = nn.Parameter(torch.ones(c_in))
self.bias = nn.Parameter(torch.zeros(c_in))
def forward(self, x):
out = nn.functional.layer_norm(
x,
self.c_in,
self.weight,
self.bias,
self.eps,
)
return out
@torch.jit.ignore
def softmax_no_cast(t: torch.Tensor, dim: int = -1) -> torch.Tensor:
"""
Softmax, but without automatic casting to fp32 when the input is of
type bfloat16
"""
s = torch.nn.functional.softmax(t, dim=dim)
return s
a_c = 0
attn_maps = []
attn_map_dir = None
seq_len = None
save_png = False
msa_depth = None
#@torch.jit.script
def _attention(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, biases: List[torch.Tensor]) -> torch.Tensor:
# [*, H, C_hidden, K]
| # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def _prod(nums):
out = 1
for n in nums:
out = out * n
return out
class Linear(nn.Linear):
"""
A Linear layer with built-in nonstandard initializations. Called just
like torch.nn.Linear.
Implements the initializers in 1.11.4, plus some additional ones found
in the code.
"""
def __init__(
self,
in_dim: int,
out_dim: int,
bias: bool = True,
):
"""
Args:
in_dim:
The final dimension of inputs to the layer
out_dim:
The final dimension of layer outputs
bias:
Whether to learn an additive bias. True by default
"""
super(Linear, self).__init__(in_dim, out_dim, bias=bias)
if bias:
with torch.no_grad():
self.bias.fill_(0)
class LayerNorm(nn.Module):
def __init__(self, c_in, eps=1e-5):
super(LayerNorm, self).__init__()
self.c_in = (c_in,)
self.eps = eps
self.weight = nn.Parameter(torch.ones(c_in))
self.bias = nn.Parameter(torch.zeros(c_in))
def forward(self, x):
out = nn.functional.layer_norm(
x,
self.c_in,
self.weight,
self.bias,
self.eps,
)
return out
@torch.jit.ignore
def softmax_no_cast(t: torch.Tensor, dim: int = -1) -> torch.Tensor:
"""
Softmax, but without automatic casting to fp32 when the input is of
type bfloat16
"""
s = torch.nn.functional.softmax(t, dim=dim)
return s
a_c = 0
attn_maps = []
attn_map_dir = None
seq_len = None
save_png = False
msa_depth = None
#@torch.jit.script
def _attention(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, biases: List[torch.Tensor]) -> torch.Tensor:
# [*, H, C_hidden, K] | key = permute_final_dims(key, (1, 0)) | 0 | 2023-11-01 10:29:08+00:00 | 2k |
ziqi-zhang/TAOISM | python/layers/flatten.py | [
{
"identifier": "SecretNonlinearLayer",
"path": "python/layers/nonlinear.py",
"snippet": "class SecretNonlinearLayer(SecretLayerBase):\n def __init__(\n self, sid, LayerName, EnclaveMode, link_prev=True, link_next=True,\n manually_register_prev=False, manually_register_next=False\n ):\n super().__init__(sid, LayerName, EnclaveMode, link_prev, link_next, manually_register_prev, manually_register_next)\n\n self.InputShape = None\n self.OutputShape = None\n self.HandleShape = None\n self.NameTagRemap = {}\n\n def init_shape(self):\n raise NotImplementedError\n\n def forward(self):\n raise NotImplementedError\n\n def backward(self):\n raise NotImplementedError"
},
{
"identifier": "NamedTimerInstance",
"path": "python/utils/timer_utils.py",
"snippet": "class NamedTimerInstance(object):\n def __init__(self, name, verbose_level=VerboseLevel.EVERY):\n self.name = name\n self.verbose_level = verbose_level\n\n def __enter__(self):\n return NamedTimer.start(self.name, verbose_level=self.verbose_level)\n ...\n\n def __exit__(self, *args):\n NamedTimer.end(self.name)\n ..."
},
{
"identifier": "VerboseLevel",
"path": "python/utils/timer_utils.py",
"snippet": "class VerboseLevel(IntEnum):\n EVERY = 1\n LAYER = 2\n RUN = 3\n EPOCH = 4"
},
{
"identifier": "compare_expected_actual",
"path": "python/utils/torch_utils.py",
"snippet": "def compare_expected_actual(expected, actual, show_where_err=False, get_relative=False, verbose=False, show_values=False):\n def purify(x):\n # return torch.tensor(x)\n res = x\n # if not (isinstance(x, torch.Tensor) or isinstance(x, torch.Variable)):\n if not (isinstance(x, torch.Tensor) ):\n res = torch.tensor(x)\n # return x.detach().numpy()\n return res.type(torch.float).to(\"cpu\")\n expected = purify(expected)\n actual = purify(actual)\n\n if show_values:\n print(\"expected:\", expected[0, 0])\n print(\"actual:\", actual[0, 0])\n\n avg_abs_diff = torch.mean(torch.abs(expected - actual)).item()\n res = avg_abs_diff\n\n if show_where_err:\n show_indices = torch.abs(expected - actual) / torch.abs(expected) > 0.5\n # show_indices = (expected != actual)\n print(\"error indices: \", np.where(show_indices.cpu()))\n print(\"expected values:\", expected[show_indices])\n print(\"difference:\", (expected - actual)[show_indices])\n\n if get_relative:\n tmp_expected, tmp_actual = expected[expected != 0], actual[expected != 0]\n relative_diff = torch.abs(tmp_expected - tmp_actual) / torch.abs(tmp_expected)\n relative_avg_diff = torch.mean(torch.abs(tmp_actual - tmp_expected)) / torch.mean(torch.abs(tmp_expected))\n Error = namedtuple(\"Error\", (\"AvgAbsDiff\", \"RelAvgDiff\", \"AvgRelDiff\", \"StdRelDiff\"))\n res = Error(avg_abs_diff, relative_avg_diff.item(), torch.mean(relative_diff).item(), torch.std(relative_diff).item())\n\n if verbose:\n print(res)\n\n return res"
},
{
"identifier": "ExecutionModeOptions",
"path": "python/utils/basic_utils.py",
"snippet": "class ExecutionModeOptions(Enum):\n Enclave = 1\n CPU = 2\n GPU = 3"
}
] | from python.layers.nonlinear import SecretNonlinearLayer
from python.utils.timer_utils import NamedTimerInstance, VerboseLevel
from python.utils.torch_utils import compare_expected_actual
from python.utils.basic_utils import ExecutionModeOptions | 1,305 |
# Assume the prev. layer is of 4d. It outputs a 2d mat
# This layer doesnt pull the input in enclave if it is not so reduce duplicated action
class SecretFlattenLayer(SecretNonlinearLayer):
batch_size = None
n_features = None
input_shape = None
output_shape = None
def __init__(
self, sid, LayerName, EnclaveMode, link_prev=True, link_next=True,
manually_register_prev=False, manually_register_next=False
):
super().__init__(sid, LayerName, EnclaveMode, link_prev, link_next, manually_register_prev, manually_register_next)
self.StoreInEnclave = False
self.ForwardFuncName = "Flatten"
self.BackwardFuncName = "DerFlatten"
def init(self, start_enclave=True):
super().init(start_enclave)
self.ForwardFunc = lambda x: x.view(-1, self.n_features)
self.PlainFunc = lambda x: x.view(-1, self.n_features)
def init_shape(self):
self.input_shape = self.PrevLayer.get_output_shape()
if len(self.input_shape) != 4:
            raise ValueError("The dimension of the tensor from the previous layer has to be 4D.")
self.batch_size = self.input_shape[0]
self.n_features = self.input_shape[1] * self.input_shape[2] * self.input_shape[3]
self.output_shape = [self.batch_size, self.n_features]
def get_output_shape(self):
return self.output_shape
def generate_tensor_name_list(self, force=False):
if not force and self.tensor_name_list:
return
if self.sid == 2:
self.tensor_name_list = {}
return
NeededTensorNames = [("output", self.output_shape, None),
("input", self.input_shape, None),
("DerInput", self.input_shape, None),
("DerOutput", self.output_shape, None)
]
self.tensor_name_list = NeededTensorNames
def forward(self):
with NamedTimerInstance(f"S{self.sid}: {self.LayerName} Forward", verbose_level=VerboseLevel.LAYER):
|
# Assume the previous layer's output is 4D. This layer outputs a 2D matrix.
# This layer doesn't pull the input into the enclave if it is not already there, to avoid duplicated work.
class SecretFlattenLayer(SecretNonlinearLayer):
batch_size = None
n_features = None
input_shape = None
output_shape = None
def __init__(
self, sid, LayerName, EnclaveMode, link_prev=True, link_next=True,
manually_register_prev=False, manually_register_next=False
):
super().__init__(sid, LayerName, EnclaveMode, link_prev, link_next, manually_register_prev, manually_register_next)
self.StoreInEnclave = False
self.ForwardFuncName = "Flatten"
self.BackwardFuncName = "DerFlatten"
def init(self, start_enclave=True):
super().init(start_enclave)
self.ForwardFunc = lambda x: x.view(-1, self.n_features)
self.PlainFunc = lambda x: x.view(-1, self.n_features)
def init_shape(self):
self.input_shape = self.PrevLayer.get_output_shape()
if len(self.input_shape) != 4:
            raise ValueError("The dimension of the tensor from the previous layer has to be 4D.")
self.batch_size = self.input_shape[0]
self.n_features = self.input_shape[1] * self.input_shape[2] * self.input_shape[3]
self.output_shape = [self.batch_size, self.n_features]
def get_output_shape(self):
return self.output_shape
def generate_tensor_name_list(self, force=False):
if not force and self.tensor_name_list:
return
if self.sid == 2:
self.tensor_name_list = {}
return
NeededTensorNames = [("output", self.output_shape, None),
("input", self.input_shape, None),
("DerInput", self.input_shape, None),
("DerOutput", self.output_shape, None)
]
self.tensor_name_list = NeededTensorNames
def forward(self):
with NamedTimerInstance(f"S{self.sid}: {self.LayerName} Forward", verbose_level=VerboseLevel.LAYER): | if self.EnclaveMode == ExecutionModeOptions.Enclave: | 4 | 2023-11-01 10:37:37+00:00 | 2k |
rafaelleinio/biar | biar/model.py | [
{
"identifier": "ContentCallbackError",
"path": "biar/errors.py",
"snippet": "class ContentCallbackError(Exception):\n \"\"\"Base Exception for content callback errors.\"\"\""
},
{
"identifier": "ResponseEvaluationError",
"path": "biar/errors.py",
"snippet": "class ResponseEvaluationError(Exception):\n \"\"\"Base Exception for non-OK responses.\"\"\""
}
] | import asyncio
import aiohttp
import tenacity
from functools import cached_property
from typing import Any, Callable, Dict, List, Literal, Optional, Tuple, Type
from aiohttp import ClientResponseError
from loguru import logger
from pydantic import BaseModel, ConfigDict, Field, JsonValue, computed_field
from pyrate_limiter import Duration, InMemoryBucket, Limiter, Rate
from yarl import URL
from biar.errors import ContentCallbackError, ResponseEvaluationError | 724 |
class ProxyConfig(BaseModel):
"""Proxy configuration.
Attributes:
host: proxy address.
headers: additional configuration required by the proxy.
ssl_cadata: certificate as a string required by some proxies to use SSL.
"""
host: str
headers: Optional[Dict[str, Any]] = None
ssl_cadata: Optional[str] = None
class Response(BaseModel):
"""Attributes from the http request response.
Attributes:
url: final url after (possible) redirects.
status_code: HTTP status code.
headers: headers in the response.
json_content: response content as json dict.
text_content: raw response content as a string.
"""
model_config = ConfigDict(arbitrary_types_allowed=True)
url: URL
status_code: int
headers: Dict[str, Any] = Field(default_factory=dict)
json_content: Dict[str, JsonValue] = Field(default_factory=dict)
text_content: str = ""
class StructuredResponse(Response):
"""Attributes from the http request response.
Attributes:
url: final url after (possible) redirects.
status_code: HTTP status code.
headers: headers in the response.
json_content: response content as json dict.
text_content: raw response content as a string.
structured_content: response content as a pydantic model.
"""
structured_content: Any
class Retryer(BaseModel):
"""Retry logic with exponential backoff strategy.
Attributes:
attempts: number of attempts.
`attempts=1` means only one try and no subsequent retry attempts.
min_delay: number of seconds as the starting delay.
max_delay: number of seconds as the maximum achieving delay.
retry_if_exception_in: retry if exception found in this tuple.
A ResponseEvaluationError is always added dynamically to be retried.
"""
attempts: int = 1
min_delay: int = 0
max_delay: int = 10
retry_if_exception_in: Tuple[Type[BaseException], ...] = (
ClientResponseError,
asyncio.TimeoutError,
ContentCallbackError,
)
retry_based_on_content_callback: Optional[Callable[[BaseModel], bool]] = None
@property
def retrying_config(self) -> Dict[str, Any]:
"""Configuration for retrying logic.
Changing arguments at run time reference:
https://github.com/jd/tenacity#changing-arguments-at-run-time
Returns:
kwargs dictionary for tenacity.BaseRetrying.
"""
return dict(
stop=tenacity.stop_after_attempt(self.attempts),
retry=tenacity.retry_if_exception_type(
|
class ProxyConfig(BaseModel):
"""Proxy configuration.
Attributes:
host: proxy address.
headers: additional configuration required by the proxy.
ssl_cadata: certificate as a string required by some proxies to use SSL.
"""
host: str
headers: Optional[Dict[str, Any]] = None
ssl_cadata: Optional[str] = None
class Response(BaseModel):
"""Attributes from the http request response.
Attributes:
url: final url after (possible) redirects.
status_code: HTTP status code.
headers: headers in the response.
json_content: response content as json dict.
text_content: raw response content as a string.
"""
model_config = ConfigDict(arbitrary_types_allowed=True)
url: URL
status_code: int
headers: Dict[str, Any] = Field(default_factory=dict)
json_content: Dict[str, JsonValue] = Field(default_factory=dict)
text_content: str = ""
class StructuredResponse(Response):
"""Attributes from the http request response.
Attributes:
url: final url after (possible) redirects.
status_code: HTTP status code.
headers: headers in the response.
json_content: response content as json dict.
text_content: raw response content as a string.
structured_content: response content as a pydantic model.
"""
structured_content: Any
class Retryer(BaseModel):
"""Retry logic with exponential backoff strategy.
Attributes:
attempts: number of attempts.
`attempts=1` means only one try and no subsequent retry attempts.
min_delay: number of seconds as the starting delay.
max_delay: number of seconds as the maximum achieving delay.
retry_if_exception_in: retry if exception found in this tuple.
A ResponseEvaluationError is always added dynamically to be retried.
"""
attempts: int = 1
min_delay: int = 0
max_delay: int = 10
retry_if_exception_in: Tuple[Type[BaseException], ...] = (
ClientResponseError,
asyncio.TimeoutError,
ContentCallbackError,
)
retry_based_on_content_callback: Optional[Callable[[BaseModel], bool]] = None
@property
def retrying_config(self) -> Dict[str, Any]:
"""Configuration for retrying logic.
Changing arguments at run time reference:
https://github.com/jd/tenacity#changing-arguments-at-run-time
Returns:
kwargs dictionary for tenacity.BaseRetrying.
"""
return dict(
stop=tenacity.stop_after_attempt(self.attempts),
retry=tenacity.retry_if_exception_type( | exception_types=self.retry_if_exception_in + (ResponseEvaluationError,) | 1 | 2023-11-03 00:03:59+00:00 | 2k |
NVlabs/M2T2 | m2t2/action_decoder.py | [
{
"identifier": "MLP",
"path": "m2t2/model_utils.py",
"snippet": "class MLP(nn.Module):\n def __init__(\n self, input_dim, hidden_dim, output_dim, num_layers,\n activation=\"ReLU\", dropout=0.\n ):\n super().__init__()\n h = [hidden_dim] * (num_layers - 1)\n layers = []\n for m, n in zip([input_dim] + h[:-1], h):\n layers.extend([nn.Linear(m, n), get_activation_fn(activation)])\n if dropout > 0:\n layers.append(nn.Dropout(dropout))\n layers.append(nn.Linear(hidden_dim, output_dim))\n self.mlp = nn.Sequential(*layers)\n\n def forward(self, x):\n return self.mlp(x)"
},
{
"identifier": "repeat_new_axis",
"path": "m2t2/model_utils.py",
"snippet": "def repeat_new_axis(tensor, rep, dim):\n reps = [1] * len(tensor.shape)\n reps.insert(dim, rep)\n return tensor.unsqueeze(dim).repeat(*reps)"
}
] | import numpy as np
import torch
import torch.nn.functional as F
import trimesh.transformations as tra
from m2t2.model_utils import MLP, repeat_new_axis | 1,520 | # distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
# Author: Wentao Yuan
"""
Modules to compute gripper poses from contact masks and parameters.
"""
def double_split(tensor, chunks):
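    # Two-level split along dim 0: first one block per chunk list (of length sum(chunk)),
    # then each block into its individual chunk sizes, yielding a nested list of tensors.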
tensor = list(tensor.split([sum(chunk) for chunk in chunks]))
tensor = [
list(elem.split([n for n in chunk]))
for elem, chunk in zip(tensor, chunks)
]
return tensor
def build_6d_grasp(
contact_pt, contact_dir, approach_dir, offset, gripper_depth=0.1034
):
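    # Stack rotation columns (x = contact direction, y = approach x contact, z = approach direction)
    # with the translation: the contact point shifted by offset/2 along the contact direction
    # and pulled back by gripper_depth along the approach direction.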
grasp_tr = torch.stack([
contact_dir,
torch.cross(approach_dir, contact_dir),
approach_dir,
contact_pt + contact_dir * offset.unsqueeze(-1) / 2
- gripper_depth * approach_dir
], axis=-1)
last_row = torch.tensor([[0, 0, 0, 1]]).to(grasp_tr.device)
if len(grasp_tr.shape) > 2:
last_row = last_row * torch.ones(
*grasp_tr.shape[:-2], 1, 4, device=grasp_tr.device
)
grasp_tr = torch.cat([grasp_tr, last_row], dim=-2)
return grasp_tr
def build_6d_place(contact_pts, rot, offset, ee_pose):
# Transformation order: first rotate gripper to grasp pose,
# then add offset between gripper center and reference point,
# then rotate around object center, finally translate to contact point.
rot = rot @ ee_pose[..., :3, :3]
trans = (contact_pts + offset).unsqueeze(-1)
place_tr = torch.cat([rot, trans], axis=-1)
last_row = torch.tensor([[0, 0, 0, 1]]).to(place_tr.device)
if len(place_tr.shape) > 2:
last_row = last_row * torch.ones(
*place_tr.shape[:-2], 1, 4, device=place_tr.device
)
place_tr = torch.cat([place_tr, last_row], dim=-2)
return place_tr
def compute_offset(obj_pts, ee_pose, rot, grid_res=0, cam_pose=None):
# rot R is about object center o
# offset is ee position e - target position t
# R(e - o) - R(t - o) = -R(t - e)
if cam_pose is not None:
rot = cam_pose[:3, :3] @ rot
obj_pts_stable = (obj_pts - ee_pose[:3, 3]) @ rot.transpose(-1, -2)
if grid_res > 0:
obj_pts_grid = (obj_pts_stable[..., :2] / grid_res).round()
offset = obj_pts_stable.min(dim=0)[0]
offset[:2] = obj_pts_grid.unique(dim=0).mean(dim=0) * grid_res
else:
offset = obj_pts_stable.mean(dim=0)
offset[..., 2] = obj_pts_stable[..., 2].min(dim=1)[0]
offset = -offset
if cam_pose is not None:
offset = offset @ cam_pose[:3, :3]
return offset
def infer_placements(
xyz, logits, bottom_center, ee_poses, cam_poses, conf_thresh, height
):
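    # For every point, enumerate candidate rotations about the z-axis (one per logit channel),
    # keep those whose confidence exceeds conf_thresh, and build 6-DoF placement poses from them.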
rot_prompts = torch.stack([torch.from_numpy(
tra.euler_matrix(0, 0, 2 * np.pi / logits.shape[1] * i)
)[:3, :3].float() for i in range(logits.shape[1])]).to(xyz.device)
rot_prompts = repeat_new_axis(rot_prompts, xyz.shape[1], dim=1)
placements, confidence, contact_points = [], [], []
for i, (pts, bc, ee_pose, logit) in enumerate(zip(
xyz, bottom_center, ee_poses, logits
)):
conf = logit.sigmoid()
mask = conf > conf_thresh
num = list(mask.sum(dim=1))
rot = rot_prompts[mask]
offsets = (ee_pose[:3, 3] - bc) @ rot.transpose(1, 2)
if cam_poses is not None:
pts = pts @ cam_poses[i, :3, :3].T + cam_poses[i, :3, 3]
contacts = repeat_new_axis(pts, mask.shape[0], dim=0)[mask]
place = build_6d_place(contacts, rot, offsets, ee_pose)
place[:, 2, 3] = place[:, 2, 3] + height
if cam_poses is not None:
place = cam_poses[i].inverse() @ place
placements.append(list(place.split(num)))
confidence.append(list(conf[mask].split(num)))
contact_points.append(list(contacts.split(num)))
outputs = {
'placements': placements,
'placement_confidence': confidence,
'placement_contacts': contact_points
}
return outputs
class ActionDecoder(torch.nn.Module):
def __init__(
self, mask_dim, use_embed, embed_dim, max_num_pred, num_params,
hidden_dim, num_layers, activation, offset_bins
):
super(ActionDecoder, self).__init__()
feat_dim = mask_dim
if use_embed:
feat_dim += embed_dim
self.feat_dim = feat_dim
self.use_embed = use_embed
| # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
# Author: Wentao Yuan
"""
Modules to compute gripper poses from contact masks and parameters.
"""
def double_split(tensor, chunks):
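    # Two-level split along dim 0: first one block per chunk list (of length sum(chunk)),
    # then each block into its individual chunk sizes, yielding a nested list of tensors.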
tensor = list(tensor.split([sum(chunk) for chunk in chunks]))
tensor = [
list(elem.split([n for n in chunk]))
for elem, chunk in zip(tensor, chunks)
]
return tensor
def build_6d_grasp(
contact_pt, contact_dir, approach_dir, offset, gripper_depth=0.1034
):
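    # Stack rotation columns (x = contact direction, y = approach x contact, z = approach direction)
    # with the translation: the contact point shifted by offset/2 along the contact direction
    # and pulled back by gripper_depth along the approach direction.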
grasp_tr = torch.stack([
contact_dir,
torch.cross(approach_dir, contact_dir),
approach_dir,
contact_pt + contact_dir * offset.unsqueeze(-1) / 2
- gripper_depth * approach_dir
], axis=-1)
last_row = torch.tensor([[0, 0, 0, 1]]).to(grasp_tr.device)
if len(grasp_tr.shape) > 2:
last_row = last_row * torch.ones(
*grasp_tr.shape[:-2], 1, 4, device=grasp_tr.device
)
grasp_tr = torch.cat([grasp_tr, last_row], dim=-2)
return grasp_tr
def build_6d_place(contact_pts, rot, offset, ee_pose):
# Transformation order: first rotate gripper to grasp pose,
# then add offset between gripper center and reference point,
# then rotate around object center, finally translate to contact point.
rot = rot @ ee_pose[..., :3, :3]
trans = (contact_pts + offset).unsqueeze(-1)
place_tr = torch.cat([rot, trans], axis=-1)
last_row = torch.tensor([[0, 0, 0, 1]]).to(place_tr.device)
if len(place_tr.shape) > 2:
last_row = last_row * torch.ones(
*place_tr.shape[:-2], 1, 4, device=place_tr.device
)
place_tr = torch.cat([place_tr, last_row], dim=-2)
return place_tr
def compute_offset(obj_pts, ee_pose, rot, grid_res=0, cam_pose=None):
# rot R is about object center o
# offset is ee position e - target position t
# R(e - o) - R(t - o) = -R(t - e)
if cam_pose is not None:
rot = cam_pose[:3, :3] @ rot
obj_pts_stable = (obj_pts - ee_pose[:3, 3]) @ rot.transpose(-1, -2)
if grid_res > 0:
obj_pts_grid = (obj_pts_stable[..., :2] / grid_res).round()
offset = obj_pts_stable.min(dim=0)[0]
offset[:2] = obj_pts_grid.unique(dim=0).mean(dim=0) * grid_res
else:
offset = obj_pts_stable.mean(dim=0)
offset[..., 2] = obj_pts_stable[..., 2].min(dim=1)[0]
offset = -offset
if cam_pose is not None:
offset = offset @ cam_pose[:3, :3]
return offset
def infer_placements(
xyz, logits, bottom_center, ee_poses, cam_poses, conf_thresh, height
):
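    # For every point, enumerate candidate rotations about the z-axis (one per logit channel),
    # keep those whose confidence exceeds conf_thresh, and build 6-DoF placement poses from them.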
rot_prompts = torch.stack([torch.from_numpy(
tra.euler_matrix(0, 0, 2 * np.pi / logits.shape[1] * i)
)[:3, :3].float() for i in range(logits.shape[1])]).to(xyz.device)
rot_prompts = repeat_new_axis(rot_prompts, xyz.shape[1], dim=1)
placements, confidence, contact_points = [], [], []
for i, (pts, bc, ee_pose, logit) in enumerate(zip(
xyz, bottom_center, ee_poses, logits
)):
conf = logit.sigmoid()
mask = conf > conf_thresh
num = list(mask.sum(dim=1))
rot = rot_prompts[mask]
offsets = (ee_pose[:3, 3] - bc) @ rot.transpose(1, 2)
if cam_poses is not None:
pts = pts @ cam_poses[i, :3, :3].T + cam_poses[i, :3, 3]
contacts = repeat_new_axis(pts, mask.shape[0], dim=0)[mask]
place = build_6d_place(contacts, rot, offsets, ee_pose)
place[:, 2, 3] = place[:, 2, 3] + height
if cam_poses is not None:
place = cam_poses[i].inverse() @ place
placements.append(list(place.split(num)))
confidence.append(list(conf[mask].split(num)))
contact_points.append(list(contacts.split(num)))
outputs = {
'placements': placements,
'placement_confidence': confidence,
'placement_contacts': contact_points
}
return outputs
class ActionDecoder(torch.nn.Module):
def __init__(
self, mask_dim, use_embed, embed_dim, max_num_pred, num_params,
hidden_dim, num_layers, activation, offset_bins
):
super(ActionDecoder, self).__init__()
feat_dim = mask_dim
if use_embed:
feat_dim += embed_dim
self.feat_dim = feat_dim
self.use_embed = use_embed | self.contact_dir_head = MLP( | 0 | 2023-11-03 22:32:05+00:00 | 2k |
Codra-Ingenierie-Informatique/DataLab | cdl/tests/features/common/newobject_unit.py | [
{
"identifier": "execenv",
"path": "cdl/env.py",
"snippet": "DEBUG = os.environ.get(\"DEBUG\", \"\").lower() in (\"1\", \"true\")\n QUIET = \"quiet\"\n NORMAL = \"normal\"\n DEBUG = \"debug\"\n UNATTENDED_ARG = \"unattended\"\n VERBOSE_ARG = \"verbose\"\n SCREENSHOT_ARG = \"screenshot\"\n DELAY_ARG = \"delay\"\n XMLRPCPORT_ARG = \"xmlrpcport\"\n DONOTQUIT_ENV = \"CDL_DO_NOT_QUIT\"\n UNATTENDED_ENV = GuiDataExecEnv.UNATTENDED_ENV\n VERBOSE_ENV = GuiDataExecEnv.VERBOSE_ENV\n SCREENSHOT_ENV = GuiDataExecEnv.SCREENSHOT_ENV\n DELAY_ENV = GuiDataExecEnv.DELAY_ENV\n XMLRPCPORT_ENV = \"CDL_XMLRPCPORT\"\n CATCHER_TEST_ENV = \"CDL_CATCHER_TEST\"\nclass VerbosityLevels(enum.Enum):\nclass CDLExecEnv:\n def __init__(self):\n def to_dict(self):\n def __str__(self):\n def enable_demo_mode(self, delay: int):\n def __get_mode(env):\n def __set_mode(env, value):\n def do_not_quit(self):\n def do_not_quit(self, value):\n def unattended(self):\n def unattended(self, value):\n def catcher_test(self):\n def catcher_test(self, value):\n def screenshot(self):\n def screenshot(self, value):\n def verbose(self):\n def verbose(self, value):\n def delay(self):\n def delay(self, value: int):\n def xmlrpcport(self):\n def xmlrpcport(self, value: int):\n def parse_args(self):\n def set_env_from_args(self, args):\n def log(self, source: Any, *objects: Any) -> None:\n def print(self, *objects, sep=\" \", end=\"\\n\", file=sys.stdout, flush=False):\n def pprint(\n self,\n obj,\n stream=None,\n indent=1,\n width=80,\n depth=None,\n compact=False,\n sort_dicts=True,\n ):"
},
{
"identifier": "Gauss2DParam",
"path": "cdl/obj.py",
"snippet": ""
},
{
"identifier": "view_curves",
"path": "cdl/utils/vistools.py",
"snippet": "def view_curves(data_or_datalist, name=None, title=None, xlabel=None, ylabel=None):\n \"\"\"Create a curve dialog and plot curves\"\"\"\n if isinstance(data_or_datalist, (tuple, list)):\n datalist = data_or_datalist\n else:\n datalist = [data_or_datalist]\n items = []\n for data in datalist:\n if len(data) == 2:\n xdata, ydata = data\n item = make.mcurve(xdata, ydata)\n else:\n item = make.mcurve(data)\n items.append(item)\n view_curve_items(items, name=name, title=title, xlabel=xlabel, ylabel=ylabel)"
},
{
"identifier": "view_images",
"path": "cdl/utils/vistools.py",
"snippet": "def view_images(data_or_datalist, name=None, title=None, xlabel=None, ylabel=None):\n \"\"\"Create an image dialog and show images\"\"\"\n if isinstance(data_or_datalist, (tuple, list)):\n datalist = data_or_datalist\n else:\n datalist = [data_or_datalist]\n items = []\n for data in datalist:\n item = make.image(data, interpolation=\"nearest\", eliminate_outliers=0.1)\n items.append(item)\n view_image_items(items, name=name, title=title, xlabel=xlabel, ylabel=ylabel)"
}
] | from collections.abc import Generator
from guidata.qthelpers import qt_app_context
from cdl.env import execenv
from cdl.obj import (
Gauss2DParam,
ImageDatatypes,
ImageObj,
ImageTypes,
NormalRandomParam,
SignalObj,
SignalTypes,
UniformRandomParam,
create_image_from_param,
create_signal_from_param,
new_image_param,
new_signal_param,
)
from cdl.utils.vistools import view_curves, view_images | 1,082 | # -*- coding: utf-8 -*-
#
# Licensed under the terms of the BSD 3-Clause
# (see cdl/LICENSE for details)
"""
New signal/image test
Testing functions related to signal/image creation.
"""
# pylint: disable=invalid-name # Allows short reference names like x, y, ...
# pylint: disable=duplicate-code
# guitest: show
from __future__ import annotations
def iterate_signal_creation(
data_size: int = 500, non_zero: bool = False, verbose: bool = True
) -> Generator[SignalObj, None, None]:
"""Iterate over all possible signals created from parameters"""
if verbose:
| # -*- coding: utf-8 -*-
#
# Licensed under the terms of the BSD 3-Clause
# (see cdl/LICENSE for details)
"""
New signal/image test
Testing functions related to signal/image creation.
"""
# pylint: disable=invalid-name # Allows short reference names like x, y, ...
# pylint: disable=duplicate-code
# guitest: show
from __future__ import annotations
def iterate_signal_creation(
data_size: int = 500, non_zero: bool = False, verbose: bool = True
) -> Generator[SignalObj, None, None]:
"""Iterate over all possible signals created from parameters"""
if verbose: | execenv.print( | 0 | 2023-11-09 16:56:03+00:00 | 2k |
sxwyh/pytradecn | src/pytradecn/control/wrappersa.py | [
{
"identifier": "BaseUIAWrapper",
"path": "src/pytradecn/control/baseuiawrapper.py",
"snippet": "class BaseUIAWrapper(UIAWrapper):\n\n _control_types = ['BaseUIA']\n\n def __init__(self, element_info):\n super(BaseUIAWrapper, self).__init__(element_info)\n self._client = get_client(process=element_info.process_id)\n self._prompt = self._client.prompt\n self._win32structure = win32structure\n\n def _get_control(self, control_define):\n # control_define 为Client格式的字符串或字典,或者pywinauto格式的字典\n return get_control_specification(self._client.window(), control_define)\n\n def config(self, key): # 弹出框无法使用\n return self.element_info.config.get(key, None)\n\n def top_level_parent(self):\n # NOTE 官方top_level_parent()效率低且易出错,重写\n # return self._client.window().wrapper_object() # 注意:集成环境下仍然指向客户端主窗口\n return self._client.root_window().wrapper_object()\n\n def standard(self):\n \"\"\"返回此控件的pywinauto官方标准控件\"\"\"\n # NOTE 不要在条件中添加type和class,有可能失效\n return get_control_specification(self.element_info.parent, {'handle': self.element_info.handle})\n\n def own(self): # 弹出框无法使用\n \"\"\"返回此控件的另一个副本\"\"\"\n return get_control_specification(self.element_info.current_parent, self.element_info.control_define)\n\n def child(self, control_define):\n \"\"\"返回此控件的后代规范\"\"\"\n # control_define 为Client格式的字符串或字典,或者pywinauto格式的字典\n return get_control_specification(self.element_info, control_define)\n\n def texts(self):\n \"\"\"重写texts()\"\"\"\n rtn = [c.window_text() for c in self.descendants() if c.window_text() != '']\n return [self.window_text()] + list(map(lambda s: s.replace('\\r', '').replace('\\n', ''), rtn))\n\n def image_text(self, box=None, whitelist=None):\n \"\"\"返回控件的表面可见文本\"\"\"\n if whitelist is None:\n whitelist = ''\n return image_to_string(self.capture_as_image(box), tessedit_char_whitelist=whitelist)\n\n def exists(self, timeout=None):\n \"\"\"判断控件是否还存在\"\"\"\n return self._get_control({'handle': self.handle}).exists(timeout=timeout)"
},
{
"identifier": "RecordNotFoundError",
"path": "src/pytradecn/error.py",
"snippet": "class ElementAmbiguousError(Exception):\nclass ElementNotFoundError(Exception):\nclass ItemKeyError(Exception):\nclass ClientConfigError(Exception):\nclass TradeFailFError(Exception):\nclass StockCountError(Exception):\nclass StockPriceError(Exception):\nclass StockCodeError(Exception):\nclass ScreenLockedError(Exception):\nclass LoginError(Exception):\nclass RecordNotFoundError(Exception):\nclass RecordAmbiguousError(Exception):"
}
] | from os import remove
from csv import DictReader
from decimal import Decimal
from tempfile import NamedTemporaryFile
from os.path import exists
from .baseuiawrapper import BaseUIAWrapper
from ..error import RecordNotFoundError, RecordAmbiguousError, ItemKeyError, TimeoutError | 1,392 | #
# Brokerage client automation testing library
# Copyright (C) 2023 谁的谁([email protected]) All rights reserved.
#
# Module purpose: various custom controls
# Created: 2023.07.20
# Contact: 谁的谁([email protected])
#
# Open-source software statement:
# This software is open-sourced under the "MIT License" and is for learning and reference only. You may freely use
# or modify the source code or binary files, but you must retain the copyright notice above. The software is
# intended for in-depth study and exploration of the features and potential of the python pywinauto library; due to
# the uncertainty of the environment and the unreliability of this software, please do not use it for live trading.
# If you really need live quantitative trading, please use the quantitative trading platform provided by your
# broker; otherwise, neither the open-source software provider nor any plugin provider assumes any responsibility
# for account losses or policy risks caused by live trading with this software. Likewise, neither the open-source
# software provider nor any plugin provider assumes any responsibility for any direct, indirect, incidental, or
# potential account-security loss, data-security loss, account-asset loss, or any other liability caused by using
# this software. Please do not use this software for commercial activities; otherwise, neither the open-source
# software provider nor any plugin provider assumes any responsibility for any losses or legal liabilities caused
# by such commercial use.
#
# Change log:
# 2022-07-20 Initial version
#
class PromptWrapper(BaseUIAWrapper):
_control_types = ['Prompt']
def __init__(self, elem):
super(PromptWrapper, self).__init__(elem)
def __wait_prompt_close(self):
try:
            # NOTE search from the top-level window via _get_control
self._get_control({'handle': self.handle}).wait_not('exists')
| #
# Brokerage client automation testing library
# Copyright (C) 2023 谁的谁([email protected]) All rights reserved.
#
# Module purpose: various custom controls
# Created: 2023.07.20
# Contact: 谁的谁([email protected])
#
# Open-source software statement:
# This software is open-sourced under the "MIT License" and is for learning and reference only. You may freely use
# or modify the source code or binary files, but you must retain the copyright notice above. The software is
# intended for in-depth study and exploration of the features and potential of the python pywinauto library; due to
# the uncertainty of the environment and the unreliability of this software, please do not use it for live trading.
# If you really need live quantitative trading, please use the quantitative trading platform provided by your
# broker; otherwise, neither the open-source software provider nor any plugin provider assumes any responsibility
# for account losses or policy risks caused by live trading with this software. Likewise, neither the open-source
# software provider nor any plugin provider assumes any responsibility for any direct, indirect, incidental, or
# potential account-security loss, data-security loss, account-asset loss, or any other liability caused by using
# this software. Please do not use this software for commercial activities; otherwise, neither the open-source
# software provider nor any plugin provider assumes any responsibility for any losses or legal liabilities caused
# by such commercial use.
#
# Change log:
# 2022-07-20 Initial version
#
class PromptWrapper(BaseUIAWrapper):
_control_types = ['Prompt']
def __init__(self, elem):
super(PromptWrapper, self).__init__(elem)
def __wait_prompt_close(self):
try:
            # NOTE search from the top-level window via _get_control
self._get_control({'handle': self.handle}).wait_not('exists') | except TimeoutError: | 1 | 2023-11-03 02:22:34+00:00 | 2k |
ingra14m/Tensor4D-DNeRF | models/fields.py | [
{
"identifier": "get_embedder",
"path": "models/embedder.py",
"snippet": "def get_embedder(multires, input_dims=3):\n embed_kwargs = {\n 'include_input': True,\n 'input_dims': input_dims,\n 'max_freq_log2': multires-1,\n 'num_freqs': multires,\n 'log_sampling': True,\n 'periodic_fns': [torch.sin, torch.cos],\n }\n\n embedder_obj = Embedder(**embed_kwargs)\n def embed(x, eo=embedder_obj): return eo.embed(x)\n return embed, embedder_obj.out_dim"
},
{
"identifier": "integrated_pos_enc",
"path": "models/mip_utils.py",
"snippet": "def integrated_pos_enc(means_covs, min_deg, max_deg, diagonal=True):\n \"\"\"Encode `means` with sinusoids scaled by 2^[min_deg:max_deg-1].\n Args:\n means_covs:[B, N, 3] a tuple containing: means, torch.Tensor, variables to be encoded.\n covs, [B, N, 3] torch.Tensor, covariance matrices.\n min_deg: int, the min degree of the encoding.\n max_deg: int, the max degree of the encoding.\n diagonal: bool, if true, expects input covariances to be diagonal (full otherwise).\n Returns:\n encoded: torch.Tensor, encoded variables.\n \"\"\"\n if diagonal:\n means, covs_diag = means_covs\n scales = torch.tensor([2 ** i for i in range(min_deg, max_deg)], device=means.device) # [L]\n # [B, N, 1, 3] * [L, 1] = [B, N, L, 3]->[B, N, 3L]\n y = rearrange(torch.unsqueeze(means, dim=-2) * torch.unsqueeze(scales, dim=-1),\n 'batch sample scale_dim mean_dim -> batch sample (scale_dim mean_dim)')\n # [B, N, 1, 3] * [L, 1] = [B, N, L, 3]->[B, N, 3L]\n y_var = rearrange(torch.unsqueeze(covs_diag, dim=-2) * torch.unsqueeze(scales, dim=-1) ** 2,\n 'batch sample scale_dim cov_dim -> batch sample (scale_dim cov_dim)')\n else:\n means, x_cov = means_covs\n num_dims = means.shape[-1]\n # [3, L]\n basis = torch.cat([2 ** i * torch.eye(num_dims, device=means.device) for i in range(min_deg, max_deg)], 1)\n y = torch.matmul(means, basis) # [B, N, 3] * [3, 3L] = [B, N, 3L]\n y_var = torch.sum((torch.matmul(x_cov, basis)) * basis, -2)\n # sin(y + 0.5 * torch.tensor(np.pi)) = cos(y) 中国的学生脑子一定出现那句 “奇变偶不变 符号看象限”\n return expected_sin(torch.cat([y, y + 0.5 * torch.tensor(np.pi)], dim=-1), torch.cat([y_var] * 2, dim=-1))[0]"
}
] | from configparser import NoOptionError
from models.embedder import get_embedder
from models.mip_utils import integrated_pos_enc
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np | 978 |
class FieldNetwork(nn.Module):
def __init__(self,
d_in,
d_out,
d_hidden,
d_t4d,
min_emb,
max_emb,
n_layers,
t_emb=-1,
skip_in=(4,),
bias=0.5,
geometric_init=True,
weight_norm=True):
super(FieldNetwork, self).__init__()
dims = [d_in] + [d_hidden for _ in range(n_layers)] + [d_out]
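        # The extra (max_emb - min_emb) * 3 * 2 input features below come from the integrated
        # positional encoding: sin and cos terms for each of the 3 spatial dims per frequency band.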
dims[0] = d_in + (max_emb - min_emb)*3*2
self.num_layers = len(dims)
self.skip_in = skip_in
self.min_emb = min_emb
self.max_emb = max_emb
self.t_emb = t_emb
if t_emb > 0:
|
class FieldNetwork(nn.Module):
def __init__(self,
d_in,
d_out,
d_hidden,
d_t4d,
min_emb,
max_emb,
n_layers,
t_emb=-1,
skip_in=(4,),
bias=0.5,
geometric_init=True,
weight_norm=True):
super(FieldNetwork, self).__init__()
dims = [d_in] + [d_hidden for _ in range(n_layers)] + [d_out]
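        # The extra (max_emb - min_emb) * 3 * 2 input features below come from the integrated
        # positional encoding: sin and cos terms for each of the 3 spatial dims per frequency band.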
dims[0] = d_in + (max_emb - min_emb)*3*2
self.num_layers = len(dims)
self.skip_in = skip_in
self.min_emb = min_emb
self.max_emb = max_emb
self.t_emb = t_emb
if t_emb > 0: | embed_fn, time_input_ch = get_embedder(t_emb, input_dims=1) | 0 | 2023-11-07 10:16:33+00:00 | 2k |
865charlesw/homeassistant-kidde | custom_components/kidde/binary_sensor.py | [
{
"identifier": "DOMAIN",
"path": "custom_components/kidde/const.py",
"snippet": "DOMAIN = \"kidde\""
},
{
"identifier": "KiddeCoordinator",
"path": "custom_components/kidde/coordinator.py",
"snippet": "class KiddeCoordinator(DataUpdateCoordinator):\n \"\"\"Coordinator for Kidde HomeSafe.\"\"\"\n\n data: KiddeDataset\n\n def __init__(\n self, hass: HomeAssistant, client: KiddeClient, update_interval: int\n ) -> None:\n \"\"\"Initialize coordinator.\"\"\"\n super().__init__(\n hass,\n _LOGGER,\n name=DOMAIN,\n update_interval=timedelta(seconds=update_interval),\n )\n self.client = client\n\n async def _async_update_data(self) -> KiddeDataset:\n \"\"\"Fetch data from API endpoint.\"\"\"\n try:\n async with async_timeout.timeout(10):\n return await self.client.get_data(get_events=False)\n except KiddeClientAuthError as e:\n raise ConfigEntryAuthFailed from e\n except Exception as e:\n raise UpdateFailed(f\"{type(e).__name__} while communicating with API: {e}\")"
},
{
"identifier": "KiddeEntity",
"path": "custom_components/kidde/entity.py",
"snippet": "class KiddeEntity(CoordinatorEntity[KiddeCoordinator]):\n \"\"\"Entity base class.\"\"\"\n\n def __init__(\n self,\n coordinator: KiddeCoordinator,\n device_id: int,\n entity_description: EntityDescription,\n ) -> None:\n \"\"\"Initialize.\"\"\"\n super().__init__(coordinator)\n self.device_id = device_id\n self.entity_description = entity_description\n\n @property\n def kidde_device(self) -> dict:\n \"\"\"The device from the coordinator's data.\"\"\"\n return self.coordinator.data.devices[self.device_id]\n\n @property\n def unique_id(self) -> str:\n return f\"{self.kidde_device['label']}_{self.entity_description.key}\"\n\n @property\n def device_info(self) -> DeviceInfo | None:\n device = self.kidde_device\n return DeviceInfo(\n identifiers={(DOMAIN, device[\"label\"])},\n name=device.get(\"label\"),\n hw_version=device.get(\"hwrev\"),\n sw_version=str(device.get(\"fwrev\")),\n model=device.get(\"model\"),\n manufacturer=MANUFACTURER,\n )\n\n async def kidde_command(self, command: KiddeCommand) -> None:\n \"\"\"Send a Kidde command for this device.\"\"\"\n client = self.coordinator.client\n device = self.kidde_device\n await client.device_command(device[\"location_id\"], device[\"id\"], command)"
}
] | from homeassistant.components.binary_sensor import BinarySensorEntity
from homeassistant.components.binary_sensor import BinarySensorEntityDescription
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .const import DOMAIN
from .coordinator import KiddeCoordinator
from .entity import KiddeEntity | 825 | """Binary sensor platform for Kidde Homesafe integration."""
from __future__ import annotations
_BINARY_SENSOR_DESCRIPTIONS = (
BinarySensorEntityDescription(
"smoke_alarm", icon="mdi:smoke-detector-variant-alert", name="Smoke Alarm"
),
BinarySensorEntityDescription(
"smoke_hushed", icon="mdi:smoke-detector-variant-off", name="Smoke Hushed"
),
BinarySensorEntityDescription("co_alarm", icon="mdi:molecule-co", name="CO Alarm"),
)
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_devices: AddEntitiesCallback
) -> None:
"""Set up the binary sensor platform."""
| """Binary sensor platform for Kidde Homesafe integration."""
from __future__ import annotations
_BINARY_SENSOR_DESCRIPTIONS = (
BinarySensorEntityDescription(
"smoke_alarm", icon="mdi:smoke-detector-variant-alert", name="Smoke Alarm"
),
BinarySensorEntityDescription(
"smoke_hushed", icon="mdi:smoke-detector-variant-off", name="Smoke Hushed"
),
BinarySensorEntityDescription("co_alarm", icon="mdi:molecule-co", name="CO Alarm"),
)
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_devices: AddEntitiesCallback
) -> None:
"""Set up the binary sensor platform.""" | coordinator: KiddeCoordinator = hass.data[DOMAIN][entry.entry_id] | 0 | 2023-11-09 23:25:02+00:00 | 2k |
humemarx/CPG-LCF | models/backbone2d/resnet.py | [
{
"identifier": "constant_init",
"path": "models/utils/weight_init.py",
"snippet": "def constant_init(module, val, bias=0):\n if hasattr(module, 'weight') and module.weight is not None:\n nn.init.constant_(module.weight, val)\n if hasattr(module, 'bias') and module.bias is not None:\n nn.init.constant_(module.bias, bias)"
},
{
"identifier": "kaiming_init",
"path": "models/utils/weight_init.py",
"snippet": "def kaiming_init(module,\n a=0,\n mode='fan_out',\n nonlinearity='relu',\n bias=0,\n distribution='normal'):\n assert distribution in ['uniform', 'normal']\n if hasattr(module, 'weight') and module.weight is not None:\n if distribution == 'uniform':\n nn.init.kaiming_uniform_(\n module.weight, a=a, mode=mode, nonlinearity=nonlinearity)\n else:\n nn.init.kaiming_normal_(\n module.weight, a=a, mode=mode, nonlinearity=nonlinearity)\n if hasattr(module, 'bias') and module.bias is not None:\n nn.init.constant_(module.bias, bias)"
}
] | import warnings
import torch
import torch.nn as nn
import torch.utils.checkpoint as cp
from collections import OrderedDict
from torch.nn.modules.batchnorm import _BatchNorm
from torch.nn.modules.instancenorm import _InstanceNorm
from torch.nn.modules.conv import _ConvNd
from models.utils.weight_init import (constant_init, kaiming_init) | 1,521 |
downsample = None
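        # When the spatial stride or channel count changes, build a downsample branch
        # (optional AvgPool2d when avg_down, then a 1x1 conv + norm) to match the residual path.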
if stride != 1 or inplanes != planes * block.expansion:
downsample = []
conv_stride = stride
if avg_down:
conv_stride = 1
downsample.append(
nn.AvgPool2d(
kernel_size=stride,
stride=stride,
ceil_mode=True,
count_include_pad=False))
downsample.extend([
conv_type(
inplanes,
planes * block.expansion,
kernel_size=1,
stride=conv_stride,
bias=False),
norm_type(planes * block.expansion)
])
downsample = nn.Sequential(*downsample)
layers = []
if multi_grid is None:
if dilation > 1 and contract_dilation:
first_dilation = dilation // 2
else:
first_dilation = dilation
else:
first_dilation = multi_grid[0]
layers.append(
block(
inplanes=inplanes,
planes=planes,
stride=stride,
dilation=first_dilation,
downsample=downsample,
conv_type=conv_type,
norm_type=norm_type,
**kwargs))
inplanes = planes * block.expansion
for i in range(1, num_blocks):
layers.append(
block(
inplanes=inplanes,
planes=planes,
stride=1,
dilation=dilation if multi_grid is None else multi_grid[i],
conv_type=conv_type,
norm_type=norm_type,
**kwargs))
super().__init__(*layers)
class BasicBlock(nn.Module):
"""Basic block for ResNet."""
expansion = 1
def __init__(self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
style='pytorch',
with_cp=False,
conv_type=nn.Conv2d,
norm_type=nn.BatchNorm2d,
dcn=None,
plugins=None,
zero_init_residual=True):
super().__init__()
assert dcn is None, 'Not implemented yet.'
assert plugins is None, 'Not implemented yet.'
self.zero_init_residual = zero_init_residual
norm1 = norm_type(planes)
self.norm1_name = get_norm_name(norm_type, postfix=1)
norm2 = norm_type(planes)
self.norm2_name = get_norm_name(norm_type, postfix=2)
self.conv1 = conv_type(
inplanes,
planes,
3,
stride=stride,
padding=dilation,
dilation=dilation,
bias=False)
self.add_module(self.norm1_name, norm1)
self.conv2 = conv_type(
planes,
planes,
3,
padding=1,
bias=False)
self.add_module(self.norm2_name, norm2)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
self.dilation = dilation
self.with_cp = with_cp
@property
def norm1(self):
"""nn.Module: normalization layer after the first convolution layer"""
return getattr(self, self.norm1_name)
@property
def norm2(self):
"""nn.Module: normalization layer after the second convolution layer"""
return getattr(self, self.norm2_name)
def _init_weights(self, m):
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, nn.GroupNorm) or isinstance(m, _BatchNorm):
| # coding=utf-8
'''
Author: husserl
License: Apache Licence
Software: VSCode
Date: 2023-07-17 06:36:26
LastEditors: husserl
LastEditTime: 2023-11-02 15:36:30
'''
def get_norm_name(norm_type, postfix=1):
if issubclass(norm_type, _InstanceNorm): # IN is a subclass of BN
return 'in{}'.format(postfix)
elif issubclass(norm_type, _BatchNorm):
return 'bn{}'.format(postfix)
elif issubclass(norm_type, nn.GroupNorm):
return 'gn{}'.format(postfix)
elif issubclass(norm_type, nn.LayerNorm):
return 'ln{}'.format(postfix)
class ResLayer(nn.Sequential):
"""ResLayer to build ResNet style backbone.
Args:
block (nn.Module): block used to build ResLayer.
inplanes (int): inplanes of block.
planes (int): planes of block.
num_blocks (int): number of blocks.
stride (int): stride of the first block. Default: 1
avg_down (bool): Use AvgPool instead of stride conv when
downsampling in the bottleneck. Default: False
conv_cfg (dict): dictionary to construct and config conv layer.
Default: None
norm_cfg (dict): dictionary to construct and config norm layer.
Default: dict(type='BN')
multi_grid (int | None): Multi grid dilation rates of last
stage. Default: None
contract_dilation (bool): Whether contract first dilation of each layer
Default: False
"""
def __init__(self,
block,
inplanes,
planes,
num_blocks,
stride=1,
dilation=1,
avg_down=False,
conv_type=nn.Conv2d,
norm_type=nn.BatchNorm2d,
multi_grid=None,
contract_dilation=False,
**kwargs):
self.block = block
downsample = None
if stride != 1 or inplanes != planes * block.expansion:
downsample = []
conv_stride = stride
if avg_down:
conv_stride = 1
downsample.append(
nn.AvgPool2d(
kernel_size=stride,
stride=stride,
ceil_mode=True,
count_include_pad=False))
downsample.extend([
conv_type(
inplanes,
planes * block.expansion,
kernel_size=1,
stride=conv_stride,
bias=False),
norm_type(planes * block.expansion)
])
downsample = nn.Sequential(*downsample)
layers = []
if multi_grid is None:
if dilation > 1 and contract_dilation:
first_dilation = dilation // 2
else:
first_dilation = dilation
else:
first_dilation = multi_grid[0]
layers.append(
block(
inplanes=inplanes,
planes=planes,
stride=stride,
dilation=first_dilation,
downsample=downsample,
conv_type=conv_type,
norm_type=norm_type,
**kwargs))
inplanes = planes * block.expansion
for i in range(1, num_blocks):
layers.append(
block(
inplanes=inplanes,
planes=planes,
stride=1,
dilation=dilation if multi_grid is None else multi_grid[i],
conv_type=conv_type,
norm_type=norm_type,
**kwargs))
super().__init__(*layers)
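# A minimal usage sketch for ResLayer, assuming the BasicBlock defined below and
# hypothetical channel sizes (not taken from the original file):
#   layer = ResLayer(BasicBlock, inplanes=64, planes=64, num_blocks=2, stride=1)
#   feats = layer(torch.randn(1, 64, 56, 56))  # stride 1 keeps the (1, 64, 56, 56) shape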
class BasicBlock(nn.Module):
"""Basic block for ResNet."""
expansion = 1
def __init__(self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
style='pytorch',
with_cp=False,
conv_type=nn.Conv2d,
norm_type=nn.BatchNorm2d,
dcn=None,
plugins=None,
zero_init_residual=True):
super().__init__()
assert dcn is None, 'Not implemented yet.'
assert plugins is None, 'Not implemented yet.'
self.zero_init_residual = zero_init_residual
norm1 = norm_type(planes)
self.norm1_name = get_norm_name(norm_type, postfix=1)
norm2 = norm_type(planes)
self.norm2_name = get_norm_name(norm_type, postfix=2)
self.conv1 = conv_type(
inplanes,
planes,
3,
stride=stride,
padding=dilation,
dilation=dilation,
bias=False)
self.add_module(self.norm1_name, norm1)
self.conv2 = conv_type(
planes,
planes,
3,
padding=1,
bias=False)
self.add_module(self.norm2_name, norm2)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
self.dilation = dilation
self.with_cp = with_cp
@property
def norm1(self):
"""nn.Module: normalization layer after the first convolution layer"""
return getattr(self, self.norm1_name)
@property
def norm2(self):
"""nn.Module: normalization layer after the second convolution layer"""
return getattr(self, self.norm2_name)
def _init_weights(self, m):
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, nn.GroupNorm) or isinstance(m, _BatchNorm): | constant_init(m, val=1.0, bias=0.) | 0 | 2023-11-02 09:50:13+00:00 | 2k |
intelheropuck/steam.com-scraping | listingcollector.py | [
{
"identifier": "parse_price",
"path": "helpers.py",
"snippet": "def parse_price(string):\n return int(string[1:].replace(\",\", \"\").split('.')[0])"
},
{
"identifier": "Database",
"path": "helpers.py",
"snippet": "class Database:\n _instance = None\n\n @classmethod\n def get_instance(cls):\n if not cls._instance:\n cls._instance = Database()\n return cls._instance\n\n def __init__(self):\n self.connection = psycopg2.connect(\n host=\"localhost\",\n database=\"postgres\",\n user=\"postgres\",\n password=\"12345678\",\n port=\"5432\",\n )\n self.cursor = self.connection.cursor()\n\n def insert_listing(self, lst):\n avatar = parse_hash_from_link(lst.owner_avatar)\n self.cursor.execute(\n \"INSERT INTO listings(item_name, time, price, owner_name, owner_avatar, profile_link) \"\n f\"VALUES ( %s, now(), {lst.price}, %s, '{avatar}', %s)\"\n \" ON CONFLICT DO NOTHING\", (lst.item_name, lst.owner_name, lst.profile_link)\n )\n self.connection.commit()\n if self.cursor.rowcount:\n print(\n f\"{datetime.datetime.now()} {lst.item_name: <60} ${lst.price: <4} {lst.owner_name} {lst.profile_link}\")\n\n def get_listings(self, minprice=0, maxprice=-1, limit=50):\n conditions = [\n f\" price > {minprice} \",\n f\" price < {maxprice} \" if maxprice != -1 else \"true\",\n ]\n self.cursor.execute(\n f\"SELECT * FROM listings WHERE {' AND '.join(conditions)} ORDER BY id DESC LIMIT {limit}\")\n # print(f\"SELECT * FROM listings WHERE {' AND '.join(conditions)} ORDER BY id DESC LIMIT {limit}\")\n\n out = []\n for row in self.cursor.fetchall():\n listing = Listing(\n id=row[0],\n item_name=row[1],\n time=row[2],\n price=row[3],\n owner_name=row[4],\n owner_avatar=row[5],\n profile_link=row[6]\n )\n out.append(listing)\n # print(\"\\n\\nimg data out >>>> \", out)\n return out"
},
{
"identifier": "Listing",
"path": "helpers.py",
"snippet": "class Listing:\n item_name: str\n time: str\n price: int\n owner_name: str\n owner_avatar: str\n profile_link: str\n\n def __init__(self, **kwargs):\n for key, value in kwargs.items():\n setattr(self, key, value)\n\n def __str__(self):\n return f\"{self.item_name} {self.price} {self.owner_name}\""
}
] | import json
import threading
import time
import argparse
import requests
import re
from queue import Queue
from urllib.parse import unquote
from helpers import parse_price
from helpers import Database, Listing
from urllib import parse
from bs4 import BeautifulSoup | 1,282 |
ACTIVITY_URL = "https://steamcommunity.com/market/itemordersactivity"\
"?item_nameid={item_id}&country=RU&language=english¤cy=1&&two_factor=0&norender=1"
db = Database().get_instance()
def get_activities(item_id):
return requests.get(ACTIVITY_URL.format(item_id=item_id)).json()["activity"]
STEAM_SESSION = "cafea932c9929bccba6f1583"
def getSteamidByAvatarNameAndUsername(user_name, avatar_name):
timeout=3
username = user_name
avatarname = avatar_name.split('/')[3].split(".")[0]
resp = requests.get("https://steamcommunity.com/search/SearchCommunityAjax?text=" + parse.quote(username) + "&filter=users&sessionid=" + STEAM_SESSION, headers={"Cookie": "sessionid=" + STEAM_SESSION}, timeout=timeout)
data = resp.json()
soup = BeautifulSoup(data["html"], "html.parser")
for div in soup.find_all('div', class_='avatarMedium'):
img = div.find('img')
imgFileName = img['src'].rsplit('/', 1)
imgName = imgFileName[1].split('_', 1)[0]
if imgName == avatarname:
return div.find('a')['href'].rsplit("/", 1)[1]
return ""
def get_profile_link(steamIdOrUsername):
if re.match(r'^\d+$', steamIdOrUsername):
url = "https://steamcommunity.com/profiles/" + steamIdOrUsername
else:
url = "https://steamcommunity.com/id/" + steamIdOrUsername
return url
def worker(queue):
while True:
listing_id, listing_link = queue.get()
for activity in get_activities(listing_id):
if activity["type"] == "BuyOrderCancel" or activity["type"] == "BuyOrderMulti":
continue
item_name=unquote(listing_link.split('/')[6])
price=parse_price(activity["price"])
owner_name=activity["persona_seller"] or activity["persona_buyer"]
owner_avatar=activity["avatar_seller"] or activity["avatar_buyer"]
steamIdOrUsername=getSteamidByAvatarNameAndUsername(owner_name, owner_avatar)
url=get_profile_link(steamIdOrUsername)
if url is None:
continue
else:
rrr = requests.get(url)
soup1 = BeautifulSoup(rrr.content, 'html.parser')
situation = soup1.find('div', class_ = 'profile_in_game_header')
if situation is None:
continue
else:
online = situation.text.split(' ')[1]
if online == 'Offline':
continue
else:
|
ACTIVITY_URL = "https://steamcommunity.com/market/itemordersactivity"\
"?item_nameid={item_id}&country=RU&language=english¤cy=1&&two_factor=0&norender=1"
db = Database().get_instance()
def get_activities(item_id):
return requests.get(ACTIVITY_URL.format(item_id=item_id)).json()["activity"]
STEAM_SESSION = "cafea932c9929bccba6f1583"
def getSteamidByAvatarNameAndUsername(user_name, avatar_name):
timeout=3
username = user_name
avatarname = avatar_name.split('/')[3].split(".")[0]
resp = requests.get("https://steamcommunity.com/search/SearchCommunityAjax?text=" + parse.quote(username) + "&filter=users&sessionid=" + STEAM_SESSION, headers={"Cookie": "sessionid=" + STEAM_SESSION}, timeout=timeout)
data = resp.json()
soup = BeautifulSoup(data["html"], "html.parser")
for div in soup.find_all('div', class_='avatarMedium'):
img = div.find('img')
imgFileName = img['src'].rsplit('/', 1)
imgName = imgFileName[1].split('_', 1)[0]
if imgName == avatarname:
return div.find('a')['href'].rsplit("/", 1)[1]
return ""
def get_profile_link(steamIdOrUsername):
if re.match(r'^\d+$', steamIdOrUsername):
url = "https://steamcommunity.com/profiles/" + steamIdOrUsername
else:
url = "https://steamcommunity.com/id/" + steamIdOrUsername
return url
def worker(queue):
while True:
listing_id, listing_link = queue.get()
for activity in get_activities(listing_id):
if activity["type"] == "BuyOrderCancel" or activity["type"] == "BuyOrderMulti":
continue
item_name=unquote(listing_link.split('/')[6])
price=parse_price(activity["price"])
owner_name=activity["persona_seller"] or activity["persona_buyer"]
owner_avatar=activity["avatar_seller"] or activity["avatar_buyer"]
steamIdOrUsername=getSteamidByAvatarNameAndUsername(owner_name, owner_avatar)
url=get_profile_link(steamIdOrUsername)
if url is None:
continue
else:
rrr = requests.get(url)
soup1 = BeautifulSoup(rrr.content, 'html.parser')
situation = soup1.find('div', class_ = 'profile_in_game_header')
if situation is None:
continue
else:
online = situation.text.split(' ')[1]
if online == 'Offline':
continue
else: | listing = Listing( | 2 | 2023-11-05 04:47:16+00:00 | 2k |
JaeBinCHA7/DEMUCS-for-Speech-Enhancement | models/HDDEMUCS_TF.py | [
{
"identifier": "capture_init",
"path": "models/tools.py",
"snippet": "def capture_init(init):\n \"\"\"capture_init.\n Decorate `__init__` with this, and you can then\n recover the *args and **kwargs passed to it in `self._init_args_kwargs`\n \"\"\"\n @functools.wraps(init)\n def __init__(self, *args, **kwargs):\n self._init_args_kwargs = (args, kwargs)\n init(self, *args, **kwargs)\n\n return __init__"
},
{
"identifier": "spectro",
"path": "models/tools.py",
"snippet": "def spectro(x, n_fft=512, hop_length=None, pad=0):\n *other, length = x.shape\n x = x.reshape(-1, length)\n is_mps = x.device.type == 'mps'\n if is_mps:\n x = x.cpu()\n z = th.stft(x,\n n_fft * (1 + pad),\n hop_length or n_fft // 4,\n window=th.hann_window(n_fft).to(x),\n win_length=n_fft,\n normalized=True,\n center=True,\n return_complex=True,\n pad_mode='reflect')\n _, freqs, frame = z.shape\n return z.view(*other, freqs, frame)"
},
{
"identifier": "ispectro",
"path": "models/tools.py",
"snippet": "def ispectro(z, hop_length=None, length=None, pad=0):\n *other, freqs, frames = z.shape\n n_fft = 2 * freqs - 2\n z = z.view(-1, freqs, frames)\n win_length = n_fft // (1 + pad)\n is_mps = z.device.type == 'mps'\n if is_mps:\n z = z.cpu()\n x = th.istft(z,\n n_fft,\n hop_length,\n window=th.hann_window(win_length).to(z.real),\n win_length=win_length,\n normalized=True,\n length=length,\n center=True)\n _, length = x.shape\n return x.view(*other, length)"
}
] | import math
import torch
import torch as th
import typing as tp
from torch import nn
from torch.nn import functional as F
from einops import rearrange
from .tools import capture_init, spectro, ispectro | 1,343 | """
Reference: https://github.com/facebookresearch/denoiser/blob/main/denoiser/demucs.py
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
author: adefossez
"""
class BLSTM(nn.Module):
def __init__(self, dim, layers=2, bi=True):
super().__init__()
klass = nn.LSTM
self.lstm = klass(bidirectional=bi, num_layers=layers, hidden_size=dim, input_size=dim, batch_first=True)
self.linear = None
if bi:
self.linear = nn.Linear(2 * dim, dim)
def forward(self, x, hidden=None):
x, hidden = self.lstm(x, hidden)
if self.linear:
x = self.linear(x)
return x, hidden
def pad1d(x: torch.Tensor, paddings: tp.Tuple[int, int], mode: str = 'constant', value: float = 0.):
"""Tiny wrapper around F.pad, just to allow for reflect padding on small input.
If this is the case, we insert extra 0 padding to the right before the reflection happen."""
x0 = x
length = x.shape[-1]
padding_left, padding_right = paddings
if mode == 'reflect':
max_pad = max(padding_left, padding_right)
if length <= max_pad:
extra_pad = max_pad - length + 1
extra_pad_right = min(padding_right, extra_pad)
extra_pad_left = extra_pad - extra_pad_right
paddings = (padding_left - extra_pad_left, padding_right - extra_pad_right)
x = F.pad(x, (extra_pad_left, extra_pad_right))
out = F.pad(x, paddings, mode, value)
assert out.shape[-1] == length + padding_left + padding_right
assert (out[..., padding_left: padding_left + length] == x0).all()
return out
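# A small illustrative check of the small-input reflect fallback above, with
# hypothetical sizes (not part of the original file):
#   x = th.zeros(1, 1, 2)                 # length 2 is shorter than the requested pad
#   y = pad1d(x, (3, 3), mode='reflect')  # zero padding is inserted before reflecting
#   assert y.shape[-1] == 2 + 3 + 3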
def rescale_conv(conv, reference):
std = conv.weight.std().detach()
scale = (std / reference) ** 0.5
conv.weight.data /= scale
if conv.bias is not None:
conv.bias.data /= scale
def rescale_module(module, reference):
for sub in module.modules():
if isinstance(sub, (nn.Conv1d, nn.ConvTranspose1d)):
rescale_conv(sub, reference)
class HDDEMUCS_TF(nn.Module):
"""
Demucs speech enhancement model.
Args:
- chin (int): number of input channels.
- chout (int): number of output channels.
- hidden (int): number of initial hidden channels.
- depth (int): number of layers.
- kernel_size (int): kernel size for each layer.
- stride (int): stride for each layer.
- causal (bool): if false, uses BiLSTM instead of LSTM.
- resample (int): amount of resampling to apply to the input/output.
Can be one of 1, 2 or 4.
- growth (float): number of channels is multiplied by this for every layer.
- max_hidden (int): maximum number of channels. Can be useful to
control the size/speed of the model.
- normalize (bool): if true, normalize the input.
- glu (bool): if true uses GLU instead of ReLU in 1x1 convolutions.
- rescale (float): controls custom weight initialization.
See https://arxiv.org/abs/1911.13254.
- floor (float): stability flooring when normalizing.
- sample_rate (float): sample_rate used for training the model.
"""
| """
Reference: https://github.com/facebookresearch/denoiser/blob/main/denoiser/demucs.py
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
author: adefossez
"""
class BLSTM(nn.Module):
def __init__(self, dim, layers=2, bi=True):
super().__init__()
klass = nn.LSTM
self.lstm = klass(bidirectional=bi, num_layers=layers, hidden_size=dim, input_size=dim, batch_first=True)
self.linear = None
if bi:
self.linear = nn.Linear(2 * dim, dim)
def forward(self, x, hidden=None):
x, hidden = self.lstm(x, hidden)
if self.linear:
x = self.linear(x)
return x, hidden
def pad1d(x: torch.Tensor, paddings: tp.Tuple[int, int], mode: str = 'constant', value: float = 0.):
"""Tiny wrapper around F.pad, just to allow for reflect padding on small input.
If this is the case, we insert extra 0 padding to the right before the reflection happen."""
x0 = x
length = x.shape[-1]
padding_left, padding_right = paddings
if mode == 'reflect':
max_pad = max(padding_left, padding_right)
if length <= max_pad:
extra_pad = max_pad - length + 1
extra_pad_right = min(padding_right, extra_pad)
extra_pad_left = extra_pad - extra_pad_right
paddings = (padding_left - extra_pad_left, padding_right - extra_pad_right)
x = F.pad(x, (extra_pad_left, extra_pad_right))
out = F.pad(x, paddings, mode, value)
assert out.shape[-1] == length + padding_left + padding_right
assert (out[..., padding_left: padding_left + length] == x0).all()
return out
def rescale_conv(conv, reference):
std = conv.weight.std().detach()
scale = (std / reference) ** 0.5
conv.weight.data /= scale
if conv.bias is not None:
conv.bias.data /= scale
def rescale_module(module, reference):
for sub in module.modules():
if isinstance(sub, (nn.Conv1d, nn.ConvTranspose1d)):
rescale_conv(sub, reference)
class HDDEMUCS_TF(nn.Module):
"""
Demucs speech enhancement model.
Args:
- chin (int): number of input channels.
- chout (int): number of output channels.
- hidden (int): number of initial hidden channels.
- depth (int): number of layers.
- kernel_size (int): kernel size for each layer.
- stride (int): stride for each layer.
- causal (bool): if false, uses BiLSTM instead of LSTM.
- resample (int): amount of resampling to apply to the input/output.
Can be one of 1, 2 or 4.
- growth (float): number of channels is multiplied by this for every layer.
- max_hidden (int): maximum number of channels. Can be useful to
control the size/speed of the model.
- normalize (bool): if true, normalize the input.
- glu (bool): if true uses GLU instead of ReLU in 1x1 convolutions.
- rescale (float): controls custom weight initialization.
See https://arxiv.org/abs/1911.13254.
- floor (float): stability flooring when normalizing.
- sample_rate (float): sample_rate used for training the model.
"""
| @capture_init | 0 | 2023-11-06 08:16:24+00:00 | 2k |
pauloxnet/generatedfields | samples/tests.py | [
{
"identifier": "Circle",
"path": "samples/models.py",
"snippet": "class Circle(models.Model):\n radius = models.FloatField()\n area = models.GeneratedField(\n expression=Round(\n Power(\"radius\", 2) * Pi(),\n precision=2,\n ),\n output_field=models.FloatField(),\n db_persist=True,\n )\n\n def __str__(self):\n return f\"{self.radius}²×π={self.area}\""
},
{
"identifier": "Event",
"path": "samples/models.py",
"snippet": "class Event(models.Model):\n start = models.DateTimeField()\n start_date = models.GeneratedField(\n expression=TruncDate(\"start\"),\n output_field=models.DateField(),\n db_persist=True,\n )\n end = models.DateTimeField(null=True)\n end_date = models.GeneratedField(\n expression=TruncDate(\"end\"),\n output_field=models.DateField(),\n db_persist=True,\n )\n duration = models.GeneratedField(\n expression=F(\"end\") - F(\"start\"),\n output_field=models.DurationField(),\n db_persist=True,\n )\n\n def __str__(self):\n return f\"[{self.duration or '∞'}] {self.start_date}…{self.end_date or ''}\""
},
{
"identifier": "Item",
"path": "samples/models.py",
"snippet": "class Item(models.Model):\n price = models.DecimalField(max_digits=6, decimal_places=2)\n quantity = models.PositiveSmallIntegerField(db_default=Value(1))\n total_price = models.GeneratedField(\n expression=F(\"price\") * F(\"quantity\"),\n output_field=models.DecimalField(max_digits=11, decimal_places=2),\n db_persist=True,\n )\n\n def __str__(self):\n return f\"{self.price}×{self.quantity}={self.total_price}\""
},
{
"identifier": "Order",
"path": "samples/models.py",
"snippet": "class Order(models.Model):\n creation = models.DateTimeField()\n payment = models.DateTimeField(null=True)\n status = models.GeneratedField(\n expression=Case(\n When(\n payment__isnull=False,\n then=Value(\"paid\"),\n ),\n default=Value(\"created\"),\n ),\n output_field=models.TextField(),\n db_persist=True,\n )\n\n def __str__(self):\n return f\"[{self.status}] {self.payment or self.creation}\""
},
{
"identifier": "Package",
"path": "samples/models.py",
"snippet": "class Package(models.Model):\n slug = models.SlugField()\n data = models.JSONField()\n version = models.GeneratedField(\n expression=F(\"data__info__version\"),\n output_field=models.TextField(),\n db_persist=True,\n )\n\n def __str__(self):\n return f\"{self.slug} {self.version}\""
},
{
"identifier": "Rectangle",
"path": "samples/models.py",
"snippet": "class Rectangle(models.Model):\n base = models.FloatField()\n height = models.FloatField()\n area = models.GeneratedField(\n expression=F(\"base\") * F(\"height\"),\n output_field=models.FloatField(),\n db_persist=True,\n )\n\n def __str__(self):\n return f\"{self.base}×{self.height}={self.area}\""
},
{
"identifier": "RightTriangle",
"path": "samples/models.py",
"snippet": "class RightTriangle(models.Model):\n hypotenuse = models.FloatField()\n angle = models.FloatField()\n area = models.GeneratedField(\n expression=Round(\n (Power(\"hypotenuse\", 2) * Sin(Radians(\"angle\")) * Cos(Radians(\"angle\")))\n / 2,\n precision=2,\n ),\n output_field=models.FloatField(),\n db_persist=True,\n )\n\n def __str__(self):\n return f\"{self.hypotenuse}²×sin({self.angle}°)×cos({self.angle}°)÷2={self.area}\""
},
{
"identifier": "Square",
"path": "samples/models.py",
"snippet": "class Square(models.Model):\n side = models.FloatField()\n area = models.GeneratedField(\n expression=Power(\"side\", 2),\n output_field=models.FloatField(),\n db_persist=True,\n )\n\n def __str__(self):\n return f\"{self.side}²={self.area}\""
},
{
"identifier": "User",
"path": "samples/models.py",
"snippet": "class User(models.Model):\n first_name = models.CharField(max_length=150)\n last_name = models.CharField(max_length=150)\n full_name = models.GeneratedField(\n expression=Concat(\"first_name\", Value(\" \"), \"last_name\"),\n output_field=models.TextField(),\n db_persist=True,\n )\n\n def __str__(self):\n return self.full_name"
}
] | from django.test import TestCase
from samples.models import (
Circle,
Event,
Item,
Order,
Package,
Rectangle,
RightTriangle,
Square,
User,
) | 1,227 |
class RectangleTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.rectangle = Rectangle.objects.create(base=6, height=7)
def test_str(self):
self.assertEqual(str(self.rectangle), "6×7=42.0")
class SquareTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.square = Square.objects.create(side=3)
def test_str(self):
self.assertEqual(str(self.square), "3²=9.0")
class CircleTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.circle = Circle.objects.create(radius=3.1415)
def test_str(self):
self.assertEqual(str(self.circle), "3.1415²×π=31.0")
class RightTriangleTestCase(TestCase):
@classmethod
def setUpTestData(cls):
|
class RectangleTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.rectangle = Rectangle.objects.create(base=6, height=7)
def test_str(self):
self.assertEqual(str(self.rectangle), "6×7=42.0")
class SquareTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.square = Square.objects.create(side=3)
def test_str(self):
self.assertEqual(str(self.square), "3²=9.0")
class CircleTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.circle = Circle.objects.create(radius=3.1415)
def test_str(self):
self.assertEqual(str(self.circle), "3.1415²×π=31.0")
class RightTriangleTestCase(TestCase):
@classmethod
def setUpTestData(cls): | cls.righttriangle = RightTriangle.objects.create(hypotenuse=5, angle=45) | 6 | 2023-11-07 17:06:11+00:00 | 2k |
akhilravidas/stack-sparrow | sparrow/assistant/run.py | [
{
"identifier": "actions",
"path": "sparrow/assistant/actions.py",
"snippet": "class FileReviewComments(BaseModel):\nclass FileReviewResult(BaseModel):\n def new(cls, json_input: str) -> FileReviewResult:"
},
{
"identifier": "BaseReview",
"path": "sparrow/assistant/review.py",
"snippet": "class BaseReview(Protocol):\n \"\"\"\n Base Review Protocol\n \"\"\"\n\n def current_file_contents(self, path: str) -> Optional[str]:\n \"\"\"Read the current file contents which includes the changes made by this review\"\"\"\n ...\n\n def previous_file_contents(self, path: str) -> Optional[str]:\n \"\"\"Read a file contents before the changes made by this review\"\"\"\n ...\n\n @property\n def diff_by_file(\n self,\n ) -> Iterator[Tuple[str, Optional[str], List[Tuple[int, int]]]]:\n \"\"\"\n Returns a generator of (file_path, unified_diff, list of (hunk_start_line and hunk_end_line)) tuples\n \"\"\"\n ...\n\n @property\n def root_dir(self) -> str:\n \"\"\"Returns the root directory of the review\"\"\"\n ..."
},
{
"identifier": "ReviewFile",
"path": "sparrow/assistant/review.py",
"snippet": "class ReviewFile:\n \"\"\"\n Wrapper for a single LLM call\n \"\"\"\n\n file_path: str\n message: str\n status: Literal[\"needs_review\", \"skipped\"]\n input_tokens: int\n skipped_reason: Optional[str] = None"
},
{
"identifier": "ReviewPlan",
"path": "sparrow/assistant/review.py",
"snippet": "class ReviewPlan:\n \"\"\"\n Review broken down into individual review steps.\n\n Includes other metrics like estimated cost and input/output token counts computed during\n `plan` in case user confirmation is needed.\n \"\"\"\n\n files: List[ReviewFile] = field(default_factory=list)\n estimated_cost: float = 0\n input_tokens: int = 0\n estimated_output_tokens: int = 0\n\n def add_file(self, file: ReviewFile, in_tokens: int, est_out_tokens: int) -> None:\n \"\"\"\n Add a review step to the plan.\n \"\"\"\n self.files.append(file)\n self.input_tokens += in_tokens\n self.estimated_output_tokens += est_out_tokens"
},
{
"identifier": "config",
"path": "sparrow/libs/config.py",
"snippet": "EXCLUDED_EXTENSIONS = (\".lock\", \".yaml\", \".toml\", \".json\", \".md\", \".txt\")\ndef is_excluded(path: str):\ndef app_data_root() -> Path:\ndef config_path() -> Path:\n def instance(cls) -> AppConfig:\n def save(self):\n def model_name(self) -> str:\ndef get() -> AppConfig:\nclass AppConfig:"
},
{
"identifier": "constants",
"path": "sparrow/libs/constants.py",
"snippet": "PACKAGE_NAME = \"stack-sparrow\"\nMODEL_COSTS: Dict[str, ModelCost] = {\n \"gpt-4-1106-preview\": ModelCost(\n block_size=1000, input_cost_per_block=0.01, output_cost_per_block=0.03\n ),\n}\nMAX_TOKENS_PER_REVIEW = 20 * 1000 # 20K for high signal\nSENTRY_DSN = \"https://d57c1dcbafc96c6c28e233af853ac991@o4506171527266304.ingest.sentry.io/4506171531132928\""
},
{
"identifier": "llm",
"path": "sparrow/libs/llm.py",
"snippet": "def num_tokens(prompt: str) -> int:\ndef cost(input_tokens: int, estimated_output_tokens: int) -> float:"
},
{
"identifier": "scm",
"path": "sparrow/libs/scm.py",
"snippet": "def get_repo(repo_path: Optional[str] = None) -> Optional[git.Repo]:\ndef maybe_commit_rev(\n commit_hash: str, repo_path: Optional[str]\n) -> Optional[git.Commit]:\ndef patch_set(\n repo: git.Repo, head_commit_rev: str, base_commit_rev: Optional[str]\n) -> unidiff.PatchSet:\ndef file_is_binary(file_path, check_bytes=8000):"
},
{
"identifier": "strings",
"path": "sparrow/libs/strings.py",
"snippet": "MAX_PADDING = 5\ndef annotated_file_contents(\n content: str, changed_line_ranges: List[Tuple[int, int]], start: int = 1\n) -> str:\n def is_changed_line(line_number):"
}
] | import json
import logging
import os
import time
import pydantic
from functools import lru_cache
from typing import List, Optional, Tuple
from openai import OpenAI
from openai.types.beta.threads import Run
from rich import print # pylint: disable=redefined-builtin
from rich.progress import Progress, SpinnerColumn, TextColumn
from sparrow.assistant import actions
from sparrow.assistant.review import BaseReview, ReviewFile, ReviewPlan
from sparrow.libs import config, constants, llm, scm, strings | 1,248 | """
OpenAI assistant
"""
ASSISTANT_INSTRUCTIONS = """
You are an assistant that helps with DevOps tasks. You review code, help with adding documentation, etc.
""".strip()
REVIEW_THREAD_INSTRUCTIONS = """
Each message in this thread represents changes made to a file in the patch set.
The first line is the file path. The subsequent lines contains the file contents annotated with line numbers.
Only the lines that start with an asterisk were updated.
IMPORTANT:
- Review code and flag substantive issues for updated code (lines marked with an asterisk).
- Only reject if you are sure that there is an underlying issue with the code.
- Do not flag formatting or style issues.
""".strip()
@lru_cache(maxsize=None)
def _client() -> OpenAI:
| """
OpenAI assistant
"""
ASSISTANT_INSTRUCTIONS = """
You are an assistant that helps with DevOps tasks. You review code, help with adding documentation, etc.
""".strip()
REVIEW_THREAD_INSTRUCTIONS = """
Each message in this thread represents changes made to a file in the patch set.
The first line is the file path. The subsequent lines contains the file contents annotated with line numbers.
Only the lines that start with an asterisk were updated.
IMPORTANT:
- Review code and flag substantive issues for updated code (lines marked with an asterisk).
- Only reject if you are sure that there is an underlying issue with the code.
- Do not flag formatting or style issues.
""".strip()
@lru_cache(maxsize=None)
def _client() -> OpenAI: | return OpenAI(api_key=config.AppConfig.instance().openai_token) | 4 | 2023-11-07 00:55:26+00:00 | 2k |
nimamahmoudi/LLMStreamlitDemoBasic | app-agent.py | [
{
"identifier": "get_agent_chain",
"path": "llm_helper.py",
"snippet": "def get_agent_chain(file_name=\"Mahmoudi_Nima_202202_PhD.pdf\", index_folder=\"index\", callbacks=None, st_cb: Optional[StreamlitCallbackHandler] = None, ):\n if callbacks is None:\n callbacks = []\n\n from langchain.agents import initialize_agent, AgentType\n from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder\n from langchain.agents.format_scratchpad.openai_tools import (\n format_to_openai_tool_messages,\n )\n from langchain.agents import AgentExecutor\n from langchain.agents.output_parsers.openai_tools import OpenAIToolsAgentOutputParser\n\n lc_tools, oai_tools = get_lc_oai_tools(file_name, index_folder, st_cb)\n \n\n prompt = ChatPromptTemplate.from_messages(\n [\n (\"system\", \"You are a helpful assistant, use the search tool to answer the user's question and cite only the page number when you use information coming (like [p1]) from the source document.\\nchat history: {chat_history}\"),\n (\"user\", \"{input}\"),\n MessagesPlaceholder(variable_name=\"agent_scratchpad\"),\n ]\n )\n llm = ChatOpenAI(temperature=0, model=\"gpt-3.5-turbo-1106\")\n\n agent = (\n {\n \"input\": lambda x: x[\"input\"],\n \"agent_scratchpad\": lambda x: format_to_openai_tool_messages(\n x[\"intermediate_steps\"]\n ),\n \"chat_history\": lambda x: _format_chat_history(x[\"chat_history\"]),\n }\n | prompt\n | llm.bind(tools=oai_tools)\n | OpenAIToolsAgentOutputParser()\n )\n\n agent_executor = AgentExecutor(agent=agent, tools=lc_tools, verbose=True, callbacks=callbacks)\n return agent_executor"
},
{
"identifier": "get_lc_oai_tools",
"path": "llm_helper.py",
"snippet": "def get_lc_oai_tools(file_name:str = \"Mahmoudi_Nima_202202_PhD.pdf\", index_folder: str = \"index\", st_cb: Optional[StreamlitCallbackHandler] = None, ):\n from langchain.tools.render import format_tool_to_openai_tool\n search_index = get_search_index(file_name, index_folder)\n lc_tools = [get_search_tool_from_index(search_index=search_index, st_cb=st_cb)]\n oai_tools = [format_tool_to_openai_tool(t) for t in lc_tools]\n return lc_tools, oai_tools"
}
] | import streamlit as st
from langchain.agents import initialize_agent, AgentType
from langchain.callbacks import StreamlitCallbackHandler
from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
from llm_helper import get_agent_chain, get_lc_oai_tools | 1,017 |
with st.sidebar:
openai_api_key = st.secrets["OPENAI_API_KEY"]
"[Get an OpenAI API key](https://platform.openai.com/account/api-keys)"
"[View the source code](https://github.com/streamlit/llm-examples/blob/main/pages/2_Chat_with_search.py)"
"[](https://codespaces.new/streamlit/llm-examples?quickstart=1)"
st.title("🔎 LangChain - Chat with search")
"""
In this example, we're using `StreamlitCallbackHandler` to display the thoughts and actions of an agent in an interactive Streamlit app.
Try more LangChain 🤝 Streamlit Agent examples at [github.com/langchain-ai/streamlit-agent](https://github.com/langchain-ai/streamlit-agent).
"""
if "messages" not in st.session_state:
st.session_state["messages"] = [
{"role": "assistant", "content": "Hi, I'm a chatbot who can search the web. How can I help you?"}
]
for msg in st.session_state.messages:
st.chat_message(msg["role"]).write(msg["content"])
if prompt := st.chat_input(placeholder="Who won the Women's U.S. Open in 2018?"):
st.session_state.messages.append({"role": "user", "content": prompt})
st.chat_message("user").write(prompt)
if not openai_api_key:
st.info("Please add your OpenAI API key to continue.")
st.stop()
llm = ChatOpenAI(model_name="gpt-3.5-turbo-1106", openai_api_key=openai_api_key, streaming=True)
|
with st.sidebar:
openai_api_key = st.secrets["OPENAI_API_KEY"]
"[Get an OpenAI API key](https://platform.openai.com/account/api-keys)"
"[View the source code](https://github.com/streamlit/llm-examples/blob/main/pages/2_Chat_with_search.py)"
"[](https://codespaces.new/streamlit/llm-examples?quickstart=1)"
st.title("🔎 LangChain - Chat with search")
"""
In this example, we're using `StreamlitCallbackHandler` to display the thoughts and actions of an agent in an interactive Streamlit app.
Try more LangChain 🤝 Streamlit Agent examples at [github.com/langchain-ai/streamlit-agent](https://github.com/langchain-ai/streamlit-agent).
"""
if "messages" not in st.session_state:
st.session_state["messages"] = [
{"role": "assistant", "content": "Hi, I'm a chatbot who can search the web. How can I help you?"}
]
for msg in st.session_state.messages:
st.chat_message(msg["role"]).write(msg["content"])
if prompt := st.chat_input(placeholder="Who won the Women's U.S. Open in 2018?"):
st.session_state.messages.append({"role": "user", "content": prompt})
st.chat_message("user").write(prompt)
if not openai_api_key:
st.info("Please add your OpenAI API key to continue.")
st.stop()
llm = ChatOpenAI(model_name="gpt-3.5-turbo-1106", openai_api_key=openai_api_key, streaming=True) | lc_tools, _ = get_lc_oai_tools() | 1 | 2023-11-05 13:19:04+00:00 | 2k |
JakubPluta/gymhero | gymhero/api/routes/user.py | [
{
"identifier": "get_current_superuser",
"path": "gymhero/api/dependencies.py",
"snippet": "def get_current_superuser(\n current_user: User = Depends(get_current_user),\n) -> User:\n \"\"\"Returns the current superuser.\n\n Parameters:\n current_user (User, optional): The current user.\n\n Returns:\n User: The current superuser.\n\n Raises:\n HTTPException: If the current user is not a super user.\n\n \"\"\"\n if not user_crud.is_super_user(current_user):\n raise _get_credential_exception(\n status_code=status.HTTP_403_FORBIDDEN,\n details=\"The user does not have enough privileges\",\n )\n return current_user"
},
{
"identifier": "get_pagination_params",
"path": "gymhero/api/dependencies.py",
"snippet": "def get_pagination_params(\n skip: int = Query(0, ge=0), limit: int = Query(10, gt=0)\n) -> Tuple[int, int]:\n \"\"\"\n Get the pagination parameters.\n\n Parameters:\n skip (int): The number of items to skip. Defaults to 0.\n limit (int): The maximum number of items to return. Defaults to 10.\n\n Returns:\n Tuple[int, int]: A tuple containing the skip and limit values.\n \"\"\"\n return skip, limit"
},
{
"identifier": "user_crud",
"path": "gymhero/crud/user.py",
"snippet": "class UserCRUDRepository(CRUDRepository):\n def get_user_by_email(self, db: Session, email: str) -> Optional[User]:\n def is_super_user(user: User) -> bool:\n def is_active_user(user: User) -> bool:\n def deactivate_user(db: Session, user: User) -> User:\n def authenticate_user(\n self, db: Session, email: str, password: str\n ) -> Optional[User]:"
},
{
"identifier": "get_db",
"path": "gymhero/database/db.py",
"snippet": "def get_db() -> Generator: # pragma: no cover\n \"\"\"\n Returns a generator that yields a database session\n\n Yields:\n Session: A database session object.\n\n Raises:\n Exception: If an error occurs while getting the database session.\n \"\"\"\n\n log.debug(\"getting database session\")\n db = get_local_session(SQLALCHEMY_DATABASE_URL, False)()\n try:\n yield db\n finally: # pragma: no cover\n log.debug(\"closing database session\")\n db.close() # pragma: no cover"
},
{
"identifier": "get_logger",
"path": "gymhero/log.py",
"snippet": "def get_logger(\n name: Optional[str] = None, level: DebugLevelType = \"DEBUG\"\n) -> logging.Logger:\n \"\"\"\n Creates and configures a logger for logging messages.\n\n Parameters:\n name (Optional[str]): The name of the logger. Defaults to None.\n level (DebugLevel): The logging level. Defaults to DebugLevel.DEBUG.\n\n Returns:\n logging.Logger: The configured logger object.\n \"\"\"\n logger = logging.getLogger(name=name)\n handler = logging.StreamHandler(sys.stdout)\n formatter = logging.Formatter(LOGGING_FORMATTER)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n\n if not level or level not in DebugLevels:\n logger.warning(\n \"Invalid logging level %s. Setting logging level to DEBUG.\", level\n )\n level = \"DEBUG\"\n\n logger.setLevel(level=level)\n return logger"
},
{
"identifier": "User",
"path": "gymhero/models/user.py",
"snippet": "class User(Base):\n __tablename__ = \"users\"\n\n id = Column(Integer, primary_key=True, index=True)\n full_name = Column(String, index=True)\n email = Column(String, unique=True, index=True, nullable=False)\n hashed_password = Column(String, nullable=False)\n is_active = Column(Boolean, default=True)\n is_superuser = Column(Boolean, default=False)\n\n training_plans = relationship(\"TrainingPlan\", back_populates=\"owner\")\n training_units = relationship(\"TrainingUnit\", back_populates=\"owner\")\n\n def __repr__(self):\n return f\"<User(full_name={self.full_name}, email={self.email})>\""
},
{
"identifier": "UserCreate",
"path": "gymhero/schemas/user.py",
"snippet": "class UserCreate(UserBase):\n email: EmailStr\n password: str\n is_superuser: bool = False"
},
{
"identifier": "UserInDB",
"path": "gymhero/schemas/user.py",
"snippet": "class UserInDB(UserBase):\n hashed_password: str\n is_superuser: bool = False"
},
{
"identifier": "UserOut",
"path": "gymhero/schemas/user.py",
"snippet": "class UserOut(UserBase):\n id: int\n model_config = ConfigDict(from_attributes=True)"
},
{
"identifier": "UserUpdate",
"path": "gymhero/schemas/user.py",
"snippet": "class UserUpdate(UserBase):\n password: Optional[str] = None\n is_superuser: bool = False"
},
{
"identifier": "get_password_hash",
"path": "gymhero/security.py",
"snippet": "def get_password_hash(password: str) -> str:\n \"\"\"\n Generate the hash value of a password.\n\n Parameters:\n password (str): The password to be hashed.\n\n Returns:\n str: The hash value of the password.\n \"\"\"\n return pwd_context.hash(password)"
}
] | from typing import List, Optional
from fastapi import APIRouter, Depends, HTTPException, status
from sqlalchemy.orm import Session
from gymhero.api.dependencies import get_current_superuser, get_pagination_params
from gymhero.crud import user_crud
from gymhero.database.db import get_db
from gymhero.log import get_logger
from gymhero.models import User
from gymhero.schemas.user import UserCreate, UserInDB, UserOut, UserUpdate
from gymhero.security import get_password_hash | 1,318 |
log = get_logger(__name__)
router = APIRouter(dependencies=[Depends(get_current_superuser)])
@router.get(
"/all", response_model=List[Optional[UserOut]], status_code=status.HTTP_200_OK
)
def fetch_all_users(
|
log = get_logger(__name__)
router = APIRouter(dependencies=[Depends(get_current_superuser)])
@router.get(
"/all", response_model=List[Optional[UserOut]], status_code=status.HTTP_200_OK
)
def fetch_all_users( | db: Session = Depends(get_db), pagination_params=Depends(get_pagination_params) | 3 | 2023-11-05 14:37:46+00:00 | 2k |
IIMunchII/restllm | src/restllm/models/chat.py | [
{
"identifier": "MetaModel",
"path": "src/restllm/models/base.py",
"snippet": "class MetaModel(BaseModel):\n id: int = Field(gt=0, examples=[1, 2, 3])\n class_name: str\n owner: int\n object: Any\n created_at: Datetime = Field(default_factory=Datetime)\n updated_at: Datetime = Field(default_factory=Datetime)"
},
{
"identifier": "FunctionCall",
"path": "src/restllm/models/functions.py",
"snippet": "class FunctionCall(BaseModel):\n name: str = Field(\n description=\"Name of the function to call\"\n )\n args: dict[str, Any]"
},
{
"identifier": "CompletionParameters",
"path": "src/restllm/models/completion.py",
"snippet": "class CompletionParameters(BaseModel):\n model: ModelTypes\n functions: Optional[list[FunctionName]] = Field(\n default=None,\n description=\"A list of functions that the model may use to generate JSON inputs. Each function should have the following properties\",\n examples=[None,[\"SearchArticlesFunction\"]],\n )\n temperature: Optional[Union[float, None]] = Field(\n default=0.2,\n ge=0,\n le=2,\n description=\"The sampling temperature to be used, between 0 and 2. Higher values like 0.8 produce more random outputs, while lower values like 0.2 make outputs more focused and deterministic.\",\n )\n top_p: Optional[Union[float, None]] = Field(\n default=None,\n description=\"An alternative to sampling with temperature. It instructs the model to consider the results of the tokens with top_p probability. For example, 0.1 means only the tokens comprising the top 10% probability mass are considered.\",\n examples=[None, 0.1]\n )\n n: Optional[int] = Field(\n default=None,\n description=\"The number of chat completion choices to generate for each input message.\",\n examples=[None, 2],\n )\n stop: Optional[Union[str, list[str], None]] = Field(\n default=None,\n description=\"Up to 4 sequences where the API will stop generating further tokens.\",\n examples=[None]\n )\n max_tokens: Optional[int] = Field(\n default=None,\n description=\"The maximum number of tokens to generate in the chat completion.\",\n examples=[None],\n )\n presence_penalty: Optional[Union[float, None]] = Field(\n default=None,\n description=\"It is used to penalize new tokens based on their existence in the text so far.\",\n examples=[None],\n )\n frequency_penalty: Optional[Union[float, None]] = Field(\n default=None,\n description=\"It is used to penalize new tokens based on their frequency in the text so far.\",\n examples=[None],\n )\n logit_bias: Optional[dict[str, float]] = Field(\n default={},\n description=\"Used to modify the probability of specific tokens appearing in the completion.\",\n examples=[None],\n )\n user: Optional[str] = Field(\n default=None,\n description=\"A unique identifier representing your end-user. This can help OpenAI to monitor and detect abuse.\",\n examples=[None, \"Alice\"],\n )\n function_call: Optional[Union[str, dict[str, Any]]] = Field(\n default=None,\n description=\"Controls how the model responds to function calls.\",\n examples=[None, \"auto\"],\n )"
},
{
"identifier": "get_function_schemas",
"path": "src/restllm/models/functions.py",
"snippet": "def get_function_schemas(function_names: list[str]) -> list[dict[str, Any]]:\n function_classes = get_function_classes()\n return [\n function_classes.get(function_name).function_schema\n for function_name in function_names\n ]"
}
] | from enum import auto, UNIQUE, verify, StrEnum
from typing import Optional
from pydantic import BaseModel, Field
from .base import MetaModel
from .functions import FunctionCall
from .completion import CompletionParameters
from ..models.functions import get_function_schemas | 1,232 |
@verify(UNIQUE)
class RoleTypes(StrEnum):
USER = auto()
SYSTEM = auto()
ASSISTANT = auto()
FUNCTION = auto()
@verify(UNIQUE)
class ModelTypes(StrEnum):
GPT3_TURBO = "gpt-3.5-turbo"
GPT3_TURBO_16K = "gpt-3.5-turbo-16k"
GPT4 = "gpt-4"
GPT4_32K = "gpt-4-32k"
class ChatMessage(BaseModel):
role: RoleTypes = Field(
description="The role of the message's author. Roles can be: system, user, assistant, or function.",
examples=[RoleTypes.USER, RoleTypes.SYSTEM],
)
content: str = Field(
description="The contents of the message. It is required for all messages, but may be null for assistant messages with function calls.",
examples=["Can you write a function in Python that adds two numbers together?"],
)
name: Optional[str] = Field(
default=None,
max_length=64,
pattern="^[a-zA-Z0-9_]*$",
description="The name of the author of the message. It is required if the role is 'function'. The name should match the name of the function represented in the content. It can contain characters (a-z, A-Z, 0-9), and underscores, with a maximum length of 64 characters.",
examples=["Alice", "AI Assistant"],
)
|
@verify(UNIQUE)
class RoleTypes(StrEnum):
USER = auto()
SYSTEM = auto()
ASSISTANT = auto()
FUNCTION = auto()
@verify(UNIQUE)
class ModelTypes(StrEnum):
GPT3_TURBO = "gpt-3.5-turbo"
GPT3_TURBO_16K = "gpt-3.5-turbo-16k"
GPT4 = "gpt-4"
GPT4_32K = "gpt-4-32k"
class ChatMessage(BaseModel):
role: RoleTypes = Field(
description="The role of the message's author. Roles can be: system, user, assistant, or function.",
examples=[RoleTypes.USER, RoleTypes.SYSTEM],
)
content: str = Field(
description="The contents of the message. It is required for all messages, but may be null for assistant messages with function calls.",
examples=["Can you write a function in Python that adds two numbers together?"],
)
name: Optional[str] = Field(
default=None,
max_length=64,
pattern="^[a-zA-Z0-9_]*$",
description="The name of the author of the message. It is required if the role is 'function'. The name should match the name of the function represented in the content. It can contain characters (a-z, A-Z, 0-9), and underscores, with a maximum length of 64 characters.",
examples=["Alice", "AI Assistant"],
) | function_call: Optional[FunctionCall] = Field( | 1 | 2023-11-05 19:16:00+00:00 | 2k |
rabilrbl/deepseek-api | deepseek_api/deepseek_api.py | [
{
"identifier": "API_URL",
"path": "deepseek_api/constants.py",
"snippet": "class API_URL:\n \"\"\"Deepseek API URL constants\"\"\"\n\n BASE_URL = \"https://coder.deepseek.com/api/v0\"\n LOGIN = BASE_URL + \"/users/login\"\n CLEAR_CONTEXT = BASE_URL + \"/chat/clear_context\"\n CHAT = BASE_URL + \"/chat/completions\""
},
{
"identifier": "DeepseekConstants",
"path": "deepseek_api/constants.py",
"snippet": "class DeepseekConstants:\n \"\"\"Deepseek constants\"\"\"\n\n BASE_HEADERS = {\n \"Accept-Language\": \"en-IN,en;q=0.9\",\n \"Cache-Control\": \"no-cache\",\n \"Connection\": \"keep-alive\",\n \"DNT\": \"1\",\n \"Origin\": \"https://coder.deepseek.com\",\n \"Pragma\": \"no-cache\",\n \"Referer\": \"https://coder.deepseek.com/\",\n \"Sec-Fetch-Dest\": \"empty\",\n \"Sec-Fetch-Mode\": \"cors\",\n \"Sec-Fetch-Site\": \"same-origin\",\n \"User-Agent\": \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome\",\n \"accept\": \"*/*\",\n \"content-type\": \"application/json\",\n \"sec-ch-ua\": '\"Google Chrome\";v=\"119\", \"Chromium\";v=\"119\", \"Not?A_Brand\";v=\"24\"',\n \"sec-ch-ua-mobile\": \"?0\",\n \"sec-ch-ua-platform\": '\"Linux\"',\n \"x-app-version\": \"20240105.0\",\n }"
},
{
"identifier": "EmptyEmailOrPasswordError",
"path": "deepseek_api/errors.py",
"snippet": "class EmptyEmailOrPasswordError(Exception):\n \"\"\"Exception raised when the email or password is empty.\"\"\"\n def __init__(self, message=\"Email or password cannot be empty\"):\n self.message = message\n super().__init__(self.message)"
},
{
"identifier": "NotLoggedInError",
"path": "deepseek_api/errors.py",
"snippet": "class NotLoggedInError(Exception):\n \"\"\"Exception raised when the user is not logged in.\"\"\"\n def __init__(self, message=\"You are not logged in. Please login first\"):\n self.message = message\n super().__init__(self.message)"
}
] | import requests
import aiohttp
import aiofiles
import threading
import json
import jwt
import datetime
from abc import ABC, abstractmethod
from deepseek_api.constants import API_URL, DeepseekConstants
from deepseek_api.errors import EmptyEmailOrPasswordError, NotLoggedInError | 743 |
class DeepseekBase(ABC):
"""
A base class to create DeepseekAPI instances.
"""
def __init__(
self,
email: str,
password: str,
model_class: str = "deepseek_code",
save_login: bool = False,
):
"""
Constructor method for DeepseekAPI class.
Initializes a DeepseekAPI instance with provided credentials and settings.
Parameters:
email (str): User's email for Deepseek account
password (str): Password for user's Deepseek account
model_class (str): Deepseek model to use, either 'deepseek_chat' or 'deepseek_code'
save_login (bool): Whether to save credentials to login.json to avoid re-login
"""
self.email = email
self.password = password
self.model_class = model_class
self.save_login = save_login
|
class DeepseekBase(ABC):
"""
A base class to create DeepseekAPI instances.
"""
def __init__(
self,
email: str,
password: str,
model_class: str = "deepseek_code",
save_login: bool = False,
):
"""
Constructor method for DeepseekAPI class.
Initializes a DeepseekAPI instance with provided credentials and settings.
Parameters:
email (str): User's email for Deepseek account
password (str): Password for user's Deepseek account
model_class (str): Deepseek model to use, either 'deepseek_chat' or 'deepseek_code'
save_login (bool): Whether to save credentials to login.json to avoid re-login
"""
self.email = email
self.password = password
self.model_class = model_class
self.save_login = save_login | self.headers = DeepseekConstants.BASE_HEADERS | 1 | 2023-11-09 18:42:43+00:00 | 2k |
HealthSciTech/E2E-PPG | example.py | [
{
"identifier": "e2e_hrv_extraction",
"path": "e2e_ppg_pipeline.py",
"snippet": "def e2e_hrv_extraction(\n input_sig: np.ndarray,\n sampling_rate: int,\n window_length_sec: int = 60,\n peak_detection_method: str = 'kazemi'\n) -> pd.DataFrame:\n '''\n End-to-end HR and HRV extraction from an input PPG signal.\n \n Args:\n input_sig (np.ndarray): The input PPG signal.\n sampling_rate (int): The sampling rate of the input signal.\n window_length_sec (int): The desired window length for HR and HRV extraction in seconds.\n peak_detection_method (str): Peak detection method. Valid inputs: 'nk', 'kazemi', and 'heartpy'. The default is 'kazemi'. (optional)\n \n Return:\n hrv_data (pd.Dataframe): A DataFrame containing HRV parameters.\n\n '''\n # Apply bandpass filter if needed\n filtered_sig = bandpass_filter(\n sig=input_sig, fs=sampling_rate, lowcut=0.5, highcut=3)\n\n # Run signal quality assessment\n clean_indices, noisy_indices = sqa(\n sig=filtered_sig, sampling_rate=sampling_rate, filter_signal=False)\n\n # Run PPG reconstruction\n sig_reconstructed, clean_indices, noisy_indices = reconstruction(\n sig=filtered_sig,\n clean_indices=clean_indices,\n noisy_indices=noisy_indices,\n sampling_rate=sampling_rate,\n filter_signal=False)\n\n # Calculate the window length for HR and HRV extraction in terms of samples\n window_length = window_length_sec*sampling_rate\n\n # Scan clean parts of the signal and extract clean segments with the specified window length\n clean_segments = clean_seg_extraction(\n sig=sig_reconstructed,\n noisy_indices=noisy_indices,\n window_length=window_length)\n\n # Display results\n print(\"Analysis Results:\")\n print(\"------------------\")\n # Check if clean segments are found, if not, print a message\n if len(clean_segments) == 0:\n print('No clean ' + str(window_length_sec) + ' seconds segment was detected in the signal!')\n else:\n # Print the number of detected clean segments\n print(str(len(clean_segments)) + ' clean ' + str(window_length_sec) + ' seconds segments was detected in the signal!' )\n\n # Run PPG peak detection\n peaks = peak_detection(clean_segments=clean_segments, sampling_rate=sampling_rate, method=peak_detection_method)\n\n # Perform HR and HRV extraction\n hrv_data = hrv_extraction(\n clean_segments=clean_segments,\n peaks=peaks,\n sampling_rate=sampling_rate,\n window_length=window_length)\n print(\"HR and HRV parameters:\")\n print(hrv_data)\n print('Done!')\n\n return hrv_data"
},
{
"identifier": "get_data",
"path": "utils.py",
"snippet": "def get_data(\n file_name: str,\n local_directory: str = \"data\",\n usecols: List[str] = ['ppg'],\n) -> np.ndarray:\n \"\"\"\n Import data (e.g., PPG signals)\n \n Args:\n file_name (str): Name of the input file\n local_directory (str): Data directory\n usecols (List[str]): The columns to read from the input file\n \n Return:\n sig (np.ndarray): the input signal (e.g., PPG)\n \"\"\"\n try:\n # Construct the file path\n file_path = os.path.join(local_directory, file_name)\n # Load data from the specified CSV file\n input_data = pd.read_csv(\n file_path,\n delim_whitespace=True,\n usecols=usecols)\n # Extract signal\n sig = input_data[usecols[0]].values\n return sig\n except FileNotFoundError:\n print(f\"File not found: {file_name}\")\n except pd.errors.EmptyDataError:\n print(f\"Empty data in file: {file_name}\")\n except Exception as e:\n print(f\"An unexpected error occurred: {e}\")\n # Return None in case of an error\n return None"
}
] | import os
import warnings
from e2e_ppg_pipeline import e2e_hrv_extraction
from utils import get_data | 1,085 | # -*- coding: utf-8 -*-
warnings.filterwarnings("ignore")
# Import a sample data
file_name = "201902020222_Data.csv"
sampling_frequency = 20
input_sig = get_data(file_name=file_name)
# Set the window length for HR and HRV extraction in seconds
window_length_sec = 90
# Extract HRV parameters from the input PPG signal
| # -*- coding: utf-8 -*-
warnings.filterwarnings("ignore")
# Import a sample data
file_name = "201902020222_Data.csv"
sampling_frequency = 20
input_sig = get_data(file_name=file_name)
# Set the window length for HR and HRV extraction in seconds
window_length_sec = 90
# Extract HRV parameters from the input PPG signal | hrv_data = e2e_hrv_extraction( | 0 | 2023-11-07 22:52:14+00:00 | 2k |
Antelcat/ida_copilot | ida_copilot/copilot.py | [
{
"identifier": "core",
"path": "ida_copilot/core.py",
"snippet": "def push_async_call_result(result):\ndef pop_async_call_result(index):\ndef preprocess_prompt(template: str) -> str:\ndef escape_agent_input(query: str, tool_name: str) -> str:\ndef get_screen_func():\ndef get_safe_new_name(new_func_name: str) -> str:\n def __init__(self, cfunc: ida_hexrays.cfuncptr_t):\n def __hash__(self):\n def __str__(self):\n def __refresh(self):\n def ea(self) -> int:\n def comment(self) -> str:\n def comment(self, new_comment: str):\n def name(self) -> str:\n def name(self, new_name: str):\n def definition(self) -> str:\n def definition(self, new_signature: str):\n def pseudocode(self) -> str:\n def __fix_func_types(self):\n def __type_to_var_name(tinfo: idaapi.tinfo_t):\n def __init__(self):\n def visit_expr(self, e):\n def functions(self) -> list[dict]:\n def __clean_up_local_variables(self):\ndef decompile_by_ea(ea: int) -> DecompiledFunction:\ndef decompile_by_name(name: str) -> DecompiledFunction:\nclass DecompiledFunction:\n class __FunctionCallCollector(ida_hexrays.ctree_visitor_t):"
},
{
"identifier": "prompts",
"path": "ida_copilot/prompts.py",
"snippet": ""
}
] | import asyncio
import concurrent.futures
import re
import idaapi
from typing import Any, Optional
from langchain.agents import tool, initialize_agent, AgentType
from langchain.callbacks import FileCallbackHandler
from langchain.callbacks.base import BaseCallbackManager
from langchain.callbacks.manager import CallbackManagerForToolRun
from langchain.chat_models import ChatOpenAI
from langchain.tools import BaseTool
from ida_copilot import core, prompts | 1,570 |
class Copilot:
def run(self, temperature=0.2, model='gpt-3.5-turbo-0613'):
ea = idaapi.get_screen_ea()
func_name = idaapi.get_func_name(ea)
tools = [
self.__GetAddressInfoTool(),
self.__GetDefinitionTool(),
self.__GetPseudocodeTool(),
self.__SetFunctionCommentTool(),
self.__SetFunctionDefinitionTool(),
self.__SetFunctionNameTool(),
self.__GetIsMyWorkDoneTool(ea)
]
agent = initialize_agent(
agent_type=AgentType.OPENAI_MULTI_FUNCTIONS,
llm=ChatOpenAI(temperature=temperature, model=model),
tools=tools,
# callback_manager=BaseCallbackManager(handlers=[
# CopilotPanelCallbackManager()]),
verbose=True,
)
prompt = prompts.default_prompt_zh.format(
binary_description=f'name: {func_name}, address 0x{ea:x}'
# pseudocode=pseudocode
)
        # Run the agent in a new thread
t = concurrent.futures.ThreadPoolExecutor()
loop = asyncio.get_event_loop()
loop.run_in_executor(t, agent.run, prompt)
class __GetAddressInfoTool(BaseTool):
name = 'get_address_info'
description = ('Given a hex address or function name, show its information. '
'**Input Format**: `<hex_address_or_function_name>`. '
'**Input Example1**: `sub_140007080`. '
'**Input Example2**: `0x140007080`.')
@staticmethod
def __get_address_info(name_or_hex_address: str):
try:
if name_or_hex_address.lower().startswith('0x'):
ea = int(name_or_hex_address, 16)
else:
ea = idaapi.get_name_ea(idaapi.BADADDR, name_or_hex_address)
if ea == idaapi.BADADDR:
raise Exception
except Exception:
return f'{name_or_hex_address} is not a valid address or name.'
flags = idaapi.get_flags(ea)
result = ''
            # Check whether the address is inside a function
func = idaapi.get_func(ea)
if func:
result += "Address 0x%X is inside a function.\n" % ea
result += "Function start: 0x%X\n" % func.start_ea
result += "Function end: 0x%X\n" % func.end_ea
func_name = idaapi.get_func_name(func.start_ea)
if func_name:
result += "Function name: %s\n" % func_name
elif idaapi.is_code(flags):
result += "Address 0x%X is code.\n" % ea
elif idaapi.is_data(flags):
result += "Address 0x%X is data.\n" % ea
if idaapi.is_byte(flags):
result += "Data type: Byte\n"
result += "Value: %d\n" % idaapi.get_wide_byte(ea)
elif idaapi.is_word(flags):
result += "Data type: Word\n"
result += "Value: %d\n" % idaapi.get_wide_word(ea)
elif idaapi.is_dword(flags):
result += "Data type: Dword\n"
result += "Value: %d\n" % idaapi.get_wide_dword(ea)
elif idaapi.is_qword(flags):
result += "Data type: Qword\n"
result += "Value: %d\n" % idaapi.get_qword(ea)
elif idaapi.is_float(flags):
result += "Data type: Float\n"
# result += "Value: %f\n" % idaapi.get_wide_float(address)
elif idaapi.is_double(flags):
result += "Data type: Double\n"
# result += "Value: %f\n" % idaapi.get_wide_double(address)
elif idaapi.is_strlit(flags):
result += "Data type: String\n"
result += "Value: %s\n" % idaapi.get_strlit_contents(ea)
elif idaapi.is_struct(flags):
result += "Data type: Struct\n"
                # ... other data type checks
elif idaapi.is_unknown(flags):
result += "Address 0x%X is unknown.\n" % ea
            # Name and comment
if idaapi.has_name(flags):
result += "Name: %s\n" % idaapi.get_name(ea)
elif idaapi.has_dummy_name(flags):
result += "Dummy name: %s\n" % idaapi.get_name(ea)
if idaapi.has_cmt(flags):
result += "Comment: %s\n" % idaapi.get_cmt(ea, 0)
if result == '':
result = 'Address not found.'
elif result[-1] == '\n':
result = result[:-1]
return result
def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None) -> Any:
|
class Copilot:
def run(self, temperature=0.2, model='gpt-3.5-turbo-0613'):
ea = idaapi.get_screen_ea()
func_name = idaapi.get_func_name(ea)
tools = [
self.__GetAddressInfoTool(),
self.__GetDefinitionTool(),
self.__GetPseudocodeTool(),
self.__SetFunctionCommentTool(),
self.__SetFunctionDefinitionTool(),
self.__SetFunctionNameTool(),
self.__GetIsMyWorkDoneTool(ea)
]
agent = initialize_agent(
agent_type=AgentType.OPENAI_MULTI_FUNCTIONS,
llm=ChatOpenAI(temperature=temperature, model=model),
tools=tools,
# callback_manager=BaseCallbackManager(handlers=[
# CopilotPanelCallbackManager()]),
verbose=True,
)
prompt = prompts.default_prompt_zh.format(
binary_description=f'name: {func_name}, address 0x{ea:x}'
# pseudocode=pseudocode
)
        # Run the agent in a new thread
t = concurrent.futures.ThreadPoolExecutor()
loop = asyncio.get_event_loop()
loop.run_in_executor(t, agent.run, prompt)
class __GetAddressInfoTool(BaseTool):
name = 'get_address_info'
description = ('Given a hex address or function name, show its information. '
'**Input Format**: `<hex_address_or_function_name>`. '
'**Input Example1**: `sub_140007080`. '
'**Input Example2**: `0x140007080`.')
@staticmethod
def __get_address_info(name_or_hex_address: str):
try:
if name_or_hex_address.lower().startswith('0x'):
ea = int(name_or_hex_address, 16)
else:
ea = idaapi.get_name_ea(idaapi.BADADDR, name_or_hex_address)
if ea == idaapi.BADADDR:
raise Exception
except Exception:
return f'{name_or_hex_address} is not a valid address or name.'
flags = idaapi.get_flags(ea)
result = ''
            # Check whether the address is inside a function
func = idaapi.get_func(ea)
if func:
result += "Address 0x%X is inside a function.\n" % ea
result += "Function start: 0x%X\n" % func.start_ea
result += "Function end: 0x%X\n" % func.end_ea
func_name = idaapi.get_func_name(func.start_ea)
if func_name:
result += "Function name: %s\n" % func_name
elif idaapi.is_code(flags):
result += "Address 0x%X is code.\n" % ea
elif idaapi.is_data(flags):
result += "Address 0x%X is data.\n" % ea
if idaapi.is_byte(flags):
result += "Data type: Byte\n"
result += "Value: %d\n" % idaapi.get_wide_byte(ea)
elif idaapi.is_word(flags):
result += "Data type: Word\n"
result += "Value: %d\n" % idaapi.get_wide_word(ea)
elif idaapi.is_dword(flags):
result += "Data type: Dword\n"
result += "Value: %d\n" % idaapi.get_wide_dword(ea)
elif idaapi.is_qword(flags):
result += "Data type: Qword\n"
result += "Value: %d\n" % idaapi.get_qword(ea)
elif idaapi.is_float(flags):
result += "Data type: Float\n"
# result += "Value: %f\n" % idaapi.get_wide_float(address)
elif idaapi.is_double(flags):
result += "Data type: Double\n"
# result += "Value: %f\n" % idaapi.get_wide_double(address)
elif idaapi.is_strlit(flags):
result += "Data type: String\n"
result += "Value: %s\n" % idaapi.get_strlit_contents(ea)
elif idaapi.is_struct(flags):
result += "Data type: Struct\n"
                # ... other data type checks
elif idaapi.is_unknown(flags):
result += "Address 0x%X is unknown.\n" % ea
            # Name and comment
if idaapi.has_name(flags):
result += "Name: %s\n" % idaapi.get_name(ea)
elif idaapi.has_dummy_name(flags):
result += "Dummy name: %s\n" % idaapi.get_name(ea)
if idaapi.has_cmt(flags):
result += "Comment: %s\n" % idaapi.get_cmt(ea, 0)
if result == '':
result = 'Address not found.'
elif result[-1] == '\n':
result = result[:-1]
return result
def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None) -> Any: | query = core.escape_agent_input( | 0 | 2023-11-02 14:23:11+00:00 | 2k |
WSH032/fastapi-proxy-lib | tests/test_http.py | [
{
"identifier": "AppFactoryFixture",
"path": "tests/conftest.py",
"snippet": "_P = ParamSpec(\"_P\")\nclass LifeAppDataclass4Test(AppDataclass4Test):\nclass UvicornServerFixture(Protocol): # noqa: D101\n def __call__( # noqa: D102\n self, config: uvicorn.Config, contx_exit_timeout: Union[int, float, None] = None\n ) -> Coroutine[None, None, UvicornServer]:\ndef anyio_backend() -> Literal[\"asyncio\"]:\nasync def lifespan_manager() -> AsyncIterator[LifespanManagerFixture]:\n async def _lifespan_manager(app: ASGIApp) -> ASGIApp:\nasync def echo_http_test_model(\n lifespan_manager: LifespanManagerFixture,\n) -> LifeAppDataclass4Test:\nasync def echo_ws_test_model(\n lifespan_manager: LifespanManagerFixture,\n) -> LifeAppDataclass4Test:\ndef _app_fct_life_wapper( # noqa: D417\n app_fct: Callable[_P, ASGIApp], lifespan_manager_fixture: LifespanManagerFixture\n) -> Callable[_P, Coroutine[None, None, ASGIApp]]:\n async def wappered_app_fct(*args: _P.args, **kwargs: _P.kwargs) -> ASGIApp:\ndef forward_http_app_fct(\n lifespan_manager: LifespanManagerFixture,\n): # -> AppFactoryFixture\ndef reverse_http_app_fct(\n lifespan_manager: LifespanManagerFixture,\n): # -> AppFactoryFixture\ndef reverse_ws_app_fct(\n lifespan_manager: LifespanManagerFixture,\n): # -> AppFactoryFixture\nasync def uvicorn_server_fixture() -> AsyncIterator[UvicornServerFixture]:\n async def uvicorn_server_fct(\n config: uvicorn.Config, contx_exit_timeout: Union[int, float, None] = None\n ) -> UvicornServer:"
},
{
"identifier": "DEFAULT_URL",
"path": "tests/tool.py",
"snippet": "DEFAULT_URL = \"http://www.example.com/\""
},
{
"identifier": "PRIVATE_IP_URL",
"path": "tests/tool.py",
"snippet": "PRIVATE_IP_URL = \"http://127.0.0.1/\""
},
{
"identifier": "WRONG_PROTO_URL",
"path": "tests/tool.py",
"snippet": "WRONG_PROTO_URL = \"wrong://wrong.fastapi_proxy_test.wrong/\""
},
{
"identifier": "AbstractTestProxy",
"path": "tests/tool.py",
"snippet": "class AbstractTestProxy(abc.ABC):\n \"\"\"Abstract class for testing proxy.\"\"\"\n\n @abc.abstractmethod\n def tool_4_test_fixture(self) -> Tool4TestFixture:\n \"\"\"Get the tool for test server.\"\"\""
},
{
"identifier": "Tool4TestFixture",
"path": "tests/tool.py",
"snippet": "class Tool4TestFixture:\n \"\"\"Tool for test server.\n\n Attributes:\n client_for_conn_to_target_server: The client for connecting to target server.\n client_for_conn_to_proxy_server: The client for connecting to proxy server.\n get_request: Get the latest original http/websocket request from the client.\n target_server_base_url: The base url of target server.\n proxy_server_base_url: The base url of proxy server.\n \"\"\"\n\n client_for_conn_to_target_server: httpx.AsyncClient\n client_for_conn_to_proxy_server: httpx.AsyncClient\n get_request: Callable[[], ServerRecvRequestsTypes]\n target_server_base_url: str\n proxy_server_base_url: str"
},
{
"identifier": "check_if_err_resp_is_from_px_serv",
"path": "tests/tool.py",
"snippet": "def check_if_err_resp_is_from_px_serv(resp: httpx.Response) -> None:\n \"\"\"Check if the response about error info is actively sent by proxy server.\n\n If not, will raise AssertionError\n \"\"\"\n assert resp.is_error, f\"Not a error response: {resp}\"\n try:\n resp_body = resp.json()\n except Exception:\n pytest.fail(f\"Not from proxy server: {resp}\")\n # 这两条消息是代理服务器主动返回的错误信息的API的一部分\n assert \"err_type\" in resp_body[\"detail\"]\n assert \"msg\" in resp_body[\"detail\"]"
}
] | import httpx
import pytest
from fastapi_proxy_lib.core.tool import default_proxy_filter
from typing_extensions import override
from .conftest import AppFactoryFixture, LifeAppDataclass4Test
from .tool import (
DEFAULT_URL,
PRIVATE_IP_URL,
WRONG_PROTO_URL,
AbstractTestProxy,
Tool4TestFixture,
check_if_err_resp_is_from_px_serv,
) | 1,070 | # noqa: D100
DEFAULT_TARGET_SERVER_BASE_URL = "http://www.echo.com/"
DEFAULT_PROXY_SERVER_BASE_URL = "http://www.proxy.com/"
class TestReverseHttpProxy(AbstractTestProxy):
"""For testing reverse http proxy."""
@override
@pytest.fixture()
async def tool_4_test_fixture( # pyright: ignore[reportIncompatibleMethodOverride]
self,
echo_http_test_model: LifeAppDataclass4Test,
| # noqa: D100
DEFAULT_TARGET_SERVER_BASE_URL = "http://www.echo.com/"
DEFAULT_PROXY_SERVER_BASE_URL = "http://www.proxy.com/"
class TestReverseHttpProxy(AbstractTestProxy):
"""For testing reverse http proxy."""
@override
@pytest.fixture()
async def tool_4_test_fixture( # pyright: ignore[reportIncompatibleMethodOverride]
self,
echo_http_test_model: LifeAppDataclass4Test, | reverse_http_app_fct: AppFactoryFixture, | 0 | 2023-11-08 04:38:36+00:00 | 2k |
simorxb/PID-Controller-Python | main.py | [
{
"identifier": "Car",
"path": "lib.py",
"snippet": "class Car:\n\n \"\"\" This class represents a car moving in 1D, subject to a throttle force F, with mass m, \n aerodynamic drag coefficient b, F_max/F_min forces, and time step T. \n \"\"\"\n\n def __init__(self, m, b, F_max_0, F_max_max, v_max, T):\n self.m = m # Mass of the car\n self.b = b # Aerodynamic drag coefficient\n self.F_max_0 = F_max_0 # Max force applied to the car by the powertrain at 0 speed\n self.F_max_max = F_max_max # Max force applied to the car by the powertrain at max speed\n self.v_max = v_max # Max speed (m/s)\n self.T = T # Time step\n self.v = 0 # Speed of the car\n\n def Step(self, F):\n\n \"\"\" Update the speed of the car based on the applied force F.\n \"\"\"\n # Max force applied by the powertrain depends on the speed\n v_to_F_max_x_axis = [0, self.v_max]\n F_max_y_axis = [self.F_max_0, self.F_max_max]\n\n if self.v < v_to_F_max_x_axis[0]:\n F_max = F_max_y_axis[0]\n elif self.v > v_to_F_max_x_axis[-1]:\n F_max = F_max_y_axis[-1]\n else:\n F_max = np.interp(self.v, v_to_F_max_x_axis, F_max_y_axis)\n\n # Saturate input force\n if F > F_max:\n F_sat = F_max\n\n elif F < 0:\n F_sat = 0\n else:\n F_sat = F\n\n # Calculate the derivative dv/dt using the input force and the car's speed and properties\n dv_dt = (F_sat - self.b*self.v*self.v)/self.m\n\n # Update the speed by integrating the derivative using the time step T\n self.v += dv_dt*self.T"
},
{
"identifier": "PID",
"path": "lib.py",
"snippet": "class PID:\n\n \"\"\" This class implements a PID controller.\n \"\"\"\n\n def __init__(self, Kp, Ki, Kd, Kaw, T_C, T, max, min, max_rate):\n self.Kp = Kp # Proportional gain\n self.Ki = Ki # Integral gain\n self.Kd = Kd # Derivative gain\n self.Kaw = Kaw # Anti-windup gain\n self.T_C = T_C # Time constant for derivative filtering\n self.T = T # Time step\n self.max = max # Maximum command\n self.min = min # Minimum command\n self.max_rate = max_rate # Maximum rate of change of the command\n self.integral = 0 # Integral term\n self.err_prev = 0 # Previous error\n self.deriv_prev = 0 # Previous derivative\n self.command_sat_prev = 0 # Previous saturated command\n self.command_prev = 0 # Previous command\n self.command_sat = 0 # Current saturated command\n self.command = 0 # Current command\n\n def Step(self, measurement, setpoint):\n \"\"\" Execute a step of the PID controller.\n\n Inputs:\n measurement: current measurement of the process variable\n setpoint: desired value of the process variable\n \"\"\"\n\n # Calculate error\n err = setpoint - measurement\n\n # Update integral term with anti-windup\n self.integral += self.Ki*err*self.T + self.Kaw*(self.command_sat_prev - self.command_prev)*self.T\n \n # Calculate filtered derivative\n deriv_filt = (err - self.err_prev + self.T_C*self.deriv_prev)/(self.T + self.T_C)\n self.err_prev = err\n self.deriv_prev = deriv_filt\n\n # Calculate command using PID equation\n self.command = self.Kp*err + self.integral + self.Kd*deriv_filt\n\n # Store previous command\n self.command_prev = self.command\n\n # Saturate command\n if self.command > self.max:\n self.command_sat = self.max\n elif self.command < self.min:\n self.command_sat = self.min\n else:\n self.command_sat = self.command\n\n # Apply rate limiter\n if self.command_sat > self.command_sat_prev + self.max_rate*self.T:\n self.command_sat = self.command_sat_prev + self.max_rate*self.T\n elif self.command_sat < self.command_sat_prev - self.max_rate*self.T:\n self.command_sat = self.command_sat_prev - self.max_rate*self.T\n\n # Store previous saturated command\n self.command_sat_prev = self.command_sat"
}
] | import numpy as np
import matplotlib.pyplot as plt
from lib import Car, PID | 1,348 |
def main():
# -------- Configuration --------
# Simulation parameters
time_step = 0.1
end_time = 25
length = round(end_time/time_step)
t = np.zeros(length)
stp = np.zeros(length)
v = np.zeros(length)
command = np.zeros(length)
# Car parameters
m = 2140
b = 0.33
F_max_0 = 22000
F_max_max = 1710
v_max = 72
# PID parameters
Kp = 800.0
Ki = 70.0
Kaw = 1.0
Kd = 20.0
T_C = 1.0
# Initialize PID controller
|
def main():
# -------- Configuration --------
# Simulation parameters
time_step = 0.1
end_time = 25
length = round(end_time/time_step)
t = np.zeros(length)
stp = np.zeros(length)
v = np.zeros(length)
command = np.zeros(length)
# Car parameters
m = 2140
b = 0.33
F_max_0 = 22000
F_max_max = 1710
v_max = 72
# PID parameters
Kp = 800.0
Ki = 70.0
Kaw = 1.0
Kd = 20.0
T_C = 1.0
# Initialize PID controller | pid = PID(Kp, Ki, Kd, Kaw, T_C, time_step, F_max_0, 0, 30000) | 1 | 2023-11-03 19:38:34+00:00 | 2k |
aws-samples/amazon-location-geospatial-agent | geospatial_agent/agent/geospatial/planner/planner.py | [
{
"identifier": "_graph_generation_instructions",
"path": "geospatial_agent/agent/geospatial/planner/prompts.py",
"snippet": ""
},
{
"identifier": "GIS_AGENT_ROLE_INTRO",
"path": "geospatial_agent/shared/prompts.py",
"snippet": "GIS_AGENT_ROLE_INTRO = r'You are a geospatial data scientist and an expert python developer.'"
},
{
"identifier": "HUMAN_STOP_SEQUENCE",
"path": "geospatial_agent/shared/prompts.py",
"snippet": "HUMAN_STOP_SEQUENCE = '\\n\\nHuman'"
},
{
"identifier": "extract_code",
"path": "geospatial_agent/shared/utils.py",
"snippet": "def extract_code(response):\n \"\"\"Extract python code from LLM response.\"\"\"\n\n python_code_match = re.search(r\"```(?:python)?(.*?)```\", response, re.DOTALL)\n if python_code_match:\n python_code = python_code_match.group(1).strip()\n return python_code\n else:\n raise ExtractionException(\"Failed to extract python code from response\")"
}
] | import time
from langchain import PromptTemplate, LLMChain
from langchain.llms.base import LLM
from geospatial_agent.agent.geospatial.planner.prompts import _graph_generation_instructions, \
_graph_reply_example, _task_name_generation_prompt, _graph_requirement_list, \
_planning_graph_task_prompt_template
from geospatial_agent.shared.prompts import GIS_AGENT_ROLE_INTRO, HUMAN_STOP_SEQUENCE
from geospatial_agent.shared.utils import extract_code | 730 |
class PlannerException(Exception):
def __init__(self, message: str):
self.message = message
super().__init__(self.message)
def gen_task_name(llm: LLM, task: str) -> str:
"""Returns a task name for creating unix folders from task description using LLM"""
task_name_gen_prompt_template: PromptTemplate = PromptTemplate.from_template(_task_name_generation_prompt)
task_name_gen_prompt = task_name_gen_prompt_template.format(human_role="Human",
assistant_role="Assistant",
task_definition=task)
task_name = llm.predict(text=task_name_gen_prompt, stop=[HUMAN_STOP_SEQUENCE]).strip()
task_name = f'{int(time.time())}_{task_name}'
return task_name
def gen_plan_graph(llm: LLM, task_definition: str, data_locations_instructions: str) -> str:
"""Returns a plan graph in the form of python code from a task definition."""
try:
graph_plan_code = _gen_plan_graph_code(llm, task_definition, data_locations_instructions)
return graph_plan_code
except Exception as e:
raise PlannerException(f"Failed to generate graph plan code for task") from e
def _gen_plan_graph_code(llm: LLM, task_definition: str, data_locations_instructions: str):
# Generating a graph plan python code using the LLM.
graph_requirements = _get_graph_requirements()
graph_gen_prompt_template: PromptTemplate = PromptTemplate.from_template(_planning_graph_task_prompt_template)
chain = LLMChain(llm=llm, prompt=graph_gen_prompt_template)
graph_plan_response = chain.run(human_role="Human",
planner_role_intro=GIS_AGENT_ROLE_INTRO,
graph_generation_instructions=_graph_generation_instructions,
task_definition=task_definition.strip("\n").strip(),
graph_requirements=graph_requirements,
graph_reply_example=_graph_reply_example,
data_locations_instructions=data_locations_instructions,
assistant_role="Assistant",
stop=[HUMAN_STOP_SEQUENCE])
# Use the LLM to generate a plan graph code
|
class PlannerException(Exception):
def __init__(self, message: str):
self.message = message
super().__init__(self.message)
def gen_task_name(llm: LLM, task: str) -> str:
"""Returns a task name for creating unix folders from task description using LLM"""
task_name_gen_prompt_template: PromptTemplate = PromptTemplate.from_template(_task_name_generation_prompt)
task_name_gen_prompt = task_name_gen_prompt_template.format(human_role="Human",
assistant_role="Assistant",
task_definition=task)
task_name = llm.predict(text=task_name_gen_prompt, stop=[HUMAN_STOP_SEQUENCE]).strip()
task_name = f'{int(time.time())}_{task_name}'
return task_name
def gen_plan_graph(llm: LLM, task_definition: str, data_locations_instructions: str) -> str:
"""Returns a plan graph in the form of python code from a task definition."""
try:
graph_plan_code = _gen_plan_graph_code(llm, task_definition, data_locations_instructions)
return graph_plan_code
except Exception as e:
raise PlannerException(f"Failed to generate graph plan code for task") from e
def _gen_plan_graph_code(llm: LLM, task_definition: str, data_locations_instructions: str):
# Generating a graph plan python code using the LLM.
graph_requirements = _get_graph_requirements()
graph_gen_prompt_template: PromptTemplate = PromptTemplate.from_template(_planning_graph_task_prompt_template)
chain = LLMChain(llm=llm, prompt=graph_gen_prompt_template)
graph_plan_response = chain.run(human_role="Human",
planner_role_intro=GIS_AGENT_ROLE_INTRO,
graph_generation_instructions=_graph_generation_instructions,
task_definition=task_definition.strip("\n").strip(),
graph_requirements=graph_requirements,
graph_reply_example=_graph_reply_example,
data_locations_instructions=data_locations_instructions,
assistant_role="Assistant",
stop=[HUMAN_STOP_SEQUENCE])
# Use the LLM to generate a plan graph code | graph_plan_code = extract_code(graph_plan_response) | 3 | 2023-11-09 18:29:25+00:00 | 2k |
Hojagulyyev/rp2 | apps/diaries/interactors.py | [
{
"identifier": "COMMIT_MIN_LENGTH",
"path": "rp2/business_logic.py",
"snippet": "COMMIT_MIN_LENGTH = 10"
},
{
"identifier": "Diary",
"path": "apps/diaries/models.py",
"snippet": "class Diary(models.Model):\n\n account = models.ForeignKey(Account, on_delete=models.CASCADE, related_name=\"diaries\")\n comments_last_read_by = models.ForeignKey(Account, on_delete=models.CASCADE, blank=True, null=True)\n created_date = models.DateField(default=timezone.now)\n\n class Meta:\n verbose_name_plural = \"Diaries\"\n\n def __str__(self):\n return f\"{self.account} - {self.created_date}\"\n\n def get_earned_xp(self):\n xp_amount_list = self.xps.values_list(\"amount\", flat=True)\n return sum(xp_amount_list)"
},
{
"identifier": "DiaryCommit",
"path": "apps/diaries/models.py",
"snippet": "class DiaryCommit(models.Model):\n\n diary = models.ForeignKey(Diary, on_delete=models.CASCADE, related_name=\"commits\")\n message = models.TextField()\n created_datetime = models.DateTimeField(default=timezone.now)\n\n class Meta:\n verbose_name_plural = \"Diary Commits\"\n\n def __str__(self):\n return self.message"
},
{
"identifier": "DiaryComment",
"path": "apps/diaries/models.py",
"snippet": "class DiaryComment(models.Model):\n\n diary = models.ForeignKey(Diary, on_delete=models.CASCADE, related_name=\"comments\")\n author = models.ForeignKey(Account, on_delete=models.CASCADE)\n body = models.TextField()\n created_datetime = models.DateTimeField(default=timezone.now)\n\n class Meta:\n verbose_name_plural = \"Diary Comments\"\n\n def __str__(self):\n return self.body"
},
{
"identifier": "diary_commit_created",
"path": "apps/diaries/signals.py",
"snippet": ""
}
] | import datetime
from django.contrib.auth.decorators import login_required
from django.shortcuts import redirect
from django.contrib import messages
from django.urls import reverse
from rp2.business_logic import COMMIT_MIN_LENGTH
from .models import Diary, DiaryCommit, DiaryComment
from .signals import diary_commit_created | 728 |
@login_required
def create_commit(request, diary_id: int):
# ===== DTO
message = request.POST.get("message", "").strip()
diary = Diary.objects.get(id=diary_id)
# ===== VALIDATION
if diary.account != request.user.account:
messages.error(request, f"others' diaries are readonly")
return redirect(
f"{reverse('diaries:detail_view', kwargs={'id': diary.id})}"
)
if len(message) < COMMIT_MIN_LENGTH:
messages.error(request, f"message length is less than {COMMIT_MIN_LENGTH} character")
return redirect(
f"{reverse('diaries:detail_view', kwargs={'id': diary.id})}"
f"?message={message}"
)
if (
DiaryCommit.objects
.filter(
diary=diary,
message=message,
created_datetime__date=datetime.date.today()
)
.exists()
):
messages.error(request, message="this message already exists for today")
return redirect(
f"{reverse(viewname='diaries:detail_view', kwargs={'id': diary.id})}"
f"?message={message}"
)
# ===== PROCESS
diary_commit = DiaryCommit()
diary_commit.diary = diary
diary_commit.message = message
diary_commit.save()
|
@login_required
def create_commit(request, diary_id: int):
# ===== DTO
message = request.POST.get("message", "").strip()
diary = Diary.objects.get(id=diary_id)
# ===== VALIDATION
if diary.account != request.user.account:
messages.error(request, f"others' diaries are readonly")
return redirect(
f"{reverse('diaries:detail_view', kwargs={'id': diary.id})}"
)
if len(message) < COMMIT_MIN_LENGTH:
messages.error(request, f"message length is less than {COMMIT_MIN_LENGTH} character")
return redirect(
f"{reverse('diaries:detail_view', kwargs={'id': diary.id})}"
f"?message={message}"
)
if (
DiaryCommit.objects
.filter(
diary=diary,
message=message,
created_datetime__date=datetime.date.today()
)
.exists()
):
messages.error(request, message="this message already exists for today")
return redirect(
f"{reverse(viewname='diaries:detail_view', kwargs={'id': diary.id})}"
f"?message={message}"
)
# ===== PROCESS
diary_commit = DiaryCommit()
diary_commit.diary = diary
diary_commit.message = message
diary_commit.save()
| diary_commit_created.send(sender=DiaryCommit, instance=diary_commit) | 4 | 2023-11-05 07:57:17+00:00 | 2k |
soobin419/DWT | basicsr/utils/logger.py | [
{
"identifier": "get_dist_info",
"path": "basicsr/utils/dist_util.py",
"snippet": "def get_dist_info():\n if dist.is_available():\n initialized = dist.is_initialized()\n else:\n initialized = False\n if initialized:\n rank = dist.get_rank()\n world_size = dist.get_world_size()\n else:\n rank = 0\n world_size = 1\n return rank, world_size"
},
{
"identifier": "master_only",
"path": "basicsr/utils/dist_util.py",
"snippet": "def master_only(func):\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n rank, _ = get_dist_info()\n if rank == 0:\n return func(*args, **kwargs)\n\n return wrapper"
}
] | import datetime
import logging
import time
import wandb
import torch
import torchvision
from .dist_util import get_dist_info, master_only
from torch.utils.tensorboard import SummaryWriter
from version import __version__ | 1,528 | train (dict): Contains 'total_iter' (int) for total iters.
use_tb_logger (bool): Use tensorboard logger.
start_iter (int): Start iter. Default: 1.
tb_logger (obj:`tb_logger`): Tensorboard logger. Default: None.
"""
def __init__(self, opt, start_iter=1, tb_logger=None):
self.exp_name = opt['name']
self.interval = opt['logger']['print_freq']
self.start_iter = start_iter
self.max_iters = opt['train']['total_iter']
self.use_tb_logger = opt['logger']['use_tb_logger']
self.tb_logger = tb_logger
self.start_time = time.time()
self.logger = get_root_logger()
def reset_start_time(self):
self.start_time = time.time()
@master_only
def __call__(self, log_vars):
"""Format logging message.
Args:
log_vars (dict): It contains the following keys:
epoch (int): Epoch number.
iter (int): Current iter.
lrs (list): List for learning rates.
time (float): Iter time.
data_time (float): Data time for each iter.
"""
# epoch, iter, learning rates
epoch = log_vars.pop('epoch')
current_iter = log_vars.pop('iter')
lrs = log_vars.pop('lrs')
message = (f'[{self.exp_name[:5]}..][epoch:{epoch:3d}, iter:{current_iter:8,d}, lr:(')
for v in lrs:
message += f'{v:.3e},'
message += ')] '
# time and estimated time
if 'time' in log_vars.keys():
iter_time = log_vars.pop('time')
data_time = log_vars.pop('data_time')
total_time = time.time() - self.start_time
time_sec_avg = total_time / (current_iter - self.start_iter + 1)
eta_sec = time_sec_avg * (self.max_iters - current_iter - 1)
eta_str = str(datetime.timedelta(seconds=int(eta_sec)))
message += f'[eta: {eta_str}, '
message += f'time (data): {iter_time:.3f} ({data_time:.3f})] '
# other items, especially losses
for k, v in log_vars.items():
message += f'{k}: {v:.4e} '
# tensorboard logger
if self.use_tb_logger and 'debug' not in self.exp_name:
if k.startswith('l_'):
self.tb_logger.add_scalar(f'losses/{k}', v, current_iter)
else:
self.tb_logger.add_scalar(k, v, current_iter)
self.logger.info(message)
@master_only
def init_tb_logger(log_dir):
tb_logger = SummaryWriter(log_dir=log_dir)
return tb_logger
@master_only
def init_wandb_logger(opt):
"""We now only use wandb to sync tensorboard log."""
logger = get_root_logger()
project = opt['logger']['wandb']['project']
resume_id = opt['logger']['wandb'].get('resume_id')
if resume_id:
wandb_id = resume_id
resume = 'allow'
logger.warning(f'Resume wandb logger with id={wandb_id}.')
else:
wandb_id = wandb.util.generate_id()
resume = 'never'
wandb.init(id=wandb_id, resume=resume, name=opt['name'], config=opt, project=project, sync_tensorboard=True)
logger.info(f'Use wandb logger with id={wandb_id}; project={project}.')
def get_root_logger(logger_name='basicsr', log_level=logging.INFO, log_file=None):
"""Get the root logger.
The logger will be initialized if it has not been initialized. By default a
StreamHandler will be added. If `log_file` is specified, a FileHandler will
also be added.
Args:
logger_name (str): root logger name. Default: 'basicsr'.
log_file (str | None): The log filename. If specified, a FileHandler
will be added to the root logger.
log_level (int): The root logger level. Note that only the process of
rank 0 is affected, while other processes will set the level to
"Error" and be silent most of the time.
Returns:
logging.Logger: The root logger.
"""
logger = logging.getLogger(logger_name)
# if the logger has been initialized, just return it
if logger_name in initialized_logger:
return logger
format_str = '%(asctime)s %(levelname)s: %(message)s'
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(logging.Formatter(format_str))
logger.addHandler(stream_handler)
logger.propagate = False
|
initialized_logger = {}
class AvgTimer():
def __init__(self, window=200):
self.window = window # average window
self.current_time = 0
self.total_time = 0
self.count = 0
self.avg_time = 0
self.start()
def start(self):
self.start_time = self.tic = time.time()
def record(self):
self.count += 1
self.toc = time.time()
self.current_time = self.toc - self.tic
self.total_time += self.current_time
# calculate average time
self.avg_time = self.total_time / self.count
# reset
if self.count > self.window:
self.count = 0
self.total_time = 0
self.tic = time.time()
def get_current_time(self):
return self.current_time
def get_avg_time(self):
return self.avg_time
class MessageLogger():
"""Message logger for printing.
Args:
opt (dict): Config. It contains the following keys:
name (str): Exp name.
logger (dict): Contains 'print_freq' (str) for logger interval.
train (dict): Contains 'total_iter' (int) for total iters.
use_tb_logger (bool): Use tensorboard logger.
start_iter (int): Start iter. Default: 1.
tb_logger (obj:`tb_logger`): Tensorboard logger. Default: None.
"""
def __init__(self, opt, start_iter=1, tb_logger=None):
self.exp_name = opt['name']
self.interval = opt['logger']['print_freq']
self.start_iter = start_iter
self.max_iters = opt['train']['total_iter']
self.use_tb_logger = opt['logger']['use_tb_logger']
self.tb_logger = tb_logger
self.start_time = time.time()
self.logger = get_root_logger()
def reset_start_time(self):
self.start_time = time.time()
@master_only
def __call__(self, log_vars):
"""Format logging message.
Args:
log_vars (dict): It contains the following keys:
epoch (int): Epoch number.
iter (int): Current iter.
lrs (list): List for learning rates.
time (float): Iter time.
data_time (float): Data time for each iter.
"""
# epoch, iter, learning rates
epoch = log_vars.pop('epoch')
current_iter = log_vars.pop('iter')
lrs = log_vars.pop('lrs')
message = (f'[{self.exp_name[:5]}..][epoch:{epoch:3d}, iter:{current_iter:8,d}, lr:(')
for v in lrs:
message += f'{v:.3e},'
message += ')] '
# time and estimated time
if 'time' in log_vars.keys():
iter_time = log_vars.pop('time')
data_time = log_vars.pop('data_time')
total_time = time.time() - self.start_time
time_sec_avg = total_time / (current_iter - self.start_iter + 1)
eta_sec = time_sec_avg * (self.max_iters - current_iter - 1)
eta_str = str(datetime.timedelta(seconds=int(eta_sec)))
message += f'[eta: {eta_str}, '
message += f'time (data): {iter_time:.3f} ({data_time:.3f})] '
# other items, especially losses
for k, v in log_vars.items():
message += f'{k}: {v:.4e} '
# tensorboard logger
if self.use_tb_logger and 'debug' not in self.exp_name:
if k.startswith('l_'):
self.tb_logger.add_scalar(f'losses/{k}', v, current_iter)
else:
self.tb_logger.add_scalar(k, v, current_iter)
self.logger.info(message)
@master_only
def init_tb_logger(log_dir):
tb_logger = SummaryWriter(log_dir=log_dir)
return tb_logger
@master_only
def init_wandb_logger(opt):
"""We now only use wandb to sync tensorboard log."""
logger = get_root_logger()
project = opt['logger']['wandb']['project']
resume_id = opt['logger']['wandb'].get('resume_id')
if resume_id:
wandb_id = resume_id
resume = 'allow'
logger.warning(f'Resume wandb logger with id={wandb_id}.')
else:
wandb_id = wandb.util.generate_id()
resume = 'never'
wandb.init(id=wandb_id, resume=resume, name=opt['name'], config=opt, project=project, sync_tensorboard=True)
logger.info(f'Use wandb logger with id={wandb_id}; project={project}.')
def get_root_logger(logger_name='basicsr', log_level=logging.INFO, log_file=None):
"""Get the root logger.
The logger will be initialized if it has not been initialized. By default a
StreamHandler will be added. If `log_file` is specified, a FileHandler will
also be added.
Args:
logger_name (str): root logger name. Default: 'basicsr'.
log_file (str | None): The log filename. If specified, a FileHandler
will be added to the root logger.
log_level (int): The root logger level. Note that only the process of
rank 0 is affected, while other processes will set the level to
"Error" and be silent most of the time.
Returns:
logging.Logger: The root logger.
"""
logger = logging.getLogger(logger_name)
# if the logger has been initialized, just return it
if logger_name in initialized_logger:
return logger
format_str = '%(asctime)s %(levelname)s: %(message)s'
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(logging.Formatter(format_str))
logger.addHandler(stream_handler)
logger.propagate = False | rank, _ = get_dist_info() | 0 | 2023-11-09 08:08:09+00:00 | 2k |
Rishit-dagli/Astroformer | pytorch-image-models/timm/layers/create_norm.py | [
{
"identifier": "GroupNorm",
"path": "pytorch-image-models/timm/layers/norm.py",
"snippet": "class GroupNorm(nn.GroupNorm):\n def __init__(self, num_channels, num_groups=32, eps=1e-5, affine=True):\n # NOTE num_channels is swapped to first arg for consistency in swapping norm layers with BN\n super().__init__(num_groups, num_channels, eps=eps, affine=affine)\n self.fast_norm = is_fast_norm() # can't script unless we have these flags here (no globals)\n\n def forward(self, x):\n if self.fast_norm:\n return fast_group_norm(x, self.num_groups, self.weight, self.bias, self.eps)\n else:\n return F.group_norm(x, self.num_groups, self.weight, self.bias, self.eps)"
},
{
"identifier": "GroupNorm1",
"path": "pytorch-image-models/timm/layers/norm.py",
"snippet": "class GroupNorm1(nn.GroupNorm):\n \"\"\" Group Normalization with 1 group.\n Input: tensor in shape [B, C, *]\n \"\"\"\n\n def __init__(self, num_channels, **kwargs):\n super().__init__(1, num_channels, **kwargs)\n self.fast_norm = is_fast_norm() # can't script unless we have these flags here (no globals)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n if self.fast_norm:\n return fast_group_norm(x, self.num_groups, self.weight, self.bias, self.eps)\n else:\n return F.group_norm(x, self.num_groups, self.weight, self.bias, self.eps)"
},
{
"identifier": "LayerNorm",
"path": "pytorch-image-models/timm/layers/norm.py",
"snippet": "class LayerNorm(nn.LayerNorm):\n \"\"\" LayerNorm w/ fast norm option\n \"\"\"\n def __init__(self, num_channels, eps=1e-6, affine=True):\n super().__init__(num_channels, eps=eps, elementwise_affine=affine)\n self._fast_norm = is_fast_norm() # can't script unless we have these flags here (no globals)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n if self._fast_norm:\n x = fast_layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)\n else:\n x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)\n return x"
},
{
"identifier": "LayerNorm2d",
"path": "pytorch-image-models/timm/layers/norm.py",
"snippet": "class LayerNorm2d(nn.LayerNorm):\n \"\"\" LayerNorm for channels of '2D' spatial NCHW tensors \"\"\"\n def __init__(self, num_channels, eps=1e-6, affine=True):\n super().__init__(num_channels, eps=eps, elementwise_affine=affine)\n self._fast_norm = is_fast_norm() # can't script unless we have these flags here (no globals)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n x = x.permute(0, 2, 3, 1)\n if self._fast_norm:\n x = fast_layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)\n else:\n x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)\n x = x.permute(0, 3, 1, 2)\n return x"
},
{
"identifier": "RmsNorm",
"path": "pytorch-image-models/timm/layers/norm.py",
"snippet": "class RmsNorm(nn.Module):\n \"\"\" RmsNorm w/ fast (apex) norm if available\n \"\"\"\n __constants__ = ['normalized_shape', 'eps', 'elementwise_affine']\n normalized_shape: Tuple[int, ...]\n eps: float\n elementwise_affine: bool\n\n def __init__(self, channels, eps=1e-6, affine=True, device=None, dtype=None) -> None:\n factory_kwargs = {'device': device, 'dtype': dtype}\n super().__init__()\n normalized_shape = channels\n if isinstance(normalized_shape, numbers.Integral):\n # mypy error: incompatible types in assignment\n normalized_shape = (normalized_shape,) # type: ignore[assignment]\n self.normalized_shape = tuple(normalized_shape) # type: ignore[arg-type]\n self.eps = eps\n self.elementwise_affine = affine\n if self.elementwise_affine:\n self.weight = nn.Parameter(torch.empty(self.normalized_shape, **factory_kwargs))\n else:\n self.register_parameter('weight', None)\n\n self.reset_parameters()\n\n def reset_parameters(self) -> None:\n if self.elementwise_affine:\n nn.init.ones_(self.weight)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n # NOTE fast norm fallback needs our rms norm impl, so both paths through here.\n # Since there is no built-in PyTorch impl, always use APEX RmsNorm if is installed.\n x = fast_rms_norm(x, self.normalized_shape, self.weight, self.eps)\n return x"
}
] | import functools
import types
import torch.nn as nn
from typing import Type
from .norm import GroupNorm, GroupNorm1, LayerNorm, LayerNorm2d, RmsNorm
from torchvision.ops.misc import FrozenBatchNorm2d | 1,331 | """ Norm Layer Factory
Create norm modules by string (to mirror create_act and creat_norm-act fns)
Copyright 2022 Ross Wightman
"""
_NORM_MAP = dict(
batchnorm=nn.BatchNorm2d,
batchnorm2d=nn.BatchNorm2d,
batchnorm1d=nn.BatchNorm1d,
groupnorm=GroupNorm,
groupnorm1=GroupNorm1,
layernorm=LayerNorm,
layernorm2d=LayerNorm2d,
| """ Norm Layer Factory
Create norm modules by string (to mirror create_act and creat_norm-act fns)
Copyright 2022 Ross Wightman
"""
_NORM_MAP = dict(
batchnorm=nn.BatchNorm2d,
batchnorm2d=nn.BatchNorm2d,
batchnorm1d=nn.BatchNorm1d,
groupnorm=GroupNorm,
groupnorm1=GroupNorm1,
layernorm=LayerNorm,
layernorm2d=LayerNorm2d, | rmsnorm=RmsNorm, | 4 | 2023-11-05 01:25:14+00:00 | 2k |
dewgenenny/rtl_433_discoverandsubmit | rtl_433_discoverandsubmit/modules/mqtt_client.py | [
{
"identifier": "config",
"path": "rtl_433_discoverandsubmit/config.py",
"snippet": ""
},
{
"identifier": "save_devices_to_file",
"path": "rtl_433_discoverandsubmit/modules/device_manager.py",
"snippet": "def save_devices_to_file(devices):\n \"\"\"Save the list of devices to a JSON file.\"\"\"\n device_file = initialize_device_storage()\n logging.debug(\"device file = \" + str(device_file))\n with open(device_file, 'w') as file:\n json.dump(devices, file)"
}
] | import paho.mqtt.client as mqtt
import json
import logging
from datetime import datetime
from rtl_433_discoverandsubmit import config
from rtl_433_discoverandsubmit.modules.device_manager import save_devices_to_file | 704 | log_level = getattr(logging, config.configuration['log_level'])
logging.basicConfig(filename=config.configuration['log_filename'], level=log_level)
# List to store detected devices
detected_devices = []
def reset_message_counters():
global detected_devices
for device in detected_devices:
if 'message_count' in device:
device['message_count'] = 0
def sort_detected_devices():
global detected_devices
criteria = config.configuration['current_sort_criteria']
reverse = True if criteria in ["last_detected_time", "message_count"] else False
if criteria in ["last_detected_time", "message_count"]:
detected_devices.sort(key=lambda x: x[criteria], reverse=reverse)
else:
detected_devices.sort(key=lambda x: x.get('model', '').lower())
return detected_devices
def on_connect(client, userdata, flags, rc):
"""Callback for when the client receives a CONNACK response from the server."""
client.subscribe(config.configuration['topic'])
def on_message(client, userdata, msg):
global detected_devices
payload = json.loads(msg.payload.decode())
# Safely get 'id' from payload or default to 'unknown' (fixes https://github.com/dewgenenny/rtl_433_discoverandsubmit/issues/1)
device_id_value = payload.get('id', 'unknown')
topicprefix = "/".join(msg.topic.split("/", 2)[:2])
# Construct a unique device identifier from model and id
device_id = f"{payload['model']}_{device_id_value}"
# Check if the device is already in the detected devices list
existing_device = next((device for device in detected_devices if device['id'] == device_id), None)
# If the device is new, add it to the list
if not existing_device:
device_data = {
'first_detected_time': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
'last_detected_time': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
'message_count': 1 # Initialize message count
}
device_data.update(payload) # Add all the payload attributes to the dictionary
device_data['topicprefix']= topicprefix
old_id = payload.get('id')
logging.debug("Old ID = "+ str(old_id))
device_data['original_id'] = old_id
device_data['id'] = device_id # Set the id field after updating from payload
detected_devices.append(device_data)
#save new device to file, so that it is remembered on startup
| log_level = getattr(logging, config.configuration['log_level'])
logging.basicConfig(filename=config.configuration['log_filename'], level=log_level)
# List to store detected devices
detected_devices = []
def reset_message_counters():
global detected_devices
for device in detected_devices:
if 'message_count' in device:
device['message_count'] = 0
def sort_detected_devices():
global detected_devices
criteria = config.configuration['current_sort_criteria']
reverse = True if criteria in ["last_detected_time", "message_count"] else False
if criteria in ["last_detected_time", "message_count"]:
detected_devices.sort(key=lambda x: x[criteria], reverse=reverse)
else:
detected_devices.sort(key=lambda x: x.get('model', '').lower())
return detected_devices
def on_connect(client, userdata, flags, rc):
"""Callback for when the client receives a CONNACK response from the server."""
client.subscribe(config.configuration['topic'])
def on_message(client, userdata, msg):
global detected_devices
payload = json.loads(msg.payload.decode())
# Safely get 'id' from payload or default to 'unknown' (fixes https://github.com/dewgenenny/rtl_433_discoverandsubmit/issues/1)
device_id_value = payload.get('id', 'unknown')
topicprefix = "/".join(msg.topic.split("/", 2)[:2])
# Construct a unique device identifier from model and id
device_id = f"{payload['model']}_{device_id_value}"
# Check if the device is already in the detected devices list
existing_device = next((device for device in detected_devices if device['id'] == device_id), None)
# If the device is new, add it to the list
if not existing_device:
device_data = {
'first_detected_time': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
'last_detected_time': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
'message_count': 1 # Initialize message count
}
device_data.update(payload) # Add all the payload attributes to the dictionary
device_data['topicprefix']= topicprefix
old_id = payload.get('id')
logging.debug("Old ID = "+ str(old_id))
device_data['original_id'] = old_id
device_data['id'] = device_id # Set the id field after updating from payload
detected_devices.append(device_data)
#save new device to file, so that it is remembered on startup | save_devices_to_file(detected_devices) | 1 | 2023-11-03 19:34:56+00:00 | 2k |
dvruette/pygba | src/pygba/gym_env.py | [
{
"identifier": "KEY_MAP",
"path": "src/pygba/utils.py",
"snippet": "KEY_MAP = {\n \"up\": GBA.KEY_UP,\n \"down\": GBA.KEY_DOWN,\n \"left\": GBA.KEY_LEFT,\n \"right\": GBA.KEY_RIGHT,\n \"A\": GBA.KEY_A,\n \"B\": GBA.KEY_B,\n \"L\": GBA.KEY_L,\n \"R\": GBA.KEY_R,\n \"start\": GBA.KEY_START,\n \"select\": GBA.KEY_SELECT,\n}"
},
{
"identifier": "PyGBA",
"path": "src/pygba/pygba.py",
"snippet": "class PyGBA:\n @staticmethod\n def load(gba_file: str, save_file: str | None = None) -> \"PyGBA\":\n # create a temporary directory and copy the gba file into it\n # this is necessary to prevent mgba from overwriting the save file (and to prevent crashes)\n tmp_dir = Path(tempfile.mkdtemp())\n tmp_gba = tmp_dir / \"rom.gba\"\n tmp_gba.write_bytes(Path(gba_file).read_bytes())\n gba_file = str(tmp_gba)\n if save_file is not None:\n tmp_save = tmp_dir / \"rom.sav\"\n tmp_save.write_bytes(Path(save_file).read_bytes())\n save_file = str(tmp_save)\n\n core = mgba.core.load_path(gba_file)\n if core is None:\n raise ValueError(f\"Failed to load GBA file: {gba_file}\")\n if save_file is not None:\n core.autoload_save()\n core.reset()\n return PyGBA(core)\n \n def __init__(self, core: mgba.core.Core):\n self.core = core\n\n self.core.add_frame_callback(self._invalidate_mem_cache)\n self._mem_cache = {}\n\n def wait(self, frames: int):\n for _ in range(frames):\n self.core.run_frame()\n\n def press_key(self, key: str, frames: int = 2):\n if key not in KEY_MAP:\n raise ValueError(f\"Invalid key: {key}\")\n if frames < 2:\n raise ValueError(\"Cannot press a key for less than 2 frames.\")\n \n key = KEY_MAP[key]\n self.core.add_keys(key)\n self.wait(frames - 1)\n self.core.clear_keys(key)\n self.wait(1)\n\n def press_up(self, frames: int = 2):\n self.press_key(\"up\", frames)\n\n def press_down(self, frames: int = 2):\n self.press_key(\"down\", frames)\n\n def press_left(self, frames: int = 2):\n self.press_key(\"left\", frames)\n\n def press_right(self, frames: int = 2):\n self.press_key(\"right\", frames)\n\n def press_a(self, frames: int = 2):\n self.press_key(\"A\", frames)\n\n def press_b(self, frames: int = 2):\n self.press_key(\"B\", frames)\n\n def press_l(self, frames: int = 2):\n self.press_key(\"L\", frames)\n\n def press_r(self, frames: int = 2):\n self.press_key(\"R\", frames)\n\n def press_start(self, frames: int = 2):\n self.press_key(\"start\", frames)\n\n def press_select(self, frames: int = 2):\n self.press_key(\"select\", frames)\n\n def _invalidate_mem_cache(self):\n self._mem_cache = {}\n \n def _get_memory_region(self, region_id: int):\n if region_id not in self._mem_cache:\n mem_core = self.core.memory.u8._core\n size = ffi.new(\"size_t *\")\n ptr = ffi.cast(\"uint8_t *\", mem_core.getMemoryBlock(mem_core, region_id, size))\n self._mem_cache[region_id] = ffi.buffer(ptr, size[0])[:]\n return self._mem_cache[region_id]\n\n def read_memory(self, address: int, size: int = 1):\n region_id = address >> lib.BASE_OFFSET\n mem_region = self._get_memory_region(region_id)\n mask = len(mem_region) - 1\n address &= mask\n return mem_region[address:address + size]\n\n def read_u8(self, address: int):\n return int.from_bytes(self.read_memory(address, 1), byteorder='little', signed=False)\n\n def read_u16(self, address: int):\n return int.from_bytes(self.read_memory(address, 2), byteorder='little', signed=False)\n\n def read_u32(self, address: int):\n return int.from_bytes(self.read_memory(address, 4), byteorder='little', signed=False)"
},
{
"identifier": "GameWrapper",
"path": "src/pygba/game_wrappers/base.py",
"snippet": "class GameWrapper(ABC):\n @abstractmethod\n def reward(self, gba: PyGBA, observation: np.ndarray) -> float:\n raise NotImplementedError\n\n def game_over(self, gba: PyGBA, observation: np.ndarray) -> bool:\n return False\n \n def reset(self, gba: PyGBA) -> None:\n pass\n \n def info(self, gba: PyGBA, observation: np.ndarray) -> dict[str, Any]:\n return {}"
}
] | import sys
import gymnasium as gym
import mgba.core
import mgba.image
import numpy as np
import pygame
from typing import Any, Literal
from .utils import KEY_MAP
from .pygba import PyGBA
from .game_wrappers.base import GameWrapper
from pygame import gfxdraw | 1,415 |
try:
except ImportError as e:
pass
def _pil_image_to_pygame(img):
return pygame.image.fromstring(img.tobytes(), img.size, img.mode).convert()
class PyGBAEnv(gym.Env):
metadata = {
"render_modes": ["human", "rgb_array"],
"render_fps": 60,
}
def __init__(
self,
gba: PyGBA,
|
try:
except ImportError as e:
pass
def _pil_image_to_pygame(img):
return pygame.image.fromstring(img.tobytes(), img.size, img.mode).convert()
class PyGBAEnv(gym.Env):
metadata = {
"render_modes": ["human", "rgb_array"],
"render_fps": 60,
}
def __init__(
self,
gba: PyGBA, | game_wrapper: GameWrapper | None = None, | 2 | 2023-11-08 20:51:13+00:00 | 2k |
BouncyKoishi/ChuCaoQi-Bot | dbConnection/draw_item.py | [
{
"identifier": "DrawItemList",
"path": "dbConnection/models.py",
"snippet": "class DrawItemList(Model):\n id = IntField(pk=True)\n name = CharField(max_length=64)\n pool = CharField(max_length=32)\n rareRank = IntField()\n detail = CharField(max_length=1024)\n author = CharField(max_length=12)"
},
{
"identifier": "DrawItemStorage",
"path": "dbConnection/models.py",
"snippet": "class DrawItemStorage(Model):\n qq = CharField(max_length=12)\n item = ForeignKeyField(\"models.DrawItemList\", on_delete=CASCADE, related_name=\"draw_item_storage\")\n amount = IntField()"
}
] | from random import randint
from .models import DrawItemList, DrawItemStorage
from tortoise import Tortoise
from tortoise.query_utils import Prefetch
from tortoise.functions import Sum | 646 |
async def getItem(itemId):
return await DrawItemList.filter(id=itemId).first()
async def getItemByName(itemName):
return await DrawItemList.filter(name=itemName).first()
async def getItemListByAuthor(qqNum, rareRank=None, poolName=None):
filterQuery = getRareRankAndPoolFilter(rareRank, poolName)
return await filterQuery.filter(author=qqNum).order_by("-rareRank")
async def getRandomItem(rareRank, poolName=None):
if poolName:
rareItemList = await DrawItemList.filter(rareRank=rareRank, pool=poolName)
else:
rareItemList = await DrawItemList.filter(rareRank=rareRank)
if not rareItemList:
raise Exception(f"DrawItem Error: 抽奖物品列表为空!")
return rareItemList[randint(0, len(rareItemList) - 1)]
async def searchItem(keyword, limit, offset=0):
conn = Tortoise.get_connection('default')
count = (await conn.execute_query_dict('''
SELECT
count(*) AS count
FROM
DrawItemList
WHERE
name LIKE ('%' || ? || '%')
''', [keyword,]))[0]['count']
rows = await conn.execute_query_dict('''
SELECT
name,
rareRank
FROM
DrawItemList
WHERE
name LIKE ('%' || ? || '%')
ORDER BY
rareRank DESC
LIMIT ? OFFSET ?
''', [keyword, limit, offset])
return count, rows
async def addItem(itemName, itemRare, poolName, itemDetail, author):
await DrawItemList.create(name=itemName, rareRank=itemRare, pool=poolName, detail=itemDetail, author=author)
async def deleteItem(item: DrawItemList):
await item.delete()
async def setItemDetail(item: DrawItemList, newItemDetail):
item.detail = newItemDetail
await item.save()
async def getItemsWithStorage(qqNum, rareRank=None, poolName=None):
filterQuery = getRareRankAndPoolFilter(rareRank, poolName)
return await filterQuery.order_by("-rareRank").prefetch_related(
|
async def getItem(itemId):
return await DrawItemList.filter(id=itemId).first()
async def getItemByName(itemName):
return await DrawItemList.filter(name=itemName).first()
async def getItemListByAuthor(qqNum, rareRank=None, poolName=None):
filterQuery = getRareRankAndPoolFilter(rareRank, poolName)
return await filterQuery.filter(author=qqNum).order_by("-rareRank")
async def getRandomItem(rareRank, poolName=None):
if poolName:
rareItemList = await DrawItemList.filter(rareRank=rareRank, pool=poolName)
else:
rareItemList = await DrawItemList.filter(rareRank=rareRank)
if not rareItemList:
raise Exception(f"DrawItem Error: 抽奖物品列表为空!")
return rareItemList[randint(0, len(rareItemList) - 1)]
async def searchItem(keyword, limit, offset=0):
conn = Tortoise.get_connection('default')
count = (await conn.execute_query_dict('''
SELECT
count(*) AS count
FROM
DrawItemList
WHERE
name LIKE ('%' || ? || '%')
''', [keyword,]))[0]['count']
rows = await conn.execute_query_dict('''
SELECT
name,
rareRank
FROM
DrawItemList
WHERE
name LIKE ('%' || ? || '%')
ORDER BY
rareRank DESC
LIMIT ? OFFSET ?
''', [keyword, limit, offset])
return count, rows
async def addItem(itemName, itemRare, poolName, itemDetail, author):
await DrawItemList.create(name=itemName, rareRank=itemRare, pool=poolName, detail=itemDetail, author=author)
async def deleteItem(item: DrawItemList):
await item.delete()
async def setItemDetail(item: DrawItemList, newItemDetail):
item.detail = newItemDetail
await item.save()
async def getItemsWithStorage(qqNum, rareRank=None, poolName=None):
filterQuery = getRareRankAndPoolFilter(rareRank, poolName)
return await filterQuery.order_by("-rareRank").prefetch_related( | Prefetch("draw_item_storage", queryset=DrawItemStorage.filter(qq=qqNum), to_attr="storage") | 1 | 2023-11-02 04:06:31+00:00 | 2k |
ilur98/DGQ | dgq/utils/evalutils.py | [
{
"identifier": "get_blocks",
"path": "dgq/utils/modelutils.py",
"snippet": "def get_blocks(model):\n if isinstance(model, LlamaForCausalLM):\n layers = model.model.layers\n elif isinstance(model, OPTForCausalLM):\n layers = model.model.decoder.layers\n elif isinstance(model, BloomForCausalLM):\n layers = model.transformer.h\n elif \"mpt\" in str(model.__class__).lower():\n layers = model.transformer.blocks\n elif \"falcon\" in str(model.__class__).lower():\n layers = model.transformer.h\n else:\n raise NotImplementedError(type(model))\n return layers"
},
{
"identifier": "move_embed",
"path": "dgq/utils/modelutils.py",
"snippet": "def move_embed(model, device):\n if isinstance(model, LlamaForCausalLM):\n model.model.embed_tokens = model.model.embed_tokens.to(device)\n elif isinstance(model, OPTForCausalLM):\n model.model.decoder.embed_tokens = model.model.decoder.embed_tokens.to(device)\n model.model.decoder.embed_positions = model.model.decoder.embed_positions.to(device)\n elif isinstance(model, BloomForCausalLM):\n model.transformer.word_embeddings = model.transformer.word_embeddings.to(device)\n model.transformer.word_embeddings_layernorm = model.transformer.word_embeddings_layernorm.to(device)\n elif \"mpt\" in str(model.__class__).lower():\n model.transformer.wte = model.transformer.wte.to(device)\n model.transformer.emb_drop = model.transformer.emb_drop.to(device)\n elif \"falcon\" in str(model.__class__).lower():\n model.transformer.word_embeddings = model.transformer.word_embeddings.to(device)\n else:\n raise NotImplementedError(type(model))"
},
{
"identifier": "move_norm_head",
"path": "dgq/utils/modelutils.py",
"snippet": "def move_norm_head(model, device):\n mod_list = torch.nn.ModuleList()\n mod_list.append(nn.Identity().to(device))\n if isinstance(model, LlamaForCausalLM):\n model.lm_head = model.lm_head.to(device)\n if model.model.norm is not None:\n mod_list.append(model.model.norm.to(device))\n elif isinstance(model, OPTForCausalLM):\n model.lm_head = model.lm_head.to(device)\n if model.model.decoder.final_layer_norm is not None:\n mod_list.append(model.model.decoder.final_layer_norm.to(device))\n if model.model.decoder.project_out is not None:\n mod_list.append(model.model.decoder.project_out.to(device))\n elif isinstance(model, BloomForCausalLM):\n model.lm_head = model.lm_head.to(device)\n mod_list.append(model.transformer.ln_f.to(device))\n # elif \"mpt\" in str(model.__class__).lower():\n # model.transformer.wte = model.transformer.wte.to(device)\n # model.transformer.emb_drop = model.transformer.emb_drop.to(device)\n # elif \"falcon\" in str(model.__class__).lower():\n # model.transformer.word_embeddings = model.transformer.word_embeddings.to(device)\n else:\n raise NotImplementedError(type(model))\n return mod_list"
},
{
"identifier": "IGNORE_INDEX",
"path": "dgq/utils/datautils.py",
"snippet": "IGNORE_INDEX = -100"
},
{
"identifier": "DEFAULT_PAD_TOKEN",
"path": "dgq/utils/datautils.py",
"snippet": "DEFAULT_PAD_TOKEN = \"[PAD]\""
}
] | import torch
import torch.nn as nn
import numpy as np
from dgq.utils.modelutils import get_blocks, move_embed, move_norm_head
from datasets import load_dataset, load_from_disk
from tqdm import tqdm
from dgq.utils.datautils import IGNORE_INDEX, DEFAULT_PAD_TOKEN | 866 |
@torch.no_grad()
def model_eval(model, testenc, dev, local_args=None):
testenc = testenc.input_ids
nsamples = testenc.numel() // model.seqlen
# model = model.to(dev)
model.eval()
model.config.use_cache = False
# testenc = testenc.to(dev)
|
@torch.no_grad()
def model_eval(model, testenc, dev, local_args=None):
testenc = testenc.input_ids
nsamples = testenc.numel() // model.seqlen
# model = model.to(dev)
model.eval()
model.config.use_cache = False
# testenc = testenc.to(dev) | layers = get_blocks(model) | 0 | 2023-11-01 13:45:16+00:00 | 2k |
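
The gold line uses `get_blocks` from the first context snippet to fetch the decoder blocks for whichever architecture was loaded. A hedged sketch of how the evaluation routine plausibly continues from the cropped code, using the other context helpers — an illustration of the pattern, not the repo's exact code:

# Plausible continuation of model_eval (illustrative only; assumes the imports in this row).
layers = get_blocks(model)                  # gold completion: decoder blocks for this architecture
move_embed(model, dev)                      # embeddings must sit on the device before block 0 runs
head_modules = move_norm_head(model, dev)   # final norm + lm_head, used later to compute logits
layers[0] = layers[0].to(dev)               # stream blocks to the GPU one at a time (assumed strategy)
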
JeasunLok/ResNet-pytorch | test.py | [
{
"identifier": "AverageMeter",
"path": "utils/utils.py",
"snippet": "class AverageMeter(object):\n\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.average = 0 \n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.sum += val * n\n self.count += n\n self.average = self.sum / self.count"
},
{
"identifier": "accuracy",
"path": "utils/accuracy.py",
"snippet": "def accuracy(output, label, topk=(1,)):\n maxk = max(topk)\n batch_size = output.size(0)\n\t\n\t# 在输出结果中取前maxk个最大概率作为预测结果,并获取其下标,当topk=(1, 5)时取5就可以了。\n _, pred = torch.topk(output, k=maxk, dim=1, largest=True, sorted=True)\n \n # 将得到的k个预测结果的矩阵进行转置,方便后续和label作比较\n pred = pred.T\n # 将label先拓展成为和pred相同的形状,和pred进行对比,输出结果\n correct = torch.eq(pred, label.contiguous().view(1,-1).expand_as(pred))\n\t# 例:\n\t# 若label为:[1,2,3,4], topk = (1, 5)时\n\t# 则label.contiguous().view(1,-1).expand_as(pred)为:\n\t# [[1, 2, 3, 4],\n\t# [1, 2, 3, 4],\n\t# [1, 2, 3, 4],\n\t# [1, 2, 3, 4],\n\t# [1, 2, 3, 4]]\n\t\n res = []\n\n for k in topk:\n \t# 取前k个预测正确的结果进行求和\n correct_k = correct[:k].contiguous().view(-1).float().sum(dim=0, keepdim=True)\n # 计算平均精度, 将结果加入res中\n res.append(correct_k*100/batch_size)\n return res"
}
] | import numpy as np
import torch
from tqdm import tqdm
from utils.utils import AverageMeter
from utils.accuracy import accuracy | 695 |
# test model
def test_epoch(model, test_loader, device):
acc1 = AverageMeter()
acc3 = AverageMeter()
prediction = np.array([])
label = np.array([])
loop = tqdm(enumerate(test_loader), total = len(test_loader))
with torch.no_grad():
for batch_idx, (batch_data, batch_label) in loop:
batch_data = batch_data.to(device).float()
batch_label = batch_label.to(device)
batch_prediction = model(batch_data)
# calculate the accuracy
|
# test model
def test_epoch(model, test_loader, device):
acc1 = AverageMeter()
acc3 = AverageMeter()
prediction = np.array([])
label = np.array([])
loop = tqdm(enumerate(test_loader), total = len(test_loader))
with torch.no_grad():
for batch_idx, (batch_data, batch_label) in loop:
batch_data = batch_data.to(device).float()
batch_label = batch_label.to(device)
batch_prediction = model(batch_data)
# calculate the accuracy | acc_batch = accuracy(batch_prediction, batch_label, topk=(1,3)) | 1 | 2023-11-02 06:26:47+00:00 | 2k |
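
The gold line feeds each batch through the `accuracy` helper from the context with topk=(1, 3); the two meters created above then accumulate running averages. A small runnable illustration of that update pattern on toy tensors — the shapes are illustrative only, and `accuracy`, `acc1`, `acc3` are the objects already defined in this row:

import torch

batch_prediction = torch.randn(8, 10)        # toy logits: 8 samples, 10 classes
batch_label = torch.randint(0, 10, (8,))     # toy ground-truth class indices
acc_batch = accuracy(batch_prediction, batch_label, topk=(1, 3))   # gold completion
acc1.update(acc_batch[0].item(), n=batch_label.size(0))            # running top-1 accuracy
acc3.update(acc_batch[1].item(), n=batch_label.size(0))            # running top-3 accuracy
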
soobin419/EDAT | basicsr/utils/logger.py | [
{
"identifier": "get_dist_info",
"path": "basicsr/utils/dist_util.py",
"snippet": "def get_dist_info():\n if dist.is_available():\n initialized = dist.is_initialized()\n else:\n initialized = False\n if initialized:\n rank = dist.get_rank()\n world_size = dist.get_world_size()\n else:\n rank = 0\n world_size = 1\n return rank, world_size"
},
{
"identifier": "master_only",
"path": "basicsr/utils/dist_util.py",
"snippet": "def master_only(func):\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n rank, _ = get_dist_info()\n if rank == 0:\n return func(*args, **kwargs)\n\n return wrapper"
}
] | import datetime
import logging
import time
import wandb
import torch
import torchvision
from .dist_util import get_dist_info, master_only
from torch.utils.tensorboard import SummaryWriter
from version import __version__ | 1,528 | train (dict): Contains 'total_iter' (int) for total iters.
use_tb_logger (bool): Use tensorboard logger.
start_iter (int): Start iter. Default: 1.
tb_logger (obj:`tb_logger`): Tensorboard logger. Default: None.
"""
def __init__(self, opt, start_iter=1, tb_logger=None):
self.exp_name = opt['name']
self.interval = opt['logger']['print_freq']
self.start_iter = start_iter
self.max_iters = opt['train']['total_iter']
self.use_tb_logger = opt['logger']['use_tb_logger']
self.tb_logger = tb_logger
self.start_time = time.time()
self.logger = get_root_logger()
def reset_start_time(self):
self.start_time = time.time()
@master_only
def __call__(self, log_vars):
"""Format logging message.
Args:
log_vars (dict): It contains the following keys:
epoch (int): Epoch number.
iter (int): Current iter.
lrs (list): List for learning rates.
time (float): Iter time.
data_time (float): Data time for each iter.
"""
# epoch, iter, learning rates
epoch = log_vars.pop('epoch')
current_iter = log_vars.pop('iter')
lrs = log_vars.pop('lrs')
message = (f'[{self.exp_name[:5]}..][epoch:{epoch:3d}, iter:{current_iter:8,d}, lr:(')
for v in lrs:
message += f'{v:.3e},'
message += ')] '
# time and estimated time
if 'time' in log_vars.keys():
iter_time = log_vars.pop('time')
data_time = log_vars.pop('data_time')
total_time = time.time() - self.start_time
time_sec_avg = total_time / (current_iter - self.start_iter + 1)
eta_sec = time_sec_avg * (self.max_iters - current_iter - 1)
eta_str = str(datetime.timedelta(seconds=int(eta_sec)))
message += f'[eta: {eta_str}, '
message += f'time (data): {iter_time:.3f} ({data_time:.3f})] '
# other items, especially losses
for k, v in log_vars.items():
message += f'{k}: {v:.4e} '
# tensorboard logger
if self.use_tb_logger and 'debug' not in self.exp_name:
if k.startswith('l_'):
self.tb_logger.add_scalar(f'losses/{k}', v, current_iter)
else:
self.tb_logger.add_scalar(k, v, current_iter)
self.logger.info(message)
@master_only
def init_tb_logger(log_dir):
tb_logger = SummaryWriter(log_dir=log_dir)
return tb_logger
@master_only
def init_wandb_logger(opt):
"""We now only use wandb to sync tensorboard log."""
logger = get_root_logger()
project = opt['logger']['wandb']['project']
resume_id = opt['logger']['wandb'].get('resume_id')
if resume_id:
wandb_id = resume_id
resume = 'allow'
logger.warning(f'Resume wandb logger with id={wandb_id}.')
else:
wandb_id = wandb.util.generate_id()
resume = 'never'
wandb.init(id=wandb_id, resume=resume, name=opt['name'], config=opt, project=project, sync_tensorboard=True)
logger.info(f'Use wandb logger with id={wandb_id}; project={project}.')
def get_root_logger(logger_name='basicsr', log_level=logging.INFO, log_file=None):
"""Get the root logger.
The logger will be initialized if it has not been initialized. By default a
StreamHandler will be added. If `log_file` is specified, a FileHandler will
also be added.
Args:
logger_name (str): root logger name. Default: 'basicsr'.
log_file (str | None): The log filename. If specified, a FileHandler
will be added to the root logger.
log_level (int): The root logger level. Note that only the process of
rank 0 is affected, while other processes will set the level to
"Error" and be silent most of the time.
Returns:
logging.Logger: The root logger.
"""
logger = logging.getLogger(logger_name)
# if the logger has been initialized, just return it
if logger_name in initialized_logger:
return logger
format_str = '%(asctime)s %(levelname)s: %(message)s'
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(logging.Formatter(format_str))
logger.addHandler(stream_handler)
logger.propagate = False
|
initialized_logger = {}
class AvgTimer():
def __init__(self, window=200):
self.window = window # average window
self.current_time = 0
self.total_time = 0
self.count = 0
self.avg_time = 0
self.start()
def start(self):
self.start_time = self.tic = time.time()
def record(self):
self.count += 1
self.toc = time.time()
self.current_time = self.toc - self.tic
self.total_time += self.current_time
# calculate average time
self.avg_time = self.total_time / self.count
# reset
if self.count > self.window:
self.count = 0
self.total_time = 0
self.tic = time.time()
def get_current_time(self):
return self.current_time
def get_avg_time(self):
return self.avg_time
class MessageLogger():
"""Message logger for printing.
Args:
opt (dict): Config. It contains the following keys:
name (str): Exp name.
logger (dict): Contains 'print_freq' (str) for logger interval.
train (dict): Contains 'total_iter' (int) for total iters.
use_tb_logger (bool): Use tensorboard logger.
start_iter (int): Start iter. Default: 1.
tb_logger (obj:`tb_logger`): Tensorboard logger. Default: None.
"""
def __init__(self, opt, start_iter=1, tb_logger=None):
self.exp_name = opt['name']
self.interval = opt['logger']['print_freq']
self.start_iter = start_iter
self.max_iters = opt['train']['total_iter']
self.use_tb_logger = opt['logger']['use_tb_logger']
self.tb_logger = tb_logger
self.start_time = time.time()
self.logger = get_root_logger()
def reset_start_time(self):
self.start_time = time.time()
@master_only
def __call__(self, log_vars):
"""Format logging message.
Args:
log_vars (dict): It contains the following keys:
epoch (int): Epoch number.
iter (int): Current iter.
lrs (list): List for learning rates.
time (float): Iter time.
data_time (float): Data time for each iter.
"""
# epoch, iter, learning rates
epoch = log_vars.pop('epoch')
current_iter = log_vars.pop('iter')
lrs = log_vars.pop('lrs')
message = (f'[{self.exp_name[:5]}..][epoch:{epoch:3d}, iter:{current_iter:8,d}, lr:(')
for v in lrs:
message += f'{v:.3e},'
message += ')] '
# time and estimated time
if 'time' in log_vars.keys():
iter_time = log_vars.pop('time')
data_time = log_vars.pop('data_time')
total_time = time.time() - self.start_time
time_sec_avg = total_time / (current_iter - self.start_iter + 1)
eta_sec = time_sec_avg * (self.max_iters - current_iter - 1)
eta_str = str(datetime.timedelta(seconds=int(eta_sec)))
message += f'[eta: {eta_str}, '
message += f'time (data): {iter_time:.3f} ({data_time:.3f})] '
# other items, especially losses
for k, v in log_vars.items():
message += f'{k}: {v:.4e} '
# tensorboard logger
if self.use_tb_logger and 'debug' not in self.exp_name:
if k.startswith('l_'):
self.tb_logger.add_scalar(f'losses/{k}', v, current_iter)
else:
self.tb_logger.add_scalar(k, v, current_iter)
self.logger.info(message)
@master_only
def init_tb_logger(log_dir):
tb_logger = SummaryWriter(log_dir=log_dir)
return tb_logger
@master_only
def init_wandb_logger(opt):
"""We now only use wandb to sync tensorboard log."""
logger = get_root_logger()
project = opt['logger']['wandb']['project']
resume_id = opt['logger']['wandb'].get('resume_id')
if resume_id:
wandb_id = resume_id
resume = 'allow'
logger.warning(f'Resume wandb logger with id={wandb_id}.')
else:
wandb_id = wandb.util.generate_id()
resume = 'never'
wandb.init(id=wandb_id, resume=resume, name=opt['name'], config=opt, project=project, sync_tensorboard=True)
logger.info(f'Use wandb logger with id={wandb_id}; project={project}.')
def get_root_logger(logger_name='basicsr', log_level=logging.INFO, log_file=None):
"""Get the root logger.
The logger will be initialized if it has not been initialized. By default a
StreamHandler will be added. If `log_file` is specified, a FileHandler will
also be added.
Args:
logger_name (str): root logger name. Default: 'basicsr'.
log_file (str | None): The log filename. If specified, a FileHandler
will be added to the root logger.
log_level (int): The root logger level. Note that only the process of
rank 0 is affected, while other processes will set the level to
"Error" and be silent most of the time.
Returns:
logging.Logger: The root logger.
"""
logger = logging.getLogger(logger_name)
# if the logger has been initialized, just return it
if logger_name in initialized_logger:
return logger
format_str = '%(asctime)s %(levelname)s: %(message)s'
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(logging.Formatter(format_str))
logger.addHandler(stream_handler)
logger.propagate = False | rank, _ = get_dist_info() | 0 | 2023-11-09 08:53:40+00:00 | 2k |
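
The gold line asks `get_dist_info` from the context for the process rank so that only the master process logs at full verbosity (and, in the repo, attaches the optional FileHandler). A minimal runnable illustration of that rank gate, assuming `get_dist_info` is imported as in this row; the logger name here is illustrative, not the repo's:

import logging

logger = logging.getLogger('example')            # illustrative logger
logger.addHandler(logging.StreamHandler())
rank, _ = get_dist_info()                        # gold completion: (0, 1) when not distributed
logger.setLevel(logging.INFO if rank == 0 else logging.ERROR)   # non-master ranks stay quiet
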
noco-ai/elemental-golem | modules/noco-ai/bark-tts/handler.py | [
{
"identifier": "BaseHandler",
"path": "application/base_handler.py",
"snippet": "class BaseHandler:\n\n def __init__(self):\n self.cached_schemas = {}\n\n def execute(self, model, request) -> dict:\n raise NotImplementedError(\"The `execute` method should be implemented in the derived class.\")\n \n def validate(self, request) -> dict:\n raise NotImplementedError(\"The `validate` method should be implemented in the derived class.\")\n\n def load(self, model, model_options) -> dict:\n return {}\n \n def copy_queue_headers(self, incoming_headers, override_command = None):\n # copy amqp headers\n outgoing_headers = {}\n stream_override = None\n for incoming_header in incoming_headers:\n if incoming_header in [\"x-delay\", \"return_exchange\", \"return_routing_key\"]:\n continue\n if incoming_header == \"stream_to_override\":\n stream_override = incoming_headers[incoming_header]\n\n outgoing_headers[incoming_header] = incoming_headers[incoming_header]\n\n stream_to = \"prompt_fragment\" if stream_override == None else stream_override\n outgoing_headers[\"command\"] = override_command if override_command is not None else stream_to\n return BasicProperties(headers=outgoing_headers)\n\n def load_schema_file(self, schema_file):\n # Check if schema is in cache\n if schema_file in self.cached_schemas:\n schema = self.cached_schemas[schema_file]\n else:\n # Load the schema at the path\n try: \n with open(f\"schema/{schema_file}.jsonschema\", 'r') as file:\n schema = json.load(file)\n except Exception as e:\n logger.error(e)\n return None\n # Cache the schema\n self.cached_schemas[schema_file] = schema\n\n return schema\n \n # A dictionary to hold the cached schemas \n def validate_request(self, json_data: dict, schema_file: str) -> Union[bool, List[str]]:\n \n schema = self.load_schema_file(schema_file)\n if schema is None:\n return False, [\"Invalid schema file for handler\"]\n \n json_data = self.apply_schema_defaults(json_data, schema_file) \n try:\n validate(instance=json_data, schema=schema)\n except jsonschema.exceptions.ValidationError as err:\n # If there is a validation error, return a list containing the error message\n logger.warn(\"validation failed for incoming request\")\n return False, [str(err)]\n else:\n # If the data is valid, return True\n return True, []\n\n def apply_schema_defaults(self, raw_data: dict, schema_file: str) -> dict:\n \n schema = self.load_schema_file(schema_file)\n if schema is None:\n logger.error(\"could not load schema file\")\n return raw_data\n \n # Fill default values\n for property, attributes in schema['properties'].items():\n if \"default\" in attributes and property not in raw_data:\n raw_data[property] = attributes[\"default\"]\n\n return raw_data\n\n \n def check_stop_generation(self, counter, stop_generation_event, stop_generation_filter, socket_id): \n counter += 1\n if counter >= 5:\n counter = 0\n if stop_generation_event.is_set():\n stop_generation_event.clear()\n if socket_id == None:\n return False, counter\n \n stop_socket = bytes(stop_generation_filter.raw).rstrip(b'\\x00').decode(\"utf-8\")\n if stop_socket == socket_id:\n return True, counter\n\n return False, counter"
},
{
"identifier": "ProgressStreamer",
"path": "application/progress_streamer.py",
"snippet": "class ProgressStreamer(BaseStreamer):\n def __init__(self):\n self.token_count = 0\n self.max_new_tokens = 0\n self.show_bar = False\n self.amqp_config = None \n self.label = \"\"\n\n def put(self, value):\n self.token_count += 1\n if self.show_bar:\n self.progress_bar.update(1) \n\n if self.amqp_config: \n send_body = {\n \"total\": self.max_new_tokens,\n \"current\": self.token_count,\n \"label\": self.label\n }\n \n self.amqp_config[\"channel\"].basic_publish(\n exchange=self.amqp_config[\"headers\"]['return_exchange'], \n routing_key=self.amqp_config[\"headers\"]['return_routing_key'], \n body=json.dumps(send_body), properties=self.amqp_config[\"outgoing_properties\"]) \n\n def end(self):\n if self.show_bar:\n self.progress_bar.close() \n\n def configure(self, max_new_tokens, label, amqp_config = None, show_bar = True):\n self.max_new_tokens = max_new_tokens\n self.show_bar = show_bar\n self.amqp_config = amqp_config \n self.token_count = 0\n self.label = label\n if show_bar:\n self.progress_bar = tqdm(total=max_new_tokens)"
}
] | from application.base_handler import BaseHandler
from transformers import AutoProcessor, AutoModel
from io import BytesIO
from application.progress_streamer import ProgressStreamer
import torch
import base64
import scipy
import copy
import logging | 1,247 |
logger = logging.getLogger(__name__)
class BarkHandler(BaseHandler):
def __init__(self):
|
logger = logging.getLogger(__name__)
class BarkHandler(BaseHandler):
def __init__(self): | self.progress_streamer = ProgressStreamer() | 1 | 2023-11-06 19:03:07+00:00 | 2k |
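
The gold line creates the `ProgressStreamer` from the context inside the handler's constructor; the handler can later call `configure` on it to stream token-progress updates to AMQP or a tqdm bar. A hedged sketch — the later `configure` values are illustrative, not taken from this excerpt:

class BarkHandler(BaseHandler):
    def __init__(self):
        self.progress_streamer = ProgressStreamer()   # gold completion

# Illustrative later use, e.g. inside execute() (values assumed):
# self.progress_streamer.configure(max_new_tokens=256, label="semantic tokens",
#                                  amqp_config=None, show_bar=True)
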
anilaltuner/personalized-news-agent | pages/chatbot.py | [
{
"identifier": "CUSTOM_ALGO_ID",
"path": "news.py",
"snippet": "CUSTOM_ALGO_ID = st.secrets[\"custom_algo_id\"]"
},
{
"identifier": "initialize_session",
"path": "news.py",
"snippet": "def initialize_session(user_input=\"\"):\n \"\"\"Initialize or restart the session.\"\"\"\n if user_input:\n st.session_state.username = user_input\n username_suffix = st.session_state.username if \"username\" in st.session_state else None\n if username_suffix:\n st.session_state.session = st.session_state.personalized.session(\n AlgorithmLabel.CUSTOM,\n custom_id=CUSTOM_ALGO_ID,\n vdbid=FIRSTBATCH_DB_NAME,\n session_id=\"rss_feed\" + username_suffix\n )\n else:\n st.session_state.session = st.session_state.personalized.session(\n AlgorithmLabel.CUSTOM,\n custom_id=CUSTOM_ALGO_ID,\n vdbid=FIRSTBATCH_DB_NAME\n )\n st.session_state.batches = []\n st.session_state.ids = []\n st.session_state.likes = []\n st.session_state.html_content = \"\"\"\n <div class=\"chat-container\">\n <div class=\"chat-box\">\n <div class=\"chat-output\" id=\"chat-output\"></div>\n </div>\n </div>\n \"\"\"\n st.session_state.chat_placeholder = st.empty()\n st.session_state.chat_history = \"\"\n st.session_state.chat_loader = 0"
},
{
"identifier": "fetch_content",
"path": "news.py",
"snippet": "def fetch_content():\n \"\"\"Fetch content for the current session.\"\"\"\n ids, batch = st.session_state.personalized.batch(st.session_state.session)\n st.session_state.batches += batch\n st.session_state.ids += ids"
},
{
"identifier": "chat",
"path": "chat_tools/kernel.py",
"snippet": "def chat(model, prompt, message):\n if \"chat_history\" not in st.session_state:\n st.session_state[\"chat_history\"] = \"\"\n context, user_interaction = generate_session_context(st.session_state)\n runnable = prompt | model | StrOutputParser()\n soup = BeautifulSoup(st.session_state.html_content, 'html.parser')\n chat_output = soup.find(id='chat-output')\n if \"init\" in st.session_state:\n user_div = soup.new_tag(\"div\", **{'class': 'user-message'})\n user_div.string = f\"{st.session_state.username}: {message}\"\n chat_output.append(user_div)\n stream_handler = StreamHandler(soup=soup, chat_output=chat_output)\n strategy = no_history if st.session_state[\"chat_history\"] == \"\" else has_history\n answer = runnable.invoke(\n ({\"context\": context,\n \"user_interaction\": user_interaction,\n \"user_input\": message,\n \"chat_history\": st.session_state[\"chat_history\"],\n \"strategy\": strategy}),\n config={\"callbacks\": [stream_handler]})\n st.session_state[\"chat_history\"] += f\"\\nUser:> {message}\\nChatBot:> {answer}\\n\""
},
{
"identifier": "setup_chat_with_memory",
"path": "chat_tools/kernel.py",
"snippet": "def setup_chat_with_memory():\n sk_prompt = \"\"\" \nAvoid using \"Answer:\" or \"Chatbot>\" as a response header. Responses should be concise, not exceeding 250 tokens.\n\nUser preferences is user's interaction with the articles. Use the articles that the user has liked for tailored recommendations.\nRelevant Articles for Context and Suggestions is the articles which exctracted user has liked. Use the articles that the more exploration and alternative.\nPrior Conversation Record is the previous chat history. This one is least important for user interests. Use that for engagement and continuity.\nUpcoming Chatbot Response will focus on is the strategy for the upcoming response. Use that for learn about user state on product experience.\n\nUser Preferences:\n{user_interaction}\n\nRelevant Articles for Context and Suggestions:\n{context}\n\nPrior Conversation Record:\n{chat_history}\n\nUser Inquiry:\n{user_input}\n\nUpcoming Chatbot Response will focus on:\n{strategy}\n\"\"\".strip()\n\n prompt = PromptTemplate(\n template=sk_prompt, input_variables=[\"context\", \"user_input\", \"chat_history\", \"user_interaction\", \"strategy\"]\n )\n chain = ChatOpenAI(model_name=\"gpt-4-1106-preview\", temperature=0.8, streaming=True, max_tokens=512)\n\n return chain, prompt"
},
{
"identifier": "css_",
"path": "markdowns/markdowns_chat.py",
"snippet": ""
}
] | import streamlit as st
from firstbatch import AlgorithmLabel
from pydantic import BaseModel
from news import CUSTOM_ALGO_ID, initialize_session, fetch_content
from chat_tools.kernel import chat, setup_chat_with_memory
from markdowns.markdowns_chat import css_, sidebar | 1,313 |
# Pydantic models
class SessionData(BaseModel):
username: str
class PersonalizeData(BaseModel):
message: str
class SignalData(BaseModel):
sessionID: dict
id: str
def get_user_input():
return st.sidebar.text_input("Username/Session Name", st.session_state.get("username", ""))
def update_session_state(user_input):
st.session_state.session = st.session_state.personalized.session(
AlgorithmLabel.CUSTOM, vdbid="rss_db", custom_id=CUSTOM_ALGO_ID
)
st.session_state.batches = []
st.session_state.ids = []
st.session_state.likes = []
ids, batch = st.session_state.personalized.batch(st.session_state.session)
st.session_state.batches += batch
st.session_state.ids += ids
st.session_state.username = user_input
st.session_state.html_content = """
<div class="chat-container">
<div class="chat-box">
<div class="chat-output" id="chat-output"></div>
</div>
</div>
"""
st.session_state.chat_history = ""
st.session_state.chat_loader = 3
def display_sidebar():
user_input = get_user_input()
if user_input and st.session_state.get("username") != user_input:
update_session_state(user_input)
initialize_session(user_input)
|
# Pydantic models
class SessionData(BaseModel):
username: str
class PersonalizeData(BaseModel):
message: str
class SignalData(BaseModel):
sessionID: dict
id: str
def get_user_input():
return st.sidebar.text_input("Username/Session Name", st.session_state.get("username", ""))
def update_session_state(user_input):
st.session_state.session = st.session_state.personalized.session(
AlgorithmLabel.CUSTOM, vdbid="rss_db", custom_id=CUSTOM_ALGO_ID
)
st.session_state.batches = []
st.session_state.ids = []
st.session_state.likes = []
ids, batch = st.session_state.personalized.batch(st.session_state.session)
st.session_state.batches += batch
st.session_state.ids += ids
st.session_state.username = user_input
st.session_state.html_content = """
<div class="chat-container">
<div class="chat-box">
<div class="chat-output" id="chat-output"></div>
</div>
</div>
"""
st.session_state.chat_history = ""
st.session_state.chat_loader = 3
def display_sidebar():
user_input = get_user_input()
if user_input and st.session_state.get("username") != user_input:
update_session_state(user_input)
initialize_session(user_input) | fetch_content() | 2 | 2023-11-07 12:51:01+00:00 | 2k |
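
The gold line is a direct call to `fetch_content` from the context, so the sidebar handler finishes by pulling a first personalized batch for the freshly initialized session. The completed handler, using only code already present in this row:

def display_sidebar():
    user_input = get_user_input()
    if user_input and st.session_state.get("username") != user_input:
        update_session_state(user_input)
        initialize_session(user_input)
        fetch_content()        # gold completion: load the first batch for the new session
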
m4rkw/monzo-utils | monzo_utils/model/payment.py | [
{
"identifier": "Config",
"path": "monzo_utils/lib/config.py",
"snippet": "class Config(metaclass=Singleton):\n\n def __init__(self, config=None, config_path=None):\n if config_path is None:\n homedir = pwd.getpwuid(os.getuid()).pw_dir\n config_path = f\"{homedir}/.monzo\"\n\n if not os.path.exists(config_path):\n os.mkdir(config_path, 0o755)\n\n self.config_file = f\"{config_path}/config.yaml\"\n\n if config:\n self.config = config\n else:\n if not os.path.exists(self.config_file):\n sys.stderr.write(f\"config file not found: {self.config_file}, run setup first.\\n\")\n sys.exit(1)\n\n self.config = yaml.safe_load(open(self.config_file).read())\n\n\n def __getattr__(self, name):\n if name in self.config:\n return self.config[name]\n\n return object.__getattribute__(self, name)\n\n\n def set(self, key, value):\n self.config[key] = value\n\n\n @property\n def keys(self):\n return self.config.keys()\n\n\n def save(self):\n with open(self.config_file, 'w') as f:\n f.write(yaml.dump(self.config))"
},
{
"identifier": "Transaction",
"path": "monzo_utils/model/transaction.py",
"snippet": "class Transaction(BaseModel):\n\n DISPLAY_KEYS = ['date','type','money_in','money_out','pending','description']\n RELATIONSHIPS = {\n 'account': ['`transaction`.account_id', 'account.id'],\n 'transaction_metadata': ['`transaction`.id', 'transaction_metadata.transaction_id'],\n 'pot': ['`transaction`.pot_id', 'pot.id']\n }"
},
{
"identifier": "Transactions",
"path": "monzo_utils/lib/transactions.py",
"snippet": "class Transactions(metaclass=Singleton):\n seen = {}"
}
] | import re
import datetime
from monzo_utils.lib.config import Config
from monzo_utils.model.transaction import Transaction
from monzo_utils.lib.transactions import Transactions | 1,356 | self.today = datetime.datetime.now()
self.cache = {}
def data(self, abbreviate=False):
if self.num_paid is not None:
suffix = '%d/%d' % (
self.num_paid,
self.num_total
)
else:
suffix = ''
if self.remaining is not None:
remaining = self.remaining
else:
remaining = None
return {
'status': self.status,
'payment_type': self.payment_type if abbreviate is False else self.abbreviate(self.payment_type),
'name': self.name,
'suffix': suffix,
'amount': self.display_amount,
'remaining': remaining,
'last_date': self.short_date(self.last_date) if abbreviate else self.last_date,
'due_date': self.short_date(self.due_date) if abbreviate else self.due_date
}
def abbreviate(self, string):
abbreviated = ''
for i in range(0, len(string)):
if string[i].isupper():
abbreviated += string[i]
return abbreviated
def short_date(self, date):
if not date:
return None
return date.strftime('%d/%m/%y')
def display(self):
data = self.data()
print("%s: %s %s %s %s %s %s %s" % (
data['status'].rjust(7),
data['payment_type'].ljust(15),
data['name'].ljust(25),
data['suffix'].ljust(5),
('£%.2f' % (data['amount'])).ljust(8),
('£%.2f' % (data['remaining'])).ljust(8) if data['remaining'] else ''.ljust(8),
data['last_date'].strftime('%Y-%m-%d').ljust(12) if data['last_date'] else ''.ljust(12),
data['due_date'].strftime('%Y-%m-%d').ljust(10) if data['due_date'] else ''
))
@property
def name(self):
return self.payment_config['name']
@property
def status(self):
if 'start_date' in self.payment_config and self.payment_config['start_date'] >= self.next_salary_date:
return 'SKIPPED'
if 'yearly_month' in self.payment_config:
if self.yearly_payment_due_this_month(self.payment_config, self.last_salary_date) is False:
return 'SKIPPED'
if 'renew_date' in self.payment_config and self.payment_config['renew_date'] >= self.next_salary_date:
return 'SKIPPED'
if 'exclude_months' in self.payment_config and self.today.month in self.payment_config['exclude_months']:
return 'SKIPPED'
if self.last_date and self.last_date >= self.last_salary_date:
return 'PAID'
if self.due_date and self.due_date >= self.next_salary_date:
return 'SKIPPED'
return 'DUE'
@property
def payment_type(self):
return re.sub(r'(?<!^)(?=[A-Z])', '_', type(self).__name__).replace('_',' ')
@property
def num_paid(self):
return None
@property
def num_total(self):
if 'months' in self.payment_config:
return self.payment_config['months']
return None
@property
def remaining(self):
pass
@property
def display_amount(self):
today = datetime.datetime.now()
today = datetime.date(today.year, today.month, today.day)
|
class Payment:
transaction_type = 'money_out'
always_fixed = False
def __init__(self, config, payment_list_config, payment_config, last_salary_date, next_salary_date, following_salary_date):
self.config = config
self.payment_list_config = payment_list_config
self.payment_config = payment_config
self.last_salary_date = last_salary_date
self.next_salary_date = next_salary_date
self.following_salary_date = following_salary_date
self.today = datetime.datetime.now()
self.cache = {}
def data(self, abbreviate=False):
if self.num_paid is not None:
suffix = '%d/%d' % (
self.num_paid,
self.num_total
)
else:
suffix = ''
if self.remaining is not None:
remaining = self.remaining
else:
remaining = None
return {
'status': self.status,
'payment_type': self.payment_type if abbreviate is False else self.abbreviate(self.payment_type),
'name': self.name,
'suffix': suffix,
'amount': self.display_amount,
'remaining': remaining,
'last_date': self.short_date(self.last_date) if abbreviate else self.last_date,
'due_date': self.short_date(self.due_date) if abbreviate else self.due_date
}
def abbreviate(self, string):
abbreviated = ''
for i in range(0, len(string)):
if string[i].isupper():
abbreviated += string[i]
return abbreviated
def short_date(self, date):
if not date:
return None
return date.strftime('%d/%m/%y')
def display(self):
data = self.data()
print("%s: %s %s %s %s %s %s %s" % (
data['status'].rjust(7),
data['payment_type'].ljust(15),
data['name'].ljust(25),
data['suffix'].ljust(5),
('£%.2f' % (data['amount'])).ljust(8),
('£%.2f' % (data['remaining'])).ljust(8) if data['remaining'] else ''.ljust(8),
data['last_date'].strftime('%Y-%m-%d').ljust(12) if data['last_date'] else ''.ljust(12),
data['due_date'].strftime('%Y-%m-%d').ljust(10) if data['due_date'] else ''
))
@property
def name(self):
return self.payment_config['name']
@property
def status(self):
if 'start_date' in self.payment_config and self.payment_config['start_date'] >= self.next_salary_date:
return 'SKIPPED'
if 'yearly_month' in self.payment_config:
if self.yearly_payment_due_this_month(self.payment_config, self.last_salary_date) is False:
return 'SKIPPED'
if 'renew_date' in self.payment_config and self.payment_config['renew_date'] >= self.next_salary_date:
return 'SKIPPED'
if 'exclude_months' in self.payment_config and self.today.month in self.payment_config['exclude_months']:
return 'SKIPPED'
if self.last_date and self.last_date >= self.last_salary_date:
return 'PAID'
if self.due_date and self.due_date >= self.next_salary_date:
return 'SKIPPED'
return 'DUE'
@property
def payment_type(self):
return re.sub(r'(?<!^)(?=[A-Z])', '_', type(self).__name__).replace('_',' ')
@property
def num_paid(self):
return None
@property
def num_total(self):
if 'months' in self.payment_config:
return self.payment_config['months']
return None
@property
def remaining(self):
pass
@property
def display_amount(self):
today = datetime.datetime.now()
today = datetime.date(today.year, today.month, today.day)
| if 'last_amount_overrides' in Config().keys and \ | 0 | 2023-11-05 12:48:18+00:00 | 2k |
rossiyareich/inknhue | src/conditional/conditional_encoder.py | [
{
"identifier": "DownSample",
"path": "src/downsample.py",
"snippet": "class DownSample(nn.Module):\n \"\"\"\n ## Down-sampling layer\n \"\"\"\n\n def __init__(self, channels: int):\n \"\"\"\n :param channels: is the number of channels\n \"\"\"\n super().__init__()\n # $3 \\times 3$ convolution with stride length of $2$ to down-sample by a factor of $2$\n self.conv = nn.Conv2d(channels, channels, 3, stride=2, padding=0)\n\n def forward(self, x: torch.Tensor):\n \"\"\"\n :param x: is the input feature map with shape `[batch_size, channels, height, width]`\n \"\"\"\n # Add padding\n x = F.pad(x, (0, 1, 0, 1), mode=\"constant\", value=0)\n # Apply convolution\n return self.conv(x)"
},
{
"identifier": "ResnetBlock",
"path": "src/resnet_block.py",
"snippet": "class ResnetBlock(nn.Module):\n \"\"\"\n ## ResNet Block\n \"\"\"\n\n def __init__(self, in_channels: int, out_channels: int):\n \"\"\"\n :param in_channels: is the number of channels in the input\n :param out_channels: is the number of channels in the output\n \"\"\"\n super().__init__()\n # First normalization and convolution layer\n self.norm1 = normalization(in_channels)\n self.conv1 = nn.Conv2d(in_channels, out_channels, 3, stride=1, padding=1)\n # Second normalization and convolution layer\n self.norm2 = normalization(out_channels)\n self.conv2 = nn.Conv2d(out_channels, out_channels, 3, stride=1, padding=1)\n # `in_channels` to `out_channels` mapping layer for residual connection\n if in_channels != out_channels:\n self.nin_shortcut = nn.Conv2d(\n in_channels, out_channels, 1, stride=1, padding=0\n )\n else:\n self.nin_shortcut = nn.Identity()\n\n def forward(self, x: torch.Tensor):\n \"\"\"\n :param x: is the input feature map with shape `[batch_size, channels, height, width]`\n \"\"\"\n\n h = x\n\n # First normalization and convolution layer\n h = self.norm1(h)\n h = swish(h)\n h = self.conv1(h)\n\n # Second normalization and convolution layer\n h = self.norm2(h)\n h = swish(h)\n h = self.conv2(h)\n\n # Map and add residual\n return self.nin_shortcut(x) + h"
},
{
"identifier": "zero_module",
"path": "src/utils.py",
"snippet": "def zero_module(module: nn.Module) -> nn.Module:\n for p in module.parameters():\n p.detach().zero_()\n return module"
}
] | from typing import List
from torch import nn
from ..downsample import DownSample
from ..resnet_block import ResnetBlock
from ..utils import zero_module
import torch | 1,084 |
class ConditionalEncoder(nn.Module):
def __init__(
self,
*,
channels: int,
channel_multipliers: List[int],
n_resnet_blocks: int,
in_channels: int,
) -> None:
super().__init__()
# Number of blocks of different resolutions.
# The resolution is halved at the end each top level block
n_resolutions = len(channel_multipliers)
# Initial $3 \times 3$ convolution layer that maps the image to `channels`
self.conv_in = nn.Conv2d(in_channels, channels, 3, stride=1, padding=1)
# Number of channels in each top level block
channels_list = [m * channels for m in [1] + channel_multipliers]
# List of top-level blocks
self.down = nn.ModuleList()
self.proj = nn.ModuleList()
# Create top-level blocks
for i in range(n_resolutions):
# Each top level block consists of multiple ResNet Blocks and down-sampling
resnet_blocks = nn.ModuleList()
# Add ResNet Blocks
for _ in range(n_resnet_blocks):
resnet_blocks.append(ResnetBlock(channels, channels_list[i + 1]))
channels = channels_list[i + 1]
# Top-level block
down = nn.Module()
down.block = resnet_blocks
# Down-sampling at the end of each top level block except the last
if i != n_resolutions - 1:
down.downsample = DownSample(channels)
else:
down.downsample = nn.Identity()
self.down.append(down)
# Projection
proj = nn.Conv2d(channels, channels, 1, 1, 0)
|
class ConditionalEncoder(nn.Module):
def __init__(
self,
*,
channels: int,
channel_multipliers: List[int],
n_resnet_blocks: int,
in_channels: int,
) -> None:
super().__init__()
# Number of blocks of different resolutions.
# The resolution is halved at the end each top level block
n_resolutions = len(channel_multipliers)
# Initial $3 \times 3$ convolution layer that maps the image to `channels`
self.conv_in = nn.Conv2d(in_channels, channels, 3, stride=1, padding=1)
# Number of channels in each top level block
channels_list = [m * channels for m in [1] + channel_multipliers]
# List of top-level blocks
self.down = nn.ModuleList()
self.proj = nn.ModuleList()
# Create top-level blocks
for i in range(n_resolutions):
# Each top level block consists of multiple ResNet Blocks and down-sampling
resnet_blocks = nn.ModuleList()
# Add ResNet Blocks
for _ in range(n_resnet_blocks):
resnet_blocks.append(ResnetBlock(channels, channels_list[i + 1]))
channels = channels_list[i + 1]
# Top-level block
down = nn.Module()
down.block = resnet_blocks
# Down-sampling at the end of each top level block except the last
if i != n_resolutions - 1:
down.downsample = DownSample(channels)
else:
down.downsample = nn.Identity()
self.down.append(down)
# Projection
proj = nn.Conv2d(channels, channels, 1, 1, 0) | proj = zero_module(proj) | 2 | 2023-11-03 09:35:30+00:00 | 2k |
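
The gold line passes the freshly built 1×1 projection through `zero_module` from the context, so each conditional branch initially contributes nothing to the decoder it feeds. A small runnable check of that effect, assuming `zero_module` from src/utils.py is importable:

import torch
from torch import nn

proj = nn.Conv2d(64, 64, 1, 1, 0)
proj = zero_module(proj)              # gold completion: zero the weights and bias in place
x = torch.randn(1, 64, 8, 8)
assert torch.all(proj(x) == 0)        # a zero-initialized projection outputs all zeros
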
drakoRRR/chatSynthia | users/views.py | [
{
"identifier": "ProfileForm",
"path": "users/forms.py",
"snippet": "class ProfileForm(UserChangeForm):\n first_name = forms.CharField(widget=forms.TextInput(attrs={\n 'class': 'form-control py-4'\n }))\n last_name = forms.CharField(widget=forms.TextInput(attrs={\n 'class': 'form-control py-4'\n }))\n image = forms.ImageField(widget=forms.FileInput(attrs={\n 'class': 'custom-file-input',\n }), required=False)\n username = forms.CharField(widget=forms.TextInput(attrs={\n 'class': 'form-control py-4',\n 'readonly': True\n }))\n email = forms.CharField(widget=forms.TextInput(attrs={\n 'class': 'form-control py-4',\n 'readonly': True\n }))\n\n class Meta:\n model = User\n fields = ('first_name', 'last_name', 'image', 'username', 'email')"
},
{
"identifier": "UserLoginForm",
"path": "users/forms.py",
"snippet": "class UserLoginForm(AuthenticationForm):\n username = forms.CharField(widget=forms.TextInput(attrs={\n 'class': 'form-control',\n 'placeholder': 'Enter your username',\n }))\n password = forms.CharField(widget=forms.PasswordInput(attrs={\n 'class': 'form-control',\n 'placeholder': 'Enter your password',\n }))\n\n class Meta:\n model = User\n fields = ('username', 'password')"
},
{
"identifier": "UserRegisterForm",
"path": "users/forms.py",
"snippet": "class UserRegisterForm(UserCreationForm):\n first_name = forms.CharField(widget=forms.TextInput(attrs={\n 'class': 'form-control',\n 'placeholder': 'Enter your first name',\n }))\n last_name = forms.CharField(widget=forms.TextInput(attrs={\n 'class': 'form-control',\n 'placeholder': 'Enter your last name',\n }))\n username = forms.CharField(widget=forms.TextInput(attrs={\n 'class': 'form-control',\n 'placeholder': 'Enter username',\n }))\n email = forms.CharField(widget=forms.EmailInput(attrs={\n 'class': 'form-control',\n 'placeholder': 'Enter your email',\n }))\n password1 = forms.CharField(widget=forms.PasswordInput(attrs={\n 'class': 'form-control',\n 'placeholder': 'Enter your password',\n }))\n password2 = forms.CharField(widget=forms.PasswordInput(attrs={\n 'class': 'form-control',\n 'placeholder': 'Enter your password confirmation',\n }))\n\n class Meta:\n model = User\n fields = ('first_name', 'last_name', 'username', 'email', 'password1', 'password2')"
},
{
"identifier": "User",
"path": "users/models.py",
"snippet": "class User(AbstractUser):\n '''User model'''\n\n image = models.ImageField(upload_to='users_images', null=True, blank=True)\n is_verified_email = models.BooleanField(default=False)"
}
] | from django.contrib import auth, messages
from django.contrib.auth.views import LoginView
from django.urls import reverse_lazy
from django.views.generic.edit import CreateView, UpdateView
from users.forms import ProfileForm, UserLoginForm, UserRegisterForm
from users.models import User | 864 |
# Create your views here.
class LoginUserView(LoginView):
template_name = 'users/login.html'
form_class = UserLoginForm
def form_invalid(self, form):
messages.error(self.request, 'There was an error with username or password, check again !')
return super().form_invalid(form)
class RegistrationView(CreateView):
model = User
form_class = UserRegisterForm
template_name = 'users/register.html'
success_url = reverse_lazy('')
def form_valid(self, form):
response = super().form_valid(form)
auth.login(self.request, self.object, backend='django.contrib.auth.backends.ModelBackend')
return response
def form_invalid(self, form):
messages.error(self.request, 'There was an error with username or password, check again !')
return super().form_invalid(form)
class ProfileView(UpdateView):
model = User
|
# Create your views here.
class LoginUserView(LoginView):
template_name = 'users/login.html'
form_class = UserLoginForm
def form_invalid(self, form):
messages.error(self.request, 'There was an error with username or password, check again !')
return super().form_invalid(form)
class RegistrationView(CreateView):
model = User
form_class = UserRegisterForm
template_name = 'users/register.html'
success_url = reverse_lazy('')
def form_valid(self, form):
response = super().form_valid(form)
auth.login(self.request, self.object, backend='django.contrib.auth.backends.ModelBackend')
return response
def form_invalid(self, form):
messages.error(self.request, 'There was an error with username or password, check again !')
return super().form_invalid(form)
class ProfileView(UpdateView):
model = User | form_class = ProfileForm | 0 | 2023-11-08 12:21:53+00:00 | 2k |
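
The gold line hands the `ProfileForm` from the context to the `UpdateView`, which then renders and validates the profile form for the user being edited. A minimal sketch of how the view usually rounds out; the template and URL names are assumptions, not taken from this excerpt:

class ProfileView(UpdateView):
    model = User
    form_class = ProfileForm                       # gold completion
    template_name = 'users/profile.html'           # assumed template path
    success_url = reverse_lazy('users:profile')    # assumed URL name
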
TencentBlueKing/bkflow-feel | bkflow_feel/parsers.py | [
{
"identifier": "RangeGroupData",
"path": "bkflow_feel/data_models.py",
"snippet": "class RangeGroupData(BaseModel):\n left_val: Any\n right_val: Any\n left_operator: RangeGroupOperator\n right_operator: RangeGroupOperator"
},
{
"identifier": "RangeGroupOperator",
"path": "bkflow_feel/data_models.py",
"snippet": "class RangeGroupOperator(enum.Enum):\n GT = \"greater than\"\n GTE = \"greater than or equal\"\n LT = \"less than\"\n LTE = \"less than or equal\""
},
{
"identifier": "FEELFunctionsManager",
"path": "bkflow_feel/utils.py",
"snippet": "class FEELFunctionsManager:\n __hub = {}\n\n @classmethod\n def register_invocation_cls(cls, invocation_cls):\n func_name = invocation_cls.Meta.func_name\n existed_invocation_cls = cls.__hub.get(func_name)\n if existed_invocation_cls:\n raise RuntimeError(\n \"func register error, {}'s func_name {} conflict with {}\".format(\n existed_invocation_cls, func_name, invocation_cls\n )\n )\n\n cls.__hub[func_name] = invocation_cls\n\n @classmethod\n def register_funcs(cls, func_dict):\n for func_name, func_path in func_dict.items():\n if not isinstance(func_name, str):\n raise ValueError(f\"func_name {func_name} should be string\")\n if func_name in cls.__hub:\n raise ValueError(\n \"func register error, {}'s func_name {} conflict with {}\".format(\n func_path, func_name, cls.__hub[func_name]\n )\n )\n cls.__hub[func_name] = func_path\n\n @classmethod\n def clear(cls):\n cls.__hub = {}\n\n @classmethod\n def all_funcs(cls):\n funcs = {}\n for version, invocation_cls in cls.__hub.items():\n funcs[version] = invocation_cls\n return funcs\n\n @classmethod\n def get_func(cls, func_name) -> Callable:\n func_obj = cls.__hub.get(func_name)\n if not func_obj:\n raise ValueError(\"func object {} not found\".format(func_name))\n\n if isinstance(func_obj, FEELInvocationMeta):\n return func_obj()\n else:\n module_path, func_name = str(func_obj).rsplit(\".\", 1)\n module = importlib.import_module(module_path)\n func = getattr(module, func_name)\n return func\n\n @classmethod\n def func_call(cls, func_name, *args, **kwargs):\n func = cls.get_func(func_name)\n return func(*args, **kwargs)"
},
{
"identifier": "BinaryOperationValidator",
"path": "bkflow_feel/validators.py",
"snippet": "class BinaryOperationValidator(Validator):\n def validate(self, left_item, right_item, instance_type=None, *args, **kwargs) -> ValidationResult:\n if not isinstance(left_item, type(right_item)):\n return ValidationResult(\n False, f\"Type of both operators must be same, get {type(left_item)} and {type(right_item)}\",\n )\n if instance_type is not None and not isinstance(left_item, instance_type):\n return ValidationResult(\n False, f\"Type of both operators must be {instance_type}, get {type(left_item)} and {type(right_item)}\",\n )\n return ValidationResult(True)"
},
{
"identifier": "DummyValidator",
"path": "bkflow_feel/validators.py",
"snippet": "class DummyValidator(Validator):\n def validate(self, *args, **kwargs):\n pass"
},
{
"identifier": "ListsLengthValidator",
"path": "bkflow_feel/validators.py",
"snippet": "class ListsLengthValidator(Validator):\n def validate(self, lists, *args, **kwargs):\n if not lists or all(len(alist) == len(lists[0]) for alist in lists):\n return ValidationResult(True)\n return ValidationResult(False, \"lists length not equal\")"
}
] | import abc
import datetime
import logging
import re
import pytz
from dateutil.parser import parse as date_parse
from .data_models import RangeGroupData, RangeGroupOperator
from .utils import FEELFunctionsManager
from .validators import BinaryOperationValidator, DummyValidator, ListsLengthValidator | 1,209 | # -*- coding: utf-8 -*-
logger = logging.getLogger(__name__)
class Expression(metaclass=abc.ABCMeta):
validator_cls = DummyValidator
@abc.abstractmethod
def evaluate(self, context):
pass
class CommonExpression(Expression):
def __init__(self, value):
self.value = value
def evaluate(self, context):
return self.value
class Expr(CommonExpression):
def evaluate(self, context):
return self.value.evaluate(context)
class Number(CommonExpression):
pass
class String(CommonExpression):
pass
class Boolean(CommonExpression):
pass
class Null(Expression):
def evaluate(self, context):
return None
class List(Expression):
def __init__(self, *items):
self.items = items
def evaluate(self, context):
return [item.evaluate(context) for item in self.items]
class ListItem(Expression):
def __init__(self, list_expr, index):
self.list_expr = list_expr
self.index = index
def evaluate(self, context):
items = self.list_expr.evaluate(context)
if not isinstance(items, list) or self.index == 0 or len(items) < abs(self.index):
return None
items = items[self.index - 1] if self.index > 0 else items[self.index]
return items
class ListMatch(Expression):
| # -*- coding: utf-8 -*-
logger = logging.getLogger(__name__)
class Expression(metaclass=abc.ABCMeta):
validator_cls = DummyValidator
@abc.abstractmethod
def evaluate(self, context):
pass
class CommonExpression(Expression):
def __init__(self, value):
self.value = value
def evaluate(self, context):
return self.value
class Expr(CommonExpression):
def evaluate(self, context):
return self.value.evaluate(context)
class Number(CommonExpression):
pass
class String(CommonExpression):
pass
class Boolean(CommonExpression):
pass
class Null(Expression):
def evaluate(self, context):
return None
class List(Expression):
def __init__(self, *items):
self.items = items
def evaluate(self, context):
return [item.evaluate(context) for item in self.items]
class ListItem(Expression):
def __init__(self, list_expr, index):
self.list_expr = list_expr
self.index = index
def evaluate(self, context):
items = self.list_expr.evaluate(context)
if not isinstance(items, list) or self.index == 0 or len(items) < abs(self.index):
return None
items = items[self.index - 1] if self.index > 0 else items[self.index]
return items
class ListMatch(Expression): | validator_cls = ListsLengthValidator | 5 | 2023-11-09 13:47:26+00:00 | 2k |
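
The gold line sets `validator_cls` on `ListMatch` to the `ListsLengthValidator` from the context, so list-matching expressions are guarded by an equal-length check before they evaluate. A hedged sketch of how that hook could be exercised; the constructor and the body of `evaluate` are assumptions for illustration, not the repo's code:

class ListMatch(Expression):
    validator_cls = ListsLengthValidator           # gold completion

    def __init__(self, *list_exprs):               # assumed constructor
        self.list_exprs = list_exprs

    def evaluate(self, context):
        values = [expr.evaluate(context) for expr in self.list_exprs]
        self.validator_cls().validate(values)      # equal-length guard from the context snippet
        # ... the actual matching logic is omitted here
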
namedgraph/oxijen | oxijen/model_impl/impl.py | [
{
"identifier": "Resource",
"path": "oxijen/rdf_model.py",
"snippet": "class Resource(ABC):\n\n @property\n def node(self):\n return self._node\n\n @property\n def graph(self):\n return self._graph\n\n @property\n def is_anon(self):\n if isinstance(self.node, NamedNode):\n return False\n else:\n return True\n \n @property\n def uri(self):\n if isinstance(self.node, NamedNode):\n return self.node.value\n else:\n return None\n\n @property\n def id(self):\n if isinstance(self.node, BlankNode):\n return self.node.value\n else:\n return None\n \n @abstractmethod\n def add_property(self, property: 'Property', value: Union['Resource', Literal]) -> 'Resource':\n pass\n\n @abstractmethod\n def remove_all(self, property: 'Property') -> 'Resource':\n pass\n\n @abstractmethod\n def list_properties(self, property: 'Property') -> Iterator[Triple]:\n pass"
},
{
"identifier": "Property",
"path": "oxijen/rdf_model.py",
"snippet": "class Property(Resource):\n\n pass"
},
{
"identifier": "Graph",
"path": "oxijen/rdf_model.py",
"snippet": "class Graph(ABC):\n\n @abstractmethod\n def __len__(self) -> int:\n pass\n\n @abstractmethod\n def create_resource(self, uri: Optional[str] = None) -> Resource:\n pass\n\n @abstractmethod\n def create_property(self, uri: str) -> Property:\n pass\n\n @abstractmethod\n def create_literal(self, value: str, language: Optional[str] = None) -> Literal:\n pass\n\n @abstractmethod\n def create_typed_literal(self, value: Any, datatype: Optional[Union[str, NamedNode]] = None) -> Literal:\n pass\n\n @abstractmethod\n def list_subjects(self) -> Iterator[Resource]:\n pass\n\n @abstractmethod\n def list_triples(self) -> Iterator[Triple]:\n pass\n\n @abstractmethod\n def add(self, triples: Union[Iterator[Triple], 'Graph']) -> 'Graph':\n pass\n\n @abstractmethod\n def remove_all(self ) -> 'Graph':\n pass"
},
{
"identifier": "Dataset",
"path": "oxijen/rdf_model.py",
"snippet": "class Dataset(ABC):\n\n @property\n def default_graph(self):\n pass\n \n @abstractmethod\n def graph_names(self) -> Iterator[Resource]:\n pass\n\n @abstractmethod\n def contains_named_graph(self, name: Union[str, Resource]) -> bool:\n pass\n\n @abstractmethod\n def get_named_graph(self, name: Union[str, Resource]) -> Graph:\n pass\n\n @abstractmethod\n def add_named_graph(self, name: Union[str, Resource], graph: Graph) -> 'Dataset':\n pass\n\n @abstractmethod\n def remove_named_graph(self, name: Union[str, Resource], graph: Graph) -> 'Dataset':\n pass\n\n @abstractmethod\n def list_quads(self) -> Iterator[Quad]:\n pass"
},
{
"identifier": "XSD",
"path": "oxijen/model_impl/xsd.py",
"snippet": "class XSD(Enum):\n\n NS : str = \"http://www.w3.org/2001/XMLSchema#\"\n\n INTEGER = NS + \"integer\"\n STRING = NS + \"string\"\n FLOAT = NS + \"float\""
}
] | from oxijen.rdf_model import Resource, Property, Graph, Dataset
from oxijen.model_impl.xsd import XSD
from pyoxigraph import Store, Triple, BlankNode, NamedNode, Literal, Quad, DefaultGraph
from typing import Iterator, Union, Optional, Any | 1,453 |
class ResourceImpl(Resource):
def __init__(self, node: Union[BlankNode, NamedNode], graph: Graph):
self._node = node
self._graph = graph
def __hash__(self):
return hash(self.node.value)
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.node.value == other.node.value
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
def __str__(self) -> str:
return self.node.__str__()
def __repr__(self) -> str:
return self.__str__()
def add_property(self, property: 'Property', value: Union[Resource, Literal]) -> 'Resource':
if isinstance(value, Resource):
value = value.node
self.graph.store.add(Quad(self.node, property.node, value, self.graph.name)) # assumes GraphStoreImpl!
return self
def list_properties(self, property: Optional[Property] = None) -> Iterator[Triple]:
if isinstance(property, Property):
property_node = property.node
else:
property_node = None
quads = self.graph.store.quads_for_pattern(self.node, property_node, None, None)
return map(lambda quad: quad.triple, quads)
def remove_all(self, property: Optional[Property] = None) -> Resource:
if isinstance(property, Property):
property_node = property.node
else:
property_node = None
quad_iter = self.graph.store.quads_for_pattern(self.node, property_node, None, None)
for quad in quad_iter:
self.graph.store.remove(quad)
return self
class PropertyImpl(ResourceImpl, Property):
pass
class GraphImpl(Graph):
def create_resource(self, uri: Optional[str] = None) -> Resource:
if uri is not None:
return ResourceImpl(NamedNode(uri), self)
else:
return ResourceImpl(BlankNode(), self)
def create_property(self, uri: str) -> Property:
return ResourceImpl(NamedNode(uri), self)
def create_literal(self, value: str, language: Optional[str] = None) -> Literal:
return Literal(value, language=language) # should it be xsd:string-typed by default as per RDF 1.1?
def create_typed_literal(self, value: Any, datatype: Optional[Union[str, NamedNode]] = None) -> Literal:
if datatype is None:
match value:
case int():
|
class ResourceImpl(Resource):
def __init__(self, node: Union[BlankNode, NamedNode], graph: Graph):
self._node = node
self._graph = graph
def __hash__(self):
return hash(self.node.value)
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.node.value == other.node.value
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
def __str__(self) -> str:
return self.node.__str__()
def __repr__(self) -> str:
return self.__str__()
def add_property(self, property: 'Property', value: Union[Resource, Literal]) -> 'Resource':
if isinstance(value, Resource):
value = value.node
self.graph.store.add(Quad(self.node, property.node, value, self.graph.name)) # assumes GraphStoreImpl!
return self
def list_properties(self, property: Optional[Property] = None) -> Iterator[Triple]:
if isinstance(property, Property):
property_node = property.node
else:
property_node = None
quads = self.graph.store.quads_for_pattern(self.node, property_node, None, None)
return map(lambda quad: quad.triple, quads)
def remove_all(self, property: Optional[Property] = None) -> Resource:
if isinstance(property, Property):
property_node = property.node
else:
property_node = None
quad_iter = self.graph.store.quads_for_pattern(self.node, property_node, None, None)
for quad in quad_iter:
self.graph.store.remove(quad)
return self
class PropertyImpl(ResourceImpl, Property):
pass
class GraphImpl(Graph):
def create_resource(self, uri: Optional[str] = None) -> Resource:
if uri is not None:
return ResourceImpl(NamedNode(uri), self)
else:
return ResourceImpl(BlankNode(), self)
def create_property(self, uri: str) -> Property:
return ResourceImpl(NamedNode(uri), self)
def create_literal(self, value: str, language: Optional[str] = None) -> Literal:
return Literal(value, language=language) # should it be xsd:string-typed by default as per RDF 1.1?
def create_typed_literal(self, value: Any, datatype: Optional[Union[str, NamedNode]] = None) -> Literal:
if datatype is None:
match value:
case int(): | datatype = NamedNode(XSD.INTEGER.value) | 4 | 2023-11-03 19:50:51+00:00 | 2k |
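
The gold line maps a Python `int` onto the `XSD.INTEGER` IRI from the context when the caller did not supply a datatype. A hedged, free-standing sketch of how `create_typed_literal` plausibly finishes; the float/str arms and the final `Literal` call are inferred from the XSD enum and the pyoxigraph import, not shown in this excerpt:

# Simplified stand-alone version of the method body (inferred, not verbatim):
def create_typed_literal(value, datatype=None):
    if datatype is None:
        match value:
            case int():
                datatype = NamedNode(XSD.INTEGER.value)    # gold completion
            case float():
                datatype = NamedNode(XSD.FLOAT.value)
            case str():
                datatype = NamedNode(XSD.STRING.value)
    if isinstance(datatype, str):
        datatype = NamedNode(datatype)                     # caller passed a plain IRI string
    return Literal(str(value), datatype=datatype)
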
sivasurend/lyzr | build/lib/lyzr/utils/document_reading.py | [
{
"identifier": "LyzrDocxReader",
"path": "lyzr/utils/docx_reader.py",
"snippet": "class LyzrDocxReader(BaseReader):\n def __init__(self) -> None:\n try:\n import docx2txt\n except ImportError:\n raise ImportError(\n \"`docx2txt` package not found, please run `pip install docx2txt`\"\n )\n\n def load_data(self, file_path: str, extra_info: dict = None) -> List[Document]:\n loader = Docx2txtLoader(str(file_path))\n langchain_documents = loader.load()\n\n documents = []\n for langchain_document in langchain_documents:\n doc = Document.from_langchain_format(langchain_document)\n\n if extra_info is not None:\n doc.metadata.update(extra_info)\n\n documents.append(doc)\n\n return documents"
},
{
"identifier": "LyzrPDFReader",
"path": "lyzr/utils/pdf_reader.py",
"snippet": "class LyzrPDFReader(BaseReader):\n def __init__(self) -> None:\n try:\n from pdfminer.high_level import extract_text \n except ImportError:\n raise ImportError(\n \"`pdfminer` package not found, please install it with \"\n \"`pip install pdfminer.six`\"\n )\n\n def load_data(self, file_path: str, extra_info: dict = None) -> List[Document]:\n loader = PDFMinerLoader(str(file_path))\n langchain_documents = loader.load() \n\n documents = []\n for langchain_document in langchain_documents:\n doc = Document.from_langchain_format(langchain_document)\n\n if extra_info is not None:\n doc.metadata.update(extra_info)\n\n documents.append(doc)\n\n return documents"
},
{
"identifier": "LyzrTxtReader",
"path": "lyzr/utils/txt_reader.py",
"snippet": "class LyzrTxtReader(BaseReader):\n def __init__(self) -> None:\n None\n\n def load_data(self, file_path: str, extra_info: dict = None) -> List[Document]:\n loader = TextLoader(str(file_path))\n langchain_documents = loader.load()\n\n documents = []\n for langchain_document in langchain_documents:\n doc = Document.from_langchain_format(langchain_document)\n\n if extra_info is not None:\n doc.metadata.update(extra_info)\n\n documents.append(doc)\n\n return documents"
},
{
"identifier": "LyzrWebPageReader",
"path": "lyzr/utils/webpage_reader.py",
"snippet": "class LyzrWebPageReader:\n \n def __init__(self) -> None:\n pass\n\n @staticmethod\n def load_data(url: str) -> List[Document]:\n if IS_IPYKERNEL:\n warning_msg = \"Running in Google Colab or a Jupyter notebook. Consider using nest_asyncio.apply() to avoid event loop conflicts.\"\n warnings.warn(warning_msg, RuntimeWarning)\n \n html = load_content_using_playwright(url)\n content = scrape(html)\n document = Document(text=content, metadata={\"url\": url})\n return [document]"
},
{
"identifier": "LyzrWebsiteReader",
"path": "lyzr/utils/website_reader.py",
"snippet": "class LyzrWebsiteReader:\n def __init__(self):\n self.visited_links = set()\n\n @staticmethod\n def load_data(url: str) -> List[Document]:\n reqs = requests.get(url)\n soup = BeautifulSoup(reqs.text, \"html.parser\")\n\n all_urls = set()\n for link in soup.find_all(\"a\"):\n href = link.get(\"href\")\n if href is not None:\n all_urls.add(url + href)\n\n logger.info(f\"Total URLs to process: {len(all_urls)}\")\n web_reader = LyzrWebPageReader()\n documents = []\n for u in tqdm(all_urls, desc=\"Processing URLs\"):\n documents.extend(web_reader.load_data(u))\n\n return documents"
},
{
"identifier": "LyzrYoutubeReader",
"path": "lyzr/utils/youtube_reader.py",
"snippet": "class LyzrYoutubeReader(BaseReader):\n def __init__(self) -> None:\n try:\n from youtube_transcript_api import YouTubeTranscriptApi\n except ImportError:\n raise ImportError(\n \"`youtube_transcript_api` package not found, \\\n please run `pip install youtube-transcript-api`\"\n )\n\n def load_data(self, urls: List[str]) -> List[Document]:\n loader = YoutubeTranscriptReader()\n documents = loader.load_data(ytlinks=urls)\n return documents"
}
] | import logging
from typing import List, Sequence, Optional
from llama_index.readers.file.base import SimpleDirectoryReader
from llama_index.schema import Document
from lyzr.utils.docx_reader import LyzrDocxReader
from lyzr.utils.pdf_reader import LyzrPDFReader
from lyzr.utils.txt_reader import LyzrTxtReader
from lyzr.utils.webpage_reader import LyzrWebPageReader
from lyzr.utils.website_reader import LyzrWebsiteReader
from lyzr.utils.youtube_reader import LyzrYoutubeReader | 1,230 |
logger = logging.getLogger(__name__)
def read_pdf_as_documents(
input_dir: Optional[str] = None,
input_files: Optional[List] = None,
exclude_hidden: bool = True,
filename_as_id: bool = True,
recursive: bool = True,
required_exts: Optional[List[str]] = None,
**kwargs,
) -> Sequence[Document]:
|
logger = logging.getLogger(__name__)
def read_pdf_as_documents(
input_dir: Optional[str] = None,
input_files: Optional[List] = None,
exclude_hidden: bool = True,
filename_as_id: bool = True,
recursive: bool = True,
required_exts: Optional[List[str]] = None,
**kwargs,
) -> Sequence[Document]: | file_extractor = {".pdf": LyzrPDFReader()} | 1 | 2023-11-07 14:52:08+00:00 | 2k |
focused-labs/ai-custom-chatbot-data-pipeline | main.py | [
{
"identifier": "import_web_scrape_data",
"path": "import_service.py",
"snippet": "def import_web_scrape_data(urls: list):\n BeautifulSoupWebReader = download_loader(\"BeautifulSoupWebReader\")\n\n loader = BeautifulSoupWebReader()\n documents = loader.load_data(urls=urls)\n\n for document in documents:\n document.text = normalize_text(document.text)\n\n index = VectorStoreIndex.from_documents(documents,\n storage_context=get_pinecone_storage_context(),\n service_context=get_service_context())\n return index"
},
{
"identifier": "import_notion_data",
"path": "import_service.py",
"snippet": "def import_notion_data(page_ids):\n documents = NotionPageReader(integration_token=NOTION_API_KEY).load_data(page_ids=page_ids)\n for document in documents:\n document_metadata = get_notion_metadata(page_id=document.extra_info[\"page_id\"])\n url = document_metadata['page_url']\n title = document_metadata['page_title']\n document.extra_info.update({\"URL\": url, \"title\": title})\n document.metadata = ({\"URL\": url, \"title\": title})\n document.text = normalize_text(document.text)\n\n index = VectorStoreIndex.from_documents(documents,\n storage_context=get_pinecone_storage_context(),\n service_context=get_service_context())\n return index"
},
{
"identifier": "ImportedPages",
"path": "models/imported_pages.py",
"snippet": "class ImportedPages(BaseModel):\n page_ids: list"
},
{
"identifier": "ImportedUrls",
"path": "models/imported_urls.py",
"snippet": "class ImportedUrls(BaseModel):\n page_urls: list"
},
{
"identifier": "Question",
"path": "models/question.py",
"snippet": "class Question(BaseModel):\n text: str"
},
{
"identifier": "QueryService",
"path": "query_service.py",
"snippet": "class QueryService:\n\n def __init__(self):\n self.agent = Agent()\n self.vector_database_query_engine = self._create_vector_database_query_engine()\n\n def _create_query_session(self):\n self.agent = Agent()\n\n def _create_vector_database_query_engine(self):\n pinecone_index = get_pinecone_index()\n return pinecone_index.as_query_engine()\n\n def search_vector_database(self, question: str):\n response = self.vector_database_query_engine.query(question)\n return response.response\n\n def ask_agent(self, question: str):\n try:\n answer = self.agent.query_agent(user_input=question)\n response_formatted = json.loads(answer, strict=False)\n except Exception as e:\n print(f\"An error occurred when querying the agent. Error: {e}\")\n raise e\n\n return {\"response\": response_formatted, }"
}
] | import logging
import os
import sys
import openai
import uvicorn
from contextlib import asynccontextmanager
from dotenv import load_dotenv
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from import_service import import_web_scrape_data, import_notion_data
from models.imported_pages import ImportedPages
from models.imported_urls import ImportedUrls
from models.question import Question
from query_service import QueryService | 689 |
load_dotenv()
openai.api_key = os.getenv('OPENAI_API_KEY')
allowed_origins = [
"http://localhost:3000",
]
|
load_dotenv()
openai.api_key = os.getenv('OPENAI_API_KEY')
allowed_origins = [
"http://localhost:3000",
]
| query_service = QueryService() | 5 | 2023-11-01 20:47:07+00:00 | 2k |
pradyunsg/pip-resolver-benchmarks | src/common/creation.py | [
{
"identifier": "DistributionInfo",
"path": "src/common/model.py",
"snippet": "class DistributionInfo(BaseModel):\n depends_by_extra: dict[str, list[str]]\n requires_python: str | None = None\n\n @field_validator(\"depends_by_extra\", mode=\"after\")\n @classmethod\n def ensure_no_empty_extras(\n cls, v: dict[str | None, list[str]]\n ) -> dict[str | None, list[str]]:\n for depends in v.values():\n for dep in depends:\n assert \";\" not in dep\n return v\n\n def as_METADATA(self, name: str, version: str) -> str:\n parts = [\n \"Metadata-Version: 2.1\",\n f\"Name: {name}\",\n f\"Version: {version}\",\n ]\n for extra, depends in self.depends_by_extra.items():\n if extra == \"\":\n for dep in depends:\n parts.append(f\"Requires-Dist: {dep}\")\n else:\n parts.append(f\"Provides-Extra: {extra}\")\n for dep in depends:\n parts.append(f\"Requires-Dist: {dep} ; extra == '{extra}'\")\n\n if self.requires_python:\n parts.append(f\"Requires-Python: {self.requires_python}\")\n\n return \"\\n\".join(parts)"
},
{
"identifier": "Scenario",
"path": "src/common/model.py",
"snippet": "class Scenario(BaseModel):\n input: ScenarioInput\n packages: dict[PackageName, dict[VersionString, DistributionInfo]]\n\n @field_validator(\"packages\", mode=\"after\")\n @classmethod\n def ensure_unique_versions_when_canonicalized(\n cls, v: dict[PackageName, dict[VersionString, DistributionInfo]]\n ) -> dict[PackageName, dict[VersionString, DistributionInfo]]:\n for name, versions in v.items():\n seen = set()\n for version in versions:\n canonicalized = canonicalize_version(version)\n if canonicalized in seen:\n raise ValueError(\n f\"{name} has multiple versions with same \"\n f\"canonicalized value: {canonicalized}\"\n )\n seen.add(canonicalized)\n return v\n\n def check_for_issues(self) -> list[str]:\n issues = []\n packages_with_no_version = []\n for name, grouped_by_version in self.packages.items():\n if not grouped_by_version:\n packages_with_no_version.append(name)\n\n if packages_with_no_version:\n count = len(packages_with_no_version)\n names = \"\\n \".join(packages_with_no_version)\n issues.append(f\"Found {count} packages with no versions...\\n {names}\")\n return issues"
}
] | import base64
import hashlib
import zipfile
from pathlib import Path
from rich.progress import BarColumn, MofNCompleteColumn, Progress
from .model import DistributionInfo, Scenario | 725 | """Creates the actual wheel files in a directory to pass to the resolver.
"""
from __future__ import annotations
WHEEL = """\
Wheel-Version: 1.0
Generator: pip-resolver-benchmark
Root-Is-Purelib: true
Tag: py2-none-any
Tag: py3-none-any
"""
def _make_wheel(
| """Creates the actual wheel files in a directory to pass to the resolver.
"""
from __future__ import annotations
WHEEL = """\
Wheel-Version: 1.0
Generator: pip-resolver-benchmark
Root-Is-Purelib: true
Tag: py2-none-any
Tag: py3-none-any
"""
def _make_wheel( | name: str, version: str, wheel: DistributionInfo, output_dir: Path | 0 | 2023-11-05 17:59:32+00:00 | 2k |
allmonday/pydantic2-resolve | pydantic2_resolve/resolver.py | [
{
"identifier": "ResolverTargetAttrNotFound",
"path": "pydantic2_resolve/exceptions.py",
"snippet": "class ResolverTargetAttrNotFound(Exception):\n pass"
},
{
"identifier": "LoaderFieldNotProvidedError",
"path": "pydantic2_resolve/exceptions.py",
"snippet": "class LoaderFieldNotProvidedError(Exception):\n pass"
},
{
"identifier": "MissingAnnotationError",
"path": "pydantic2_resolve/exceptions.py",
"snippet": "class MissingAnnotationError(Exception):\n pass"
},
{
"identifier": "core",
"path": "pydantic2_resolve/core.py",
"snippet": "T = TypeVar(\"T\")\ndef _get_class(target):\ndef _get_pydantic_attrs(kls):\ndef _get_dataclass_attrs(kls):\ndef scan_and_store_required_fields(target):\n def walker(kls):\ndef is_acceptable_kls(kls):\ndef is_acceptable_instance(target):\ndef iter_over_object_resolvers_and_acceptable_fields(target, attr_map):\ndef iter_over_object_post_methods(target, attr_map):"
}
] | import asyncio
import contextvars
import inspect
import pydantic2_resolve.constant as const
import pydantic2_resolve.util as util
from collections import defaultdict
from inspect import iscoroutine
from typing import TypeVar, Dict
from .exceptions import ResolverTargetAttrNotFound, LoaderFieldNotProvidedError, MissingAnnotationError
from typing import Any, Callable, Optional
from pydantic2_resolve import core
from aiodataloader import DataLoader
from inspect import isclass
from types import MappingProxyType | 1,477 |
# for dataloader which has class attributes, you can assign the value at here
self.loader_filters = loader_filters or {}
# now you can pass your loader instance, Resolver will check `isinstance``
if loader_instances and self._validate_loader_instance(loader_instances):
self.loader_instances = loader_instances
else:
self.loader_instances = None
self.ensure_type = ensure_type
self.context = MappingProxyType(context) if context else None
self.scan_data = {}
def _add_expose_fields(self, target):
"""
1. check whether expose to descendant existed
2. add fields into contextvars (ancestor_vars_checker)
        2.1 check overwrite by another class (which is forbidden)
2.2 check field exists
"""
dct: Optional[dict] = getattr(target, const.EXPOSE_TO_DESCENDANT, None)
# 1
if dct:
if type(dct) is not dict:
raise AttributeError(f'{const.EXPOSE_TO_DESCENDANT} is not dict')
# 2
for field, alias in dct.items(): # eg: name, bar_name
# 2.1
self.ancestor_vars_checker[alias].add(util.get_kls_full_path(target.__class__))
if len(self.ancestor_vars_checker[alias]) > 1:
conflict_modules = ', '.join(list(self.ancestor_vars_checker[alias]))
raise AttributeError(f'alias name conflicts, please check: {conflict_modules}')
if not self.ancestor_vars.get(alias):
self.ancestor_vars[alias] = contextvars.ContextVar(alias)
try:
val = getattr(target, field)
except AttributeError:
raise AttributeError(f'{field} does not existed')
self.ancestor_vars[alias].set(val)
def _build_ancestor_context(self):
"""get values from contextvars and put into a dict"""
return { k: v.get() for k, v in self.ancestor_vars.items()}
def _validate_loader_instance(self, loader_instances: Dict[Any, Any]):
for cls, loader in loader_instances.items():
if not issubclass(cls, DataLoader):
raise AttributeError(f'{cls.__name__} must be subclass of DataLoader')
if not isinstance(loader, cls):
raise AttributeError(f'{loader.__name__} is not instance of {cls.__name__}')
return True
def _execute_resolver_method(self, method):
"""
        1. inspect method, attach context if declared in method
2. if params includes LoaderDepend, create instance and cache it.
2.1 create from DataLoader class
2.1.1 apply loader_filters into dataloader instance
        2.2 create from batch_load_fn
3. execute method
"""
# >>> 1
signature = inspect.signature(method)
params = {}
if signature.parameters.get('context'):
if self.context is None:
raise AttributeError('Resolver.context is missing')
params['context'] = self.context
if signature.parameters.get('ancestor_context'):
if self.ancestor_vars is None:
raise AttributeError(f'there is not class has {const.EXPOSE_TO_DESCENDANT} configed')
params['ancestor_context'] = self._build_ancestor_context()
# manage the creation of loader instances
for k, v in signature.parameters.items():
# >>> 2
if isinstance(v.default, Depends):
# Base: DataLoader or batch_load_fn
Loader = v.default.dependency
# check loader_instance first, if already defined in Resolver param, just take it.
if self.loader_instances and self.loader_instances.get(Loader):
loader = self.loader_instances.get(Loader)
params[k] = loader
continue
# module.kls to avoid same kls name from different module
cache_key = util.get_kls_full_path(v.default.dependency)
hit = self.loader_instance_cache.get(cache_key)
if hit:
loader = hit
else:
# >>> 2.1
# create loader instance
if isclass(Loader):
# if extra transform provides
loader = Loader()
filter_config = self.loader_filters.get(Loader, {})
for field in util.get_class_field_annotations(Loader):
# >>> 2.1.1
# class ExampleLoader(DataLoader):
# filtar_x: bool <--------------- set this field
try:
value = filter_config[field]
setattr(loader, field, value)
except KeyError:
|
def LoaderDepend( # noqa: N802
dependency: Optional[Callable[..., Any]] = None,
) -> Any:
return Depends(dependency=dependency)
class Depends:
def __init__(
self,
dependency: Optional[Callable[..., Any]] = None,
):
self.dependency = dependency
T = TypeVar("T")
class Resolver:
"""
Entrypoint of a resolve action
"""
def __init__(
self,
loader_filters: Optional[Dict[Any, Dict[str, Any]]] = None,
loader_instances: Optional[Dict[Any, Any]] = None,
ensure_type=False,
context: Optional[Dict[str, Any]] = None
):
self.loader_instance_cache = {}
self.ancestor_vars = {}
self.ancestor_vars_checker = defaultdict(set) # expose_field_name: set(kls fullpath) if len > 1, raise error
# for dataloader which has class attributes, you can assign the value at here
self.loader_filters = loader_filters or {}
# now you can pass your loader instance, Resolver will check `isinstance``
if loader_instances and self._validate_loader_instance(loader_instances):
self.loader_instances = loader_instances
else:
self.loader_instances = None
self.ensure_type = ensure_type
self.context = MappingProxyType(context) if context else None
self.scan_data = {}
def _add_expose_fields(self, target):
"""
1. check whether expose to descendant existed
2. add fields into contextvars (ancestor_vars_checker)
        2.1 check overwrite by another class (which is forbidden)
2.2 check field exists
"""
dct: Optional[dict] = getattr(target, const.EXPOSE_TO_DESCENDANT, None)
# 1
if dct:
if type(dct) is not dict:
raise AttributeError(f'{const.EXPOSE_TO_DESCENDANT} is not dict')
# 2
for field, alias in dct.items(): # eg: name, bar_name
# 2.1
self.ancestor_vars_checker[alias].add(util.get_kls_full_path(target.__class__))
if len(self.ancestor_vars_checker[alias]) > 1:
conflict_modules = ', '.join(list(self.ancestor_vars_checker[alias]))
raise AttributeError(f'alias name conflicts, please check: {conflict_modules}')
if not self.ancestor_vars.get(alias):
self.ancestor_vars[alias] = contextvars.ContextVar(alias)
try:
val = getattr(target, field)
except AttributeError:
raise AttributeError(f'{field} does not existed')
self.ancestor_vars[alias].set(val)
def _build_ancestor_context(self):
"""get values from contextvars and put into a dict"""
return { k: v.get() for k, v in self.ancestor_vars.items()}
def _validate_loader_instance(self, loader_instances: Dict[Any, Any]):
for cls, loader in loader_instances.items():
if not issubclass(cls, DataLoader):
raise AttributeError(f'{cls.__name__} must be subclass of DataLoader')
if not isinstance(loader, cls):
raise AttributeError(f'{loader.__name__} is not instance of {cls.__name__}')
return True
def _execute_resolver_method(self, method):
"""
        1. inspect method, attach context if declared in method
2. if params includes LoaderDepend, create instance and cache it.
2.1 create from DataLoader class
2.1.1 apply loader_filters into dataloader instance
        2.2 create from batch_load_fn
3. execute method
"""
# >>> 1
signature = inspect.signature(method)
params = {}
if signature.parameters.get('context'):
if self.context is None:
raise AttributeError('Resolver.context is missing')
params['context'] = self.context
if signature.parameters.get('ancestor_context'):
if self.ancestor_vars is None:
raise AttributeError(f'there is not class has {const.EXPOSE_TO_DESCENDANT} configed')
params['ancestor_context'] = self._build_ancestor_context()
# manage the creation of loader instances
for k, v in signature.parameters.items():
# >>> 2
if isinstance(v.default, Depends):
# Base: DataLoader or batch_load_fn
Loader = v.default.dependency
# check loader_instance first, if already defined in Resolver param, just take it.
if self.loader_instances and self.loader_instances.get(Loader):
loader = self.loader_instances.get(Loader)
params[k] = loader
continue
# module.kls to avoid same kls name from different module
cache_key = util.get_kls_full_path(v.default.dependency)
hit = self.loader_instance_cache.get(cache_key)
if hit:
loader = hit
else:
# >>> 2.1
# create loader instance
if isclass(Loader):
# if extra transform provides
loader = Loader()
filter_config = self.loader_filters.get(Loader, {})
for field in util.get_class_field_annotations(Loader):
# >>> 2.1.1
# class ExampleLoader(DataLoader):
# filtar_x: bool <--------------- set this field
try:
value = filter_config[field]
setattr(loader, field, value)
except KeyError: | raise LoaderFieldNotProvidedError(f'{cache_key}.{field} not found in Resolver()') | 1 | 2023-11-01 02:37:26+00:00 | 2k |
StoneMoe/ASub | app/ui/windows/subtitle_window.py | [
{
"identifier": "SRTFile",
"path": "app/core/models/srt.py",
"snippet": "class SRTFile:\r\n filepath: str\r\n entries: List[SRTEntry]\r\n\r\n def __init__(self, source: str | list):\r\n self.filepath = ''\r\n self.entries = []\r\n\r\n match source:\r\n case str():\r\n self.load_from_file(filepath=source)\r\n case list():\r\n self.load_from_segments(segments=source)\r\n case _:\r\n pprint(source)\r\n raise NotImplementedError\r\n\r\n def load_from_segments(self, segments: List[dict]):\r\n for segment in segments:\r\n start_time = format_srt_time(timedelta(seconds=segment['start']))\r\n end_time = format_srt_time(timedelta(seconds=segment['end']))\r\n text: str = segment['text']\r\n segment_id = segment['id'] + 1\r\n srt_item = f\"{segment_id}\\n\" \\\r\n f\"{start_time} --> {end_time}\\n\" \\\r\n f\"{text.lstrip()}\"\r\n self.entries.append(SRTEntry.load(srt_item))\r\n\r\n def load_from_file(self, filepath: str):\r\n with open(filepath, encoding='utf8') as f:\r\n file_content = f.read()\r\n self.filepath = filepath\r\n self.entries = [SRTEntry.load(item) for item in file_content.split('\\n\\n') if item]\r\n info(f'自 {filepath} 载入了 {len(self.entries)} 个条目')\r\n\r\n def dump(self, filepath: str = None):\r\n if filepath:\r\n self.filepath = filepath\r\n if not self.filepath:\r\n raise ValueError('未设置 SRT 文件的写入路径')\r\n with open(self.filepath, 'w', encoding='utf-8') as f:\r\n for entry in self.entries:\r\n f.write(entry.dumps() + '\\n\\n')\r\n info(f'SRT 文件已保存至 {self.filepath}')\r\n\r\n def translate(self, vocab=None):\r\n \"\"\"translate and write to new file in realtime\"\"\"\r\n target_file = f\"{self.filepath}.translated.{vocab or '_'}.srt\"\r\n if os.path.isfile(target_file):\r\n info(f'文件 \"{target_file}\" 已存在,跳过翻译')\r\n return\r\n if vocab:\r\n info(f'正在使用术语表 {vocab}')\r\n\r\n source_text = '\\n'.join([item.text for item in self.entries])\r\n translated_text = youdao_translate(source_text, vocab_id=vocab)\r\n lines = translated_text.split('\\n')\r\n if len(self.entries) != len(lines):\r\n info(f'原 {len(self.entries)} 条,翻译后 {len(lines)} 条。无法应用翻译结果')\r\n return\r\n\r\n with open(target_file, mode='w+', encoding='utf8') as f:\r\n for i, line in enumerate(lines):\r\n f.write(self.entries[i].dumps())\r\n f.write('\\n')\r\n f.write(line)\r\n f.write('\\n\\n')\r"
},
{
"identifier": "CONTAINER_MARGINS",
"path": "app/ui/const.py",
"snippet": "CONTAINER_MARGINS = (32, 64, 32, 32)\r"
},
{
"identifier": "res_dir",
"path": "app/core/utils/env.py",
"snippet": "def res_dir(relative_path):\r\n \"\"\"Get application resource file\"\"\"\r\n try:\r\n # noinspection PyUnresolvedReferences,PyProtectedMember\r\n base_path = sys._MEIPASS # PyInstaller one file mode\r\n except AttributeError:\r\n base_path = get_exec_dir()\r\n\r\n return os.path.join(base_path, relative_path)\r"
}
] | from PyQt5.QtWidgets import QVBoxLayout, QPushButton, QTableWidgetItem, QDialog
from qfluentwidgets import TableWidget, isDarkTheme
from qframelesswindow import FramelessWindow
from app.core.models.srt import SRTFile
from app.ui.const import CONTAINER_MARGINS
from app.core.utils.env import res_dir | 1,290 |
class SubtitleWindow(QDialog, FramelessWindow):
def __init__(self, filepath: str, parent=None):
super().__init__(parent)
self.srt_file = SRTFile(filepath)
self.hBoxLayout = QVBoxLayout(self)
self.tableView = TableWidget(self)
self.saveButton = QPushButton("Save", self)
self.saveButton.clicked.connect(self._save_subtitle_file)
self.hBoxLayout.setContentsMargins(*CONTAINER_MARGINS)
self.hBoxLayout.addWidget(self.tableView)
self.hBoxLayout.addWidget(self.saveButton)
self.init_window()
self._load_subtitle_file()
def _load_subtitle_file(self):
self.tableView.setWordWrap(False)
self.tableView.setRowCount(len(self.srt_file.entries))
self.tableView.setColumnCount(3)
for i, entry in enumerate(self.srt_file.entries):
self.tableView.setItem(i, 0, QTableWidgetItem(entry.index))
self.tableView.setItem(i, 1, QTableWidgetItem(entry.time))
self.tableView.setItem(i, 2, QTableWidgetItem(entry.text))
self.tableView.verticalHeader().hide()
self.tableView.setHorizontalHeaderLabels(['Index', 'Time', 'Text'])
self.tableView.resizeColumnsToContents()
def _save_subtitle_file(self):
for i in range(self.tableView.rowCount()):
self.srt_file.entries[i].index = self.tableView.item(i, 0).text()
self.srt_file.entries[i].time = self.tableView.item(i, 1).text()
self.srt_file.entries[i].text = self.tableView.item(i, 2).text()
self.srt_file.dump()
def init_window(self):
self.setWindowTitle(f'编辑 {self.srt_file.filepath}')
self.resize(625, 700)
self._set_qss()
def _set_qss(self):
color = 'dark' if isDarkTheme() else 'light'
|
class SubtitleWindow(QDialog, FramelessWindow):
def __init__(self, filepath: str, parent=None):
super().__init__(parent)
self.srt_file = SRTFile(filepath)
self.hBoxLayout = QVBoxLayout(self)
self.tableView = TableWidget(self)
self.saveButton = QPushButton("Save", self)
self.saveButton.clicked.connect(self._save_subtitle_file)
self.hBoxLayout.setContentsMargins(*CONTAINER_MARGINS)
self.hBoxLayout.addWidget(self.tableView)
self.hBoxLayout.addWidget(self.saveButton)
self.init_window()
self._load_subtitle_file()
def _load_subtitle_file(self):
self.tableView.setWordWrap(False)
self.tableView.setRowCount(len(self.srt_file.entries))
self.tableView.setColumnCount(3)
for i, entry in enumerate(self.srt_file.entries):
self.tableView.setItem(i, 0, QTableWidgetItem(entry.index))
self.tableView.setItem(i, 1, QTableWidgetItem(entry.time))
self.tableView.setItem(i, 2, QTableWidgetItem(entry.text))
self.tableView.verticalHeader().hide()
self.tableView.setHorizontalHeaderLabels(['Index', 'Time', 'Text'])
self.tableView.resizeColumnsToContents()
def _save_subtitle_file(self):
for i in range(self.tableView.rowCount()):
self.srt_file.entries[i].index = self.tableView.item(i, 0).text()
self.srt_file.entries[i].time = self.tableView.item(i, 1).text()
self.srt_file.entries[i].text = self.tableView.item(i, 2).text()
self.srt_file.dump()
def init_window(self):
self.setWindowTitle(f'编辑 {self.srt_file.filepath}')
self.resize(625, 700)
self._set_qss()
def _set_qss(self):
color = 'dark' if isDarkTheme() else 'light' | with open(res_dir(f'app/ui/resource/qss/{color}/style.qss'), encoding='utf-8') as f: | 2 | 2023-11-07 16:45:43+00:00 | 2k |
openshift/lightspeed-service | tests/unit/docs/test_doc_summarizer.py | [
{
"identifier": "DocsSummarizer",
"path": "ols/src/docs/docs_summarizer.py",
"snippet": "class DocsSummarizer:\n \"\"\"A class for summarizing documentation context.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize the DocsSummarizer.\"\"\"\n self.logger = Logger(\"docs_summarizer\").logger\n\n def summarize(self, conversation, query, **kwargs) -> tuple[str, str]:\n \"\"\"Summarize the given query based on the provided conversation context.\n\n Args:\n conversation: The unique identifier for the conversation.\n query: The query to be summarized.\n kwargs: Additional keyword arguments for customization (model, verbose, etc.).\n\n Returns:\n A tuple containing the summary as a string and referenced documents as a string.\n \"\"\"\n provider = config.ols_config.summarizer_provider\n model = config.ols_config.summarizer_model\n bare_llm = LLMLoader(provider, model).llm\n\n verbose = kwargs.get(\"verbose\", \"\").lower() == \"true\"\n\n # Set up llama index to show prompting if verbose is True\n # TODO: remove this, we can't be setting global handlers, it will\n # affect other calls\n if verbose:\n llama_index.set_global_handler(\"simple\")\n\n settings_string = f\"conversation: {conversation}, query: {query}, provider: {provider}, model: {model}, verbose: {verbose}\"\n self.logger.info(f\"{conversation} call settings: {settings_string}\")\n\n summarization_template = PromptTemplate(constants.SUMMARIZATION_TEMPLATE)\n\n self.logger.info(f\"{conversation} Getting service context\")\n self.logger.info(f\"{conversation} using model: {model}\")\n\n embed_model = \"local:BAAI/bge-base-en\"\n # TODO get this from global config instead of env\n # Not a priority because embedding model probably won't be configurable in the final product\n tei_embedding_url = os.getenv(\"TEI_SERVER_URL\", None)\n if tei_embedding_url:\n self.logger.info(f\"{conversation} using TEI embedding server\")\n\n embed_model = TextEmbeddingsInference(\n model_name=constants.TEI_EMBEDDING_MODEL,\n base_url=tei_embedding_url,\n )\n\n service_context = ServiceContext.from_defaults(\n chunk_size=1024, llm=bare_llm, embed_model=embed_model, **kwargs\n )\n\n self.logger.info(\n f\"{conversation} using embed model: {service_context.embed_model!s}\"\n )\n\n # TODO get this from global config\n storage_context = StorageContext.from_defaults(\n persist_dir=constants.PRODUCT_DOCS_PERSIST_DIR\n )\n self.logger.info(f\"{conversation} Setting up index\")\n index = load_index_from_storage(\n storage_context=storage_context,\n index_id=constants.PRODUCT_INDEX,\n service_context=service_context,\n verbose=verbose,\n )\n\n self.logger.info(f\"{conversation} Setting up query engine\")\n query_engine = index.as_query_engine(\n text_qa_template=summarization_template,\n verbose=verbose,\n streaming=False,\n similarity_top_k=1,\n )\n\n self.logger.info(f\"{conversation} Submitting summarization query\")\n summary = query_engine.query(query)\n\n referenced_documents = \"\\n\".join(\n [\n source_node.node.metadata[\"file_name\"]\n for source_node in summary.source_nodes\n ]\n )\n\n self.logger.info(f\"{conversation} Summary response: {summary!s}\")\n self.logger.info(f\"{conversation} Referenced documents: {referenced_documents}\")\n\n return str(summary), referenced_documents"
},
{
"identifier": "config",
"path": "ols/utils/config.py",
"snippet": "def load_empty_config() -> None:\ndef load_config_from_env() -> None:"
},
{
"identifier": "mock_llm_loader",
"path": "tests/mock_classes/llm_loader.py",
"snippet": "def mock_llm_loader(llm=None):\n \"\"\"Constructs mock for LLMLoader.\"\"\"\n\n def loader(*args, **kwargs):\n return MockLLMLoader(llm)\n\n return loader"
},
{
"identifier": "MockLlamaIndex",
"path": "tests/mock_classes/mock_llama_index.py",
"snippet": "class MockLlamaIndex:\n \"\"\"Mocked (llama) index.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Constructor accepting all parameters.\"\"\"\n self.args = args\n self.kwargs = kwargs\n\n def as_query_engine(self, **kwargs):\n \"\"\"Returns mocked query engine.\"\"\"\n return MockQueryEngine(kwargs)"
}
] | import os
from unittest.mock import patch
from ols.src.docs.docs_summarizer import DocsSummarizer
from ols.utils import config
from tests.mock_classes.llm_loader import mock_llm_loader
from tests.mock_classes.mock_llama_index import MockLlamaIndex | 1,175 | """Unit tests for DocsSummarizer class."""
@patch("ols.src.docs.docs_summarizer.LLMLoader", new=mock_llm_loader(None))
@patch("ols.src.docs.docs_summarizer.ServiceContext.from_defaults")
@patch("ols.src.docs.docs_summarizer.StorageContext.from_defaults")
| """Unit tests for DocsSummarizer class."""
@patch("ols.src.docs.docs_summarizer.LLMLoader", new=mock_llm_loader(None))
@patch("ols.src.docs.docs_summarizer.ServiceContext.from_defaults")
@patch("ols.src.docs.docs_summarizer.StorageContext.from_defaults") | @patch("ols.src.docs.docs_summarizer.load_index_from_storage", new=MockLlamaIndex) | 3 | 2023-11-08 06:29:41+00:00 | 2k |
xlcaptain/LLM-Workbench | component/knowledge_chat.py | [
{
"identifier": "ElasticsearchServer",
"path": "component/pipelines/es.py",
"snippet": "class ElasticsearchServer:\n def __init__(self):\n self.client = Elasticsearch(\n ES_URL,\n verify_certs=False,\n )\n self.embedding = Embeddings()\n self.es = ElasticsearchStore(\n index_name='audit_index',\n embedding=self.embedding,\n es_connection=self.client,\n )\n\n def create_index(self, index_name: str):\n if not self.client.indices.exists(index=index_name):\n dims = len(self.embedding.embed_query(\"test\"))\n mapping = _default_knn_mapping(dims)\n self.client.indices.create(index=index_name, body={\"mappings\": mapping})\n logger.info(f\"Successfully Created Index: {index_name}!\")\n else:\n logger.info(f\"Index: {index_name} already exists!\")\n\n def doc_upload(self, index_name: str, data_url: str):\n self.create_index(index_name)\n\n docs = []\n for root, dirs, files in os.walk(data_url):\n for file in tqdm(files):\n file_path = os.path.join(root, file)\n res = load_document(file_path)\n if res:\n self.es.add_documents(res)\n logger.info(f\"Successfully inserted document {res[0].metadata}!\")\n logger.info(\"Successfully inserted documents!\")\n\n def doc_search(\n self, method: str, query: str, top_k: int, knn_boost: float, index_name: str\n ) -> List[Dict]:\n result = []\n query_vector = self.embedding.embed_query(query)\n if method == \"knn\":\n query_body = generate_knn_query(vec=query_vector, size=top_k)\n elif method == \"hybrid\":\n query_body = generate_hybrid_query(text=query, vec=query_vector, size=top_k, knn_boost=knn_boost)\n else:\n query_body = generate_search_query(vec=query_vector, size=top_k)\n\n response = self.client.search(index=index_name, body=query_body)\n hits = [hit for hit in response[\"hits\"][\"hits\"]]\n for i in hits:\n result.append(\n {\n \"content\": i[\"_source\"][\"text\"],\n 'source': i[\"_source\"][\"metadata\"][\"source\"],\n 'score': i[\"_score\"]\n }\n )\n return result\n\n def delete(self, index_name):\n if self.client.indices.exists(index=index_name):\n self.client.indices.delete(index=index_name)\n logger.info(f\"Successfully Deleted Index: {index_name}!\")"
},
{
"identifier": "handle_response",
"path": "component/pipelines/utils.py",
"snippet": "def handle_response(messages, temperature, history_len, message_placeholder):\n full_response = \"\"\n openai.api_key = 'xxxx'\n openai.api_base = BAICHUAN_URL\n for response in openai.ChatCompletion.create(\n model=\"baichuan\",\n messages=messages[-history_len * 2 - 1:],\n temperature=temperature,\n stream=True,\n ):\n full_response += response.choices[0].delta.get(\"content\", \"\")\n message_placeholder.markdown(full_response + \"▌\")\n message_placeholder.markdown(full_response)\n return full_response"
},
{
"identifier": "create_message",
"path": "component/pipelines/utils.py",
"snippet": "def create_message(role, content, reference=None):\n message = {\"role\": role, \"content\": content}\n if reference is not None:\n message[\"reference\"] = reference\n return message"
},
{
"identifier": "KNOWLEDGE_PROMPT",
"path": "component/pipelines/prompt.py",
"snippet": "KNOWLEDGE_PROMPT = \"\"\"\n你是由南京审计大学智能审计团队研发的‘审元’大模型,目前还在不断完善中。\n如果不是询问身份信息就正常根据下面指令回答。\n<指令>请仔细阅读以下已知信息,并根据已知内容以专业的方式回答提出的问题。并且满足以下要求:\n1.你的任务是从已知信息中找到问题的答案,而不是生成新的信息。\n2.回答应符合逻辑,且答案内不能出现大量重复内容。\n2.如果已知信息中明确包含问题对应的答案,请直接提供,并且参考第二条。如果已知信息中没有答案,或者答案不明确,请回答“无法根据已知信息回答该问题”。\n3.请避免在答案中添加任何编造的信息。所有回答请使用中文。\n</指令>\n<已知信息>{context}</已知信息>\n<问题>请回答:{query}</问题>\n\"\"\""
},
{
"identifier": "CHAT_EXAMPLES",
"path": "component/pipelines/prompt.py",
"snippet": "CHAT_EXAMPLES = [\"公共工程项目跟踪审计概念是什么?\",\n \"王天朝收受了哪些贿赂?\",\n \"如何认定本罪的标准?\"]"
}
] | import time
import os
import streamlit as st
import pandas as pd
from .pipelines.es import ElasticsearchServer
from .pipelines.utils import handle_response, create_message
from .pipelines.prompt import KNOWLEDGE_PROMPT, CHAT_EXAMPLES | 1,286 |
BAICHUAN_URL = os.getenv("BAICHUAN_URL")
def handle_kb_qa(prompt, top_k, threshold):
index_name = 'audit_index'
|
BAICHUAN_URL = os.getenv("BAICHUAN_URL")
def handle_kb_qa(prompt, top_k, threshold):
index_name = 'audit_index' | es_server = ElasticsearchServer() | 0 | 2023-11-01 07:54:03+00:00 | 2k |
NicolasZucchet/Online-learning-LR-dependencies | online_lru/rec.py | [
{
"identifier": "matrix_init",
"path": "online_lru/rec_init.py",
"snippet": "def matrix_init(key, shape, dtype=jnp.float32, normalization=1):\n return random.normal(key=key, shape=shape, dtype=dtype) / normalization"
},
{
"identifier": "truncated_normal_matrix_init",
"path": "online_lru/rec_init.py",
"snippet": "def truncated_normal_matrix_init(key, shape, dtype=jnp.float_, normalization=1):\n return random.truncated_normal(key, -2.0, 2.0, shape, dtype) / normalization"
},
{
"identifier": "theta_init",
"path": "online_lru/rec_init.py",
"snippet": "def theta_init(key, shape, max_phase, dtype=jnp.float32, log=True):\n u = random.uniform(key, shape=shape, dtype=dtype)\n theta = max_phase * u\n if log:\n theta = jnp.log(theta)\n return theta"
},
{
"identifier": "nu_init",
"path": "online_lru/rec_init.py",
"snippet": "def nu_init(key, shape, r_min, r_max, dtype=jnp.float32, log=True):\n u = random.uniform(key=key, shape=shape, dtype=dtype)\n nu = -0.5 * jnp.log(u * (r_max**2 - r_min**2) + r_min**2)\n if log:\n nu = jnp.log(nu)\n return nu"
},
{
"identifier": "gamma_log_init",
"path": "online_lru/rec_init.py",
"snippet": "def gamma_log_init(key, lamb, log=True):\n nu, theta = lamb\n if log:\n nu = jnp.exp(nu)\n theta = jnp.exp(theta)\n diag_lambda = jnp.exp(-nu + 1j * theta)\n return jnp.log(jnp.sqrt(1 - jnp.abs(diag_lambda) ** 2))"
}
] | from functools import partial
from flax import linen as nn
from .rec_init import matrix_init, truncated_normal_matrix_init, theta_init, nu_init, gamma_log_init
from flax.core.frozen_dict import unfreeze
import jax
import jax.numpy as jnp | 1,517 | A_i, b_i = q_i
A_j, b_j = q_j
return A_j * A_i, jax.lax.stop_gradient(A_j * b_i) + b_j
class LRU(nn.Module):
"""
    LRU layer that updates internal eligibility traces to allow online learning.
"""
d_hidden: int # hidden state dimension
d_model: int # input and output dimensions
seq_length: int # time sequence length
gamma_norm: bool = True # use gamma normalization
exp_param: bool = True # exponential parametrization for lambda
r_min: float = 0.0 # smallest eigenvalue norm
r_max: float = 1.0 # largest eigenvalue norm
max_phase: float = 6.28 # max phase eigenvalue
training_mode: str = "bptt" # which learning algorithm that will be used
training: bool = False # TODO remove, for debugging purposes
def get_diag_lambda(self, nu=None, theta=None):
"""
Transform parameters nu and theta into the diagonal of the recurrent
Lambda matrix.
Args:
nu, theta array[N]: when set to their default values, None, the
parameters will take the values of the Module.
NOTE: these arguments are added in order to backpropagate through this
transformation.
"""
if nu is None:
nu = self.nu
if theta is None:
theta = self.theta
if self.exp_param:
theta = jnp.exp(theta)
nu = jnp.exp(nu)
return jnp.exp(-nu + 1j * theta)
def get_diag_gamma(self):
"""
Transform parameters gamma_log into the diagonal terms of the modulation matrix gamma.
"""
if self.gamma_norm:
return jnp.exp(self.gamma_log)
else:
return jnp.ones((self.d_hidden,))
def get_B(self):
"""
Get input to hidden matrix B.
"""
return self.B_re + 1j * self.B_im
def get_B_norm(self):
"""
Get modulated input to hidden matrix gamma B.
"""
return self.get_B() * jnp.expand_dims(self.get_diag_gamma(), axis=-1)
def to_output(self, inputs, hidden_states):
"""
Compute output given inputs and hidden states.
Args:
inputs array[T, H].
hidden_states array[T, N].
"""
C = self.C_re + 1j * self.C_im
D = self.D
y = jax.vmap(lambda x, u: (C @ x).real + D * u)(hidden_states, inputs)
return y
def get_hidden_states(self, inputs):
"""
Compute the hidden states corresponding to inputs
Return:
hidden_states array[T, N]
"""
# Materializing the diagonal of Lambda and projections
diag_lambda = self.get_diag_lambda()
B_norm = self.get_B_norm()
# Running the LRU + output projection
# For details on parallel scan, check discussion in Smith et al (2022).
Lambda_elements = jnp.repeat(diag_lambda[None, ...], inputs.shape[0], axis=0)
Bu_elements = jax.vmap(lambda u: B_norm @ u)(inputs)
elements = (Lambda_elements, Bu_elements)
if self.training_mode == "bptt":
_, hidden_states = jax.lax.associative_scan(binary_operator_diag, elements)
else:
_, hidden_states = jax.lax.associative_scan(binary_operator_diag_spatial, elements)
return hidden_states
def setup(self):
# Check that desired approximation is handled
if self.training_mode == "online_snap1":
raise NotImplementedError("SnAp-1 not implemented for LRU")
assert self.training_mode in [
"bptt",
"online_full",
"online_full_rec",
"online_full_rec_simpleB",
"online_snap1", # same as online_full
"online_spatial",
"online_1truncated",
"online_reservoir",
]
self.online = "online" in self.training_mode # whether we compute the gradient online
if self.online:
self.approximation_type = self.training_mode[7:]
# NOTE if exp_param is true, self.theta and self.nu actually represent the log of nu and
# theta lambda is initialized uniformly in complex plane
self.theta = self.param(
"theta",
|
# Parallel scan operations
@jax.vmap
def binary_operator_diag(q_i, q_j):
"""Binary operator for parallel scan of linear recurrence"""
A_i, b_i = q_i
A_j, b_j = q_j
return A_j * A_i, A_j * b_i + b_j
@jax.vmap
def binary_operator_diag_spatial(q_i, q_j):
"""Same as above but stop the gradient for the recurrent connection"""
A_i, b_i = q_i
A_j, b_j = q_j
return A_j * A_i, jax.lax.stop_gradient(A_j * b_i) + b_j
class LRU(nn.Module):
"""
    LRU layer that updates internal eligibility traces to allow online learning.
"""
d_hidden: int # hidden state dimension
d_model: int # input and output dimensions
seq_length: int # time sequence length
gamma_norm: bool = True # use gamma normalization
exp_param: bool = True # exponential parametrization for lambda
r_min: float = 0.0 # smallest eigenvalue norm
r_max: float = 1.0 # largest eigenvalue norm
max_phase: float = 6.28 # max phase eigenvalue
training_mode: str = "bptt" # which learning algorithm that will be used
training: bool = False # TODO remove, for debugging purposes
def get_diag_lambda(self, nu=None, theta=None):
"""
Transform parameters nu and theta into the diagonal of the recurrent
Lambda matrix.
Args:
nu, theta array[N]: when set to their default values, None, the
parameters will take the values of the Module.
NOTE: these arguments are added in order to backpropagate through this
transformation.
"""
if nu is None:
nu = self.nu
if theta is None:
theta = self.theta
if self.exp_param:
theta = jnp.exp(theta)
nu = jnp.exp(nu)
return jnp.exp(-nu + 1j * theta)
def get_diag_gamma(self):
"""
Transform parameters gamma_log into the diagonal terms of the modulation matrix gamma.
"""
if self.gamma_norm:
return jnp.exp(self.gamma_log)
else:
return jnp.ones((self.d_hidden,))
def get_B(self):
"""
Get input to hidden matrix B.
"""
return self.B_re + 1j * self.B_im
def get_B_norm(self):
"""
Get modulated input to hidden matrix gamma B.
"""
return self.get_B() * jnp.expand_dims(self.get_diag_gamma(), axis=-1)
def to_output(self, inputs, hidden_states):
"""
Compute output given inputs and hidden states.
Args:
inputs array[T, H].
hidden_states array[T, N].
"""
C = self.C_re + 1j * self.C_im
D = self.D
y = jax.vmap(lambda x, u: (C @ x).real + D * u)(hidden_states, inputs)
return y
def get_hidden_states(self, inputs):
"""
Compute the hidden states corresponding to inputs
Return:
hidden_states array[T, N]
"""
# Materializing the diagonal of Lambda and projections
diag_lambda = self.get_diag_lambda()
B_norm = self.get_B_norm()
# Running the LRU + output projection
# For details on parallel scan, check discussion in Smith et al (2022).
Lambda_elements = jnp.repeat(diag_lambda[None, ...], inputs.shape[0], axis=0)
Bu_elements = jax.vmap(lambda u: B_norm @ u)(inputs)
elements = (Lambda_elements, Bu_elements)
if self.training_mode == "bptt":
_, hidden_states = jax.lax.associative_scan(binary_operator_diag, elements)
else:
_, hidden_states = jax.lax.associative_scan(binary_operator_diag_spatial, elements)
return hidden_states
def setup(self):
# Check that desired approximation is handled
if self.training_mode == "online_snap1":
raise NotImplementedError("SnAp-1 not implemented for LRU")
assert self.training_mode in [
"bptt",
"online_full",
"online_full_rec",
"online_full_rec_simpleB",
"online_snap1", # same as online_full
"online_spatial",
"online_1truncated",
"online_reservoir",
]
self.online = "online" in self.training_mode # whether we compute the gradient online
if self.online:
self.approximation_type = self.training_mode[7:]
# NOTE if exp_param is true, self.theta and self.nu actually represent the log of nu and
# theta lambda is initialized uniformly in complex plane
self.theta = self.param(
"theta", | partial(theta_init, max_phase=self.max_phase, log=self.exp_param), | 2 | 2023-11-01 13:18:32+00:00 | 2k |
uygarkurt/video-retalking | models/ENet.py | [
{
"identifier": "ResBlock",
"path": "models/base_blocks.py",
"snippet": "class ResBlock(nn.Module):\n def __init__(self, in_channels, out_channels, mode='down'):\n super(ResBlock, self).__init__()\n self.conv1 = nn.Conv2d(in_channels, in_channels, 3, 1, 1)\n self.conv2 = nn.Conv2d(in_channels, out_channels, 3, 1, 1)\n self.skip = nn.Conv2d(in_channels, out_channels, 1, bias=False)\n if mode == 'down':\n self.scale_factor = 0.5\n elif mode == 'up':\n self.scale_factor = 2\n\n def forward(self, x):\n out = F.leaky_relu_(self.conv1(x), negative_slope=0.2)\n # upsample/downsample\n out = F.interpolate(out, scale_factor=self.scale_factor, mode='bilinear', align_corners=False)\n out = F.leaky_relu_(self.conv2(out), negative_slope=0.2)\n # skip\n x = F.interpolate(x, scale_factor=self.scale_factor, mode='bilinear', align_corners=False)\n skip = self.skip(x)\n out = out + skip\n return out"
},
{
"identifier": "StyleConv",
"path": "models/base_blocks.py",
"snippet": "class StyleConv(nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size, num_style_feat, demodulate=True, sample_mode=None):\n super(StyleConv, self).__init__()\n self.modulated_conv = ModulatedConv2d(\n in_channels, out_channels, kernel_size, num_style_feat, demodulate=demodulate, sample_mode=sample_mode)\n self.weight = nn.Parameter(torch.zeros(1)) # for noise injection\n self.bias = nn.Parameter(torch.zeros(1, out_channels, 1, 1))\n self.activate = nn.LeakyReLU(negative_slope=0.2, inplace=True)\n\n def forward(self, x, style, noise=None):\n # modulate\n out = self.modulated_conv(x, style) * 2**0.5 # for conversion\n # noise injection\n if noise is None:\n b, _, h, w = out.shape\n noise = out.new_empty(b, 1, h, w).normal_()\n out = out + self.weight * noise\n # add bias\n out = out + self.bias\n # activation\n out = self.activate(out)\n return out"
},
{
"identifier": "ToRGB",
"path": "models/base_blocks.py",
"snippet": "class ToRGB(nn.Module):\n def __init__(self, in_channels, num_style_feat, upsample=True):\n super(ToRGB, self).__init__()\n self.upsample = upsample\n self.modulated_conv = ModulatedConv2d(\n in_channels, 3, kernel_size=1, num_style_feat=num_style_feat, demodulate=False, sample_mode=None)\n self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1))\n\n def forward(self, x, style, skip=None):\n out = self.modulated_conv(x, style)\n out = out + self.bias\n if skip is not None:\n if self.upsample:\n skip = F.interpolate(skip, scale_factor=2, mode='bilinear', align_corners=False)\n out = out + skip\n return out"
}
] | import torch
import torch.nn as nn
import torch.nn.functional as F
from models.base_blocks import ResBlock, StyleConv, ToRGB | 1,390 |
class ENet(nn.Module):
def __init__(
self,
num_style_feat=512,
lnet=None,
concat=False
):
super(ENet, self).__init__()
self.low_res = lnet
for param in self.low_res.parameters():
param.requires_grad = False
channel_multiplier, narrow = 2, 1
channels = {
'4': int(512 * narrow),
'8': int(512 * narrow),
'16': int(512 * narrow),
'32': int(512 * narrow),
'64': int(256 * channel_multiplier * narrow),
'128': int(128 * channel_multiplier * narrow),
'256': int(64 * channel_multiplier * narrow),
'512': int(32 * channel_multiplier * narrow),
'1024': int(16 * channel_multiplier * narrow)
}
self.log_size = 8
first_out_size = 128
self.conv_body_first = nn.Conv2d(3, channels[f'{first_out_size}'], 1) # 256 -> 128
# downsample
in_channels = channels[f'{first_out_size}']
self.conv_body_down = nn.ModuleList()
for i in range(8, 2, -1):
out_channels = channels[f'{2**(i - 1)}']
self.conv_body_down.append(ResBlock(in_channels, out_channels, mode='down'))
in_channels = out_channels
self.num_style_feat = num_style_feat
linear_out_channel = num_style_feat
self.final_linear = nn.Linear(channels['4'] * 4 * 4, linear_out_channel)
self.final_conv = nn.Conv2d(in_channels, channels['4'], 3, 1, 1)
self.style_convs = nn.ModuleList()
self.to_rgbs = nn.ModuleList()
self.noises = nn.Module()
self.concat = concat
if concat:
in_channels = 3 + 32 # channels['64']
else:
in_channels = 3
for i in range(7, 9): # 128, 256
out_channels = channels[f'{2**i}'] #
self.style_convs.append(
StyleConv(
in_channels,
out_channels,
kernel_size=3,
num_style_feat=num_style_feat,
demodulate=True,
sample_mode='upsample'))
self.style_convs.append(
StyleConv(
out_channels,
out_channels,
kernel_size=3,
num_style_feat=num_style_feat,
demodulate=True,
sample_mode=None))
|
class ENet(nn.Module):
def __init__(
self,
num_style_feat=512,
lnet=None,
concat=False
):
super(ENet, self).__init__()
self.low_res = lnet
for param in self.low_res.parameters():
param.requires_grad = False
channel_multiplier, narrow = 2, 1
channels = {
'4': int(512 * narrow),
'8': int(512 * narrow),
'16': int(512 * narrow),
'32': int(512 * narrow),
'64': int(256 * channel_multiplier * narrow),
'128': int(128 * channel_multiplier * narrow),
'256': int(64 * channel_multiplier * narrow),
'512': int(32 * channel_multiplier * narrow),
'1024': int(16 * channel_multiplier * narrow)
}
self.log_size = 8
first_out_size = 128
self.conv_body_first = nn.Conv2d(3, channels[f'{first_out_size}'], 1) # 256 -> 128
# downsample
in_channels = channels[f'{first_out_size}']
self.conv_body_down = nn.ModuleList()
for i in range(8, 2, -1):
out_channels = channels[f'{2**(i - 1)}']
self.conv_body_down.append(ResBlock(in_channels, out_channels, mode='down'))
in_channels = out_channels
self.num_style_feat = num_style_feat
linear_out_channel = num_style_feat
self.final_linear = nn.Linear(channels['4'] * 4 * 4, linear_out_channel)
self.final_conv = nn.Conv2d(in_channels, channels['4'], 3, 1, 1)
self.style_convs = nn.ModuleList()
self.to_rgbs = nn.ModuleList()
self.noises = nn.Module()
self.concat = concat
if concat:
in_channels = 3 + 32 # channels['64']
else:
in_channels = 3
for i in range(7, 9): # 128, 256
out_channels = channels[f'{2**i}'] #
self.style_convs.append(
StyleConv(
in_channels,
out_channels,
kernel_size=3,
num_style_feat=num_style_feat,
demodulate=True,
sample_mode='upsample'))
self.style_convs.append(
StyleConv(
out_channels,
out_channels,
kernel_size=3,
num_style_feat=num_style_feat,
demodulate=True,
sample_mode=None)) | self.to_rgbs.append(ToRGB(out_channels, num_style_feat, upsample=True)) | 2 | 2023-11-02 18:25:51+00:00 | 2k |
fortelex/hiveline | hiveline/jobs/mongo.py | [
{
"identifier": "get_database",
"path": "hiveline/mongo/db.py",
"snippet": "def get_database():\n dotenv.load_dotenv()\n\n user = os.getenv(\"UP_MONGO_USER\")\n password = os.getenv(\"UP_MONGO_PASSWORD\")\n domain = os.getenv(\"UP_MONGO_DOMAIN\")\n database = os.getenv(\"UP_MONGO_DATABASE\")\n\n connection_string = \"mongodb://%s:%s@%s/%s?authSource=admin\" % (user, password, domain, database)\n\n client = MongoClient(connection_string)\n\n return client[database]"
},
{
"identifier": "JobsDataSource",
"path": "hiveline/jobs/jobs.py",
"snippet": "class JobsDataSource(ABC):\n @abstractmethod\n def create_jobs(self, sim_id: str, service_name: str, job_ids: list[str]):\n \"\"\"\n Creates the jobs in the data source. If the job already exists (uniquely identified by service_name, sim_id, job_id),\n it is not created again. Use reset_jobs and reset_failed_jobs to reset the status of existing jobs.\n :param sim_id: the simulation ID\n :param service_name: the name of the service\n :param job_ids: the job IDs\n :return:\n \"\"\"\n pass\n\n @abstractmethod\n def reset_jobs(self, sim_id: str, service_name: str, status: list[JobStatus] = None,\n max_started_date: datetime.datetime = None):\n \"\"\"\n Resets the status of the jobs to pending. If status is not None, only jobs with the specified status are reset.\n :param sim_id: the simulation ID\n :param service_name: the name of the service\n :param status: (optional) the status of the jobs to reset\n :param max_started_date: (optional) the maximum started date of the jobs to reset\n :return:\n \"\"\"\n pass\n\n @abstractmethod\n def pop_job(self, sim_id: str, service_name: str) -> str | None:\n \"\"\"\n Pops a job from the data source. If no job is available, returns None. It will automatically set the status of\n the job to \"started\".\n :param sim_id: the simulation ID\n :param service_name: the name of the service\n :return: the job ID or None if no job is available\n \"\"\"\n pass\n\n @abstractmethod\n def update_job(self, sim_id: str, service_name: str, job_id: str, status: JobStatus, error: str | None = None):\n \"\"\"\n Updates the status of a job.\n :param sim_id: the simulation ID\n :param service_name: the name of the service\n :param job_id: the job ID\n :param status: the new status\n :param error: (optional) the error message\n :return:\n \"\"\"\n pass\n\n @abstractmethod\n def count_jobs(self, sim_id: str, service_name: str, status: JobStatus = None) -> int:\n \"\"\"\n Counts the number of jobs. If status is not None, only jobs with the specified status are counted.\n :param sim_id: the simulation ID\n :param service_name: the name of the service\n :param status: (optional) the status of the jobs to count\n :return:\n \"\"\"\n pass\n\n @abstractmethod\n def delete_jobs(self, sim_id: str, service_name: str):\n \"\"\"\n Deletes all jobs for a simulation.\n :param sim_id: the simulation ID\n :param service_name: the name of the service\n :return:\n \"\"\"\n pass"
},
{
"identifier": "JobStatus",
"path": "hiveline/jobs/jobs.py",
"snippet": "class JobStatus(Enum):\n PENDING = \"pending\"\n STARTED = \"started\"\n FINISHED = \"finished\"\n FAILED = \"failed\"\n\n def __str__(self):\n return self.value\n\n def __repr__(self):\n return self.value\n\n def to_str(self):\n return self.value\n\n @staticmethod\n def from_str(s: str):\n if s == \"pending\":\n return JobStatus.PENDING\n elif s == \"started\":\n return JobStatus.STARTED\n elif s == \"finished\":\n return JobStatus.FINISHED\n elif s == \"failed\":\n return JobStatus.FAILED\n else:\n raise ValueError(f\"Invalid job status: {s}\")"
}
] | import datetime
import pymongo.errors
from hiveline import get_database
from hiveline.jobs.jobs import JobsDataSource, JobStatus | 1,517 |
class MongoJob:
"""
A calculation job of some sort. Used to track the status of a job. A job is uniquely identified by the key (
service_name, sim_id, job_id)
:param service_name: the name of the service
:param sim_id: the simulation ID
:param job_id: the job ID
:param status: the job status
"""
service_name: str
sim_id: str | None = None
job_id: str | None = None
status: str | None = None
created: datetime.datetime | None = None
started: datetime.datetime | None = None
finished: datetime.datetime | None = None
error: str | None = None
def __init__(self, service_name: str, sim_id: str | None = None, job_id: str | None = None,
status: str | None = None, created: datetime.datetime | None = None,
started: datetime.datetime | None = None, finished: datetime.datetime | None = None,
error: str | None = None):
self.service_name = service_name
self.sim_id = sim_id
self.job_id = job_id
self.status = status
self.created = created
self.started = started
self.finished = finished
self.error = error
def to_dict(self):
return {
"service-name": self.service_name,
"sim-id": self.sim_id,
"job-id": self.job_id,
"status": self.status,
"created": self.created,
"started": self.started,
"finished": self.finished,
"error": self.error
}
@staticmethod
def from_dict(d: dict):
return MongoJob(
service_name=d["service-name"],
sim_id=d["sim-id"],
job_id=d["job-id"],
status=d["status"],
created=d["created"],
started=d["started"],
finished=d["finished"],
error=d["error"]
)
|
class MongoJob:
"""
A calculation job of some sort. Used to track the status of a job. A job is uniquely identified by the key (
service_name, sim_id, job_id)
:param service_name: the name of the service
:param sim_id: the simulation ID
:param job_id: the job ID
:param status: the job status
"""
service_name: str
sim_id: str | None = None
job_id: str | None = None
status: str | None = None
created: datetime.datetime | None = None
started: datetime.datetime | None = None
finished: datetime.datetime | None = None
error: str | None = None
def __init__(self, service_name: str, sim_id: str | None = None, job_id: str | None = None,
status: str | None = None, created: datetime.datetime | None = None,
started: datetime.datetime | None = None, finished: datetime.datetime | None = None,
error: str | None = None):
self.service_name = service_name
self.sim_id = sim_id
self.job_id = job_id
self.status = status
self.created = created
self.started = started
self.finished = finished
self.error = error
def to_dict(self):
return {
"service-name": self.service_name,
"sim-id": self.sim_id,
"job-id": self.job_id,
"status": self.status,
"created": self.created,
"started": self.started,
"finished": self.finished,
"error": self.error
}
@staticmethod
def from_dict(d: dict):
return MongoJob(
service_name=d["service-name"],
sim_id=d["sim-id"],
job_id=d["job-id"],
status=d["status"],
created=d["created"],
started=d["started"],
finished=d["finished"],
error=d["error"]
)
| class MongoJobsDataSource(JobsDataSource): | 1 | 2023-11-07 15:34:04+00:00 | 2k |
uhppoted/uhppoted-app-home-assistant | custom_components/uhppoted/config.py | [
{
"identifier": "CONF_BIND_ADDR",
"path": "custom_components/uhppoted/const.py",
"snippet": "CONF_BIND_ADDR = 'bind_address'"
},
{
"identifier": "CONF_BROADCAST_ADDR",
"path": "custom_components/uhppoted/const.py",
"snippet": "CONF_BROADCAST_ADDR = 'broadcast_address'"
},
{
"identifier": "CONF_LISTEN_ADDR",
"path": "custom_components/uhppoted/const.py",
"snippet": "CONF_LISTEN_ADDR = 'listen_address'"
},
{
"identifier": "CONF_DEBUG",
"path": "custom_components/uhppoted/const.py",
"snippet": "CONF_DEBUG = 'debug'"
},
{
"identifier": "CONF_CONTROLLERS",
"path": "custom_components/uhppoted/const.py",
"snippet": "CONF_CONTROLLERS = 'controllers'"
},
{
"identifier": "CONF_CONTROLLER_ID",
"path": "custom_components/uhppoted/const.py",
"snippet": "CONF_CONTROLLER_ID = 'controller_id'"
},
{
"identifier": "CONF_CONTROLLER_SERIAL_NUMBER",
"path": "custom_components/uhppoted/const.py",
"snippet": "CONF_CONTROLLER_SERIAL_NUMBER = 'controller_serial_number'"
},
{
"identifier": "CONF_CONTROLLER_ADDR",
"path": "custom_components/uhppoted/const.py",
"snippet": "CONF_CONTROLLER_ADDR = 'controller_address'"
},
{
"identifier": "CONF_DOORS",
"path": "custom_components/uhppoted/const.py",
"snippet": "CONF_DOORS = 'doors'"
},
{
"identifier": "CONF_DOOR_ID",
"path": "custom_components/uhppoted/const.py",
"snippet": "CONF_DOOR_ID = 'door_id'"
},
{
"identifier": "CONF_DOOR_CONTROLLER",
"path": "custom_components/uhppoted/const.py",
"snippet": "CONF_DOOR_CONTROLLER = 'door_controller'"
},
{
"identifier": "CONF_DOOR_NUMBER",
"path": "custom_components/uhppoted/const.py",
"snippet": "CONF_DOOR_NUMBER = 'door_number'"
},
{
"identifier": "CONF_CARDS",
"path": "custom_components/uhppoted/const.py",
"snippet": "CONF_CARDS = 'cards'"
},
{
"identifier": "CONF_CARD_UNIQUE_ID",
"path": "custom_components/uhppoted/const.py",
"snippet": "CONF_CARD_UNIQUE_ID = 'card_unique_id'"
},
{
"identifier": "CONF_CARD_NUMBER",
"path": "custom_components/uhppoted/const.py",
"snippet": "CONF_CARD_NUMBER = 'card_number'"
},
{
"identifier": "CONF_CARD_NAME",
"path": "custom_components/uhppoted/const.py",
"snippet": "CONF_CARD_NAME = 'card_name'"
},
{
"identifier": "CONF_CARD_STARTDATE",
"path": "custom_components/uhppoted/const.py",
"snippet": "CONF_CARD_STARTDATE = 'card_startdate'"
},
{
"identifier": "CONF_CARD_ENDDATE",
"path": "custom_components/uhppoted/const.py",
"snippet": "CONF_CARD_ENDDATE = 'card_enddate'"
},
{
"identifier": "CONF_CARD_DOORS",
"path": "custom_components/uhppoted/const.py",
"snippet": "CONF_CARD_DOORS = 'card_doors'"
},
{
"identifier": "ERR_INVALID_CONTROLLER_ID",
"path": "custom_components/uhppoted/const.py",
"snippet": "ERR_INVALID_CONTROLLER_ID = 'invalid_controller_id'"
},
{
"identifier": "ERR_DUPLICATE_CONTROLLER_ID",
"path": "custom_components/uhppoted/const.py",
"snippet": "ERR_DUPLICATE_CONTROLLER_ID = 'duplicate_controller_id'"
},
{
"identifier": "ERR_DUPLICATE_CONTROLLER_IDS",
"path": "custom_components/uhppoted/const.py",
"snippet": "ERR_DUPLICATE_CONTROLLER_IDS = 'duplicate_controller_ids'"
},
{
"identifier": "ERR_INVALID_DOOR_ID",
"path": "custom_components/uhppoted/const.py",
"snippet": "ERR_INVALID_DOOR_ID = 'invalid_door_id'"
},
{
"identifier": "ERR_DUPLICATE_DOOR_ID",
"path": "custom_components/uhppoted/const.py",
"snippet": "ERR_DUPLICATE_DOOR_ID = 'duplicate_door_id'"
},
{
"identifier": "ERR_DUPLICATE_DOOR_IDS",
"path": "custom_components/uhppoted/const.py",
"snippet": "ERR_DUPLICATE_DOOR_IDS = 'duplicate_door_ids'"
},
{
"identifier": "ERR_INVALID_CARD_ID",
"path": "custom_components/uhppoted/const.py",
"snippet": "ERR_INVALID_CARD_ID = 'invalid_card_id'"
}
] | import re
import logging
import datetime
import calendar
import uuid
from typing import Any
from uhppoted import uhppote
from .const import CONF_BIND_ADDR
from .const import CONF_BROADCAST_ADDR
from .const import CONF_LISTEN_ADDR
from .const import CONF_DEBUG
from .const import CONF_CONTROLLERS
from .const import CONF_CONTROLLER_ID
from .const import CONF_CONTROLLER_SERIAL_NUMBER
from .const import CONF_CONTROLLER_ADDR
from .const import CONF_DOORS
from .const import CONF_DOOR_ID
from .const import CONF_DOOR_CONTROLLER
from .const import CONF_DOOR_NUMBER
from .const import CONF_CARDS
from .const import CONF_CARD_UNIQUE_ID
from .const import CONF_CARD_NUMBER
from .const import CONF_CARD_NAME
from .const import CONF_CARD_STARTDATE
from .const import CONF_CARD_ENDDATE
from .const import CONF_CARD_DOORS
from .const import ERR_INVALID_CONTROLLER_ID
from .const import ERR_DUPLICATE_CONTROLLER_ID
from .const import ERR_DUPLICATE_CONTROLLER_IDS
from .const import ERR_INVALID_DOOR_ID
from .const import ERR_DUPLICATE_DOOR_ID
from .const import ERR_DUPLICATE_DOOR_IDS
from .const import ERR_INVALID_CARD_ID | 1,116 |
_LOGGER = logging.getLogger(__name__)
MAX_CARDS = 25
MAX_CARD_INDEX = 20000
MAX_ERRORS = 5
def normalise(v):
return re.sub(r'\s+', '', f'{v}', flags=re.UNICODE).lower()
def validate_controller_id(serial_no, name, options):
if not name or name.strip() == '':
raise ValueError(ERR_INVALID_CONTROLLER_ID)
if options and CONF_CONTROLLERS in options:
for v in options[CONF_CONTROLLERS]:
if normalise(v[CONF_CONTROLLER_ID]) == normalise(name):
if int(f'{v[CONF_CONTROLLER_SERIAL_NUMBER]}') != int(f'{serial_no}'):
raise ValueError(ERR_DUPLICATE_CONTROLLER_ID)
def validate_all_controllers(options):
if options and CONF_CONTROLLERS in options:
controllers = [normalise(v[CONF_CONTROLLER_ID]) for v in options[CONF_CONTROLLERS]]
if len(controllers) != len(set(controllers)):
raise ValueError(ERR_DUPLICATE_CONTROLLER_IDS)
def validate_door_id(name, options):
if not name or name.strip() == '':
raise ValueError(ERR_INVALID_DOOR_ID)
|
_LOGGER = logging.getLogger(__name__)
MAX_CARDS = 25
MAX_CARD_INDEX = 20000
MAX_ERRORS = 5
def normalise(v):
return re.sub(r'\s+', '', f'{v}', flags=re.UNICODE).lower()
def validate_controller_id(serial_no, name, options):
if not name or name.strip() == '':
raise ValueError(ERR_INVALID_CONTROLLER_ID)
if options and CONF_CONTROLLERS in options:
for v in options[CONF_CONTROLLERS]:
if normalise(v[CONF_CONTROLLER_ID]) == normalise(name):
if int(f'{v[CONF_CONTROLLER_SERIAL_NUMBER]}') != int(f'{serial_no}'):
raise ValueError(ERR_DUPLICATE_CONTROLLER_ID)
def validate_all_controllers(options):
if options and CONF_CONTROLLERS in options:
controllers = [normalise(v[CONF_CONTROLLER_ID]) for v in options[CONF_CONTROLLERS]]
if len(controllers) != len(set(controllers)):
raise ValueError(ERR_DUPLICATE_CONTROLLER_IDS)
def validate_door_id(name, options):
if not name or name.strip() == '':
raise ValueError(ERR_INVALID_DOOR_ID)
| if name.strip() != '-' and options and CONF_DOORS in options: | 8 | 2023-11-06 18:46:49+00:00 | 2k |
kyegomez/HeptapodLM | train.py | [
{
"identifier": "Autoregressive2DWrapper",
"path": "heptapod/at.py",
"snippet": "class Autoregressive2DWrapper(nn.Module):\n def __init__(self, net, matrix_size=32, pad_value=0):\n super().__init__()\n self.matrix_size = matrix_size\n self.pad_value = pad_value\n self.net = net\n\n @torch.no_grad()\n @eval_decorator\n def generate(\n self, start_matrix, eos_token=None, temperature=1.0, filter_thres=0.9, **kwargs\n ):\n b, h, w, device = *start_matrix.shape, start_matrix.device\n\n out = start_matrix\n\n for i in range(h):\n for j in range(w):\n logits = self.net(out, **kwargs)[:, i, j, :]\n filtered_logits = top_k(logits, thres=filter_thres)\n probs = F.softmax(filtered_logits / temperature, dim=-1)\n sample = torch.multinomial(probs, 1)\n out[:, i, j] = sample.squeeze(-1)\n\n return out\n\n def forward(self, x, **kwargs):\n x_inp, x_labels = x[:, :-1, :-1], x[:, 1:, 1:]\n logits = self.net(x_inp, **kwargs)\n return F.cross_entropy(rearrange(logits, \"b c h w -> b h w c\"), x_labels)"
},
{
"identifier": "NonLinearTransformer",
"path": "heptapod/model.py",
"snippet": "class NonLinearTransformer(nn.Module):\n def __init__(self, vocab_size, dim, depth, matrix_dim, window_size=3):\n super().__init__()\n self.embedding = nn.Embedding(vocab_size, dim)\n self.blocks = nn.ModuleList(\n [NonLinearTransformerBlock(dim) for _ in range(depth)]\n )\n self.rotary_emb = Rotary2DEmbedding(dim)\n self.local_attn = Local2DAttention(dim, window_size)\n self.to_logits = nn.Linear(dim, vocab_size)\n\n def forward(self, matrix):\n b, h, w = matrix.size()\n matrix = self.embedding(matrix)\n pos_emb = self.rotary_emb(h, w, device=matrix.device)\n\n for block in self.blocks:\n matrix = matrix + block(matrix)\n matrix = matrix + self.local_attn(matrix)\n\n matrix = matrix + pos_emb\n logits = self.to_logits(matrix)\n return logits"
}
] | import gzip
import random
import numpy as np
import torch
import torch.optim as optim
import tqdm
from torch.utils.data import DataLoader, Dataset
from heptapod.at import Autoregressive2DWrapper
from heptapod.model import NonLinearTransformer | 756 |
# Constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 4
GRADIENT_ACCUMULATE_EVERY = 4
LEARNING_RATE = 2e-4
VALIDATE_EVERY = 100
GENERATE_EVERY = 500
GENERATE_LENGTH = 512
SEQ_LEN = 1024
# Helpers
def cycle(loader):
while True:
for data in loader:
yield data
def decode_token(token):
return str(chr(max(32, token)))
def decode_tokens(tokens):
return "".join(list(map(decode_token, tokens)))
# Instantiate GPT-like decoder model
|
# Constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 4
GRADIENT_ACCUMULATE_EVERY = 4
LEARNING_RATE = 2e-4
VALIDATE_EVERY = 100
GENERATE_EVERY = 500
GENERATE_LENGTH = 512
SEQ_LEN = 1024
# Helpers
def cycle(loader):
while True:
for data in loader:
yield data
def decode_token(token):
return str(chr(max(32, token)))
def decode_tokens(tokens):
return "".join(list(map(decode_token, tokens)))
# Instantiate GPT-like decoder model | model = NonLinearTransformer(vocab_size=10000, dim=512, depth=6, matrix_dim=5) | 1 | 2023-11-01 06:07:50+00:00 | 2k |
shixiaoyu0216/SAC4IR | sacd/memory/per.py | [
{
"identifier": "LazyMultiStepMemory",
"path": "sacd/memory/base.py",
"snippet": "class LazyMultiStepMemory(LazyMemory):\n\n def __init__(self, capacity, state_shape, device, gamma=0.99,\n multi_step=3):\n super(LazyMultiStepMemory, self).__init__(\n capacity, state_shape, device)\n\n self.gamma = gamma\n self.multi_step = int(multi_step)\n if self.multi_step != 1:\n self.buff = MultiStepBuff(maxlen=self.multi_step)\n\n def append(self, state, action, reward, next_state, done):\n if self.multi_step != 1:\n self.buff.append(state, action, reward)\n\n if self.buff.is_full():\n state, action, reward = self.buff.get(self.gamma)\n self._append(state, action, reward, next_state, done)\n\n if done:\n while not self.buff.is_empty():\n state, action, reward = self.buff.get(self.gamma)\n self._append(state, action, reward, next_state, done)\n else:\n self._append(state, action, reward, next_state, done)"
},
{
"identifier": "SumTree",
"path": "sacd/memory/segment_tree.py",
"snippet": "class SumTree(SegmentTree):\n\n def __init__(self, size):\n super().__init__(size, operator.add, 0.0)\n\n def sum(self, start=0, end=None):\n return self._reduce(start, end)\n\n def find_prefixsum_idx(self, prefixsum):\n assert 0 <= prefixsum <= self.sum() + 1e-5\n idx = 1\n\n while idx < self._size:\n left = 2 * idx\n if self._values[left] > prefixsum:\n idx = left\n else:\n prefixsum -= self._values[left]\n idx = left + 1\n return idx - self._size"
},
{
"identifier": "MinTree",
"path": "sacd/memory/segment_tree.py",
"snippet": "class MinTree(SegmentTree):\n\n def __init__(self, size):\n super().__init__(size, min, float(\"inf\"))\n\n def min(self, start=0, end=None):\n return self._reduce(start, end)"
}
] | import numpy as np
import torch
from .base import LazyMultiStepMemory
from .segment_tree import SumTree, MinTree | 740 |
class LazyPrioritizedMultiStepMemory(LazyMultiStepMemory):
def __init__(self, capacity, state_shape, device, gamma=0.99,
multi_step=3, alpha=0.6, beta=0.4, beta_steps=2e5,
min_pa=0.0, max_pa=1.0, eps=0.01):
super().__init__(capacity, state_shape, device, gamma, multi_step)
self.alpha = alpha
self.beta = beta
self.beta_diff = (1.0 - beta) / beta_steps
self.min_pa = min_pa
self.max_pa = max_pa
self.eps = eps
self._cached = None
it_capacity = 1
while it_capacity < capacity:
it_capacity *= 2
self.it_sum = SumTree(it_capacity)
|
class LazyPrioritizedMultiStepMemory(LazyMultiStepMemory):
def __init__(self, capacity, state_shape, device, gamma=0.99,
multi_step=3, alpha=0.6, beta=0.4, beta_steps=2e5,
min_pa=0.0, max_pa=1.0, eps=0.01):
super().__init__(capacity, state_shape, device, gamma, multi_step)
self.alpha = alpha
self.beta = beta
self.beta_diff = (1.0 - beta) / beta_steps
self.min_pa = min_pa
self.max_pa = max_pa
self.eps = eps
self._cached = None
it_capacity = 1
while it_capacity < capacity:
it_capacity *= 2
self.it_sum = SumTree(it_capacity) | self.it_min = MinTree(it_capacity) | 2 | 2023-11-02 07:35:57+00:00 | 2k |
In-Network-Machine-Learning/QCMP | initiate_rules.py | [
{
"identifier": "init_path_weights",
"path": "q_table.py",
"snippet": "def init_path_weights(p4info_helper, ingress_sw, nhop_dmacs, nhop_ipv4s, ports):\n for i in range(50):\n write_path_weights(p4info_helper, ingress_sw=ingress_sw, value=i,\n nhop_dmac=nhop_dmacs[0], nhop_ipv4=nhop_ipv4s[0], port=ports[0])\n for i in range(50, 100):\n write_path_weights(p4info_helper, ingress_sw=ingress_sw, value=i,\n nhop_dmac=nhop_dmacs[1], nhop_ipv4=nhop_ipv4s[1], port=ports[1])"
},
{
"identifier": "readTableRules",
"path": "q_table.py",
"snippet": "def readTableRules(p4info_helper, sw):\n \"\"\"\n Reads the table entries from all tables on the switch.\n :param p4info_helper: the P4Info helper\n :param sw: the switch connection\n \"\"\"\n print('\\n----- Reading tables rules for %s -----' % sw.name)\n for response in sw.ReadTableEntries():\n for entity in response.entities:\n entry = entity.table_entry\n # TODO For extra credit, you can use the p4info_helper to translate\n # the IDs in the entry to names\n print(entry)\n print('-----')"
}
] | import sys
import argparse
import os
import pandas as pd
import grpc
import p4runtime_lib.bmv2
import p4runtime_lib.helper
from scapy.all import *
from scapy.layers.inet import _IPOption_HDR
from p4runtime_lib.error_utils import printGrpcError
from p4runtime_lib.switch import ShutdownAllSwitchConnections
from q_table import (init_path_weights, readTableRules) | 951 | # This file is part of the Planter extend project: QCMP.
# This program is a free software tool, which does ensemble in-network reinforcement learning for load balancing.
# licensed under Apache-2.0
#
# Utility: This file is used to initiate rules in the q-table
#
# Copyright (c) 2022-2023 Benjamin Rienecker Modified by Changgang Zheng
# Copyright (c) Computing Infrastructure Group, Department of Engineering Science, University of Oxford
#!/usr/bin/env python3
# Import P4Runtime lib from parent utils dir
# Probably there's a better way of doing this.
sys.path.append(
os.path.join(os.path.dirname(os.path.abspath(__file__)),
'../utils/'))
def main(p4info_file_path, bmv2_file_path):
# Instantiate a P4Runtime helper from the p4info file
p4info_helper = p4runtime_lib.helper.P4InfoHelper(p4info_file_path)
# Create a switch connection object for all switches;
# this is backed by a P4Runtime gRPC connection.
# Also, dump all P4Runtime messages sent to switch to given txt files.
s1 = p4runtime_lib.bmv2.Bmv2SwitchConnection(
name='s1',
address='127.0.0.1:50051',
device_id=0,
proto_dump_file='logs/s1-p4runtime-requests.txt')
s2 = p4runtime_lib.bmv2.Bmv2SwitchConnection(
name='s2',
address='127.0.0.1:50052',
device_id=1,
proto_dump_file='logs/s2-p4runtime-requests.txt')
s3 = p4runtime_lib.bmv2.Bmv2SwitchConnection(
name='s3',
address='127.0.0.1:50053',
device_id=2,
proto_dump_file='logs/s3-p4runtime-requests.txt')
# Send master arbitration update message to establish this controller as
# master (required by P4Runtime before performing any other write operation)
s1.MasterArbitrationUpdate()
s2.MasterArbitrationUpdate()
s3.MasterArbitrationUpdate()
nhop_dmacs = ["00:00:00:00:01:04", "00:00:00:00:01:05"]
nhop_ipv4s = ["10.0.2.0", "10.0.3.0"]
ports = [4, 5]
| # This file is part of the Planter extend project: QCMP.
# This program is a free software tool, which does ensemble in-network reinforcement learning for load balancing.
# licensed under Apache-2.0
#
# Utility: This file is used to initiate rules in the q-table
#
# Copyright (c) 2022-2023 Benjamin Rienecker Modified by Changgang Zheng
# Copyright (c) Computing Infrastructure Group, Department of Engineering Science, University of Oxford
#!/usr/bin/env python3
# Import P4Runtime lib from parent utils dir
# Probably there's a better way of doing this.
sys.path.append(
os.path.join(os.path.dirname(os.path.abspath(__file__)),
'../utils/'))
def main(p4info_file_path, bmv2_file_path):
# Instantiate a P4Runtime helper from the p4info file
p4info_helper = p4runtime_lib.helper.P4InfoHelper(p4info_file_path)
# Create a switch connection object for all switches;
# this is backed by a P4Runtime gRPC connection.
# Also, dump all P4Runtime messages sent to switch to given txt files.
s1 = p4runtime_lib.bmv2.Bmv2SwitchConnection(
name='s1',
address='127.0.0.1:50051',
device_id=0,
proto_dump_file='logs/s1-p4runtime-requests.txt')
s2 = p4runtime_lib.bmv2.Bmv2SwitchConnection(
name='s2',
address='127.0.0.1:50052',
device_id=1,
proto_dump_file='logs/s2-p4runtime-requests.txt')
s3 = p4runtime_lib.bmv2.Bmv2SwitchConnection(
name='s3',
address='127.0.0.1:50053',
device_id=2,
proto_dump_file='logs/s3-p4runtime-requests.txt')
# Send master arbitration update message to establish this controller as
# master (required by P4Runtime before performing any other write operation)
s1.MasterArbitrationUpdate()
s2.MasterArbitrationUpdate()
s3.MasterArbitrationUpdate()
nhop_dmacs = ["00:00:00:00:01:04", "00:00:00:00:01:05"]
nhop_ipv4s = ["10.0.2.0", "10.0.3.0"]
ports = [4, 5] | init_path_weights(p4info_helper, s1, nhop_dmacs, nhop_ipv4s, ports) | 0 | 2023-11-01 09:37:28+00:00 | 2k |
Fsoft-AIC/LSDM | vis_fitting_results.py | [
{
"identifier": "gen_human_meshes",
"path": "gen_human_meshes.py",
"snippet": "def gen_human_meshes(vertices_path, output_path):\n vertices = np.load(open(vertices_path, \"rb\"))\n # If your input human vertices are full resolution SMPL-X bodies, use mesh_0.obj\n # faces = trimesh.load(os.path.join(\"mesh_ds\", \"mesh_0.obj\"), process=False).faces\n faces = trimesh.load(os.path.join(\"mesh_ds\", \"mesh_2.obj\"), process=False).faces\n\n if not os.path.exists(output_path):\n os.makedirs(output_path)\n\n print(\"Saving human meshes to\", output_path)\n for frame in tqdm(range(vertices.shape[0])):\n vertices_frame = vertices[frame]\n mesh = create_o3d_mesh_from_vertices_faces(vertices_frame, faces)\n vertex_colors = np.ones_like(vertices_frame)\n mesh.vertex_colors = o3d.utility.Vector3dVector(vertex_colors)\n o3d.io.write_triangle_mesh(os.path.join(output_path, \"human_\" + str(frame) + \".ply\"), mesh)"
},
{
"identifier": "gen_human_meshes_humanise",
"path": "gen_human_meshes.py",
"snippet": "def gen_human_meshes_humanise(vertices_path, body_faces, output_path):\n vertices = np.load(open(vertices_path, \"rb\"))\n # If your input human vertices are full resolution SMPL-X bodies, use mesh_0.obj\n # faces = trimesh.load(os.path.join(\"mesh_ds\", \"mesh_0.obj\"), process=False).faces\n faces = body_faces\n\n if not os.path.exists(output_path):\n os.makedirs(output_path)\n\n print(\"Saving human meshes to\", output_path)\n for frame in tqdm(range(vertices.shape[0])):\n vertices_frame = vertices[frame]\n mesh = create_o3d_mesh_from_vertices_faces(vertices_frame, faces)\n vertex_colors = np.ones_like(vertices_frame)\n mesh.vertex_colors = o3d.utility.Vector3dVector(vertex_colors)\n o3d.io.write_triangle_mesh(os.path.join(output_path, \"human_\" + str(frame) + \".ply\"), mesh)"
}
] | import os
import numpy as np
import argparse
import open3d as o3d
import json
from pathlib import Path
from gen_human_meshes import gen_human_meshes, gen_human_meshes_humanise
from tqdm import tqdm | 723 |
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="")
parser.add_argument("--fitting_results_path", type=str, help="Path to the fitting results of some motion sequence")
parser.add_argument("--vertices_path", type=str, help="Path to human vertices of some motion sequence")
parser.add_argument("--datatype", default="PROXD", type=str, help="Data type")
args = parser.parse_args()
input_dir = Path(args.fitting_results_path)
vertices_path = Path(args.vertices_path)
seq_name = input_dir.stem
# Check if human meshes are there
human_mesh_dir = input_dir / 'human' / 'mesh'
if not human_mesh_dir.exists():
human_mesh_dir.mkdir()
if args.datatype == "PROXD":
gen_human_meshes(vertices_path=vertices_path, output_path=human_mesh_dir)
else:
body_faces = np.load(open(args.vertices_path[:-4] + "_faces.npy", "rb"))
|
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="")
parser.add_argument("--fitting_results_path", type=str, help="Path to the fitting results of some motion sequence")
parser.add_argument("--vertices_path", type=str, help="Path to human vertices of some motion sequence")
parser.add_argument("--datatype", default="PROXD", type=str, help="Data type")
args = parser.parse_args()
input_dir = Path(args.fitting_results_path)
vertices_path = Path(args.vertices_path)
seq_name = input_dir.stem
# Check if human meshes are there
human_mesh_dir = input_dir / 'human' / 'mesh'
if not human_mesh_dir.exists():
human_mesh_dir.mkdir()
if args.datatype == "PROXD":
gen_human_meshes(vertices_path=vertices_path, output_path=human_mesh_dir)
else:
body_faces = np.load(open(args.vertices_path[:-4] + "_faces.npy", "rb")) | gen_human_meshes_humanise(vertices_path, body_faces, output_path=human_mesh_dir) | 1 | 2023-11-06 07:55:51+00:00 | 2k |
molML/traversing_chem_space | active_learning/data_prep.py | [
{
"identifier": "molecular_graph_featurizer",
"path": "active_learning/utils.py",
"snippet": "def molecular_graph_featurizer(smiles: str, y=None, structural_feats: bool = True, functional_feats: bool = True):\n\n y = torch.tensor([y]).to(torch.long)\n\n mol = Chem.MolFromSmiles(smiles, sanitize=True)\n Chem.AssignStereochemistry(mol, cleanIt=True, force=True)\n\n # RDKIT Atom featurization\n x = atom_featurizer(mol, structural_feats, functional_feats)\n\n # Edge featurization\n edge_indices, edge_attrs = [], []\n for bond in mol.GetBonds():\n i = bond.GetBeginAtomIdx()\n j = bond.GetEndAtomIdx()\n\n edge_indices += [[i, j], [j, i]]\n\n edge_index = torch.tensor(edge_indices)\n edge_index = edge_index.t().to(torch.long).view(2, -1)\n\n # Sort indices.\n if edge_index.numel() > 0:\n perm = (edge_index[0] * x.size(0) + edge_index[1]).argsort()\n edge_index = edge_index[:, perm]\n\n if torch.isnan(x).any():\n return smiles\n # raise ValueError(f\"Featurizing {smiles} gave nan(s)\")\n\n graph = Data(x=x, edge_index=edge_index, smiles=smiles, y=y)\n\n return graph"
},
{
"identifier": "smiles_to_ecfp",
"path": "active_learning/utils.py",
"snippet": "def smiles_to_ecfp(smiles: list[str], radius: int = 2, nbits: int = 1024, silent: bool = True, to_array: bool = True) \\\n -> np.ndarray:\n \"\"\" Get a Numpy array of ECFPs from a list of SMILES strings \"\"\"\n from rdkit.Chem.AllChem import GetMorganFingerprintAsBitVect\n from rdkit.Chem import MolFromSmiles\n from rdkit.DataStructs import ConvertToNumpyArray\n\n if type(smiles) is str:\n smiles = [smiles]\n\n fp = [GetMorganFingerprintAsBitVect(MolFromSmiles(s), radius, nBits=nbits) for s in tqdm(smiles, disable=silent)]\n\n if not to_array:\n return fp\n\n output = []\n for f in fp:\n arr = np.zeros((1,))\n ConvertToNumpyArray(f, arr)\n output.append(arr)\n\n return np.asarray(output)"
},
{
"identifier": "get_tanimoto_matrix",
"path": "active_learning/utils.py",
"snippet": "def get_tanimoto_matrix(smiles: list[str], radius: int = 2, nBits: int = 1024, verbose: bool = True,\n scaffolds: bool = False, zero_diag: bool = True, as_vector: bool = False):\n \"\"\" Calculates a matrix of Tanimoto similarity scores for a list of SMILES string\"\"\"\n from active_learning.data_prep import smi_to_scaff\n\n # Make a fingerprint database\n db_fp = {}\n for smi in smiles:\n if scaffolds:\n m = Chem.MolFromSmiles(smi_to_scaff(smi, includeChirality=False))\n else:\n m = Chem.MolFromSmiles(smi)\n fp = AllChem.GetMorganFingerprintAsBitVect(m, radius=radius, nBits=nBits)\n db_fp[smi] = fp\n\n smi_len = len(smiles)\n m = np.zeros([smi_len, smi_len], dtype=np.float16) # We use 16-bit floats to prevent giant matrices\n # Calculate upper triangle of matrix\n for i in tqdm(range(smi_len), disable=not verbose):\n for j in range(i, smi_len):\n m[i, j] = DataStructs.TanimotoSimilarity(db_fp[smiles[i]], db_fp[smiles[j]])\n # Fill in the lower triangle without having to loop (saves ~50% of time)\n m = m + m.T - np.diag(np.diag(m))\n # Fill the diagonal with 0's\n if zero_diag:\n np.fill_diagonal(m, 0)\n if as_vector:\n from scipy.spatial.distance import squareform\n m = squareform(m)\n\n return m"
},
{
"identifier": "check_featurizability",
"path": "active_learning/utils.py",
"snippet": "def check_featurizability(smiles: str):\n try:\n mol = Chem.MolFromSmiles(smiles, sanitize=True)\n Chem.AssignStereochemistry(mol, cleanIt=True, force=True)\n\n for atom in mol.GetAtoms():\n try:\n x_ = atom_props(atom)\n except:\n return False\n except:\n return False\n\n return True"
},
{
"identifier": "ROOT_DIR",
"path": "config.py",
"snippet": "ROOT_DIR = os.path.realpath(os.path.dirname(__file__))"
}
] | from active_learning.utils import molecular_graph_featurizer as smiles_to_graph
from active_learning.utils import smiles_to_ecfp, get_tanimoto_matrix, check_featurizability
from collections import OrderedDict
from rdkit.Chem.Scaffolds import MurckoScaffold
from rdkit import Chem
from tqdm import tqdm
from typing import Any
from config import ROOT_DIR
from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
import torch
import os
import sys
import h5py
import h5py
import h5py | 1,588 |
def canonicalize(smiles: str, sanitize: bool = True):
return Chem.MolToSmiles(Chem.MolFromSmiles(smiles, sanitize=sanitize))
def get_data(random_state: int = 42, dataset: str = 'ALDH1'):
# read smiles from file and canonicalize them
with open(os.path.join(ROOT_DIR, f'data/{dataset}/original/inactives.smi')) as f:
inactives = [canonicalize(smi.strip().split()[0]) for smi in f.readlines()]
with open(os.path.join(ROOT_DIR, f'data/{dataset}/original/actives.smi')) as f:
actives = [canonicalize(smi.strip().split()[0]) for smi in f.readlines()]
# remove duplicates:
inactives = list(set(inactives))
actives = list(set(actives))
# remove intersecting molecules:
intersecting_mols = np.intersect1d(inactives, actives)
inactives = [smi for smi in inactives if smi not in intersecting_mols]
actives = [smi for smi in actives if smi not in intersecting_mols]
# remove molecules that have scaffolds that cannot be kekulized or featurized
inactives_, actives_ = [], []
for smi in tqdm(actives):
try:
if Chem.MolFromSmiles(smi_to_scaff(smi, includeChirality=False)) is not None:
|
def canonicalize(smiles: str, sanitize: bool = True):
return Chem.MolToSmiles(Chem.MolFromSmiles(smiles, sanitize=sanitize))
def get_data(random_state: int = 42, dataset: str = 'ALDH1'):
# read smiles from file and canonicalize them
with open(os.path.join(ROOT_DIR, f'data/{dataset}/original/inactives.smi')) as f:
inactives = [canonicalize(smi.strip().split()[0]) for smi in f.readlines()]
with open(os.path.join(ROOT_DIR, f'data/{dataset}/original/actives.smi')) as f:
actives = [canonicalize(smi.strip().split()[0]) for smi in f.readlines()]
# remove duplicates:
inactives = list(set(inactives))
actives = list(set(actives))
# remove intersecting molecules:
intersecting_mols = np.intersect1d(inactives, actives)
inactives = [smi for smi in inactives if smi not in intersecting_mols]
actives = [smi for smi in actives if smi not in intersecting_mols]
# remove molecules that have scaffolds that cannot be kekulized or featurized
inactives_, actives_ = [], []
for smi in tqdm(actives):
try:
if Chem.MolFromSmiles(smi_to_scaff(smi, includeChirality=False)) is not None: | if check_featurizability(smi): | 3 | 2023-11-10 08:53:40+00:00 | 2k |
yunik1004/SAiD | script/render.py | [
{
"identifier": "load_mesh",
"path": "said/util/mesh.py",
"snippet": "def load_mesh(mesh_path: str) -> trimesh.Trimesh:\n \"\"\"Load the mesh\n\n Parameters\n ----------\n filepath : str\n Path of the mesh file\n\n Returns\n -------\n trimesh.Trimesh\n Mesh object\n \"\"\"\n mesh = trimesh.load(mesh_path, process=False, maintain_order=True)\n return mesh"
},
{
"identifier": "parse_list",
"path": "said/util/parser.py",
"snippet": "def parse_list(file_path: str, typecast_func: Callable[[str], T]) -> List[T]:\n \"\"\"Parse the file into the list\n\n Parameters\n ----------\n file_path : str\n Path of the file\n typecast_func : Callable[[str], T]\n Type-casting function\n\n Returns\n -------\n List[T]\n Output list\n \"\"\"\n info_list = []\n with open(file_path, \"r\") as f:\n info_list = [typecast_func(line.strip()) for line in f.readlines()]\n\n return info_list"
},
{
"identifier": "load_blendshape_coeffs",
"path": "said/util/blendshape.py",
"snippet": "def load_blendshape_coeffs(coeffs_path: str) -> torch.FloatTensor:\n \"\"\"Load the blendshape coefficients file\n\n Parameters\n ----------\n coeffs_path : str\n Path of the blendshape coefficients file (csv format)\n\n Returns\n -------\n torch.FloatTensor\n (T_b, num_classes), Blendshape coefficients\n \"\"\"\n df = pd.read_csv(coeffs_path)\n coeffs = torch.FloatTensor(df.values)\n return coeffs"
}
] | import argparse
import os
import pathlib
import cv2
import numpy as np
from moviepy import editor as mpy
from said.util.mesh import load_mesh
from said.util.parser import parse_list
from said.util.blendshape import load_blendshape_coeffs
from rendering.render_visual import RendererObject, render_blendshape_coefficients | 1,256 | """Render the animation
"""
os.environ["PYOPENGL_PLATFORM"] = "egl"
def main() -> None:
"""Main function"""
default_data_dir = pathlib.Path(__file__).resolve().parent.parent / "data"
# Arguments
parser = argparse.ArgumentParser(description="Render the animation")
parser.add_argument(
"--neutral_path",
type=str,
default="../BlendVOCA/templates_head/FaceTalk_170731_00024_TA.obj",
help="Path of the neutral mesh",
)
parser.add_argument(
"--blendshapes_dir",
type=str,
default="../BlendVOCA/blendshapes_head/FaceTalk_170731_00024_TA",
help="Directory of the blendshape meshes",
)
parser.add_argument(
"--audio_path",
type=str,
default="../BlendVOCA/audio/FaceTalk_170731_00024_TA/sentence01.wav",
help="Path of the audio file",
)
parser.add_argument(
"--blendshape_coeffs_path",
type=str,
default="../BlendVOCA/blendshape_coeffs/FaceTalk_170731_00024_TA/sentence01.csv",
help="Path of the blendshape coefficient sequence",
)
parser.add_argument(
"--blendshape_list_path",
type=str,
default=default_data_dir / "ARKit_blendshapes.txt",
help="List of the blendshapes",
)
parser.add_argument(
"--show_difference",
type=bool,
default=False,
help="Show the vertex differences from the target blendshape coefficients as a heatmap",
)
parser.add_argument(
"--target_diff_blendshape_coeffs_path",
type=str,
default="../BlendVOCA/blendshape_coeffs/FaceTalk_170731_00024_TA/sentence01.csv",
help="Path of the target blendshape coefficient sequence to compute the vertex differences. Its length should be same as the source's.",
)
parser.add_argument(
"--max_diff",
type=float,
default=0.001,
help="Maximum threshold to visualize the vertex differences",
)
parser.add_argument(
"--fps",
type=int,
default=60,
help="FPS of the blendshape coefficients sequence",
)
parser.add_argument(
"--output_path",
type=str,
default="../out.mp4",
help="Path of the output video file",
)
parser.add_argument(
"--save_images",
type=bool,
default=False,
help="Save the image for each frame",
)
parser.add_argument(
"--output_images_dir",
type=str,
default="../out_imgs",
help="Saving directory of the output image for each frame",
)
args = parser.parse_args()
neutral_path = args.neutral_path
blendshapes_dir = args.blendshapes_dir
audio_path = args.audio_path
blendshape_coeffs_path = args.blendshape_coeffs_path
blendshape_list_path = args.blendshape_list_path
show_difference = args.show_difference
target_diff_blendshape_coeffs_path = args.target_diff_blendshape_coeffs_path
max_diff = args.max_diff
fps = args.fps
output_path = args.output_path
save_images = args.save_images
output_images_dir = args.output_images_dir
blendshape_name_list = parse_list(blendshape_list_path, str)
# Create renderer
renderer = RendererObject()
neutral_mesh = load_mesh(neutral_path)
blendshape_vectors = []
for bl_name in blendshape_name_list:
bl_path = os.path.join(blendshapes_dir, f"{bl_name}.obj")
bl_mesh = load_mesh(bl_path)
blendshape_vectors.append(bl_mesh.vertices.reshape((-1, 1)))
blendshapes_matrix = np.concatenate(blendshape_vectors, axis=1)
| """Render the animation
"""
os.environ["PYOPENGL_PLATFORM"] = "egl"
def main() -> None:
"""Main function"""
default_data_dir = pathlib.Path(__file__).resolve().parent.parent / "data"
# Arguments
parser = argparse.ArgumentParser(description="Render the animation")
parser.add_argument(
"--neutral_path",
type=str,
default="../BlendVOCA/templates_head/FaceTalk_170731_00024_TA.obj",
help="Path of the neutral mesh",
)
parser.add_argument(
"--blendshapes_dir",
type=str,
default="../BlendVOCA/blendshapes_head/FaceTalk_170731_00024_TA",
help="Directory of the blendshape meshes",
)
parser.add_argument(
"--audio_path",
type=str,
default="../BlendVOCA/audio/FaceTalk_170731_00024_TA/sentence01.wav",
help="Path of the audio file",
)
parser.add_argument(
"--blendshape_coeffs_path",
type=str,
default="../BlendVOCA/blendshape_coeffs/FaceTalk_170731_00024_TA/sentence01.csv",
help="Path of the blendshape coefficient sequence",
)
parser.add_argument(
"--blendshape_list_path",
type=str,
default=default_data_dir / "ARKit_blendshapes.txt",
help="List of the blendshapes",
)
parser.add_argument(
"--show_difference",
type=bool,
default=False,
help="Show the vertex differences from the target blendshape coefficients as a heatmap",
)
parser.add_argument(
"--target_diff_blendshape_coeffs_path",
type=str,
default="../BlendVOCA/blendshape_coeffs/FaceTalk_170731_00024_TA/sentence01.csv",
help="Path of the target blendshape coefficient sequence to compute the vertex differences. Its length should be same as the source's.",
)
parser.add_argument(
"--max_diff",
type=float,
default=0.001,
help="Maximum threshold to visualize the vertex differences",
)
parser.add_argument(
"--fps",
type=int,
default=60,
help="FPS of the blendshape coefficients sequence",
)
parser.add_argument(
"--output_path",
type=str,
default="../out.mp4",
help="Path of the output video file",
)
parser.add_argument(
"--save_images",
type=bool,
default=False,
help="Save the image for each frame",
)
parser.add_argument(
"--output_images_dir",
type=str,
default="../out_imgs",
help="Saving directory of the output image for each frame",
)
args = parser.parse_args()
neutral_path = args.neutral_path
blendshapes_dir = args.blendshapes_dir
audio_path = args.audio_path
blendshape_coeffs_path = args.blendshape_coeffs_path
blendshape_list_path = args.blendshape_list_path
show_difference = args.show_difference
target_diff_blendshape_coeffs_path = args.target_diff_blendshape_coeffs_path
max_diff = args.max_diff
fps = args.fps
output_path = args.output_path
save_images = args.save_images
output_images_dir = args.output_images_dir
blendshape_name_list = parse_list(blendshape_list_path, str)
# Create renderer
renderer = RendererObject()
neutral_mesh = load_mesh(neutral_path)
blendshape_vectors = []
for bl_name in blendshape_name_list:
bl_path = os.path.join(blendshapes_dir, f"{bl_name}.obj")
bl_mesh = load_mesh(bl_path)
blendshape_vectors.append(bl_mesh.vertices.reshape((-1, 1)))
blendshapes_matrix = np.concatenate(blendshape_vectors, axis=1) | blendshape_coeffs = load_blendshape_coeffs(blendshape_coeffs_path).numpy() | 2 | 2023-11-03 06:38:51+00:00 | 2k |