repo_name (stringlengths 7-71) | file_path (stringlengths 5-118) | context (list) | import_statement (stringlengths 45-12.5k) | token_num (int64 641-99.4k) | cropped_code (stringlengths 44-17k) | all_code (stringlengths 43-754k) | next_line (stringlengths 2-330) | gold_snippet_index (int64 0-68) | created_at (stringlengths 25) | level (stringclasses 9 values)
---|---|---|---|---|---|---|---|---|---|---|
generative-skill-chaining/gsc-code | generative_skill_chaining/envs/pybullet/table/objects.py | [
{
"identifier": "body",
"path": "generative_skill_chaining/envs/pybullet/sim/body.py",
"snippet": "class Body:\nclass Link:\n def aabb(self) -> np.ndarray:\n def pose(self) -> math.Pose:\n def set_pose(self, pose: math.Pose) -> None:\n def twist(self) -> np.ndarray:\n def dof(self) -> int:\n def inertia(self) -> dyn.SpatialInertiad:\n def freeze(self) -> bool:\n def unfreeze(self) -> bool:\n def name(self) -> str:\n def pose(self) -> math.Pose:\n def inertia(self) -> str:\n def joint_limits(self) -> np.ndarray:"
},
{
"identifier": "math",
"path": "generative_skill_chaining/envs/pybullet/sim/math.py",
"snippet": "PYBULLET_STEPS_PER_SEC = 240\nPYBULLET_TIMESTEP = 1 / PYBULLET_STEPS_PER_SEC\nclass Pose:\n def from_eigen(pose: eigen.Isometry3d) -> \"Pose\":\n def to_eigen(self) -> eigen.Isometry3d:\ndef comb(n: int, r: int) -> int:"
},
{
"identifier": "shapes",
"path": "generative_skill_chaining/envs/pybullet/sim/shapes.py",
"snippet": "def create_body(\n shapes: Union[\"Shape\", Sequence[\"Shape\"]],\n link_parents: Optional[Sequence[int]] = None,\n physics_id: int = 0,\n) -> int:\n def visual_kwargs(\n self, is_base: bool = False\n ) -> Tuple[Dict[str, Any], Dict[str, Any]]:\n def create_visual(self, physics_id: int, is_base: bool = False) -> Tuple[int, int]:\n def visual_kwargs(\n self, is_base: bool = False\n ) -> Tuple[Dict[str, Any], Dict[str, Any]]:\n def visual_kwargs(\n self, is_base: bool = False\n ) -> Tuple[Dict[str, Any], Dict[str, Any]]:\n def visual_kwargs(\n self, is_base: bool = False\n ) -> Tuple[Dict[str, Any], Dict[str, Any]]:\nclass JointType(enum.IntEnum):\nclass Joint:\nclass Shape:\nclass Box(Shape):\nclass Cylinder(Shape):\nclass Sphere(Shape):\n REVOLUTE = p.JOINT_REVOLUTE\n PRISMATIC = p.JOINT_PRISMATIC\n SPHERICAL = p.JOINT_SPHERICAL\n FIXED = p.JOINT_FIXED"
},
{
"identifier": "object_state",
"path": "generative_skill_chaining/envs/pybullet/table/object_state.py",
"snippet": "class ObjectState:\n RANGES = {\n \"x\": (-0.3, 0.9),\n \"y\": (-0.5, 0.5),\n \"z\": (-0.1, 0.5),\n \"wx\": (-np.pi, np.pi),\n \"wy\": (-np.pi, np.pi),\n \"wz\": (-np.pi, np.pi),\n \"box_size_x\": (0.0, 0.1),\n \"box_size_y\": (0.0, 0.1),\n \"box_size_z\": (0.0, 0.2),\n \"head_length\": (0.0, 0.3),\n \"handle_length\": (0.0, 0.5),\n \"handle_y\": (-1.0, 1.0),\n }\n def __init__(self, vector: Optional[np.ndarray] = None):\n def pos(self) -> np.ndarray:\n def pos(self, pos: np.ndarray) -> None:\n def aa(self) -> np.ndarray:\n def aa(self, aa: np.ndarray) -> None:\n def box_size(self) -> np.ndarray:\n def box_size(self, box_size: np.ndarray) -> None:\n def head_length(self) -> Union[float, np.ndarray]:\n def head_length(self, head_length: Union[float, np.ndarray]) -> None:\n def handle_length(self) -> Union[float, np.ndarray]:\n def handle_length(self, handle_length: Union[float, np.ndarray]) -> None:\n def handle_y(self) -> Union[float, np.ndarray]:\n def handle_y(self, handle_y: Union[float, np.ndarray]) -> None:\n def range(cls) -> np.ndarray:\n def pose(self) -> math.Pose:\n def set_pose(self, pose: math.Pose) -> None:\n def __repr__(self) -> str:"
}
] | import dataclasses
import itertools
import random
import numpy as np
import pybullet as p
import spatialdyn as dyn
from typing import Any, Dict, Iterator, List, Optional, Sequence, Tuple, Type, Union
from ctrlutils import eigen
from generative_skill_chaining.envs.pybullet.sim import body, math, shapes
from generative_skill_chaining.envs.pybullet.table import object_state
from generative_skill_chaining.envs.pybullet.table.utils import load_config | 2,762 | self.body_id, link_id, 0, 0, physicsClientId=self.physics_id
)
def enable_collisions(self) -> None:
for link_id in range(self.dof):
p.setCollisionFilterGroupMask(
self.body_id, link_id, 1, 0xFF, physicsClientId=self.physics_id
)
@property
def inertia(self) -> dyn.SpatialInertiad:
try:
return self._obj_inertia # type: ignore
except AttributeError:
pass
self._obj_inertia = super().inertia
if self._modified_axes:
self._obj_inertia = self._obj_inertia * self._T_pybullet_to_obj
T_world_to_obj = self.pose().to_eigen().inverse()
for link_id in range(self.dof):
link = body.Link(self.physics_id, self.body_id, link_id)
T_link_to_obj = T_world_to_obj * link.pose().to_eigen()
self._obj_inertia += link.inertia * T_link_to_obj
return self._obj_inertia
def state(self) -> object_state.ObjectState:
pose = self.pose()
aa = eigen.AngleAxisd(eigen.Quaterniond(pose.quat))
self._state.pos = pose.pos
self._state.aa = aa.angle * aa.axis
return self._state
def set_state(self, state: object_state.ObjectState) -> None:
self.set_pose(state.pose())
def reset(self, action_skeleton: List) -> None:
pass
@classmethod
def create(
cls,
physics_id: int,
object_type: Optional[str],
object_kwargs: Dict[str, Any] = {},
object_groups: Dict[str, "ObjectGroup"] = {},
**kwargs,
) -> "Object":
object_class = Null if object_type is None else globals()[object_type]
if issubclass(object_class, Variant):
kwargs["object_groups"] = object_groups
object_kwargs = object_kwargs.copy()
object_kwargs.update(kwargs)
return object_class(physics_id=physics_id, **object_kwargs)
def isinstance(self, class_or_tuple: Union[type, Tuple[type, ...]]) -> bool:
return isinstance(self, class_or_tuple)
def type(self) -> Type["Object"]:
return type(self)
@property
def size(self) -> np.ndarray:
raise NotImplementedError
@property
def bbox(self) -> np.ndarray:
"""Returns the bounding box in the object frame.
If the origin of the object is at its geometric center, this will be
equivalent to `(-0.5 * self.size, 0.5 * self.size)`.
Returns:
An array of shape [2, 3] (min/max, x/y/z).
"""
raise NotImplementedError
def convex_hulls(
self, world_frame: bool = True, project_2d: bool = False
) -> List[np.ndarray]:
"""Computes the object's convex hull.
These hulls will be used for rough collision checking. By default,
the vertices will be the 6 corners of the object's bounding box
(`Object.bbox`).
Args:
world_frame: Whether to transform the vertices in world frame or
leave them in object frame.
project_2d: Whether to return the 2d convex hull.
Returns:
List of arrays of shape [_, 3] or [_, 2], where each array is a
convex hull.
"""
pose = self.pose() if world_frame else None
vertices = compute_bbox_vertices(self.bbox, pose, project_2d)
return [vertices]
def aabb(self) -> np.ndarray:
"""Computes the axis-aligned bounding box from the object pose and size.
This should be more accurate than `super().aabb()`, which gets the aabb
from Pybullet. Pybullet returns an *enlarged* aabb for the object *base*
link, while this returns the exact aabb for the entire object.
Returns:
An array of shape [2, 3] (min/max, x/y/z).
"""
vertices = np.concatenate(self.convex_hulls(world_frame=True), axis=0)
xyz_min = vertices.min(axis=0)
xyz_max = vertices.max(axis=0)
return np.array([xyz_min, xyz_max])
@property
|
OBJECT_HIERARCHY = ["rack", "table", "hook", "box"]
def compute_bbox_vertices(
bbox: np.ndarray, pose: Optional[math.Pose] = None, project_2d: bool = False
) -> np.ndarray:
"""Computes the vertices of the given 3D bounding box.
Args:
bbox: Array of shape [2, 3] (min/max, x/y/z).
pose: Optional pose to transform the vertices.
project_2d: Whether to return 2D vertices or 3D vertices.
Returns:
Array of shape [6, 3] for 3D or [4, 2] for 2D.
"""
xs, ys, zs = bbox.T
if project_2d:
# 2D box with vertices in clockwise order.
vertices = np.array(
[[xs[0], ys[0]], [xs[0], ys[1]], [xs[1], ys[1]], [xs[1], ys[0]]]
)
if pose is not None:
vertices = np.concatenate(
(vertices, np.tile([zs.mean(), 1.0], (vertices.shape[0], 1))), axis=1
)
vertices = (vertices @ pose.to_eigen().matrix.T)[:, :2]
else:
# 3D box.
vertices = np.array(list(itertools.product(xs, ys, zs, [1.0])))
if pose is not None:
vertices = vertices @ pose.to_eigen().matrix.T
vertices = vertices[:, :3]
return vertices
@dataclasses.dataclass
class Object(body.Body):
name: str
is_static: bool = False
def __init__(
self, physics_id: int, body_id: int, name: str, is_static: bool = False
):
super().__init__(physics_id, body_id)
self.name = name
self.is_static = is_static
T_pybullet_to_obj = super().pose().to_eigen()
self._modified_axes = not T_pybullet_to_obj.is_approx(
eigen.Isometry3d.identity()
)
if self._modified_axes:
self._T_pybullet_to_obj = T_pybullet_to_obj
self._T_obj_to_pybullet = T_pybullet_to_obj.inverse()
self._state = object_state.ObjectState()
def pose(self) -> math.Pose:
if not self._modified_axes:
return super().pose()
return math.Pose.from_eigen(super().pose().to_eigen() * self._T_obj_to_pybullet)
def set_pose(self, pose: math.Pose) -> None:
if not self._modified_axes:
return super().set_pose(pose)
return super().set_pose(
math.Pose.from_eigen(pose.to_eigen() * self._T_pybullet_to_obj)
)
def disable_collisions(self) -> None:
for link_id in range(self.dof):
p.setCollisionFilterGroupMask(
self.body_id, link_id, 0, 0, physicsClientId=self.physics_id
)
def enable_collisions(self) -> None:
for link_id in range(self.dof):
p.setCollisionFilterGroupMask(
self.body_id, link_id, 1, 0xFF, physicsClientId=self.physics_id
)
@property
def inertia(self) -> dyn.SpatialInertiad:
try:
return self._obj_inertia # type: ignore
except AttributeError:
pass
self._obj_inertia = super().inertia
if self._modified_axes:
self._obj_inertia = self._obj_inertia * self._T_pybullet_to_obj
T_world_to_obj = self.pose().to_eigen().inverse()
for link_id in range(self.dof):
link = body.Link(self.physics_id, self.body_id, link_id)
T_link_to_obj = T_world_to_obj * link.pose().to_eigen()
self._obj_inertia += link.inertia * T_link_to_obj
return self._obj_inertia
def state(self) -> object_state.ObjectState:
pose = self.pose()
aa = eigen.AngleAxisd(eigen.Quaterniond(pose.quat))
self._state.pos = pose.pos
self._state.aa = aa.angle * aa.axis
return self._state
def set_state(self, state: object_state.ObjectState) -> None:
self.set_pose(state.pose())
def reset(self, action_skeleton: List) -> None:
pass
@classmethod
def create(
cls,
physics_id: int,
object_type: Optional[str],
object_kwargs: Dict[str, Any] = {},
object_groups: Dict[str, "ObjectGroup"] = {},
**kwargs,
) -> "Object":
object_class = Null if object_type is None else globals()[object_type]
if issubclass(object_class, Variant):
kwargs["object_groups"] = object_groups
object_kwargs = object_kwargs.copy()
object_kwargs.update(kwargs)
return object_class(physics_id=physics_id, **object_kwargs)
def isinstance(self, class_or_tuple: Union[type, Tuple[type, ...]]) -> bool:
return isinstance(self, class_or_tuple)
def type(self) -> Type["Object"]:
return type(self)
@property
def size(self) -> np.ndarray:
raise NotImplementedError
@property
def bbox(self) -> np.ndarray:
"""Returns the bounding box in the object frame.
If the origin of the object is at its geometric center, this will be
equivalent to `(-0.5 * self.size, 0.5 * self.size)`.
Returns:
An array of shape [2, 3] (min/max, x/y/z).
"""
raise NotImplementedError
def convex_hulls(
self, world_frame: bool = True, project_2d: bool = False
) -> List[np.ndarray]:
"""Computes the object's convex hull.
These hulls will be used for rough collision checking. By default,
the vertices will be the 6 corners of the object's bounding box
(`Object.bbox`).
Args:
world_frame: Whether to transform the vertices in world frame or
leave them in object frame.
project_2d: Whether to return the 2d convex hull.
Returns:
List of arrays of shape [_, 3] or [_, 2], where each array is a
convex hull.
"""
pose = self.pose() if world_frame else None
vertices = compute_bbox_vertices(self.bbox, pose, project_2d)
return [vertices]
def aabb(self) -> np.ndarray:
"""Computes the axis-aligned bounding box from the object pose and size.
This should be more accurate than `super().aabb()`, which gets the aabb
from Pybullet. Pybullet returns an *enlarged* aabb for the object *base*
link, while this returns the exact aabb for the entire object.
Returns:
An array of shape [2, 3] (min/max, x/y/z).
"""
vertices = np.concatenate(self.convex_hulls(world_frame=True), axis=0)
xyz_min = vertices.min(axis=0)
xyz_max = vertices.max(axis=0)
return np.array([xyz_min, xyz_max])
@property | def shapes(self) -> Sequence[shapes.Shape]: | 2 | 2023-10-16 00:22:40+00:00 | 4k |
ChiyuSONG/dynamics-of-instruction-tuning | evaluate/pred.py | [
{
"identifier": "Assistant",
"path": "inference.py",
"snippet": "class Assistant:\n def __init__(self, model_name_or_path):\n tokenizer = LlamaTokenizer.from_pretrained(model_name_or_path)\n tokenizer.padding_side = \"left\"\n tokenizer.user_token_id, tokenizer.assistant_token_id, tokenizer.eot_token_id \\\n = tokenizer.convert_tokens_to_ids(ATTR_TO_SPECIAL_TOKEN[\"additional_special_tokens\"])\n model = LlamaForCausalLM.from_pretrained(model_name_or_path, device_map=\"auto\")\n model.tie_weights()\n model.eval()\n self.tokenizer = tokenizer\n self.model = model\n self.seed = 0\n # use greedy decoding as default\n self.config = GenerationConfig(\n max_new_tokens=1024,\n min_length=1,\n do_sample=False,\n output_scores=True,\n return_dict_in_generate=True,\n pad_token_id=tokenizer.pad_token_id,\n eos_token_id=[tokenizer.bos_token_id, tokenizer.eos_token_id, tokenizer.unk_token_id,\n tokenizer.eot_token_id, tokenizer.user_token_id, tokenizer.assistant_token_id],\n )\n set_seed(self.seed)\n\n def inference(self, batch):\n processed = process(batch, tokenizer=self.tokenizer)\n data_collator = DataCollatorForSupervisedDataset(tokenizer=self.tokenizer, pad_to_multiple_of=8)\n inputs = data_collator(processed)\n for key in inputs:\n inputs[key] = inputs[key].to(\"cuda\")\n outputs = self.model.generate(\n **inputs,\n generation_config = self.config\n )\n scores = outputs.scores[-1]\n sequences = self.tokenizer.batch_decode(outputs.sequences, skip_special_tokens=True)\n prefix = self.tokenizer.batch_decode(inputs[\"input_ids\"], skip_special_tokens=True)\n responses = [sequences[i][len(prefix[i]) : ].strip() for i in range(len(sequences))]\n return responses, scores"
},
{
"identifier": "IGNORE_INDEX",
"path": "train_sft.py",
"snippet": "IGNORE_INDEX = -100"
},
{
"identifier": "DataCollatorForSupervisedDataset",
"path": "train_sft.py",
"snippet": "class DataCollatorForSupervisedDataset(object):\n \"\"\"Collate examples for supervised fine-tuning.\"\"\"\n\n tokenizer: transformers.PreTrainedTokenizer\n pad_to_multiple_of: Optional[int] = None\n\n def __call__(self, instances: Sequence[Dict]) -> Dict[str, torch.Tensor]:\n input_ids, labels, attention_mask = tuple([instance[key] for instance in instances] for key in (\"input_ids\", \"labels\", \"attention_mask\"))\n\n max_label_length = max(len(l) for l in labels)\n if self.pad_to_multiple_of is not None:\n max_label_length = (\n (max_label_length + self.pad_to_multiple_of - 1)\n // self.pad_to_multiple_of\n * self.pad_to_multiple_of\n )\n input_ids = self.pad_sequence(input_ids, self.tokenizer.pad_token_id, max_label_length)\n labels = self.pad_sequence(labels, IGNORE_INDEX, max_label_length)\n attention_mask = self.pad_sequence(attention_mask, 0, max_label_length)\n\n return dict(\n input_ids=input_ids,\n labels=labels,\n attention_mask=attention_mask,\n )\n\n def pad_sequence(self, feature, padding_value, max_label_length):\n for idx, instance in enumerate(feature):\n remainder = torch.LongTensor( [padding_value] * (max_label_length - len(instance)) )\n feature[idx] = torch.cat((instance, remainder), 0) if self.tokenizer.padding_side == \"right\" \\\n else torch.cat((remainder, instance), 0)\n return torch.stack(feature, dim = 0)"
}
] | import os
import sys
import torch
import json
import jsonlines
import copy
from pathlib import Path
from argparse import ArgumentParser
from tqdm import tqdm
from inference import Assistant
from train_sft import IGNORE_INDEX, DataCollatorForSupervisedDataset | 1,683 | sys.path.append(".")
def process(example, tokenizer):
processed = []
user = tokenizer.user_token_id
assistant = tokenizer.assistant_token_id
eot = tokenizer.eot_token_id
def tokenize(s):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(s.strip()))
for choice in example["choices"]:
input_ids = []
labels = []
messages = copy.deepcopy(example["messages"])[:-1]
for message in messages:
input_ids.append(user if message["role"] == "user" else assistant)
labels.append(IGNORE_INDEX)
content = tokenize(message["content"]) + [eot]
input_ids.extend(content)
labels.extend([IGNORE_INDEX] * len(content))
input_ids.append(assistant)
labels.append(IGNORE_INDEX)
content = tokenize(choice) + [eot]
input_ids.extend(content)
labels.extend(content)
input_ids = input_ids[:2048]
labels = labels[:2048]
assert len(input_ids) == len(labels)
attention_mask = [1] * len(input_ids)
processed.append({'input_ids': torch.LongTensor(input_ids), 'labels': torch.LongTensor(labels),
'attention_mask': torch.LongTensor(attention_mask)})
return processed
def main():
parser = ArgumentParser()
parser.add_argument(
"--model_name_or_path", type=str, default="runs/runs-7b/curated-160/20231017-2215/checkpoint-33"
)
parser.add_argument(
"--eval_data_path", type=str, default="data/curated/valid"
)
args = parser.parse_args()
assistant = Assistant(args.model_name_or_path)
path = Path(args.eval_data_path)
data_files = [os.path.join(path, file.name) for file in path.glob("*.json")]
for data_file in data_files:
dir_name = os.path.dirname(data_file)
file_name = os.path.basename(data_file)
input_path = os.path.join(dir_name, file_name)
base, ckpname = os.path.split(args.model_name_or_path)
base, timestamp = os.path.split(base)
base, model_type = os.path.split(base)
base, model_sz = os.path.split(base)
assert "runs-" in model_sz
model_sz = model_sz.replace("runs-", "")
output_path = os.path.join("evaluate", "pred-data", os.path.split(dir_name)[-1], model_sz, model_type, ckpname, "pred_"+file_name)
os.makedirs(os.path.dirname(output_path), exist_ok=True)
data = []
with open(input_path, 'r', encoding='utf8') as f:
for line in f:
data.append(json.loads(line))
for sample in tqdm(data):
if sample["question_format"] == 0:
test_sample = copy.deepcopy(sample)
test_sample["messages"] = test_sample["messages"][:-1]
responses, scores = assistant.inference([test_sample])
generated_response = responses[0] # string
generated_score = scores[0].tolist() # |V|
assert "generated_response" not in sample
sample["generated_response"] = generated_response
assert "generated_score" not in sample
sample["generated_score"] = generated_score
with jsonlines.open(output_path, mode="a") as f:
f.write(sample)
elif sample["question_format"] == 1:
assert "choices" in sample
assert len(sample["choices"]) == 3
assert "generated_ppl" not in sample
tokenizer = assistant.tokenizer
model = assistant.model
processed_samples = process(sample, tokenizer)
assert len(processed_samples) == 3
| sys.path.append(".")
def process(example, tokenizer):
processed = []
user = tokenizer.user_token_id
assistant = tokenizer.assistant_token_id
eot = tokenizer.eot_token_id
def tokenize(s):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(s.strip()))
for choice in example["choices"]:
input_ids = []
labels = []
messages = copy.deepcopy(example["messages"])[:-1]
for message in messages:
input_ids.append(user if message["role"] == "user" else assistant)
labels.append(IGNORE_INDEX)
content = tokenize(message["content"]) + [eot]
input_ids.extend(content)
labels.extend([IGNORE_INDEX] * len(content))
input_ids.append(assistant)
labels.append(IGNORE_INDEX)
content = tokenize(choice) + [eot]
input_ids.extend(content)
labels.extend(content)
input_ids = input_ids[:2048]
labels = labels[:2048]
assert len(input_ids) == len(labels)
attention_mask = [1] * len(input_ids)
processed.append({'input_ids': torch.LongTensor(input_ids), 'labels': torch.LongTensor(labels),
'attention_mask': torch.LongTensor(attention_mask)})
return processed
def main():
parser = ArgumentParser()
parser.add_argument(
"--model_name_or_path", type=str, default="runs/runs-7b/curated-160/20231017-2215/checkpoint-33"
)
parser.add_argument(
"--eval_data_path", type=str, default="data/curated/valid"
)
args = parser.parse_args()
assistant = Assistant(args.model_name_or_path)
path = Path(args.eval_data_path)
data_files = [os.path.join(path, file.name) for file in path.glob("*.json")]
for data_file in data_files:
dir_name = os.path.dirname(data_file)
file_name = os.path.basename(data_file)
input_path = os.path.join(dir_name, file_name)
base, ckpname = os.path.split(args.model_name_or_path)
base, timestamp = os.path.split(base)
base, model_type = os.path.split(base)
base, model_sz = os.path.split(base)
assert "runs-" in model_sz
model_sz = model_sz.replace("runs-", "")
output_path = os.path.join("evaluate", "pred-data", os.path.split(dir_name)[-1], model_sz, model_type, ckpname, "pred_"+file_name)
os.makedirs(os.path.dirname(output_path), exist_ok=True)
data = []
with open(input_path, 'r', encoding='utf8') as f:
for line in f:
data.append(json.loads(line))
for sample in tqdm(data):
if sample["question_format"] == 0:
test_sample = copy.deepcopy(sample)
test_sample["messages"] = test_sample["messages"][:-1]
responses, scores = assistant.inference([test_sample])
generated_response = responses[0] # string
generated_score = scores[0].tolist() # |V|
assert "generated_response" not in sample
sample["generated_response"] = generated_response
assert "generated_score" not in sample
sample["generated_score"] = generated_score
with jsonlines.open(output_path, mode="a") as f:
f.write(sample)
elif sample["question_format"] == 1:
assert "choices" in sample
assert len(sample["choices"]) == 3
assert "generated_ppl" not in sample
tokenizer = assistant.tokenizer
model = assistant.model
processed_samples = process(sample, tokenizer)
assert len(processed_samples) == 3 | data_collator = DataCollatorForSupervisedDataset(tokenizer=tokenizer, pad_to_multiple_of=8) | 2 | 2023-10-17 07:41:58+00:00 | 4k |
akashgreninja/GreSec | backend/venv/lib/python3.10/site-packages/anyio/_core/_synchronization.py | [
{
"identifier": "cancel_shielded_checkpoint",
"path": "backend/venv/lib/python3.10/site-packages/anyio/lowlevel.py",
"snippet": "async def cancel_shielded_checkpoint() -> None:\n \"\"\"\n Allow the scheduler to switch to another task but without checking for cancellation.\n\n Equivalent to (but potentially more efficient than)::\n\n with CancelScope(shield=True):\n await checkpoint()\n\n\n .. versionadded:: 3.0\n\n \"\"\"\n await get_asynclib().cancel_shielded_checkpoint()"
},
{
"identifier": "checkpoint",
"path": "backend/venv/lib/python3.10/site-packages/anyio/lowlevel.py",
"snippet": "async def checkpoint() -> None:\n \"\"\"\n Check for cancellation and allow the scheduler to switch to another task.\n\n Equivalent to (but more efficient than)::\n\n await checkpoint_if_cancelled()\n await cancel_shielded_checkpoint()\n\n\n .. versionadded:: 3.0\n\n \"\"\"\n await get_asynclib().checkpoint()"
},
{
"identifier": "checkpoint_if_cancelled",
"path": "backend/venv/lib/python3.10/site-packages/anyio/lowlevel.py",
"snippet": "async def checkpoint_if_cancelled() -> None:\n \"\"\"\n Enter a checkpoint if the enclosing cancel scope has been cancelled.\n\n This does not allow the scheduler to switch to a different task.\n\n .. versionadded:: 3.0\n\n \"\"\"\n await get_asynclib().checkpoint_if_cancelled()"
},
{
"identifier": "DeprecatedAwaitable",
"path": "backend/venv/lib/python3.10/site-packages/anyio/_core/_compat.py",
"snippet": "class DeprecatedAwaitable:\n def __init__(self, func: Callable[..., DeprecatedAwaitable]):\n self._name = f\"{func.__module__}.{func.__qualname__}\"\n\n def __await__(self) -> Generator[None, None, None]:\n _warn_deprecation(self)\n if False:\n yield\n\n def __reduce__(self) -> tuple[type[None], tuple[()]]:\n return type(None), ()\n\n def _unwrap(self) -> None:\n return None"
},
{
"identifier": "get_asynclib",
"path": "backend/venv/lib/python3.10/site-packages/anyio/_core/_eventloop.py",
"snippet": "def get_asynclib(asynclib_name: str | None = None) -> Any:\n if asynclib_name is None:\n asynclib_name = sniffio.current_async_library()\n\n modulename = \"anyio._backends._\" + asynclib_name\n try:\n return sys.modules[modulename]\n except KeyError:\n return import_module(modulename)"
},
{
"identifier": "BusyResourceError",
"path": "backend/venv/lib/python3.10/site-packages/anyio/_core/_exceptions.py",
"snippet": "class BusyResourceError(Exception):\n \"\"\"Raised when two tasks are trying to read from or write to the same resource concurrently.\"\"\"\n\n def __init__(self, action: str):\n super().__init__(f\"Another task is already {action} this resource\")"
},
{
"identifier": "WouldBlock",
"path": "backend/venv/lib/python3.10/site-packages/anyio/_core/_exceptions.py",
"snippet": "class WouldBlock(Exception):\n \"\"\"Raised by ``X_nowait`` functions if ``X()`` would block.\"\"\""
},
{
"identifier": "CancelScope",
"path": "backend/venv/lib/python3.10/site-packages/anyio/_core/_tasks.py",
"snippet": "class CancelScope(DeprecatedAsyncContextManager[\"CancelScope\"]):\n \"\"\"\n Wraps a unit of work that can be made separately cancellable.\n\n :param deadline: The time (clock value) when this scope is cancelled automatically\n :param shield: ``True`` to shield the cancel scope from external cancellation\n \"\"\"\n\n def __new__(\n cls, *, deadline: float = math.inf, shield: bool = False\n ) -> CancelScope:\n return get_asynclib().CancelScope(shield=shield, deadline=deadline)\n\n def cancel(self) -> DeprecatedAwaitable:\n \"\"\"Cancel this scope immediately.\"\"\"\n raise NotImplementedError\n\n @property\n def deadline(self) -> float:\n \"\"\"\n The time (clock value) when this scope is cancelled automatically.\n\n Will be ``float('inf')`` if no timeout has been set.\n\n \"\"\"\n raise NotImplementedError\n\n @deadline.setter\n def deadline(self, value: float) -> None:\n raise NotImplementedError\n\n @property\n def cancel_called(self) -> bool:\n \"\"\"``True`` if :meth:`cancel` has been called.\"\"\"\n raise NotImplementedError\n\n @property\n def shield(self) -> bool:\n \"\"\"\n ``True`` if this scope is shielded from external cancellation.\n\n While a scope is shielded, it will not receive cancellations from outside.\n\n \"\"\"\n raise NotImplementedError\n\n @shield.setter\n def shield(self, value: bool) -> None:\n raise NotImplementedError\n\n def __enter__(self) -> CancelScope:\n raise NotImplementedError\n\n def __exit__(\n self,\n exc_type: type[BaseException] | None,\n exc_val: BaseException | None,\n exc_tb: TracebackType | None,\n ) -> bool | None:\n raise NotImplementedError"
},
{
"identifier": "TaskInfo",
"path": "backend/venv/lib/python3.10/site-packages/anyio/_core/_testing.py",
"snippet": "class TaskInfo:\n \"\"\"\n Represents an asynchronous task.\n\n :ivar int id: the unique identifier of the task\n :ivar parent_id: the identifier of the parent task, if any\n :vartype parent_id: Optional[int]\n :ivar str name: the description of the task (if any)\n :ivar ~collections.abc.Coroutine coro: the coroutine object of the task\n \"\"\"\n\n __slots__ = \"_name\", \"id\", \"parent_id\", \"name\", \"coro\"\n\n def __init__(\n self,\n id: int,\n parent_id: int | None,\n name: str | None,\n coro: Generator[Any, Any, Any] | Awaitable[Any],\n ):\n func = get_current_task\n self._name = f\"{func.__module__}.{func.__qualname__}\"\n self.id: int = id\n self.parent_id: int | None = parent_id\n self.name: str | None = name\n self.coro: Generator[Any, Any, Any] | Awaitable[Any] = coro\n\n def __eq__(self, other: object) -> bool:\n if isinstance(other, TaskInfo):\n return self.id == other.id\n\n return NotImplemented\n\n def __hash__(self) -> int:\n return hash(self.id)\n\n def __repr__(self) -> str:\n return f\"{self.__class__.__name__}(id={self.id!r}, name={self.name!r})\"\n\n def __await__(self) -> Generator[None, None, TaskInfo]:\n _warn_deprecation(self)\n if False:\n yield\n\n return self\n\n def _unwrap(self) -> TaskInfo:\n return self"
},
{
"identifier": "get_current_task",
"path": "backend/venv/lib/python3.10/site-packages/anyio/_core/_testing.py",
"snippet": "def get_current_task() -> TaskInfo:\n \"\"\"\n Return the current task.\n\n :return: a representation of the current task\n\n \"\"\"\n return get_asynclib().get_current_task()"
}
] | from collections import deque
from dataclasses import dataclass
from types import TracebackType
from warnings import warn
from ..lowlevel import cancel_shielded_checkpoint, checkpoint, checkpoint_if_cancelled
from ._compat import DeprecatedAwaitable
from ._eventloop import get_asynclib
from ._exceptions import BusyResourceError, WouldBlock
from ._tasks import CancelScope
from ._testing import TaskInfo, get_current_task | 3,299 |
raise
assert self._owner_task == task
else:
try:
await cancel_shielded_checkpoint()
except BaseException:
self.release()
raise
def acquire_nowait(self) -> None:
"""
Acquire the lock, without blocking.
:raises ~anyio.WouldBlock: if the operation would block
"""
task = get_current_task()
if self._owner_task == task:
raise RuntimeError("Attempted to acquire an already held Lock")
if self._owner_task is not None:
raise WouldBlock
self._owner_task = task
def release(self) -> DeprecatedAwaitable:
"""Release the lock."""
if self._owner_task != get_current_task():
raise RuntimeError("The current task is not holding this lock")
if self._waiters:
self._owner_task, event = self._waiters.popleft()
event.set()
else:
del self._owner_task
return DeprecatedAwaitable(self.release)
def locked(self) -> bool:
"""Return True if the lock is currently held."""
return self._owner_task is not None
def statistics(self) -> LockStatistics:
"""
Return statistics about the current state of this lock.
.. versionadded:: 3.0
"""
return LockStatistics(self.locked(), self._owner_task, len(self._waiters))
class Condition:
_owner_task: TaskInfo | None = None
def __init__(self, lock: Lock | None = None):
self._lock = lock or Lock()
self._waiters: deque[Event] = deque()
async def __aenter__(self) -> None:
await self.acquire()
async def __aexit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> None:
self.release()
def _check_acquired(self) -> None:
if self._owner_task != get_current_task():
raise RuntimeError("The current task is not holding the underlying lock")
async def acquire(self) -> None:
"""Acquire the underlying lock."""
await self._lock.acquire()
self._owner_task = get_current_task()
def acquire_nowait(self) -> None:
"""
Acquire the underlying lock, without blocking.
:raises ~anyio.WouldBlock: if the operation would block
"""
self._lock.acquire_nowait()
self._owner_task = get_current_task()
def release(self) -> DeprecatedAwaitable:
"""Release the underlying lock."""
self._lock.release()
return DeprecatedAwaitable(self.release)
def locked(self) -> bool:
"""Return True if the lock is set."""
return self._lock.locked()
def notify(self, n: int = 1) -> None:
"""Notify exactly n listeners."""
self._check_acquired()
for _ in range(n):
try:
event = self._waiters.popleft()
except IndexError:
break
event.set()
def notify_all(self) -> None:
"""Notify all the listeners."""
self._check_acquired()
for event in self._waiters:
event.set()
self._waiters.clear()
async def wait(self) -> None:
"""Wait for a notification."""
| from __future__ import annotations
@dataclass(frozen=True)
class EventStatistics:
"""
:ivar int tasks_waiting: number of tasks waiting on :meth:`~.Event.wait`
"""
tasks_waiting: int
@dataclass(frozen=True)
class CapacityLimiterStatistics:
"""
:ivar int borrowed_tokens: number of tokens currently borrowed by tasks
:ivar float total_tokens: total number of available tokens
:ivar tuple borrowers: tasks or other objects currently holding tokens borrowed from this
limiter
:ivar int tasks_waiting: number of tasks waiting on :meth:`~.CapacityLimiter.acquire` or
:meth:`~.CapacityLimiter.acquire_on_behalf_of`
"""
borrowed_tokens: int
total_tokens: float
borrowers: tuple[object, ...]
tasks_waiting: int
@dataclass(frozen=True)
class LockStatistics:
"""
:ivar bool locked: flag indicating if this lock is locked or not
:ivar ~anyio.TaskInfo owner: task currently holding the lock (or ``None`` if the lock is not
held by any task)
:ivar int tasks_waiting: number of tasks waiting on :meth:`~.Lock.acquire`
"""
locked: bool
owner: TaskInfo | None
tasks_waiting: int
@dataclass(frozen=True)
class ConditionStatistics:
"""
:ivar int tasks_waiting: number of tasks blocked on :meth:`~.Condition.wait`
:ivar ~anyio.LockStatistics lock_statistics: statistics of the underlying :class:`~.Lock`
"""
tasks_waiting: int
lock_statistics: LockStatistics
@dataclass(frozen=True)
class SemaphoreStatistics:
"""
:ivar int tasks_waiting: number of tasks waiting on :meth:`~.Semaphore.acquire`
"""
tasks_waiting: int
class Event:
def __new__(cls) -> Event:
return get_asynclib().Event()
def set(self) -> DeprecatedAwaitable:
"""Set the flag, notifying all listeners."""
raise NotImplementedError
def is_set(self) -> bool:
"""Return ``True`` if the flag is set, ``False`` if not."""
raise NotImplementedError
async def wait(self) -> None:
"""
Wait until the flag has been set.
If the flag has already been set when this method is called, it returns immediately.
"""
raise NotImplementedError
def statistics(self) -> EventStatistics:
"""Return statistics about the current state of this event."""
raise NotImplementedError
class Lock:
_owner_task: TaskInfo | None = None
def __init__(self) -> None:
self._waiters: deque[tuple[TaskInfo, Event]] = deque()
async def __aenter__(self) -> None:
await self.acquire()
async def __aexit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> None:
self.release()
async def acquire(self) -> None:
"""Acquire the lock."""
await checkpoint_if_cancelled()
try:
self.acquire_nowait()
except WouldBlock:
task = get_current_task()
event = Event()
token = task, event
self._waiters.append(token)
try:
await event.wait()
except BaseException:
if not event.is_set():
self._waiters.remove(token)
elif self._owner_task == task:
self.release()
raise
assert self._owner_task == task
else:
try:
await cancel_shielded_checkpoint()
except BaseException:
self.release()
raise
def acquire_nowait(self) -> None:
"""
Acquire the lock, without blocking.
:raises ~anyio.WouldBlock: if the operation would block
"""
task = get_current_task()
if self._owner_task == task:
raise RuntimeError("Attempted to acquire an already held Lock")
if self._owner_task is not None:
raise WouldBlock
self._owner_task = task
def release(self) -> DeprecatedAwaitable:
"""Release the lock."""
if self._owner_task != get_current_task():
raise RuntimeError("The current task is not holding this lock")
if self._waiters:
self._owner_task, event = self._waiters.popleft()
event.set()
else:
del self._owner_task
return DeprecatedAwaitable(self.release)
def locked(self) -> bool:
"""Return True if the lock is currently held."""
return self._owner_task is not None
def statistics(self) -> LockStatistics:
"""
Return statistics about the current state of this lock.
.. versionadded:: 3.0
"""
return LockStatistics(self.locked(), self._owner_task, len(self._waiters))
class Condition:
_owner_task: TaskInfo | None = None
def __init__(self, lock: Lock | None = None):
self._lock = lock or Lock()
self._waiters: deque[Event] = deque()
async def __aenter__(self) -> None:
await self.acquire()
async def __aexit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> None:
self.release()
def _check_acquired(self) -> None:
if self._owner_task != get_current_task():
raise RuntimeError("The current task is not holding the underlying lock")
async def acquire(self) -> None:
"""Acquire the underlying lock."""
await self._lock.acquire()
self._owner_task = get_current_task()
def acquire_nowait(self) -> None:
"""
Acquire the underlying lock, without blocking.
:raises ~anyio.WouldBlock: if the operation would block
"""
self._lock.acquire_nowait()
self._owner_task = get_current_task()
def release(self) -> DeprecatedAwaitable:
"""Release the underlying lock."""
self._lock.release()
return DeprecatedAwaitable(self.release)
def locked(self) -> bool:
"""Return True if the lock is set."""
return self._lock.locked()
def notify(self, n: int = 1) -> None:
"""Notify exactly n listeners."""
self._check_acquired()
for _ in range(n):
try:
event = self._waiters.popleft()
except IndexError:
break
event.set()
def notify_all(self) -> None:
"""Notify all the listeners."""
self._check_acquired()
for event in self._waiters:
event.set()
self._waiters.clear()
async def wait(self) -> None:
"""Wait for a notification.""" | await checkpoint() | 1 | 2023-10-23 18:09:28+00:00 | 4k |
marmotlab/Context_Aware_Navigation | runner.py | [
{
"identifier": "PolicyNet",
"path": "model.py",
"snippet": "class PolicyNet(nn.Module):\r\n def __init__(self, input_dim, embedding_dim):\r\n super(PolicyNet, self).__init__()\r\n self.initial_embedding = nn.Linear(input_dim, embedding_dim) # layer for non-end position\r\n self.current_embedding = nn.Linear(embedding_dim * 2, embedding_dim)\r\n\r\n self.encoder = Encoder(embedding_dim=embedding_dim, n_head=8, n_layer=6)\r\n self.decoder = Decoder(embedding_dim=embedding_dim, n_head=8, n_layer=1)\r\n self.pointer = SingleHeadAttention(embedding_dim)\r\n\r\n def encode_graph(self, node_inputs, node_padding_mask, edge_mask):\r\n node_feature = self.initial_embedding(node_inputs)\r\n enhanced_node_feature = self.encoder(src=node_feature, key_padding_mask=node_padding_mask, attn_mask=edge_mask)\r\n\r\n return enhanced_node_feature\r\n\r\n def output_policy(self, enhanced_node_feature, edge_inputs, current_index, edge_padding_mask, node_padding_mask):\r\n current_edge = edge_inputs.permute(0, 2, 1)\r\n embedding_dim = enhanced_node_feature.size()[2]\r\n\r\n neigboring_feature = torch.gather(enhanced_node_feature, 1, current_edge.repeat(1, 1, embedding_dim))\r\n\r\n current_node_feature = torch.gather(enhanced_node_feature, 1, current_index.repeat(1, 1, embedding_dim))\r\n\r\n if edge_padding_mask is not None:\r\n current_mask = edge_padding_mask\r\n # print(current_mask)\r\n else:\r\n current_mask = None\r\n\r\n current_mask[:,:,0] = 1 # don't stay at current position\r\n enhanced_current_node_feature, _ = self.decoder(current_node_feature, enhanced_node_feature, node_padding_mask)\r\n enhanced_current_node_feature = self.current_embedding(torch.cat((enhanced_current_node_feature, current_node_feature), dim=-1))\r\n logp = self.pointer(enhanced_current_node_feature, neigboring_feature, current_mask)\r\n logp= logp.squeeze(1)\r\n return logp\r\n\r\n def forward(self, node_inputs, edge_inputs, current_index, node_padding_mask=None, edge_padding_mask=None, edge_mask=None):\r\n enhanced_node_feature = self.encode_graph(node_inputs, node_padding_mask, edge_mask)\r\n logp = self.output_policy(enhanced_node_feature, edge_inputs, current_index, edge_padding_mask, node_padding_mask)\r\n return logp\r"
},
{
"identifier": "QNet",
"path": "model.py",
"snippet": "class QNet(nn.Module):\r\n def __init__(self, input_dim, embedding_dim):\r\n super(QNet, self).__init__()\r\n self.initial_embedding = nn.Linear(input_dim, embedding_dim) # layer for non-end position\r\n self.action_embedding = nn.Linear(embedding_dim*3, embedding_dim)\r\n\r\n self.encoder = Encoder(embedding_dim=embedding_dim, n_head=8, n_layer=6)\r\n self.decoder = Decoder(embedding_dim=embedding_dim, n_head=8, n_layer=1)\r\n\r\n self.q_values_layer = nn.Linear(embedding_dim, 1)\r\n\r\n def encode_graph(self, node_inputs, node_padding_mask, edge_mask):\r\n embedding_feature = self.initial_embedding(node_inputs)\r\n embedding_feature = self.encoder(src=embedding_feature, key_padding_mask=node_padding_mask, attn_mask=edge_mask)\r\n\r\n return embedding_feature\r\n\r\n def output_q_values(self, enhanced_node_feature, edge_inputs, current_index, edge_padding_mask, node_padding_mask):\r\n k_size = edge_inputs.size()[2]\r\n current_edge = edge_inputs\r\n current_edge = current_edge.permute(0, 2, 1)\r\n embedding_dim = enhanced_node_feature.size()[2]\r\n\r\n neigboring_feature = torch.gather(enhanced_node_feature, 1, current_edge.repeat(1, 1, embedding_dim))\r\n\r\n current_node_feature = torch.gather(enhanced_node_feature, 1, current_index.repeat(1, 1, embedding_dim))\r\n\r\n enhanced_current_node_feature, attention_weights = self.decoder(current_node_feature, enhanced_node_feature, node_padding_mask)\r\n action_features = torch.cat((enhanced_current_node_feature.repeat(1, k_size, 1), current_node_feature.repeat(1, k_size, 1), neigboring_feature), dim=-1)\r\n action_features = self.action_embedding(action_features)\r\n q_values = self.q_values_layer(action_features)\r\n\r\n if edge_padding_mask is not None:\r\n current_mask = edge_padding_mask\r\n else:\r\n current_mask = None\r\n current_mask[:, :, 0] = 1 # don't stay at current position\r\n current_mask = current_mask.permute(0, 2, 1)\r\n zero = torch.zeros_like(q_values).to(q_values.device)\r\n q_values = torch.where(current_mask == 1, zero, q_values)\r\n return q_values, attention_weights\r\n\r\n def forward(self, node_inputs, edge_inputs, current_index, node_padding_mask=None, edge_padding_mask=None,\r\n edge_mask=None):\r\n enhanced_node_feature = self.encode_graph(node_inputs, node_padding_mask, edge_mask)\r\n q_values, attention_weights = self.output_q_values(enhanced_node_feature, edge_inputs, current_index, edge_padding_mask, node_padding_mask)\r\n return q_values, attention_weights\r"
},
{
"identifier": "Worker",
"path": "worker.py",
"snippet": "class Worker:\r\n def __init__(self, meta_agent_id, policy_net, q_net, global_step, device='cuda', greedy=False, save_image=True):\r\n self.device = device\r\n self.greedy = greedy\r\n self.metaAgentID = meta_agent_id\r\n self.global_step = global_step\r\n self.node_padding_size = NODE_PADDING_SIZE\r\n self.k_size = K_SIZE\r\n self.save_image = save_image\r\n\r\n self.env = Env(map_index=self.global_step, k_size=self.k_size, plot=save_image)\r\n self.local_policy_net = policy_net\r\n self.local_q_net = q_net\r\n\r\n self.current_node_index = 0\r\n self.travel_dist = 0\r\n self.robot_position = self.env.start_position\r\n\r\n self.episode_buffer = []\r\n self.perf_metrics = dict()\r\n for i in range(15):\r\n self.episode_buffer.append([])\r\n\r\n def get_observations(self):\r\n # get observations\r\n node_coords = copy.deepcopy(self.env.node_coords)\r\n graph = copy.deepcopy(self.env.graph)\r\n node_utility = copy.deepcopy(self.env.node_utility)\r\n indicator = copy.deepcopy(self.env.indicator)\r\n direction_vector = copy.deepcopy(self.env.direction_vector)\r\n # normalize observations\r\n node_coords = node_coords / 640\r\n node_utility = node_utility / 50\r\n n_nodes = node_coords.shape[0]\r\n node_utility_inputs = node_utility.reshape(n_nodes, 1)\r\n direction_nums = direction_vector.shape[0]\r\n direction_vector_inputs = direction_vector.reshape(direction_nums, 3)\r\n direction_vector_inputs[:, 2] /= 80\r\n node_inputs = np.concatenate((node_coords, node_utility_inputs, indicator, direction_vector_inputs), axis=1)\r\n node_inputs = torch.FloatTensor(node_inputs).unsqueeze(0).to(self.device) # (1, node_padding_size+1, 3)\r\n assert node_coords.shape[0] < self.node_padding_size\r\n padding = torch.nn.ZeroPad2d((0, 0, 0, self.node_padding_size - node_coords.shape[0]))\r\n node_inputs = padding(node_inputs)\r\n # calculate a mask to padded nodes\r\n node_padding_mask = torch.zeros((1, 1, node_coords.shape[0]), dtype=torch.int64).to(self.device)\r\n node_padding = torch.ones((1, 1, self.node_padding_size - node_coords.shape[0]), dtype=torch.int64).to(\r\n self.device)\r\n node_padding_mask = torch.cat((node_padding_mask, node_padding), dim=-1)\r\n # get the node index of the current robot position\r\n current_node_index = self.env.find_index_from_coords(self.robot_position)\r\n current_index = torch.tensor([current_node_index]).unsqueeze(0).unsqueeze(0).to(self.device) # (1,1,1)\r\n # prepare the adjacent list as padded edge inputs and the adjacent matrix as the edge mask\r\n graph = list(graph.values())\r\n edge_inputs = []\r\n for node in graph:\r\n node_edges = list(map(int, node))\r\n edge_inputs.append(node_edges)\r\n\r\n adjacent_matrix = self.calculate_edge_mask(edge_inputs)\r\n edge_mask = torch.from_numpy(adjacent_matrix).float().unsqueeze(0).to(self.device)\r\n assert len(edge_inputs) < self.node_padding_size\r\n padding = torch.nn.ConstantPad2d(\r\n (0, self.node_padding_size - len(edge_inputs), 0, self.node_padding_size - len(edge_inputs)), 1)\r\n edge_mask = padding(edge_mask)\r\n edge = edge_inputs[current_index]\r\n while len(edge) < self.k_size:\r\n edge.append(0)\r\n edge_inputs = torch.tensor(edge).unsqueeze(0).unsqueeze(0).to(self.device) # (1, 1, k_size)\r\n # calculate a mask for the padded edges (denoted by 0)\r\n edge_padding_mask = torch.zeros((1, 1, K_SIZE), dtype=torch.int64).to(self.device)\r\n one = torch.ones_like(edge_padding_mask, dtype=torch.int64).to(self.device)\r\n edge_padding_mask = torch.where(edge_inputs == 0, one, edge_padding_mask)\r\n\r\n 
observations = node_inputs, edge_inputs, current_index, node_padding_mask, edge_padding_mask, edge_mask\r\n return observations\r\n\r\n def select_node(self, observations):\r\n node_inputs, edge_inputs, current_index, node_padding_mask, edge_padding_mask, edge_mask = observations\r\n with torch.no_grad():\r\n logp_list = self.local_policy_net(node_inputs, edge_inputs, current_index, node_padding_mask,\r\n edge_padding_mask, edge_mask)\r\n if self.greedy:\r\n action_index = torch.argmax(logp_list, dim=1).long()\r\n else:\r\n action_index = torch.multinomial(logp_list.exp(), 1).long().squeeze(1)\r\n next_node_index = edge_inputs[0, 0, action_index.item()]\r\n next_position = self.env.node_coords[next_node_index]\r\n return next_position, action_index\r\n\r\n def save_observations(self, observations):\r\n node_inputs, edge_inputs, current_index, node_padding_mask, edge_padding_mask, edge_mask = observations\r\n self.episode_buffer[0] += copy.deepcopy(node_inputs)\r\n self.episode_buffer[1] += copy.deepcopy(edge_inputs)\r\n self.episode_buffer[2] += copy.deepcopy(current_index)\r\n self.episode_buffer[3] += copy.deepcopy(node_padding_mask)\r\n self.episode_buffer[4] += copy.deepcopy(edge_padding_mask)\r\n self.episode_buffer[5] += copy.deepcopy(edge_mask)\r\n\r\n def save_action(self, action_index):\r\n self.episode_buffer[6] += action_index.unsqueeze(0).unsqueeze(0)\r\n\r\n def save_reward_done(self, reward, done):\r\n self.episode_buffer[7] += copy.deepcopy(torch.FloatTensor([[[reward]]]).to(self.device))\r\n self.episode_buffer[8] += copy.deepcopy(torch.tensor([[[(int(done))]]]).to(self.device))\r\n\r\n def save_next_observations(self, observations):\r\n node_inputs, edge_inputs, current_index, node_padding_mask, edge_padding_mask, edge_mask = observations\r\n self.episode_buffer[9] += copy.deepcopy(node_inputs)\r\n self.episode_buffer[10] += copy.deepcopy(edge_inputs)\r\n self.episode_buffer[11] += copy.deepcopy(current_index)\r\n self.episode_buffer[12] += copy.deepcopy(node_padding_mask)\r\n self.episode_buffer[13] += copy.deepcopy(edge_padding_mask)\r\n self.episode_buffer[14] += copy.deepcopy(edge_mask)\r\n\r\n def run_episode(self, curr_episode):\r\n done = False\r\n observations = self.get_observations()\r\n for i in range(128):\r\n self.save_observations(observations)\r\n next_position, action_index = self.select_node(observations)\r\n self.save_action(action_index)\r\n reward, done, self.robot_position, self.travel_dist = self.env.step(self.robot_position, next_position, self.travel_dist)\r\n self.save_reward_done(reward, done)\r\n observations = self.get_observations()\r\n self.save_next_observations(observations)\r\n if self.save_image:\r\n if not os.path.exists(gifs_path):\r\n os.makedirs(gifs_path)\r\n self.env.plot_env(self.global_step, gifs_path, i, self.travel_dist)\r\n if done:\r\n break\r\n self.perf_metrics['travel_dist'] = self.travel_dist\r\n self.perf_metrics['explored_rate'] = self.env.explored_rate\r\n self.perf_metrics['success_rate'] = done\r\n if self.save_image:\r\n path = gifs_path\r\n self.make_gif(path, curr_episode)\r\n\r\n def work(self, currEpisode):\r\n self.run_episode(currEpisode)\r\n\r\n def calculate_edge_mask(self, edge_inputs):\r\n size = len(edge_inputs)\r\n bias_matrix = np.ones((size, size))\r\n for i in range(size):\r\n for j in range(size):\r\n if j in edge_inputs[i]:\r\n bias_matrix[i][j] = 0\r\n return bias_matrix\r\n \r\n def make_gif(self, path, n):\r\n with imageio.get_writer('{}/{}_explored_rate_{:.4g}.gif'.format(path, n, 
self.env.explored_rate), mode='I', duration=0.5) as writer:\r\n for frame in self.env.frame_files:\r\n image = imageio.imread(frame)\r\n writer.append_data(image)\r\n print('gif complete\\n')\r\n for filename in self.env.frame_files[:-1]:\r\n os.remove(filename)\r"
}
] | import torch
import ray
from model import PolicyNet, QNet
from worker import Worker
from parameter import *
| 3,250 |
class Runner(object):
def __init__(self, meta_agent_id):
self.meta_agent_id = meta_agent_id
self.device = torch.device('cuda') if USE_GPU else torch.device('cpu')
|
class Runner(object):
def __init__(self, meta_agent_id):
self.meta_agent_id = meta_agent_id
self.device = torch.device('cuda') if USE_GPU else torch.device('cpu')
| self.local_network = PolicyNet(INPUT_DIM, EMBEDDING_DIM)
| 0 | 2023-10-17 04:32:42+00:00 | 4k |
adarshxs/TokenTally | main.py | [
{
"identifier": "sidebar",
"path": "sidebar.py",
"snippet": "def sidebar():\n with st.sidebar:\n st.image(\"cutie.png\", use_column_width=True)\n st.title(\"About Token Tally\")\n st.info(\"Select your desired base model, parameters, and configuration to get an estimate of the required GPU memory and model size. Do contribute: https://github.com/adarshxs/TokenTally\")\n # Add a Products section with radio buttons to select a page\n product_options = [\"Overview\", \"LLM Cost Tool\", \"Transformer Memory Tool\", \"LLM Model Recommendation\"]\n selected_product = st.radio(\"Products\", product_options)\n \n st.warning(\"Notice: The logic for the final cost/token is yet to be implemented!\")\n \n return selected_product"
},
{
"identifier": "display_overview",
"path": "overview.py",
"snippet": "def display_overview():\n \n # read readme.md file and display it as markdown in streamlit\n \n with open(\"README.md\", \"r\", encoding=\"utf-8\") as f:\n readme = f.read()\n # readme.replace(\"\", \"\") # not working don't know why\n st.markdown(readme)"
},
{
"identifier": "display_llm_cost_tool",
"path": "tools/llm_cost_calculator.py",
"snippet": "def display_llm_cost_tool():\n st.title(\"Token Tally: LLM Cost Estimator\")\n st.subheader(\"Estimate Your LLM's Token Toll Across Various Platforms and Configurations\")\n\n # Base model and configurations data\n base_models = load_base_models()\n quantization_data = load_quantization()\n gpu_data = load_gpus()\n gpu_providers_df = load_gpu_providers()\n\n model_names = [model[\"name\"] for model in base_models]\n selected_model_name = st.selectbox(\"Step 1: Select the Base Model\", model_names, key='base_model')\n selected_model = next(model for model in base_models if model[\"name\"] == selected_model_name)\n\n param_options = list(selected_model[\"params\"].keys())\n selected_params = st.selectbox(\"Step 2: Select the Number of Parameters\", param_options, key='params')\n \n config_names = list(quantization_data.keys())\n selected_config_name = st.selectbox(\"Step 3: Select the Configuration\", config_names, key='config')\n\n # calculate model size based on selected configuration\n model_size = selected_model[\"params\"][selected_params] * quantization_data[selected_config_name]\n\n col1, col2 = st.columns(2)\n with col1:\n st.subheader(\"Model Size\")\n st.markdown(f\"\"\"\n <div class=\"card\">\n <strong>Model Size: {model_size} GB</strong>\n </div>\n \"\"\", unsafe_allow_html=True)\n with col2:\n st.subheader(\"Minimum GPU Memory\")\n st.markdown(f\"\"\"\n <div class=\"card\">\n <strong>Min GPU Memory Required: {model_size * 1.2:.2f} GB</strong>\n </div>\n \"\"\", unsafe_allow_html=True)\n st.latex(r'''\n \\text{GPU Requirement} = 1.2 \\times \\text{Model Size}\n ''')\n\n cloud_providers = gpu_providers_df[\"Cloud\"].unique()\n selected_provider = st.selectbox(\"Step 4: Select the Cloud Provider\", cloud_providers, key='provider')\n\n suitable_gpu_types = gpu_providers_df[gpu_providers_df[\"Cloud\"] == selected_provider][\"GPU Type\"].unique()\n selected_gpu_type = st.selectbox(\"Step 5: Select the GPU Type\", suitable_gpu_types, key='gpu_type')\n selected_gpu_details = gpu_providers_df[(gpu_providers_df[\"Cloud\"] == selected_provider) & (gpu_providers_df[\"GPU Type\"] == selected_gpu_type)]\n\n st.subheader(\"Available GPU Variants\")\n selected_gpu_details = selected_gpu_details.reset_index(drop=True)\n selected_gpu_details.index = selected_gpu_details.index + 1\n st.table(selected_gpu_details)\n st.subheader(\"Cost per 1,000 tokens - For Selected Model\")\n # calculate TS_max\n TS_max = 1 # to be implemented!!!\n #TS = TS_max*(MO/100)\n # Compute Cost\n #CT = VMc / (TS*3600) \n\n st.markdown(\"\"\"\n <style>\n .card {\n background-color: #2f2f2f; \n border-radius: 5px;\n padding: 20px 30px;\n margin: 25px 0px;\n text-align: center;\n color: #ffffff;\n }\n </style>\n \"\"\", unsafe_allow_html=True)\n cost_p_1k_tokens_compute, cost_p_1k_tokens_memory = 0,0\n\n calculated_flops = selected_model[\"params\"][selected_params]\n # Populate the placeholder with the number_input, setting the default value to calculated_flops\n flops_per_token = st.number_input(\"FLOPs per Token = Model parameters in Billion * 2 (Considering batch size=1 and ignoring KV cache)\", min_value=1.0, value=float(calculated_flops))\n flops_per_gpu = st.number_input(\"FLOPs per GPU (TFLOPs) - Only available for A100 80GB considering 70% MFU\", min_value=1, value=200)\n num_gpus = st.number_input(\"Number of GPUs\", min_value=1, value=8)\n cost_per_hour = st.number_input(\"Cost per Hour (USD) - Refer ($)On-Demand in the above table only for A100 80GB\", min_value=0.01, value=40.97)\n 
memory_bandwidth_per_gpu = st.number_input(\"Memory Bandwidth per GPU (TB/s) - 2Tb/s for A100 80Gb and considering 60-70 % inference workloads\", min_value=0.1, value=1.3)\n\n if st.button(\"Calculate\"):\n cost_p_1k_tokens_compute, cost_p_1k_tokens_memory = cost_per_1k_tokens(\n flops_per_token, \n flops_per_gpu, \n num_gpus, \n cost_per_hour, \n memory_bandwidth_per_gpu\n )\n \n st.markdown(f\"\"\"\n <div class=\"card\">\n <strong>Estimated Cost per 1,000 Input tokens: ${cost_p_1k_tokens_compute:.6f}</strong><br>\n <strong>Estimated Cost per 1,000 Output tokens: ${cost_p_1k_tokens_memory:.6f}</strong>\n </div>\n \"\"\", unsafe_allow_html=True)"
},
{
"identifier": "display_transformer_memory_tool",
"path": "tools/transformer_memory_calculator.py",
"snippet": "def display_transformer_memory_tool():\n st.title(\"Transformer Memory Calculator\")\n\n # Creating UI elements for each argument:\n params = st.number_input(\"Number of Parameters\", min_value=1, value=20000000000, step=1)\n num_gpus = st.number_input(\"Number of GPUs used for training\", min_value=1, value=1, step=1)\n tensor_parallel_size = st.number_input(\"Tensor parallel degree\", min_value=1, value=1, step=1)\n pipeline_parallel_size = st.number_input(\"Pipeline parallel degree\", min_value=1, value=1, step=1)\n partition_activations = st.checkbox(\"Use ZeRO-R to partition activation memory?\")\n zero_stage = st.selectbox(\"Stage of the ZeRO optimizer\", [0, 1, 2, 3], index=1)\n checkpoint_activations = st.checkbox(\"Use Megatron-style activation checkpointing?\")\n batch_size_per_gpu = st.number_input(\"Batch size per GPU\", min_value=1, value=1, step=1)\n hidden_size = st.number_input(\"Dimension of the model's hidden size\", min_value=1, value=6144, step=1)\n num_attention_heads = st.number_input(\"Number of attention heads used in model\", min_value=1, value=64, step=1)\n sequence_length = st.number_input(\"Sequence length used for training\", min_value=1, value=2048, step=1)\n num_layers = st.number_input(\"Number of transformer layers used in model\", min_value=1, value=44, step=1)\n fp32_model = st.checkbox(\"Is model stored in fp32?\")\n fp32_grads = st.checkbox(\"Are grads stored in fp32?\")\n zero_allgather_bucket_size = st.number_input(\"Size of allgather buckets used by ZeRO\", min_value=1, value=int(5e8), step=1)\n zero3_max_live_params = st.number_input(\"Maximum number of parameters ZeRO3 keeps in GPU memory\", min_value=1, value=int(1e9), step=1)\n misc_mem_gb = st.number_input(\"Miscellaneous memory overhead\", min_value=0, value=0, step=1)\n\n # When the user clicks this button, calculate the memory:\n if st.button(\"Calculate Memory\"):\n # Create an object to mimic the argparse Namespace object:\n args = type('', (), {})()\n args.params = params\n args.num_gpus = num_gpus\n args.tensor_parallel_size = tensor_parallel_size\n args.pipeline_parallel_size = pipeline_parallel_size\n args.partition_activations = partition_activations\n args.zero_stage = zero_stage\n args.checkpoint_activations = checkpoint_activations\n args.batch_size_per_gpu = batch_size_per_gpu\n args.hidden_size = hidden_size\n args.num_attention_heads = num_attention_heads\n args.sequence_length = sequence_length\n args.num_layers = num_layers\n args.fp32_model = fp32_model\n args.fp32_grads = fp32_grads\n args.zero_allgather_bucket_size = zero_allgather_bucket_size\n args.zero3_max_live_params = zero3_max_live_params\n args.misc_mem_gb = misc_mem_gb\n \n calc_mem(args)"
},
{
"identifier": "display_llm_recomender_tool",
"path": "tools/llm_recomender.py",
"snippet": "def display_llm_recomender_tool():\n if 'value' not in st.session_state:\n st.session_state.value = \"8\"\n \"\"\"\n Display the Streamlit interface for the LLM Model Recommendation tool.\n \"\"\"\n\n # Preamble description\n st.markdown(\"\"\"\n ## **Welcome to the LLM Recommendation Tool!**\n\n Choosing the right **Large Language Model (LLM)** can be challenging given the variety of models available \n and the technical constraints of different systems.\n\n This tool is designed to assist users in selecting the most suitable LLM for their computational infrastructure, \n specifically considering the GPU memory constraints.\n\n Instructions:\n - **Specify your GPU Memory**: Adjust the available GPU memory using the input field.\n - **Get Recommendations**: The tool will recommend suitable LLMs based on your input.\n - **Understand the Recommendations**: A comprehensive breakdown explains the ranking and recommendation logic.\n\n Enter your GPU memory below!\n \"\"\")\n\n # Initialize GPU Memory input field\n gpu_memory_field = st.empty()\n st.session_state.value = gpu_memory_field.text_input(\"Available GPU Memory (GB)\", value=st.session_state.value)\n\n # Get recommendations and display them in a table\n results = recommend_model(float(st.session_state.value))\n results_df = pd.DataFrame(results).head(15)\n st.table(results_df)\n\n # Display the methodology and top recommendation\n top_model = results_df.iloc[0][\"Model Name\"]\n top_quantization = results_df.iloc[0][\"Quantization Type\"]\n top_gpu_req = results_df.iloc[0][\"GPU Requirement (GB)\"]\n top_model_size = results_df.iloc[0][\"Actual Model Size (GB)\"]\n value = st.session_state.value\n st.markdown(f\"\"\"\n ## **How it works**\n\n 1. **Available GPU Memory**: Based on your input, the currently available GPU memory on your system is **{value} GB**. The recommendation engine filters out models that surpass this memory footprint.\n\n 2. **Quantization**: \n - Models undergo *quantization* to shrink their size, albeit at a minor compromise on accuracy. This results in a model that's more resource-efficient.\n - Different quantization levels impact the model's size differently. For instance, adopting a {top_quantization} approach for the {top_model} compresses it to an approximate size of **{top_model_size:.2f} GB**.\n - Depending on the balance between accuracy and resource usage you're aiming for, you can choose between differently quantized models.\n\n 3. **Ranking & Recommendation Score**:\n - The recommendation score is a multifaceted metric, considering:\n * **GPU Memory Requirement**: Models demanding less GPU memory are scored higher. For example, {top_model} requires around **{top_gpu_req:.2f} GB**.\n * **Number of Parameters**: A higher parameter count usually indicates better performance, so these models get a favorable score.\n * **Inherent Performance Order**: Some models inherently outperform others due to architecture or training nuances. This intrinsic order also influences the ranking.\n - The recommendations you see are sorted descendingly based on this composite score.\n\n The top recommendation currently for your system, given the constraints, is the **{top_model}** model with {top_quantization} quantization.\n\n Harnessing this tool ensures you get the most bang for your buck — or in this case, the most AI prowess for your GPU memory!\n \"\"\")"
}
] | from sidebar import sidebar
from overview import display_overview
from tools.llm_cost_calculator import display_llm_cost_tool
from tools.transformer_memory_calculator import display_transformer_memory_tool
from tools.llm_recomender import display_llm_recomender_tool | 3,256 |
def main():
selected_product = sidebar()
if selected_product == "Overview":
display_overview()
elif selected_product == "LLM Cost Tool":
display_llm_cost_tool()
elif selected_product == "Transformer Memory Tool":
display_transformer_memory_tool()
elif selected_product == "LLM Model Recommendation":
|
def main():
selected_product = sidebar()
if selected_product == "Overview":
display_overview()
elif selected_product == "LLM Cost Tool":
display_llm_cost_tool()
elif selected_product == "Transformer Memory Tool":
display_transformer_memory_tool()
elif selected_product == "LLM Model Recommendation": | display_llm_recomender_tool() | 4 | 2023-10-18 06:16:47+00:00 | 4k |
WestlakeIntelligentRobotics/ConsensusLLM-code | modules/experiment/scalar_debate.py | [
{
"identifier": "Template",
"path": "modules/experiment/template.py",
"snippet": "class Template(ABC):\n \"\"\"\n A template class for designing and running experiments with multiple agents\n and rounds.\n\n This abstract class defines a template for designing experiments where \n multiple agents interact in multiple rounds. Subclasses must implement \n various methods to customize the behavior of the experiment, including \n generating questions, managing agents, updating experiment records, and \n performing post-processing.\n\n Attributes:\n _record (dict): A dictionary for recording experiment data.\n _n_agent (int): Number of agents participating in the experiment.\n _n_round (int): Number of rounds in the experiment.\n _n_experiment (int): Number of independent experiments to run.\n _lock (threading.Lock):\n A lock for ensuring thread safety during data updates.\n\n Subclasses should implement the following abstract methods:\n - _generate_question\n - _generate_agents\n - _update_record\n - _round_postprocess\n - _exp_postprocess\n\n Public Methods:\n - run: Run the experiment using a thread pool for concurrency.\n - save_record: Save the experiment record to a file.\n\n To use this template, create a subclass that defines the specific behavior\n of the experiment.\n \"\"\"\n def __init__(self, args):\n \"\"\"\n Initialize the Template with provided arguments.\n\n Initializes instance variables for managing the experiment.\n \"\"\"\n self._record = {} # A dictionary for recording data\n self._n_agent = args.agents # Number of agents\n self._n_round = args.rounds # Number of rounds\n self._n_experiment = args.n_exp # Number of experiments\n self._lock = threading.Lock() # Lock for thread safety\n\n @abstractmethod\n def _generate_question(self, agent, round) -> str:\n \"\"\"\n Generate a question for an agent in a specific round.\n\n Args:\n agent: An agent participating in the experiment.\n round: The current round of the experiment.\n\n Returns:\n str: The generated question.\n \"\"\"\n pass\n\n @abstractmethod\n def _generate_agents(self, simulation_ind):\n \"\"\"\n Generate a set of agents for a simulation.\n\n Args:\n simulation_ind: Index of the current simulation.\n\n Returns:\n list: A list of agent objects.\n \"\"\"\n pass\n\n @abstractmethod\n def _update_record(self, record, agent_contexts, simulation_ind, agents):\n \"\"\"\n Update the experiment record based on agent data.\n\n Args:\n record: The experiment record to be updated.\n agent_contexts: List of agent histories and data.\n simulation_ind: Index of the current simulation.\n agents: List of agents participating in the experiment.\n \"\"\"\n pass\n\n @abstractmethod\n def _round_postprocess(self, simulation_ind, round, results, agents):\n \"\"\"\n Perform post-processing for a round of the experiment.\n\n Args:\n simulation_ind: Index of the current simulation.\n round: The current round of the experiment.\n results: List of results from agents.\n agents: List of agents participating in the experiment.\n \"\"\"\n pass\n\n @abstractmethod\n def _exp_postprocess(self):\n \"\"\"\n Perform post-processing for the entire experiment.\n \"\"\"\n pass\n\n def run(self):\n \"\"\"\n Run the experiment using a thread pool for concurrency.\n \"\"\"\n try:\n with ThreadPoolExecutor(max_workers=self._n_experiment) as executor:\n progress = tqdm(total=self._n_experiment * self._n_round, \n desc=\"Processing\", dynamic_ncols=True)\n futures = {executor.submit(self._run_once, sim_ind, progress) \n for sim_ind in range(self._n_experiment)}\n\n for future in as_completed(futures):\n if 
future.exception() is not None:\n print(\"A thread raised an exception: \"\n f\"{future.exception()}\")\n progress.close()\n except Exception as e:\n print(f\"An exception occurred: {e}\")\n finally:\n self._exp_postprocess()\n\n def _run_once(self, simulation_ind, progress):\n \"\"\"\n Run a single simulation of the experiment.\n\n Args:\n simulation_ind: Index of the current simulation.\n progress: Progress bar for tracking the simulation's progress.\n \"\"\"\n agents = self._generate_agents(simulation_ind)\n try:\n for round in range(self._n_round):\n results = queue.Queue()\n n_thread = len(agents) if round < 4 else 1\n with ThreadPoolExecutor(n_thread) as agent_executor:\n futures = []\n for agent_ind, agent in enumerate(agents):\n question = self. _generate_question(agent, round)\n futures.append(agent_executor\n .submit(agent.answer, question, \n agent_ind, round, \n simulation_ind))\n\n for ind, future in enumerate(as_completed(futures)):\n if future.exception() is not None:\n print(\"A thread raised an exception: \"\n f\"{future.exception()}\")\n else:\n idx, result = future.result()\n results.put((idx, result))\n results = list(results.queue)\n results = sorted(results, key=lambda x: x[0])\n progress.update(1)\n self._round_postprocess(simulation_ind, round, results, agents)\n\n except Exception as e:\n print(f\"error:{e}\")\n finally:\n agent_contexts = [agent.get_history() for agent in agents]\n with self._lock:\n self._update_record(self._record, agent_contexts, \n simulation_ind, agents)\n\n def save_record(self, output_dir: str):\n \"\"\"\n Save the experiment record to a file.\n\n Args:\n output_dir: The directory where the record will be saved.\n\n Returns:\n Tuple: A tuple with a success indicator and the file path.\n \"\"\"\n try:\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n data_file = output_dir + '/data.p'\n # Save the record to a pickle file\n pickle.dump(self._record, open(data_file, \"wb\"))\n return True, data_file\n except Exception as e:\n print(f\"An exception occurred while saving the file: {e}\")\n print(\"Saving to the current directory instead.\")\n # Backup in case of an exception\n pickle.dump(self._record, open(\"backup_output_file.p\", \"wb\"))\n return False, \"\""
},
{
"identifier": "Agent",
"path": "modules/llm/agent.py",
"snippet": "class Agent(GPT):\n def __init__(self, position, other_position, key: str, name=None, \n model: str = 'gpt-3.5-turbo-0613', temperature: float = 0.7):\n def name(self):\n def position(self):\n def position(self, value):\n def other_position(self):\n def other_position(self, value):\n def summarize_result(self):\n def answer(self, input, idx, round, simulation_ind, try_times=0) -> tuple:\n def summarize(self, agent_answers):\n def parse_output(self, output):"
},
{
"identifier": "api_keys",
"path": "modules/llm/api_key.py",
"snippet": ""
},
{
"identifier": "names",
"path": "modules/llm/role.py",
"snippet": ""
},
{
"identifier": "agent_role",
"path": "modules/prompt/scenario.py",
"snippet": ""
},
{
"identifier": "agent_output_form",
"path": "modules/prompt/form.py",
"snippet": ""
},
{
"identifier": "stubborn",
"path": "modules/prompt/personality.py",
"snippet": ""
},
{
"identifier": "gen_html",
"path": "modules/visual/gen_html.py",
"snippet": "def gen_html(data_path, html_dir):\n \"\"\"\n Generate HTML output for conversations.\n\n Args:\n data_path (str): The path to the data file.\n html_dir (str): The directory to save the generated HTML files.\n\n Generates HTML output for the conversations and saves them in the \n specified directory.\n \"\"\"\n results = read_conversations(data_path)\n\n for ind, res in enumerate(results):\n output_file = os.path.join(html_dir, f'simulation_{ind}.html')\n if os.path.exists(output_file):\n continue\n try:\n render_conversations_to_html(res, output_file, ind)\n print(f'HTML output has been written to {output_file}')\n except:\n continue"
},
{
"identifier": "plot_result",
"path": "modules/visual/plot.py",
"snippet": "def plot_result(data_path, pic_dir):\n results = read_from_file(data_path)\n E = len(results)\n N = len(results[0]) # Number of agents\n R = len(results[0][0]) # Number of rounds\n print(E, N, R)\n\n fig, axes = plt.subplots(nrows=3, ncols=3, figsize=(12, 9))\n for eval_id, agent_results in enumerate(results):\n row = eval_id // 3 # Determine the row for the subplot\n col = eval_id % 3 # Determine the co lumn for the subplot\n ax = axes[row, col]\n round0_values = [res[0] for res in agent_results]\n average_round0 = np.mean(round0_values)\n for agent_id, res in enumerate(agent_results):\n ax.plot(range(0, len(res)), res, label=f'Agent {agent_id + 1}',\n marker='o', markersize=3,\n alpha=1 - (1-0.4)/(len(agent_results)-1)*agent_id)\n ax.axhline(average_round0, color='red', linestyle='--', \n linewidth=0.5, label='Average value')\n ax.set_title(f'Case {eval_id + 1}')\n ax.set_xlabel('Round')\n ax.set_ylabel('Agent state')\n ax.set_ylim(0, 100)\n ax.set_xlim(0, len(res) - 1)\n ax.xaxis.set_major_locator(MaxNLocator(integer=True))\n ax.legend()\n\n # Add vertical dashed lines for each round to all subplots\n for ax in axes.flatten():\n for round_num in range(1, R):\n ax.axvline(round_num, color='gray', linestyle='--', linewidth=0.5)\n\n # Adjust layout to prevent subplot overlap\n plt.tight_layout()\n # plt.savefig(pic_dir + '/result.svg', format='svg')\n plt.savefig(pic_dir + '/result.png')\n # Show the plot\n plt.show()"
}
] | import numpy as np
from concurrent.futures import ThreadPoolExecutor, as_completed
from .template import Template
from ..llm.agent import Agent, GPT
from ..llm.api_key import api_keys
from ..llm.role import names
from ..prompt.scenario import agent_role, game_description, round_description
from ..prompt.form import agent_output_form
from ..prompt.personality import stubborn, suggestible
from ..visual.gen_html import gen_html
from ..visual.plot import plot_result | 2,926 | """
MIT License
Copyright (c) [2023] [Intelligent Unmanned Systems Laboratory at
Westlake University]
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS," WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE, AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT, OR OTHERWISE, ARISING FROM,
OUT OF, OR IN CONNECTION WITH THE SOFTWARE OR THE USE, OR OTHER DEALINGS IN
THE SOFTWARE.
"""
class ScalarDebate(Template):
"""
A class representing a simulation of scalar debate between multiple agents.
This class extends the Template class and provides functionality to set up
and run a simulation where multiple agents engage in debates, taking into
account their positions, personalities, and knowledge connectivity.
Args:
args: Command-line arguments and configuration.
connectivity_matrix: Matrix defining agent knowledge connectivity.
Raises:
ValueError: If arguments are invalid or insufficient.
"""
def __init__(self, args, connectivity_matrix):
super().__init__(args)
self._n_agents = args.agents
| """
MIT License
Copyright (c) [2023] [Intelligent Unmanned Systems Laboratory at
Westlake University]
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS," WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE, AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT, OR OTHERWISE, ARISING FROM,
OUT OF, OR IN CONNECTION WITH THE SOFTWARE OR THE USE, OR OTHER DEALINGS IN
THE SOFTWARE.
"""
class ScalarDebate(Template):
"""
A class representing a simulation of scalar debate between multiple agents.
This class extends the Template class and provides functionality to set up
and run a simulation where multiple agents engage in debates, taking into
account their positions, personalities, and knowledge connectivity.
Args:
args: Command-line arguments and configuration.
connectivity_matrix: Matrix defining agent knowledge connectivity.
Raises:
ValueError: If arguments are invalid or insufficient.
"""
def __init__(self, args, connectivity_matrix):
super().__init__(args)
self._n_agents = args.agents | self._init_input = game_description + "\n\n" + agent_output_form | 4 | 2023-10-20 07:58:07+00:00 | 4k |
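The Template snippet in the row above spells out the subclass contract: five abstract hooks plus a thread-pooled run loop. A minimal illustrative subclass, with placeholder bodies rather than ConsensusLLM's actual ScalarDebate logic, would look like:

class MinimalDebate(Template):
    def _generate_question(self, agent, round) -> str:
        return f"Round {round}: please restate your current value."

    def _generate_agents(self, simulation_ind):
        return []  # construct and return Agent objects here

    def _update_record(self, record, agent_contexts, simulation_ind, agents):
        record[simulation_ind] = agent_contexts

    def _round_postprocess(self, simulation_ind, round, results, agents):
        pass  # e.g. have each agent summarize the other agents' answers

    def _exp_postprocess(self):
        pass  # e.g. call gen_html / plot_result on the saved record

run() then drives all simulations concurrently and save_record() pickles the collected agent histories.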
LzVv123456/Contrastive-Prototypical-Prompt | train.py | [
{
"identifier": "ProtoDataset",
"path": "datasets/proto.py",
"snippet": "class ProtoDataset(Dataset):\n def __init__(self, args, prototypes, prototypes_var, classes):\n self.args = args\n self.prototypes = prototypes\n self.prototypes_var = prototypes_var\n self.classes = classes\n assert len(self.prototypes) == len(self.prototypes_var) == len(classes)\n\n if self.args.use_mc_proto:\n assert type(self.prototypes) == list\n assert type(self.prototypes_var) == list\n index_mapping = []\n for idx in range(len(classes)):\n cur_protos = self.prototypes[idx].squeeze()\n cur_mapping = torch.full((len(cur_protos), 1), idx)\n index_mapping.append(cur_mapping)\n self.prototypes = torch.cat(self.prototypes, dim=0).cuda()\n self.prototypes_var = torch.cat(self.prototypes_var, dim=0).cuda()\n self.index_mapping = torch.cat(index_mapping, dim=0).squeeze().cuda()\n else:\n assert type(self.prototypes) == list\n assert type(self.prototypes_var) == list\n self.prototypes = torch.stack(self.prototypes, dim=0).cuda()\n self.prototypes_var = torch.stack(self.prototypes_var, dim=0).cuda()\n\n self.scale = torch.sqrt(self.prototypes_var) # (proto_num, embeding_dim)\n assert len(self.scale) == len(self.prototypes)\n\n\n def __len__(self):\n return self.prototypes.size(0)\n\n\n def __getitem__(self, idx):\n if self.args.proto_trans:\n proto_aug = self.proto_transform(idx)\n else:\n proto_aug = self.prototypes[idx]\n if self.args.use_mc_proto:\n label = self.classes[self.index_mapping[idx]]\n else:\n label = self.classes[idx]\n return proto_aug, label\n \n\n def proto_transform(self, idx): \n proto = self.prototypes[idx]\n gaussian_noise = torch.normal(torch.zeros(len(proto)), 1).cuda() * self.scale[idx]\n proto_aug = proto + gaussian_noise\n return proto_aug"
},
{
"identifier": "ProTLearner",
"path": "prompt.py",
"snippet": "class ProTLearner(nn.Module):\n def __init__(self, args, vit_model, prev_prompts=None):\n super().__init__()\n # learnable prompt\n self.args = args\n self.l_p = args.l_p # length of prompt\n self.d_p = args.d_p # deep prompt\n self.prompt_idx = 0\n self.vit_model = vit_model\n\n if self.d_p: \n self.n_p = len(vit_model.blocks) # number of the prompts\n self.insert_position = list(range(1, len(vit_model.blocks)))\n else:\n self.n_p = 1\n self.insert_position = []\n\n # initialize prompt\n if prev_prompts is None:\n self.prompts = nn.Parameter(torch.zeros(self.n_p, self.l_p, self.vit_model.embed_dim))\n else:\n self.prompts = nn.Parameter(prev_prompts)\n self.register_parameter(name='prompts', param=self.prompts)\n\n if not self.args.finetune_vit:\n # only update prompt\n self._set_only_prompt_trainable()\n\n\n def _set_only_prompt_trainable(self):\n # set all parameters except prompt fixed\n for name, param in self.named_parameters():\n if \"prompts\" not in name:\n param.requires_grad_(False)\n \n\n def get_prompt_param(self):\n trainable_parameter = []\n # set all parameters except prompt fixed\n for _, param in self.named_parameters():\n if param.requires_grad:\n trainable_parameter.append(param)\n return trainable_parameter \n\n\n def prepare_tokens(self, x):\n B, nc, w, h = x.shape\n x = self.vit_model.patch_embed(x) # patch linear embedding\n\n # add the [CLS] token to the embed patch tokens\n cls_tokens = self.vit_model.cls_token.expand(B, -1, -1)\n\n # add cls token\n x = torch.cat((cls_tokens, x), dim=1)\n\n # add positional encoding to each token\n try:\n pos_embed = self.vit_model.interpolate_pos_encoding(x, w, h)\n except:\n pos_embed = self.vit_model.pos_embed\n\n x = x + pos_embed\n\n # insert prompt token\n prompt = self.prompts[self.prompt_idx].unsqueeze(0).expand(B, -1, -1)\n cls_prompt = torch.cat((x[:,0,:].unsqueeze(1), prompt), dim=1)\n x = torch.cat((cls_prompt, x[:,1:,:]), dim=1)\n self.prompt_idx += 1\n\n return self.vit_model.pos_drop(x)\n\n def forward(self, x):\n # reset\n init_flag = True\n self.prompt_idx = 0\n # forward\n x = self.prepare_tokens(x)\n for idx, blk in enumerate(self.vit_model.blocks):\n if idx in self.insert_position:\n if self.d_p:\n x[:,1:self.args.l_p+1,:] = self.prompts[self.prompt_idx,:]\n else:\n if init_flag:\n prompt = self.prompts[self.prompt_idx,:].unsqueeze(0).expand(x.size(0), -1, -1)\n cls_prompt = torch.cat((x[:,0,:].unsqueeze(1), prompt), dim=1)\n x = torch.cat((cls_prompt, x[:,1:,:]), dim=1)\n else:\n x[:,1:self.args.l_p+1,:] = self.prompts[self.prompt_idx,:]\n init_flag = False\n self.prompt_idx += 1\n x = blk(x)\n \n if self.args.pretrain_method == 'mae':\n x = x[:, 1:, :].mean(dim=1) # global pool without cls token\n outcome = self.vit_model.fc_norm(x)\n return outcome\n else:\n x = self.vit_model.norm(x)\n return x[:, 0]"
},
{
"identifier": "PromptHead",
"path": "prompt.py",
"snippet": "class PromptHead(nn.Module):\n def __init__(self, args, vit_embed_dim):\n super().__init__()\n self.args = args\n self.vit_embed_dim = vit_embed_dim\n self.mlp = None\n self.linear_cls = None\n if args.add_mlp:\n self.set_mlp_neck()\n if args.add_cls:\n self.set_linear_cls()\n\n def _init_weights(self, m):\n if isinstance(m, nn.Linear):\n utils.trunc_normal_(m.weight, std=.02)\n if isinstance(m, nn.Linear) and m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.LayerNorm):\n nn.init.constant_(m.bias, 0)\n nn.init.constant_(m.weight, 1.0)\n\n def set_mlp_neck(self):\n nlayers = self.args.mlp_layer_num\n bottleneck_dim = self.vit_embed_dim\n hidden_dim = self.args.mlp_hidden_dim\n use_bn = self.args.mlp_use_bn\n in_dim = self.vit_embed_dim\n \n if nlayers == 0:\n self.mlp = nn.Identity()\n else:\n nlayers = max(nlayers, 1)\n if nlayers == 1:\n self.mlp = nn.Linear(in_dim, bottleneck_dim)\n else:\n layers = [nn.Linear(in_dim, hidden_dim)]\n if use_bn:\n layers.append(nn.BatchNorm1d(hidden_dim))\n layers.append(nn.GELU())\n for _ in range(nlayers - 2):\n layers.append(nn.Linear(hidden_dim, hidden_dim))\n if use_bn:\n layers.append(nn.BatchNorm1d(hidden_dim))\n layers.append(nn.GELU())\n layers.append(nn.Linear(hidden_dim, bottleneck_dim))\n self.mlp = nn.Sequential(*layers)\n self.apply(self._init_weights)\n\n def set_linear_cls(self):\n self.linear_cls = nn.utils.weight_norm(nn.Linear(self.vit_embed_dim, self.args.cls_dim, bias=False))\n self.linear_cls.weight_g.data.fill_(1)\n\n def get_head_parameters(self):\n param = []\n if self.mlp:\n param += self.mlp.parameters()\n if self.linear_cls:\n param += self.linear_cls.parameters()\n return param\n\n def forward(self, x):\n if self.mlp:\n x = self.mlp(x)\n x = nn.functional.normalize(x, dim=-1, p=2)\n neck_output = x\n if self.linear_cls:\n x = self.linear_cls(x)\n return x, neck_output"
}
] | import torch
import utils
import copy
import losses
import prototype as prot
from torch import nn
from torch.nn import functional as F
from torch.utils.data import DataLoader, RandomSampler
from tqdm import tqdm
from datasets import ProtoDataset
from prompt import ProTLearner, PromptHead | 2,648 |
class Trainer(object):
def __init__(self, args, vit_model, train_dataset, gen_proto_dataset):
super().__init__()
self.args = args
self.vit_model = vit_model
self.dataset = train_dataset
self.gen_proto_dataset = gen_proto_dataset
self.proto = []
self.proto_var = []
self.mc_proto = []
self.mc_proto_var = []
self.prototype_prompts = []
self.prompter = None
self.head = None
self.data_loader = None
def get_optimizer(self, optm, trainable_parameter):
if optm == "adamw":
optimizer = torch.optim.AdamW(trainable_parameter) # to use with ViTs
elif optm == "sgd":
optimizer = torch.optim.SGD(trainable_parameter, lr=0, momentum=0.9) # lr and wd is set by scheduler
elif optm == "adam":
optimizer = torch.optim.Adam(trainable_parameter) # to use with ViTs
else:
raise NotImplementedError
return optimizer
def init_loss_func(self, classes_up2now):
if self.args.loss == 'protocon':
if self.args.use_mc_proto:
self.loss_function = losses.ProtoConLoss(self.args, self.mc_proto, classes_up2now, self.proto_dataloader)
else:
self.loss_function = losses.ProtoConLoss(self.args, self.proto, classes_up2now, self.proto_dataloader)
elif self.args.loss == 'supcon':
self.loss_function = losses.SupConLoss(self.args)
elif self.args.loss == 'ce':
self.loss_function = nn.CrossEntropyLoss()
else:
raise NotImplementedError
def get_loss(self, output, mlp_output, target, cur_classes, ):
if self.args.loss == 'ce':
if self.args.cls_dim == self.args.task_size: # relabel
new_target = torch.zeros(target.size())
for idx, label in enumerate(cur_classes):
new_target[target == label] = idx
target = new_target.cuda().long()
loss = self.loss_function(output, target)
else:
if self.args.loss == 'supcon':
mlp_output = mlp_output.unsqueeze(dim=1)
loss = self.loss_function(mlp_output, target)
return loss
def train_discriminate(self):
|
class Trainer(object):
def __init__(self, args, vit_model, train_dataset, gen_proto_dataset):
super().__init__()
self.args = args
self.vit_model = vit_model
self.dataset = train_dataset
self.gen_proto_dataset = gen_proto_dataset
self.proto = []
self.proto_var = []
self.mc_proto = []
self.mc_proto_var = []
self.prototype_prompts = []
self.prompter = None
self.head = None
self.data_loader = None
def get_optimizer(self, optm, trainable_parameter):
if optm == "adamw":
optimizer = torch.optim.AdamW(trainable_parameter) # to use with ViTs
elif optm == "sgd":
optimizer = torch.optim.SGD(trainable_parameter, lr=0, momentum=0.9) # lr and wd is set by scheduler
elif optm == "adam":
optimizer = torch.optim.Adam(trainable_parameter) # to use with ViTs
else:
raise NotImplementedError
return optimizer
def init_loss_func(self, classes_up2now):
if self.args.loss == 'protocon':
if self.args.use_mc_proto:
self.loss_function = losses.ProtoConLoss(self.args, self.mc_proto, classes_up2now, self.proto_dataloader)
else:
self.loss_function = losses.ProtoConLoss(self.args, self.proto, classes_up2now, self.proto_dataloader)
elif self.args.loss == 'supcon':
self.loss_function = losses.SupConLoss(self.args)
elif self.args.loss == 'ce':
self.loss_function = nn.CrossEntropyLoss()
else:
raise NotImplementedError
def get_loss(self, output, mlp_output, target, cur_classes, ):
if self.args.loss == 'ce':
if self.args.cls_dim == self.args.task_size: # relabel
new_target = torch.zeros(target.size())
for idx, label in enumerate(cur_classes):
new_target[target == label] = idx
target = new_target.cuda().long()
loss = self.loss_function(output, target)
else:
if self.args.loss == 'supcon':
mlp_output = mlp_output.unsqueeze(dim=1)
loss = self.loss_function(mlp_output, target)
return loss
def train_discriminate(self): | self.prompter = ProTLearner(self.args, self.vit_model) | 1 | 2023-10-16 21:28:42+00:00 | 4k |
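ProtoDataset.proto_transform in the row above implements prototype replay for continual learning: a stored class prototype is jittered with Gaussian noise whose per-dimension scale is the prototype's standard deviation. Isolated as a small sketch (a paraphrase of that one method using torch.randn_like, not the training loop itself):

import torch

def augment_prototype(proto: torch.Tensor, var: torch.Tensor) -> torch.Tensor:
    scale = torch.sqrt(var)                  # per-dimension std stored with the prototype
    noise = torch.randn_like(proto) * scale  # N(0, var) perturbation
    return proto + noise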
inngest/inngest-py | inngest/_internal/middleware_lib/log.py | [
{
"identifier": "client_lib",
"path": "inngest/_internal/client_lib.py",
"snippet": "_DEV_SERVER_EVENT_KEY = \"NO_EVENT_KEY_SET\"\nclass Inngest:\n def api_origin(self) -> str:\n def event_api_origin(self) -> str:\n def event_key(self) -> str | None:\n def signing_key(self) -> str | None:\n def __init__(\n self,\n *,\n api_base_url: str | None = None,\n app_id: str,\n event_api_base_url: str | None = None,\n event_key: str | None = None,\n is_production: bool | None = None,\n logger: types.Logger | None = None,\n middleware: list[\n type[middleware_lib.Middleware | middleware_lib.MiddlewareSync]\n ]\n | None = None,\n signing_key: str | None = None,\n ) -> None:\n def _build_send_request(\n self,\n events: list[event_lib.Event],\n ) -> types.MaybeError[httpx.Request]:\n def add_middleware(\n self,\n middleware: type[\n middleware_lib.Middleware | middleware_lib.MiddlewareSync\n ],\n ) -> None:\n def create_function(\n self,\n *,\n batch_events: function_config.Batch | None = None,\n cancel: list[function_config.Cancel] | None = None,\n debounce: function_config.Debounce | None = None,\n fn_id: str,\n middleware: list[\n type[middleware_lib.Middleware | middleware_lib.MiddlewareSync]\n ]\n | None = None,\n name: str | None = None,\n on_failure: function.FunctionHandlerAsync\n | function.FunctionHandlerSync\n | None = None,\n rate_limit: function_config.RateLimit | None = None,\n retries: int | None = None,\n throttle: function_config.Throttle | None = None,\n trigger: function_config.TriggerCron | function_config.TriggerEvent,\n ) -> typing.Callable[\n def decorator(\n func: function.FunctionHandlerAsync | function.FunctionHandlerSync,\n ) -> function.Function:\n async def send(\n self,\n events: event_lib.Event | list[event_lib.Event],\n ) -> list[str]:\n def send_sync(\n self,\n events: event_lib.Event | list[event_lib.Event],\n ) -> list[str]:\n def set_logger(self, logger: types.Logger) -> None:\ndef _extract_ids(body: object) -> list[str]:"
},
{
"identifier": "function",
"path": "inngest/_internal/function.py",
"snippet": "class Context:\nclass _Config:\nclass FunctionHandlerAsync(typing.Protocol):\nclass FunctionHandlerSync(typing.Protocol):\nclass FunctionOpts(types.BaseModel):\nclass Function:\nclass _UserError(Exception):\n def __call__(\n self,\n ctx: Context,\n step: step_lib.Step,\n ) -> typing.Awaitable[types.Serializable]:\n def __call__(\n self,\n ctx: Context,\n step: step_lib.StepSync,\n ) -> types.Serializable:\ndef _is_function_handler_async(\n value: FunctionHandlerAsync | FunctionHandlerSync,\n) -> typing.TypeGuard[FunctionHandlerAsync]:\ndef _is_function_handler_sync(\n value: FunctionHandlerAsync | FunctionHandlerSync,\n) -> typing.TypeGuard[FunctionHandlerSync]:\n def convert_validation_error(\n self,\n err: pydantic.ValidationError,\n ) -> BaseException:\n def id(self) -> str:\n def is_handler_async(self) -> bool:\n def is_on_failure_handler_async(self) -> bool | None:\n def on_failure_fn_id(self) -> str | None:\n def __init__(\n self,\n opts: FunctionOpts,\n trigger: function_config.TriggerCron | function_config.TriggerEvent,\n handler: FunctionHandlerAsync | FunctionHandlerSync,\n middleware: list[\n type[middleware_lib.Middleware | middleware_lib.MiddlewareSync]\n ]\n | None = None,\n ) -> None:\n async def call( # noqa: C901\n self,\n call: execution.Call,\n client: client_lib.Inngest,\n ctx: Context,\n fn_id: str,\n middleware: middleware_lib.MiddlewareManager,\n target_hashed_id: str | None,\n ) -> execution.CallResult:\n def call_sync( # noqa: C901\n self,\n call: execution.Call,\n client: client_lib.Inngest,\n ctx: Context,\n fn_id: str,\n middleware: middleware_lib.MiddlewareManager,\n target_hashed_id: str | None,\n ) -> execution.CallResult:\n def get_config(self, app_url: str) -> _Config:\n def get_id(self) -> str:\n def __init__(self, err: Exception) -> None:\ndef _remove_first_traceback_frame(err: Exception) -> None:"
},
{
"identifier": "types",
"path": "inngest/_internal/types.py",
"snippet": "T = typing.TypeVar(\"T\")\nclass EmptySentinel:\nclass BaseModel(pydantic.BaseModel):\n def __init__(\n __pydantic_self__, # noqa: N805\n *args: object,\n **kwargs: object,\n ) -> None:\n def convert_validation_error(\n self,\n err: pydantic.ValidationError,\n ) -> BaseException:\n def from_raw(\n cls: type[BaseModelT],\n raw: object,\n ) -> BaseModelT | Exception:\n def to_dict(self) -> MaybeError[dict[str, object]]:"
},
{
"identifier": "MiddlewareSync",
"path": "inngest/_internal/middleware_lib/middleware.py",
"snippet": "class MiddlewareSync:\n client: client_lib.Inngest\n\n def __init__(self, client: client_lib.Inngest) -> None:\n self.client = client\n\n def after_execution(self) -> None:\n \"\"\"\n After executing new code. Called multiple times per run when using\n steps.\n \"\"\"\n return None\n\n def before_execution(self) -> None:\n \"\"\"\n Before executing new code. Called multiple times per run when using\n steps.\n \"\"\"\n return None\n\n def before_response(self) -> None:\n \"\"\"\n After the output has been set and before the response is sent\n back to Inngest. This is where you can perform any final actions before\n the response is sent back to Inngest. Called multiple times per run when\n using steps. Not called for function middleware.\n \"\"\"\n return None\n\n def transform_input(\n self,\n ctx: function.Context,\n ) -> function.Context:\n \"\"\"\n Before calling a function or step. Used to replace certain arguments in\n the function. Called multiple times per run when using steps.\n \"\"\"\n return ctx\n\n def transform_output(\n self,\n output: execution.Output,\n ) -> execution.Output:\n \"\"\"\n After a function or step returns. Used to modify the returned data.\n Called multiple times per run when using steps. Not called when an error\n is thrown.\n \"\"\"\n return output"
}
] | from inngest._internal import client_lib, function, types
from .middleware import MiddlewareSync | 1,907 | from __future__ import annotations
class LoggerProxy:
_proxied_methods = (
"critical",
"debug",
"error",
"exception",
"fatal",
"info",
"log",
"warn",
"warning",
)
def __init__(self, logger: types.Logger) -> None:
self._is_enabled = False
self.logger = logger
def __getattr__(self, name: str) -> object:
if name in self._proxied_methods and not self._is_enabled:
# Return noop
return lambda *args, **kwargs: None
return getattr(self.logger, name)
def enable(self) -> None:
self._is_enabled = True
class LoggerMiddleware(MiddlewareSync):
def __init__(self, client: client_lib.Inngest) -> None:
super().__init__(client)
self.logger = LoggerProxy(client.logger)
def before_execution(self) -> None:
self.logger.enable()
def transform_input(
self,
| from __future__ import annotations
class LoggerProxy:
_proxied_methods = (
"critical",
"debug",
"error",
"exception",
"fatal",
"info",
"log",
"warn",
"warning",
)
def __init__(self, logger: types.Logger) -> None:
self._is_enabled = False
self.logger = logger
def __getattr__(self, name: str) -> object:
if name in self._proxied_methods and not self._is_enabled:
# Return noop
return lambda *args, **kwargs: None
return getattr(self.logger, name)
def enable(self) -> None:
self._is_enabled = True
class LoggerMiddleware(MiddlewareSync):
def __init__(self, client: client_lib.Inngest) -> None:
super().__init__(client)
self.logger = LoggerProxy(client.logger)
def before_execution(self) -> None:
self.logger.enable()
def transform_input(
self, | ctx: function.Context, | 1 | 2023-10-19 01:02:30+00:00 | 4k |
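The LoggerProxy in the row above is a gating pattern: proxied log methods resolve to no-ops until enable() is called (which the middleware does in before_execution), so logging from replayed steps is suppressed. A small usage sketch, not taken from the inngest codebase:

import logging

proxy = LoggerProxy(logging.getLogger("app"))
proxy.info("dropped: proxy not enabled yet")  # resolves to a no-op lambda
proxy.enable()
proxy.info("emitted: proxy enabled")          # forwarded to the real logger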
f0uriest/quadax | quadax/romberg.py | [
{
"identifier": "QuadratureInfo",
"path": "quadax/utils.py",
"snippet": "class QuadratureInfo(NamedTuple):\n \"\"\"Information about quadrature.\n\n Parameters\n ----------\n err : float\n Estimate of the error in the quadrature result.\n neval : int\n Number of evaluations of the integrand.\n status : int\n Flag indicating reason for termination. status of 0 means normal termination,\n any other value indicates a possible error. A human readable message can be\n obtained by ``print(quadax.STATUS[status])``\n info : dict or None\n Other information returned by the algorithm. See specific algorithm for\n details. Only present if ``full_output`` is True.\n \"\"\"\n\n err: float\n neval: int\n status: int\n info: Union[dict, None]"
},
{
"identifier": "bounded_while_loop",
"path": "quadax/utils.py",
"snippet": "def bounded_while_loop(condfun, bodyfun, init_val, bound):\n \"\"\"While loop for bounded number of iterations, implemented using cond and scan.\"\"\"\n # could do some fancy stuff with checkpointing here like in equinox but the loops\n # in quadax usually only do ~100 iterations max so probably not worth it.\n\n def scanfun(state, *args):\n return jax.lax.cond(condfun(state), bodyfun, lambda x: x, state), None\n\n return jax.lax.scan(scanfun, init_val, None, bound)[0]"
},
{
"identifier": "errorif",
"path": "quadax/utils.py",
"snippet": "def errorif(cond, err=ValueError, msg=\"\"):\n \"\"\"Raise an error if condition is met.\n\n Similar to assert but allows wider range of Error types, rather than\n just AssertionError.\n \"\"\"\n if cond:\n raise err(msg)"
},
{
"identifier": "map_interval",
"path": "quadax/utils.py",
"snippet": "def map_interval(fun, interval):\n \"\"\"Map a function over an arbitrary interval [a, b] to the interval [-1, 1].\n\n Transform a function such that integral(fun) on interval is the same as\n integral(fun_t) on interval_t\n\n Parameters\n ----------\n fun : callable\n Integrand to transform.\n interval : array-like\n Lower and upper limits of integration with possible breakpoints. Use np.inf to\n denote infinite intervals.\n\n Returns\n -------\n fun_t : callable\n Transformed integrand.\n interval_t : float\n New lower and upper limits of integration with possible breakpoints.\n \"\"\"\n interval = jnp.asarray(interval)\n a, b = interval[0], interval[-1]\n sgn = (-1) ** (a > b)\n a, b = jnp.minimum(a, b), jnp.maximum(a, b)\n # catch breakpoints that are outside the domain, replace with endpoints\n # this creates intervals of 0 length which will be ignored later\n interval = jnp.where(interval < a, a, interval)\n interval = jnp.where(interval > b, b, interval)\n interval = jnp.sort(interval)\n\n # bit mask to select mapping case\n # 0 : both sides finite\n # 1 : a = -inf, b finite\n # 2 : a finite, b = inf\n # 3 : both infinite\n bitmask = jnp.isinf(a) + 2 * jnp.isinf(b)\n mapfuns = [_map_linear, _map_ninfb, _map_ainf, _map_ninfinf]\n mapfuns_inv = [_map_linear_inv, _map_ninfb_inv, _map_ainf_inv, _map_ninfinf_inv]\n\n @jax.jit\n def fun_mapped(t, *args):\n x, w = jax.lax.switch(bitmask, mapfuns, t, a, b)\n return sgn * w * fun(x, *args)\n\n # map original breakpoints to new domain\n interval_t = jax.lax.switch(bitmask, mapfuns_inv, interval, a, b)\n # +/-inf gets mapped to +/-1 but numerically evaluates to nan so we replace that.\n interval_t = jnp.where(interval == jnp.inf, 1, interval_t)\n interval_t = jnp.where(interval == -jnp.inf, -1, interval_t)\n return fun_mapped, interval_t"
},
{
"identifier": "tanhsinh_transform",
"path": "quadax/utils.py",
"snippet": "def tanhsinh_transform(fun, interval):\n \"\"\"Transform a function by mapping with tanh-sinh.\n\n Transform a function such that integral(fun) on interval is the same as\n integral(fun_t) on interval_t\n\n Parameters\n ----------\n fun : callable\n Integrand to transform.\n interval : array-like\n Lower and upper limits of integration. Use np.inf to denote infinite intervals.\n\n Returns\n -------\n fun_t : callable\n Transformed integrand.\n interval_t : float\n New lower and upper limits.\n \"\"\"\n errorif(\n len(interval) != 2,\n NotImplementedError,\n \"tanh-sinh transformation with breakpoints not supported\",\n )\n # map a, b -> [-1, 1]\n fun, interval = map_interval(fun, interval)\n\n # map [-1, 1] to [-inf, inf], but with mass concentrated near 0\n xk = lambda t: jnp.tanh(jnp.pi / 2 * jnp.sinh(t))\n wk = lambda t: jnp.pi / 2 * jnp.cosh(t) / jnp.cosh(jnp.pi / 2 * jnp.sinh(t)) ** 2\n func = lambda t, *args: fun(xk(t), *args) * wk(t)\n\n # we generally only need to integrate ~[-3, 3] or ~[-4, 4]\n # we don't want to include the endpoint that maps to x==1 to avoid\n # possible singularities, so we find the largest t s.t. x(t) < 1\n # and use that as our interval\n def get_tmax(xmax):\n \"\"\"Inverse of tanh-sinh transform.\"\"\"\n tanhinv = lambda x: 1 / 2 * jnp.log((1 + x) / (1 - x))\n sinhinv = lambda x: jnp.log(x + jnp.sqrt(x**2 + 1))\n return sinhinv(2 / jnp.pi * tanhinv(xmax))\n\n # inverse of tanh-sinh transformation for x = 1-eps\n tmax = get_tmax(jnp.array(1.0) - 10 * jnp.finfo(jnp.array(1.0)).eps)\n interval_t = jnp.array([-tmax, tmax])\n return jax.jit(func), interval_t"
},
{
"identifier": "wrap_func",
"path": "quadax/utils.py",
"snippet": "def wrap_func(fun, args):\n \"\"\"Vectorize, jit, and mask out inf/nan.\"\"\"\n f = jax.eval_shape(fun, jnp.array(0.0), *args)\n # need to make sure we get the correct shape for array valued integrands\n outsig = \"(\" + \",\".join(\"n\" + str(i) for i in range(len(f.shape))) + \")\"\n\n @jax.jit\n @partial(jnp.vectorize, signature=\"()->\" + outsig)\n def wrapped(x):\n f = fun(x, *args)\n return jnp.where(jnp.isfinite(f), f, 0.0)\n\n return wrapped"
}
] | import jax
import jax.numpy as jnp
from .utils import (
QuadratureInfo,
bounded_while_loop,
errorif,
map_interval,
tanhsinh_transform,
wrap_func,
) | 2,501 | """Romberg integration aka adaptive trapezoid with Richardson extrapolation."""
def romberg(
fun,
interval,
args=(),
full_output=False,
epsabs=1.4e-8,
epsrel=1.4e-8,
divmax=20,
norm=jnp.inf,
):
"""Romberg integration of a callable function or method.
Returns the integral of `fun` (a function of one variable) over `interval`.
Good for non-smooth or piecewise smooth integrands.
Not recommended for infinite intervals, or functions with singularities.
Parameters
----------
fun : callable
Function to integrate, should have a signature of the form
``fun(x, *args)`` -> float, Array. Should be JAX transformable.
interval : array-like
Lower and upper limits of integration. Use np.inf to denote infinite intervals.
args : tuple
additional arguments passed to fun
full_output : bool, optional
If True, return the full state of the integrator. See below for more
information.
epsabs, epsrel : float
Absolute and relative tolerances. If I1 and I2 are two
successive approximations to the integral, algorithm terminates
when abs(I1-I2) < max(epsabs, epsrel*|I2|)
divmax : int, optional
Maximum order of extrapolation. Default is 20.
Total number of function evaluations will be at
most 2**divmax + 1
norm : int, callable
Norm to use for measuring error for vector valued integrands. No effect if the
integrand is scalar valued. If an int, uses p-norm of the given order, otherwise
should be callable.
Returns
-------
y : float, Array
Approximation to the integral
info : QuadratureInfo
Named tuple with the following fields:
* err : (float) Estimate of the error in the approximation.
* neval : (int) Total number of function evaluations.
* status : (int) Flag indicating reason for termination. status of 0 means
normal termination, any other value indicates a possible error. A human
readable message can be obtained by ``print(quadax.STATUS[status])``
* info : (dict or None) Other information returned by the algorithm.
Only present if ``full_output`` is True. Contains the following:
* table : (ndarray, size(dixmax+1, divmax+1, ...)) Estimate of the integral
from each level of discretization and each step of extrapolation.
Notes
-----
Due to limitations on dynamically sized arrays in JAX, this algorithm is fully
sequential and does not vectorize integrand evaluations, so may not be the most
efficient on GPU/TPU.
Also, it is currently only forward mode differentiable.
"""
errorif(
len(interval) != 2,
NotImplementedError,
"Romberg integration with breakpoints not supported",
)
_norm = norm if callable(norm) else lambda x: jnp.linalg.norm(x.flatten(), ord=norm)
# map a, b -> [-1, 1]
fun, interval = map_interval(fun, interval)
| """Romberg integration aka adaptive trapezoid with Richardson extrapolation."""
def romberg(
fun,
interval,
args=(),
full_output=False,
epsabs=1.4e-8,
epsrel=1.4e-8,
divmax=20,
norm=jnp.inf,
):
"""Romberg integration of a callable function or method.
Returns the integral of `fun` (a function of one variable) over `interval`.
Good for non-smooth or piecewise smooth integrands.
Not recommended for infinite intervals, or functions with singularities.
Parameters
----------
fun : callable
Function to integrate, should have a signature of the form
``fun(x, *args)`` -> float, Array. Should be JAX transformable.
interval : array-like
Lower and upper limits of integration. Use np.inf to denote infinite intervals.
args : tuple
additional arguments passed to fun
full_output : bool, optional
If True, return the full state of the integrator. See below for more
information.
epsabs, epsrel : float
Absolute and relative tolerances. If I1 and I2 are two
successive approximations to the integral, algorithm terminates
when abs(I1-I2) < max(epsabs, epsrel*|I2|)
divmax : int, optional
Maximum order of extrapolation. Default is 20.
Total number of function evaluations will be at
most 2**divmax + 1
norm : int, callable
Norm to use for measuring error for vector valued integrands. No effect if the
integrand is scalar valued. If an int, uses p-norm of the given order, otherwise
should be callable.
Returns
-------
y : float, Array
Approximation to the integral
info : QuadratureInfo
Named tuple with the following fields:
* err : (float) Estimate of the error in the approximation.
* neval : (int) Total number of function evaluations.
* status : (int) Flag indicating reason for termination. status of 0 means
normal termination, any other value indicates a possible error. A human
readable message can be obtained by ``print(quadax.STATUS[status])``
* info : (dict or None) Other information returned by the algorithm.
Only present if ``full_output`` is True. Contains the following:
* table : (ndarray, size(dixmax+1, divmax+1, ...)) Estimate of the integral
from each level of discretization and each step of extrapolation.
Notes
-----
Due to limitations on dynamically sized arrays in JAX, this algorithm is fully
sequential and does not vectorize integrand evaluations, so may not be the most
efficient on GPU/TPU.
Also, it is currently only forward mode differentiable.
"""
errorif(
len(interval) != 2,
NotImplementedError,
"Romberg integration with breakpoints not supported",
)
_norm = norm if callable(norm) else lambda x: jnp.linalg.norm(x.flatten(), ord=norm)
# map a, b -> [-1, 1]
fun, interval = map_interval(fun, interval) | vfunc = wrap_func(fun, args) | 5 | 2023-10-24 04:44:34+00:00 | 4k |
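The romberg docstring in the row above fully specifies the call signature and return tuple, so a usage example follows directly from it. Assuming the package exposes romberg at the top level (the import path is a guess):

import jax.numpy as jnp
from quadax import romberg

# The integral of sin(x) on [0, pi] is exactly 2.
y, info = romberg(lambda x: jnp.sin(x), jnp.array([0.0, jnp.pi]))
print(y, info.err, info.neval, info.status)  # y should be close to 2.0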
yixinliu233/SIGNET | main.py | [
{
"identifier": "GIN",
"path": "models.py",
"snippet": "class GIN(torch.nn.Module):\n def __init__(self, num_features, dim, num_gc_layers, pooling, readout):\n super(GIN, self).__init__()\n\n self.num_gc_layers = num_gc_layers\n self.pooling = pooling\n self.readout = readout\n\n self.convs = torch.nn.ModuleList()\n self.dim = dim\n self.pool = self.get_pool()\n\n for i in range(num_gc_layers):\n if i:\n nn = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))\n else:\n nn = Sequential(Linear(num_features, dim), ReLU(), Linear(dim, dim))\n conv = GINConv(nn)\n\n self.convs.append(conv)\n\n def forward(self, x, edge_index, batch, node_imp):\n\n if node_imp is not None:\n out, _ = torch_scatter.scatter_max(torch.reshape(node_imp.detach(), (1, -1)), batch)\n out = out.reshape(-1, 1)\n out = out[batch]\n node_imp /= out + eps\n node_imp = (2 * node_imp - 1)/(2 * scalar) + 1\n x = x * node_imp\n\n xs = []\n for i in range(self.num_gc_layers):\n\n x = F.relu(self.convs[i](x, edge_index))\n\n xs.append(x)\n\n if self.readout == 'last':\n graph_emb = self.pool(xs[-1], batch)\n elif self.readout == 'concat':\n graph_emb = torch.cat([self.pool(x, batch) for x in xs], 1)\n elif self.readout == 'add':\n graph_emb = 0\n for x in xs:\n graph_emb += self.pool(x, batch)\n\n return graph_emb, torch.cat(xs, 1)\n\n def get_pool(self):\n if self.pooling == 'add':\n pool = global_add_pool\n elif self.pooling == 'max':\n pool = global_max_pool\n else:\n raise ValueError(\"Pooling Name <{}> is Unknown\".format(self.pooling))\n return pool"
},
{
"identifier": "Explainer_GIN",
"path": "models.py",
"snippet": "class Explainer_GIN(torch.nn.Module):\n def __init__(self, num_features, dim, num_gc_layers, readout):\n super(Explainer_GIN, self).__init__()\n\n self.num_gc_layers = num_gc_layers\n self.readout = readout\n\n self.convs = torch.nn.ModuleList()\n\n for i in range(num_gc_layers):\n if i:\n nn = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))\n else:\n nn = Sequential(Linear(num_features, dim), ReLU(), Linear(dim, dim))\n conv = GINConv(nn)\n self.convs.append(conv)\n\n if self.readout == 'concat':\n self.mlp = Linear(dim * num_gc_layers, 1)\n else:\n self.mlp = Linear(dim, 1)\n\n def forward(self, x, edge_index, batch):\n xs = []\n for i in range(self.num_gc_layers):\n if i != self.num_gc_layers - 1:\n x = self.convs[i](x, edge_index)\n x = F.relu(x)\n else:\n x = self.convs[i](x, edge_index)\n xs.append(x)\n\n if self.readout == 'last':\n node_prob = xs[-1]\n elif self.readout == 'concat':\n node_prob = torch.cat([x for x in xs], 1)\n elif self.readout == 'add':\n node_prob = 0\n for x in xs:\n node_prob += x\n\n node_prob = self.mlp(node_prob)\n node_prob = softmax(node_prob, batch)\n return node_prob"
},
{
"identifier": "HyperGNN",
"path": "models.py",
"snippet": "class HyperGNN(torch.nn.Module):\n\n def __init__(self, input_dim, input_dim_edge, hidden_dim, num_gc_layers, pooling, readout):\n\n super(HyperGNN, self).__init__()\n\n self.num_node_features = input_dim\n if input_dim_edge:\n self.num_edge_features = input_dim_edge\n self.use_edge_attr = True\n else:\n self.num_edge_features = input_dim\n self.use_edge_attr = False\n self.nhid = hidden_dim\n self.enhid = hidden_dim\n self.num_convs = num_gc_layers\n self.pooling = pooling\n self.readout = readout\n self.convs = self.get_convs()\n self.pool = self.get_pool()\n\n\n def forward(self, x, edge_index, edge_attr, batch, edge_imp):\n\n if not self.use_edge_attr:\n a_, b_ = x[edge_index[0]], x[edge_index[1]]\n edge_attr = (a_ + b_) / 2\n\n hyperedge_index, edge_batch = DHT(edge_index, batch)\n\n if edge_imp is not None:\n out, _ = torch_scatter.scatter_max(torch.reshape(edge_imp, (1, -1)), edge_batch)\n out = out.reshape(-1, 1)\n out = out[edge_batch]\n edge_imp /= out + eps\n edge_imp = (2 * edge_imp - 1)/(2 * scalar) + 1\n edge_attr = edge_attr * edge_imp\n\n xs = []\n\n for _ in range(self.num_convs):\n edge_attr = F.relu( self.convs[_](edge_attr, hyperedge_index))\n xs.append(edge_attr)\n\n if self.readout == 'last':\n graph_emb = self.pool(xs[-1], edge_batch)\n elif self.readout == 'concat':\n graph_emb = torch.cat([self.pool(x, edge_batch) for x in xs], 1)\n elif self.readout == 'add':\n graph_emb = 0\n for x in xs:\n graph_emb += self.pool(x, edge_batch)\n\n return graph_emb, None\n\n def get_convs(self):\n convs = torch.nn.ModuleList()\n for i in range(self.num_convs):\n if i == 0:\n conv = HypergraphConv(self.num_edge_features, self.nhid)\n else:\n conv = HypergraphConv(self.nhid, self.nhid)\n convs.append(conv)\n\n return convs\n\n def get_pool(self):\n if self.pooling == 'add':\n pool = global_add_pool\n elif self.pooling == 'max':\n pool = global_max_pool\n else:\n raise ValueError(\"Pooling Name <{}> is Unknown\".format(self.pooling))\n\n return pool"
},
{
"identifier": "Explainer_MLP",
"path": "models.py",
"snippet": "class Explainer_MLP(torch.nn.Module):\n def __init__(self, num_features, dim, n_layers):\n super(Explainer_MLP, self).__init__()\n\n self.n_layers = n_layers\n self.mlps = torch.nn.ModuleList()\n\n for i in range(n_layers):\n if i:\n nn = Sequential(Linear(dim, dim))\n else:\n nn = Sequential(Linear(num_features, dim))\n self.mlps.append(nn)\n\n self.final_mlp = Linear(dim, 1)\n\n\n def forward(self, x, edge_index, batch):\n\n for i in range(self.n_layers):\n x = self.mlps[i](x)\n x = F.relu(x)\n\n node_prob = self.final_mlp(x)\n node_prob = softmax(node_prob, batch)\n return node_prob"
},
{
"identifier": "arg_parse",
"path": "arguments.py",
"snippet": "def arg_parse():\n parser = argparse.ArgumentParser(description='SIGNET')\n parser.add_argument('--dataset', type=str, default='mutag')\n parser.add_argument('--batch_size', type=int, default=128)\n parser.add_argument('--batch_size_test', type=int, default=9999)\n parser.add_argument('--log_interval', type=int, default=1)\n parser.add_argument('--num_trials', type=int, default=5)\n parser.add_argument('--device', type=int, default=0)\n parser.add_argument('--lr', dest='lr', type=float, default=0.01)\n parser.add_argument('--epochs', type=int, default=500)\n parser.add_argument('--encoder_layers', type=int, default=5)\n parser.add_argument('--hidden_dim', type=int, default=16)\n parser.add_argument('--pooling', type=str, default='add', choices=['add', 'max'])\n parser.add_argument('--readout', type=str, default='concat', choices=['concat', 'add', 'last'])\n parser.add_argument('--explainer_model', type=str, default='gin', choices=['mlp', 'gin'])\n parser.add_argument('--explainer_layers', type=int, default=5)\n parser.add_argument('--explainer_hidden_dim', type=int, default=8)\n parser.add_argument('--explainer_readout', type=str, default='add', choices=['concat', 'add', 'last'])\n\n return parser.parse_args()"
},
{
"identifier": "get_data_loaders",
"path": "get_data_loaders.py",
"snippet": "def get_data_loaders(dataset_name, batch_size, batch_size_test=None, random_state=0, data_dir='data'):\n assert dataset_name in ['mutag', 'mnist0', 'mnist1'] # , 'bm_mn', 'bm_ms', 'bm_mt'\n\n if batch_size_test is None:\n batch_size_test = batch_size\n\n elif dataset_name == 'mutag':\n dataset = Mutag(root=data_dir + '/mutag')\n dataset.data.y = dataset.data.y.squeeze()\n dataset.data.y = 1 - dataset.data.y # we make the original class \"0\" as anomalies here\n split_idx = get_random_split_idx(dataset, random_state)\n loaders = get_loaders_mutag(batch_size, batch_size_test, dataset=dataset, split_idx=split_idx)\n num_feat = dataset.data.x.shape[1]\n num_edge_feat = 0\n\n elif dataset_name in ['mnist0', 'mnist1']:\n num_train, num_test_normal, num_test_anomaly = 1000, 400, 100\n if dataset_name == 'mnist0':\n normal_class = 0\n else:\n normal_class = 1\n train = MNIST75sp(root=data_dir + '/mnist', mode='train')\n test = MNIST75sp(root=data_dir + '/mnist', mode='test')\n loaders = get_loaders_mnist(batch_size, batch_size_test, train, test,\n normal_class, num_train, num_test_normal, num_test_anomaly, random_state)\n num_feat = train.data.x.shape[1]\n num_edge_feat = 0\n\n elif 'bm' in dataset_name:\n pattern = dataset_name[3:]\n transform = T.Compose([T.ToUndirected()])\n train = BM(root=data_dir + '/' + dataset_name, pattern=pattern, mode='train', pre_transform=transform)\n test = BM(root=data_dir + '/' + dataset_name, pattern=pattern, mode='test', pre_transform=transform)\n loaders = get_loaders_bm(batch_size, batch_size_test, train, test)\n num_feat = train.data.x.shape[1]\n num_edge_feat = 8\n\n meta = {'num_feat':num_feat, 'num_edge_feat':num_edge_feat}\n\n return loaders, meta"
},
{
"identifier": "get_ad_split_TU",
"path": "get_data_loaders_tuad.py",
"snippet": "def get_ad_split_TU(args, fold=5):\n DS = args.dataset\n path = osp.join(osp.dirname(osp.realpath(__file__)), '.', 'data', DS)\n dataset = TUDataset(path, name=DS)\n data_list = []\n label_list = []\n\n for data in dataset:\n data_list.append(data)\n label_list.append(data.y.item())\n\n kfd = StratifiedKFold(n_splits=fold, random_state=0, shuffle=True)\n\n splits = []\n for k, (train_index, test_index) in enumerate(kfd.split(data_list, label_list)):\n splits.append((train_index, test_index))\n\n return splits"
},
{
"identifier": "get_data_loaders_TU",
"path": "get_data_loaders_tuad.py",
"snippet": "def get_data_loaders_TU(args, split):\n DS = args.dataset\n\n path = osp.join(osp.dirname(osp.realpath(__file__)), '.', 'data', DS)\n\n if DS in ['IMDB-BINARY', 'REDDIT-BINARY', 'COLLAB']:\n dataset = TUDataset(path, name=DS, transform=(Constant(1, cat=False)))\n else:\n dataset = TUDataset(path, name=DS)\n\n dataset_num_features = dataset.num_node_features\n\n data_list = []\n label_list = []\n\n for data in dataset:\n data.edge_attr = None\n data_list.append(data)\n label_list.append(data.y.item())\n\n (train_index, test_index) = split\n data_train_ = [data_list[i] for i in train_index]\n data_test = [data_list[i] for i in test_index]\n\n data_train = []\n for data in data_train_:\n if data.y != 0:\n data_train.append(data)\n\n idx = 0\n for data in data_train:\n data.y = 0\n data['idx'] = idx\n idx += 1\n\n for data in data_test:\n data.y = 1 if data.y == 0 else 0\n\n dataloader = DataLoader(data_train, batch_size=args.batch_size, shuffle=True)\n dataloader_test = DataLoader(data_test, batch_size=args.batch_size_test, shuffle=True)\n meta = {'num_feat':dataset_num_features, 'num_train':len(data_train), 'num_edge_feat':0}\n loader_dict = {'train': dataloader, 'test': dataloader_test}\n\n return loader_dict, meta"
}
] | import torch
import numpy as np
import torch.nn as nn
import random
import warnings
from sklearn.metrics import roc_auc_score
from models import GIN, Explainer_GIN, HyperGNN, Explainer_MLP
from arguments import arg_parse
from get_data_loaders import get_data_loaders
from get_data_loaders_tuad import get_ad_split_TU, get_data_loaders_TU | 3,513 |
warnings.filterwarnings("ignore")
explainable_datasets = ['mutag', 'mnist0', 'mnist1', 'bm_mn', 'bm_ms', 'bm_mt']
class SIGNET(nn.Module):
def __init__(self, input_dim, input_dim_edge, args, device):
super(SIGNET, self).__init__()
self.device = device
self.embedding_dim = args.hidden_dim
if args.readout == 'concat':
self.embedding_dim *= args.encoder_layers
if args.explainer_model == 'mlp':
|
warnings.filterwarnings("ignore")
explainable_datasets = ['mutag', 'mnist0', 'mnist1', 'bm_mn', 'bm_ms', 'bm_mt']
class SIGNET(nn.Module):
def __init__(self, input_dim, input_dim_edge, args, device):
super(SIGNET, self).__init__()
self.device = device
self.embedding_dim = args.hidden_dim
if args.readout == 'concat':
self.embedding_dim *= args.encoder_layers
if args.explainer_model == 'mlp': | self.explainer = Explainer_MLP(input_dim, args.explainer_hidden_dim, args.explainer_layers) | 3 | 2023-10-18 04:23:35+00:00 | 4k |
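The gold completion for the record above fills the `mlp` branch of SIGNET's explainer dispatch. A minimal, self-contained sketch of that pattern follows; the body of Explainer_MLP and the build_explainer helper are hypothetical stand-ins inferred from the imported names and the completed call signature, not SIGNET's actual implementation.

import torch.nn as nn

class Explainer_MLP(nn.Module):
    # Hypothetical stand-in: scores each node's feature vector with a small MLP.
    def __init__(self, input_dim, hidden_dim, num_layers):
        super().__init__()
        dims = [input_dim] + [hidden_dim] * num_layers
        layers = []
        for i in range(num_layers):
            layers += [nn.Linear(dims[i], dims[i + 1]), nn.ReLU()]
        layers.append(nn.Linear(dims[-1], 1))  # one importance logit per node
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        return self.net(x)

def build_explainer(args, input_dim):
    # Mirrors the branch that the record's next_line completes.
    if args.explainer_model == 'mlp':
        return Explainer_MLP(input_dim, args.explainer_hidden_dim, args.explainer_layers)
    raise ValueError(f"unknown explainer_model: {args.explainer_model}")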
smonsays/metax | metax/data/synthetic.py | [
{
"identifier": "DatasetGenerator",
"path": "metax/data/dataset/base.py",
"snippet": "class DatasetGenerator(abc.ABC):\n \"\"\"\n Abstract base class for generated datasets.\n\n Attributes:\n input_shape (tuple): The shape of the input data.\n output_dim (int): The dimensionality of the output data.\n \"\"\"\n def __init__(self, input_shape: Tuple[int], output_dim: int) -> None:\n self.input_shape = input_shape\n self.output_dim = output_dim\n\n @abc.abstractmethod\n def sample(self, rng: chex.PRNGKey, num_tasks: int, num_samples: int, mode: str) -> Dataset:\n \"\"\"\n Generate a batch of tasks.\n\n Args:\n rng (jax.random.PRNGKey): The random number generator to use.\n num_tasks (int): The number of tasks to generate.\n num_samples (int): The number of samples per task.\n mode (str): The mode of the generated data (e.g. 'train', 'test', 'ood').\n\n Returns:\n A namedtuple `Dataset` (x, y) containing the input and output data for the generated tasks.\n x has shape (num_tasks, num_samples) + input_shape.\n y has shape (num_tasks, num_samples, output_dim).\n \"\"\"\n pass"
},
{
"identifier": "Dataloader",
"path": "metax/data/base.py",
"snippet": "class Dataloader(abc.ABC):\n def __init__(self, input_shape: Tuple[int], output_dim: int):\n self.input_shape = input_shape\n self.output_dim = output_dim\n\n @abc.abstractproperty\n def __len__(self):\n pass\n\n @abc.abstractproperty\n def sample_input(self):\n # Sample input should include batch dimension\n pass\n\n @abc.abstractmethod\n def __iter__(self):\n pass"
},
{
"identifier": "MetaDataset",
"path": "metax/data/base.py",
"snippet": "class MetaDataset(NamedTuple):\n train: Union[Dataset, MultitaskDataset]\n test: Union[Dataset, MultitaskDataset]"
},
{
"identifier": "family",
"path": "metax/data/dataset/family.py",
"snippet": "class Family(DatasetGenerator):\nclass Harmonic(Family):\nclass Linear(Family):\nclass Polynomial(Family):\nclass Sawtooth(Family):\nclass SinusoidFamily(Family):\n def __init__(self, fun_types: List = [\"harm\", \"lin\", \"poly\", \"saw\", \"sine\"]):\n def _rescale_linear(self, x, old_min, old_max, new_min, new_max):\n def sample_harmonics(self, rng, num_tasks, num_samples):\n def harmonic(input, a1, a2, b1, b2, frequency):\n def sample_linears(self, rng, num_tasks, num_samples):\n def linear(input, intercept, slope):\n def sample_polynomials(self, rng, num_tasks, num_samples):\n def polynomial(input, a, b, c):\n def sample_sawtooths(self, rng, num_tasks, num_samples):\n def sawtooth(input, amplitude, phase, width=1):\n def sample_sinusoids(self, rng, num_tasks, num_samples):\n def sinusoid(input, amplitude, phase):\n def sample(self, rng, num_tasks, num_samples, mode=None):\n def __init__(self):\n def __init__(self):\n def __init__(self):\n def __init__(self):\n def __init__(self):"
},
{
"identifier": "sinusoid",
"path": "metax/data/dataset/sinusoid.py",
"snippet": "@jnp.vectorize\ndef sinusoid(inputs, amplitude, phase):\n targets = amplitude * jnp.sin(inputs + phase)\n\n return targets"
},
{
"identifier": "create_metadataset",
"path": "metax/data/utils.py",
"snippet": "def create_metadataset(dataset: Union[Dataset, MultitaskDataset], shots):\n \"\"\"\n Split data into train and test set and create batches of tasks on leading axis.\n\n Args:\n shots: Number of samples used for train (support) set\n \"\"\"\n # Split all leafs into train and test shots\n dataset_train = jtu.tree_map(\n lambda x: jnp.split(x, indices_or_sections=(shots, ), axis=1)[0], dataset\n )\n dataset_test = jtu.tree_map(\n lambda x: jnp.split(x, indices_or_sections=(shots, ), axis=1)[1], dataset\n )\n\n return MetaDataset(train=dataset_train, test=dataset_test)"
}
] | import logging
import chex
import jax
from typing import List, Optional
from metax.data.dataset.base import DatasetGenerator
from .base import Dataloader, MetaDataset
from .dataset import family, sinusoid
from .utils import create_metadataset | 1,901 | """
Copyright (c) Simon Schug
All rights reserved.
MIT License
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
class SyntheticMetaDataloader(Dataloader):
def __init__(
self,
data_generator: DatasetGenerator,
num_tasks: int,
shots_train: int,
shots_test: int,
meta_batch_size: int,
mode: str,
train_test_split: bool,
rng: chex.PRNGKey,
):
super().__init__(
input_shape=data_generator.input_shape,
output_dim=data_generator.output_dim
)
self.data_generator = data_generator
self.num_tasks = num_tasks
self.shots_train = shots_train
self.shots_test = shots_test
self.meta_batch_size = meta_batch_size
self.mode = mode
self.train_test_split = train_test_split
self.fixed_rng = rng
assert train_test_split or mode == "train", "mode must be train if train_test_split is False"
assert num_tasks % meta_batch_size == 0, "num_tasks must be divisible by meta_batch_size"
self.num_steps = num_tasks // meta_batch_size
self.shots = shots_train + shots_test
# Sample data to get placeholder_input
self._sample_input = self.data_generator.sample(
self.fixed_rng, 1, self.shots_train, mode="train"
).x[0]
@property
def sample_input(self):
return self._sample_input
def __len__(self):
return self.num_steps
def __iter__(self):
for rng in jax.random.split(self.fixed_rng, self.num_steps):
dataset = self.data_generator.sample(rng, self.meta_batch_size, self.shots, mode=self.mode)
if self.train_test_split:
# Split into train and test set
yield create_metadataset(dataset, self.shots_train)
else:
# No train_test split means, meta.train == meta.test set
yield MetaDataset(train=dataset, test=dataset)
def create_synthetic_metadataset(
name,
meta_batch_size,
shots_train,
shots_test,
train_test_split,
num_tasks_train,
num_tasks_test,
num_tasks_valid,
num_tasks_ood: Optional[int] = None,
ood_sets_hot: Optional[List[int]] = None,
seed: int = 0,
**kwargs,
):
if name == "family":
data_generator = family.Family()
elif name == "harmonic":
data_generator = family.Harmonic()
elif name == "linear":
data_generator = family.Linear()
elif name == "polynomial":
data_generator = family.Polynomial()
elif name == "sawtooth":
data_generator = family.Sawtooth()
elif name == "sinusoid_family":
data_generator = family.SinusoidFamily()
elif name == "sinusoid":
| """
Copyright (c) Simon Schug
All rights reserved.
MIT License
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
class SyntheticMetaDataloader(Dataloader):
def __init__(
self,
data_generator: DatasetGenerator,
num_tasks: int,
shots_train: int,
shots_test: int,
meta_batch_size: int,
mode: str,
train_test_split: bool,
rng: chex.PRNGKey,
):
super().__init__(
input_shape=data_generator.input_shape,
output_dim=data_generator.output_dim
)
self.data_generator = data_generator
self.num_tasks = num_tasks
self.shots_train = shots_train
self.shots_test = shots_test
self.meta_batch_size = meta_batch_size
self.mode = mode
self.train_test_split = train_test_split
self.fixed_rng = rng
assert train_test_split or mode == "train", "mode must be train if train_test_split is False"
assert num_tasks % meta_batch_size == 0, "num_tasks must be divisible by meta_batch_size"
self.num_steps = num_tasks // meta_batch_size
self.shots = shots_train + shots_test
# Sample data to get placeholder_input
self._sample_input = self.data_generator.sample(
self.fixed_rng, 1, self.shots_train, mode="train"
).x[0]
@property
def sample_input(self):
return self._sample_input
def __len__(self):
return self.num_steps
def __iter__(self):
for rng in jax.random.split(self.fixed_rng, self.num_steps):
dataset = self.data_generator.sample(rng, self.meta_batch_size, self.shots, mode=self.mode)
if self.train_test_split:
# Split into train and test set
yield create_metadataset(dataset, self.shots_train)
else:
# No train_test split means, meta.train == meta.test set
yield MetaDataset(train=dataset, test=dataset)
def create_synthetic_metadataset(
name,
meta_batch_size,
shots_train,
shots_test,
train_test_split,
num_tasks_train,
num_tasks_test,
num_tasks_valid,
num_tasks_ood: Optional[int] = None,
ood_sets_hot: Optional[List[int]] = None,
seed: int = 0,
**kwargs,
):
if name == "family":
data_generator = family.Family()
elif name == "harmonic":
data_generator = family.Harmonic()
elif name == "linear":
data_generator = family.Linear()
elif name == "polynomial":
data_generator = family.Polynomial()
elif name == "sawtooth":
data_generator = family.Sawtooth()
elif name == "sinusoid_family":
data_generator = family.SinusoidFamily()
elif name == "sinusoid": | data_generator = sinusoid.Sinusoid() | 4 | 2023-10-19 16:36:20+00:00 | 4k |
claws-lab/XLingEval | correctness/correctness_get_gpt_answer.py | [
{
"identifier": "load_HealthQA",
"path": "dataloader/load_data.py",
"snippet": "def load_HealthQA(split: str, language: str = 'English', task: str = \"consistency\"):\n print(f\"Loading HealthQA with split {split} and Language {language} ...\")\n\n if osp.basename(os.getcwd()) == \"XLingHealth_Dataset\":\n path = \"HealthQA.xlsx\"\n\n else:\n path = osp.join(\"XLingHealth_Dataset\", \"HealthQA.xlsx\")\n\n raw_df = pd.read_excel(path, sheet_name=language)\n\n if task == \"verifiability\":\n return raw_df\n\n elif task in [\"consistency\", \"correctness\"]:\n df = raw_df[raw_df[\"label\"] == 1]\n return df\n\n else:\n raise ValueError(f\"Unknown task {task}\")"
},
{
"identifier": "load_MedicationQA",
"path": "dataloader/load_data.py",
"snippet": "def load_MedicationQA(language: str = \"English\", task: str = \"consistency\"):\n\n if osp.basename(os.getcwd()) == \"XLingHealth_Dataset\":\n path = \"MedicationQA.xlsx\"\n\n else:\n path = osp.join(\"XLingHealth_Dataset\", \"MedicationQA.xlsx\")\n\n raw_df = pd.read_excel(path, sheet_name=language)\n\n if task == \"verifiability\":\n raw_df[\"neg_sample\"] = [[x[const.ID]] + eval(x[\"neg_sample\"]) for _, x in raw_df.iterrows()]\n df = raw_df.explode(\"neg_sample\")\n df.drop(const.ID, axis=1, inplace=True)\n df.reset_index(drop=True, inplace=True)\n # LiveQA does not provide negative samples, so we do negative sampling here.\n df[const.LABEL] = [1 if i % 5 == 0 else 0 for i in range(len(df))]\n df[const.ANSWER] = raw_df.loc[df[\"neg_sample\"].values.astype(int), const.ANSWER].reset_index(drop=True)\n\n if language != \"English\":\n df[const.ANSWER_TRANSLATED] = raw_df.loc[df[\"neg_sample\"].values.astype(int), const.ANSWER_TRANSLATED].reset_index(drop=True)\n\n\n return df\n\n else:\n return raw_df"
},
{
"identifier": "load_LiveQA",
"path": "dataloader/load_data.py",
"snippet": "def load_LiveQA(language=\"English\", task: str = \"consistency\"):\n if osp.basename(os.getcwd()) == \"XLingHealth_Dataset\":\n path = \"LiveQA.xlsx\"\n\n else:\n path = osp.join(\"XLingHealth_Dataset\", \"LiveQA.xlsx\")\n\n raw_df = pd.read_excel(path, sheet_name=language)\n\n if task == \"verifiability\":\n raw_df[\"neg_sample\"] = [[x[const.ID]] + eval(x[\"neg_sample\"]) for _, x in raw_df.iterrows()]\n df = raw_df.explode(\"neg_sample\")\n df.drop(const.ID, axis=1, inplace=True)\n df.reset_index(drop=True, inplace=True)\n # LiveQA does not provide negative samples, so we do negative sampling here.\n df[const.LABEL] = [1 if i % 5 == 0 else 0 for i in range(len(df))]\n df[const.ANSWER] = raw_df.loc[df[\"neg_sample\"].values.astype(int), const.ANSWER].reset_index(drop=True)\n\n if language != \"English\":\n df[const.ANSWER_TRANSLATED] = raw_df.loc[df[\"neg_sample\"].values.astype(int), const.ANSWER_TRANSLATED].reset_index(drop=True)\n\n return df\n\n else:\n return raw_df"
},
{
"identifier": "set_constants",
"path": "const.py",
"snippet": "GPT_MODEL = \"gpt-3.5-turbo\"\nTEMPERATURES = [0.0, 0.25, 0.5, 0.75, 1.0]\nLANGUAGES = [\"English\", \"Spanish\", \"Chinese\", \"Hindi\"]\nLANGUAGE_CODES = [\"en\", \"es\", \"zh\", \"hi\"]\nHOME_DIR_LINUX = \"/home/ahren\"\nHOME_DIR_LINUX_SERVER = \"/nethome/yjin328\"\nID = \"id\"\nOPTION = \"option\"\nQUESTION = \"question\"\nQTEXT = \"qtext\"\nQUESTION_TRANSLATED = \"question_translated\"\nANSWER = \"answer\"\nANSWER_TRANSLATED = \"answer_translated\"\nERROR = \"error\"\nLABEL = \"label\"\nLANGUAGE = \"language\"\nTEMPERATURE = \"temperature\"\nPRED = \"pred\"\nPRED_BINARY = \"pred_binary\"\nPOSITIVE = \"positive\"\nNEGATIVE = \"negative\"\nLABEL2ID = {\n POSITIVE: 1,\n NEGATIVE: 0,\n}\nDATASET2LENGTH = {\n \"healthqa\": 1134,\n \"liveqa\": 245,\n \"medicationqa\": 690,\n}\nCONSISTENCY_METRICS_SIMILARITY = [\"bert_sim\",\n \"bertscore_P\",\n \"bertscore_R\", \"bertscore_F1\", \"unigram_jaccard\",\n \"bigram_jaccard\", \"length_mean\", \"length_std\"]\nMETRIC_NAME2FULLNAME = {\n \"macro_precision\": \"Macro Precision\",\n \"macro_recall\": \"Macro Recall\",\n \"macro_f1\": \"Macro F1\",\n \"accuracy\": \"Accuracy\",\n \"auc\": \"AUC\",\n 'bert_sim': r\"$\\mathrm{sim}_{\\mathrm{sent}}$\",\n 'bertscore_P': \"BERTScore (Precision)\",\n 'bertscore_R': \"BERTScore (Recall)\",\n 'bertscore_F1': r\"$\\mathrm{BERTScore}$\",\n 'unigram_jaccard': r\"$\\mathrm{sim}_{\\mathrm{1-gram}}$\",\n 'bigram_jaccard': r\"$\\mathrm{sim}_{\\mathrm{2-gram}}$\",\n 'length_mean': \"Length\",\n 'length_std': \"Std. of Length\",\n 'hdp_mean': r\"$\\mathrm{sim}_{\\mathrm{HDP}}$\", # \"Avg. Topical Similarity (HDP)\",\n 'lda20_mean': r\"$\\mathrm{sim}_{\\mathrm{LDA}}^{20}$\", # \"Avg. Topical Similarity (LDA w/ 20 Topics)\",\n 'hdp_std': \"Std. Topical Similarity (HDP)\",\n}\nVERIFIABILITY_METRICS_VISUALIZATION = [\"macro_precision\", \"macro_recall\", \"macro_f1\",\n \"accuracy\", \"auc\"]\nCONSISTENCY_METRICS_TOPIC_MODELING = [\"hdp_mean\", \"hdp_std\"]\nLANG2SHORT = {\n \"English\": \"en\",\n \"Chinese\": \"zh-cn\",\n \"Hindi\": \"hi-in\",\n \"Spanish\": \"es\",\n}\nMNLI_LABEL2ID = {\n \"entailment\": 0,\n \"neutral\": 1,\n \"contradiction\": 2,\n}\nTRANSLATE = \"translate\"\nMEDICAL = \"medical\"\nPARAPHRASE = \"paraphrase\"\nTRAIN = \"train\"\nDEV = \"dev\"\nTEST = \"test\"\nCHINESE_HINDI_PUNCTUATION = \",。?!;:।॥\"\nCONFIDENCE_LIKERT_SCALE = \"\"\"\nConfidence of your evaluation: \nVery confident (5): I have checked all aspects of the input sentences thoroughly. I am absolutely certain of my evaluation.\nQuite confident (4): I am quite confident about my evaluation. It is unlikely, though possible, that I missed some elements that could otherwise have impacted my evaluation.\nSomewhat confident (3): I am moderately confident about my evaluation. There is a chance I missed some aspects.\nNot very confident (2): I am not very confident about my evaluation. I am able to defend my evaluation, but it is quite likely that I missed or did not understand some key details of the inputs.\nNot confident (1): My evaluation is an educated guess.\n\"\"\""
}
] | import os
import time
import traceback
import sys
import pandas as pd
from os import path as osp
from dataloader.load_data import load_HealthQA, load_MedicationQA, load_LiveQA
from setup import project_setup, openai_setup
from utils_chatgpt import get_response
from const import set_constants
from argparse import ArgumentParser | 2,794 |
llm_answer_list = []
for idx, row in data_df.iterrows():
retry = True
llm_response = None
if idx%100 == 0:
print("Index: ", idx)
while retry:
try:
message_list=[{'role': 'system', 'content': f'You are Health GPT and You answer to health and medical related queries in {lang}. Your answers should be in one or more paragraphs without listing points/lists and should be in {lang}.'}]
messages = message_list.copy()
if lang=="English":
prompt = row['question']
else:
prompt = row['translated_question_'+lang]
messages.append({'role': 'user', 'content': prompt})
llm_response = get_response(open_ai_object_list[model_list_index], messages, constants['GPT_MODEL'], constants['DEPLOYMENT_ID'])
llm_answer_list.append(llm_response)
time.sleep(0.3)
retry = False
model_use_count += 1
if model_use_count%3 == 0:
model_list_index += 1
model_list_index = model_list_index%total_models_num
model_use_count = 0
except Exception as e:
print("Error at index: ", idx)
traceback.print_exc()
model_list_index += 1
model_list_index = model_list_index%total_models_num
model_use_count = 0
print("Error: ", e)
#check if the error contains the substring Request timed out: HTTPSConnectionPool or rate limit
if "Request timed out: HTTPSConnectionPool" in str(e) or "rate limit" in str(e) or "timed out" or "No Response" in str(e):
print("Sleeping for 10 seconds")
time.sleep(10)
continue
else:
if llm_response:
llm_answer_list.append(llm_response)
else:
if "This model's maximum context length is 8192 tokens" in str(e):
llm_answer_list.append("Max Context Length Exceeded")
else:
llm_answer_list.append(str(e))
print("LLM Response: ", llm_response)
retry = False
continue
data_df["llm_answer_"+lang] = llm_answer_list
return data_df
if __name__ == "__main__":
#add an argument to get the dataset path
parser = ArgumentParser()
parser.add_argument("--dataset_path", type=str, required=True)
parser.add_argument("--model", type=str, required=True)
args = parser.parse_args()
dataset_path = args.dataset_path
model = args.model
lang_list = ["English", "Hindi", "Chinese", "Spanish"]
file_name = dataset_path.split(".")[0]
#get the directory path from the dataset path
directory_path = "/".join(dataset_path.split("/")[:-1])
os.chdir(directory_path)
print("Current working directory: {}".format(os.getcwd()))
constants = set_constants(model)
print(constants)
project_setup()
open_ai_object_list = openai_setup()
total_models_num = len(open_ai_object_list)
#if dataset path ends with pkl, then use pandas read_pickle method
if dataset_path.endswith("pkl"):
data_df = pd.read_pickle(dataset_path)
else:
df_li = []
for lang in lang_list:
if "HealthQA" in args.dataset_path:
# Only consider the dev set for HealthQA
df = load_HealthQA(split="dev",
language=lang, task="correctness")
elif "MedicationQA" in args.dataset_path:
df = load_MedicationQA(language=lang, task="correctness")
elif "LiveQA" in args.dataset_path:
| sys.path.append(osp.dirname(osp.dirname(osp.abspath(__file__))))
def get_eval(data_df, lang, open_ai_object_list, constants):
print("Lang: ", lang)
model_use_count = 0
model_list_index = 0
llm_answer_list = []
for idx, row in data_df.iterrows():
retry = True
llm_response = None
if idx%100 == 0:
print("Index: ", idx)
while retry:
try:
message_list=[{'role': 'system', 'content': f'You are Health GPT and You answer to health and medical related queries in {lang}. Your answers should be in one or more paragraphs without listing points/lists and should be in {lang}.'}]
messages = message_list.copy()
if lang=="English":
prompt = row['question']
else:
prompt = row['translated_question_'+lang]
messages.append({'role': 'user', 'content': prompt})
llm_response = get_response(open_ai_object_list[model_list_index], messages, constants['GPT_MODEL'], constants['DEPLOYMENT_ID'])
llm_answer_list.append(llm_response)
time.sleep(0.3)
retry = False
model_use_count += 1
if model_use_count%3 == 0:
model_list_index += 1
model_list_index = model_list_index%total_models_num
model_use_count = 0
except Exception as e:
print("Error at index: ", idx)
traceback.print_exc()
model_list_index += 1
model_list_index = model_list_index%total_models_num
model_use_count = 0
print("Error: ", e)
#check if the error contains the substring Request timed out: HTTPSConnectionPool or rate limit
if "Request timed out: HTTPSConnectionPool" in str(e) or "rate limit" in str(e) or "timed out" or "No Response" in str(e):
print("Sleeping for 10 seconds")
time.sleep(10)
continue
else:
if llm_response:
llm_answer_list.append(llm_response)
else:
if "This model's maximum context length is 8192 tokens" in str(e):
llm_answer_list.append("Max Context Length Exceeded")
else:
llm_answer_list.append(str(e))
print("LLM Response: ", llm_response)
retry = False
continue
data_df["llm_answer_"+lang] = llm_answer_list
return data_df
if __name__ == "__main__":
#add an argument to get the dataset path
parser = ArgumentParser()
parser.add_argument("--dataset_path", type=str, required=True)
parser.add_argument("--model", type=str, required=True)
args = parser.parse_args()
dataset_path = args.dataset_path
model = args.model
lang_list = ["English", "Hindi", "Chinese", "Spanish"]
file_name = dataset_path.split(".")[0]
#get the directory path from the dataset path
directory_path = "/".join(dataset_path.split("/")[:-1])
os.chdir(directory_path)
print("Current working directory: {}".format(os.getcwd()))
constants = set_constants(model)
print(constants)
project_setup()
open_ai_object_list = openai_setup()
total_models_num = len(open_ai_object_list)
#if dataset path ends with pkl, then use pandas read_pickle method
if dataset_path.endswith("pkl"):
data_df = pd.read_pickle(dataset_path)
else:
df_li = []
for lang in lang_list:
if "HealthQA" in args.dataset_path:
# Only consider the dev set for HealthQA
df = load_HealthQA(split="dev",
language=lang, task="correctness")
elif "MedicationQA" in args.dataset_path:
df = load_MedicationQA(language=lang, task="correctness")
elif "LiveQA" in args.dataset_path: | df = load_LiveQA(language=lang, task="correctness") | 2 | 2023-10-18 17:35:42+00:00 | 4k |
RF-Tar-Railt/satori-python | src/satori/model.py | [
{
"identifier": "Element",
"path": "src/satori/element.py",
"snippet": "class Element:\n @classmethod\n def from_raw(cls: Type[TE], raw: RawElement) -> TE:\n _fields = {f.name for f in fields(cls)}\n attrs = {k: v for k, v in raw.attrs.items() if k in _fields}\n result = cls(**attrs) # type: ignore\n for k, v in raw.attrs.items():\n if k not in _fields:\n setattr(result, k, v)\n return result\n\n def get_type(self) -> str:\n return self.__class__.__name__.lower()\n\n def __str__(self) -> str:\n def _attr(key: str, value: Any):\n if value is True:\n return key\n if value is False:\n return f\"no-{key}\"\n if isinstance(value, (int, float)):\n return f\"{key}={value}\"\n return f'{key}=\"{escape(str(value))}\"'\n\n attrs = \" \".join(_attr(k, v) for k, v in vars(self).items() if not k.startswith(\"_\"))\n attrs = f\" {attrs}\" if attrs else \"\"\n return f\"<{self.get_type()}{attrs}/>\""
},
{
"identifier": "transform",
"path": "src/satori/element.py",
"snippet": "def transform(elements: List[RawElement]) -> List[Element]:\n msg = []\n for elem in elements:\n if elem.type in ELEMENT_TYPE_MAP:\n seg_cls = ELEMENT_TYPE_MAP[elem.type]\n msg.append(seg_cls.from_raw(elem))\n elif elem.type in (\"a\", \"link\"):\n msg.append(Link.from_raw(elem))\n elif elem.type == \"button\":\n msg.append(Button.from_raw(elem))\n elif elem.type in STYLE_TYPE_MAP:\n seg_cls = STYLE_TYPE_MAP[elem.type]\n msg.append(seg_cls.from_raw(elem))\n elif elem.type in (\"br\", \"newline\"):\n msg.append(Br(\"\\n\"))\n elif elem.type == \"message\":\n msg.append(Message.from_raw(elem)(*transform(elem.children)))\n elif elem.type == \"quote\":\n msg.append(Quote.from_raw(elem)(*transform(elem.children)))\n else:\n msg.append(Custom(elem.type, elem.attrs)(*transform(elem.children)))\n return msg"
},
{
"identifier": "parse",
"path": "src/satori/parser.py",
"snippet": "def parse(src: str):\n tokens: List[Union[Token, RawElement]] = []\n\n def push_text(text: str):\n if text:\n tokens.append(RawElement(type=\"text\", attrs={\"text\": text}))\n\n def parse_content(source: str):\n push_text(unescape(source))\n\n while tag_map := tag_pat.search(src):\n parse_content(src[: tag_map.start()])\n src = src[tag_map.end() :]\n if tag_map.group(0).startswith(\"<!--\"):\n continue\n close, tag, attr_str, empty = tag_map.groups()\n tkn = Token(\n type=tag or \"template\",\n close=close,\n empty=empty,\n attrs={},\n source=tag_map.group(0),\n )\n while attr_map := attr_pat.search(attr_str):\n key, value1, value2 = attr_map.groups()\n value = value1 or value2\n if value:\n tkn.attrs[key] = unescape(value)\n elif key.startswith(\"no-\"):\n tkn.attrs[key] = False\n else:\n tkn.attrs[key] = True\n attr_str = attr_str[attr_map.end() :]\n tokens.append(tkn)\n\n parse_content(src)\n\n stack = [RawElement(type=\"template\")]\n\n def rollback(i: int):\n while i:\n child = stack.pop(0)\n source = stack[0].children.pop(-1)\n stack[0].children.append(RawElement(type=\"text\", attrs={\"text\": source}))\n stack[0].children.extend(child.children)\n i -= 1\n\n for tkn in tokens:\n if isinstance(tkn, RawElement):\n stack[0].children.append(tkn)\n elif tkn.close:\n index = 0\n while index < len(stack) and stack[index].type != tkn.type:\n index += 1\n if index == len(stack):\n stack[0].children.append(RawElement(type=\"text\", attrs={\"text\": tkn.source}))\n else:\n rollback(index)\n elm = stack.pop(0)\n elm.source = None\n else:\n elm = RawElement(type=tkn.type, attrs=tkn.attrs)\n stack[0].children.append(elm)\n if not tkn.empty:\n elm.source = tkn.source\n stack.insert(0, elm)\n\n rollback(len(stack) - 1)\n return stack[0].children"
}
] | from dataclasses import asdict, dataclass
from datetime import datetime
from enum import IntEnum
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar
from .element import Element, transform
from .parser import parse | 2,337 | data["user"] = User.parse(raw["user"])
if "joined_at" in raw:
data["joined_at"] = datetime.fromtimestamp(int(raw["joined_at"]) / 1000)
return cls(**data)
def dump(self):
res = {}
if self.user:
res["user"] = self.user.dump()
if self.nick or self.name:
res["nick"] = self.nick or self.name
if self.avatar:
res["avatar"] = self.avatar
if self.joined_at:
res["joined_at"] = int(self.joined_at.timestamp() * 1000)
return res
@dataclass
class Role:
id: str
name: Optional[str] = None
@classmethod
def parse(cls, raw: dict):
return cls(**raw)
def dump(self):
res = {"id": self.id}
if self.name:
res["name"] = self.name
return res
class LoginStatus(IntEnum):
OFFLINE = 0
ONLINE = 1
CONNECT = 2
DISCONNECT = 3
RECONNECT = 4
@dataclass
class Login:
status: LoginStatus
user: Optional[User] = None
self_id: Optional[str] = None
platform: Optional[str] = None
@classmethod
def parse(cls, raw: dict):
data = raw.copy()
if "user" in raw:
data["user"] = User(**raw["user"])
data["status"] = LoginStatus(data["status"])
return cls(**data)
def dump(self):
res: Dict[str, Any] = {"status": self.status.value}
if self.user:
res["user"] = self.user.dump()
if self.self_id:
res["self_id"] = self.self_id
if self.platform:
res["platform"] = self.platform
return res
@dataclass
class ArgvInteraction:
name: str
arguments: list
options: Any
def dump(self):
return asdict(self)
@dataclass
class ButtonInteraction:
id: str
def dump(self):
return asdict(self)
class Opcode(IntEnum):
EVENT = 0
PING = 1
PONG = 2
IDENTIFY = 3
READY = 4
@dataclass
class Identify:
token: Optional[str] = None
sequence: Optional[int] = None
@dataclass
class Ready:
logins: List[Login]
@dataclass
class MessageObject:
id: str
content: List[Element]
channel: Optional[Channel] = None
guild: Optional[Guild] = None
member: Optional[Member] = None
user: Optional[User] = None
created_at: Optional[datetime] = None
updated_at: Optional[datetime] = None
@classmethod
def parse(cls, raw: dict):
data = {
"id": raw["id"],
|
class ChannelType(IntEnum):
TEXT = 0
VOICE = 1
CATEGORY = 2
DIRECT = 3
@dataclass
class Channel:
id: str
type: ChannelType
name: Optional[str] = None
parent_id: Optional[str] = None
@classmethod
def parse(cls, raw: dict):
data = raw.copy()
data["type"] = ChannelType(raw["type"])
return cls(**data)
def dump(self):
res = {"id": self.id, "type": self.type.value}
if self.name:
res["name"] = self.name
if self.parent_id:
res["parent_id"] = self.parent_id
return res
@dataclass
class Guild:
id: str
name: Optional[str] = None
avatar: Optional[str] = None
@classmethod
def parse(cls, raw: dict):
return cls(**raw)
def dump(self):
res = {"id": self.id}
if self.name:
res["name"] = self.name
if self.avatar:
res["avatar"] = self.avatar
return res
@dataclass
class User:
id: str
name: Optional[str] = None
nick: Optional[str] = None
avatar: Optional[str] = None
is_bot: Optional[bool] = None
@classmethod
def parse(cls, raw: dict):
return cls(**raw)
def dump(self):
res: Dict[str, Any] = {"id": self.id}
if self.name:
res["name"] = self.name
if self.nick:
res["nick"] = self.nick
if self.avatar:
res["avatar"] = self.avatar
if self.is_bot:
res["is_bot"] = self.is_bot
return res
@dataclass
class Member:
user: Optional[User] = None
nick: Optional[str] = None
name: Optional[str] = None
avatar: Optional[str] = None
joined_at: Optional[datetime] = None
@classmethod
def parse(cls, raw: dict):
data = raw.copy()
if "user" in raw:
data["user"] = User.parse(raw["user"])
if "joined_at" in raw:
data["joined_at"] = datetime.fromtimestamp(int(raw["joined_at"]) / 1000)
return cls(**data)
def dump(self):
res = {}
if self.user:
res["user"] = self.user.dump()
if self.nick or self.name:
res["nick"] = self.nick or self.name
if self.avatar:
res["avatar"] = self.avatar
if self.joined_at:
res["joined_at"] = int(self.joined_at.timestamp() * 1000)
return res
@dataclass
class Role:
id: str
name: Optional[str] = None
@classmethod
def parse(cls, raw: dict):
return cls(**raw)
def dump(self):
res = {"id": self.id}
if self.name:
res["name"] = self.name
return res
class LoginStatus(IntEnum):
OFFLINE = 0
ONLINE = 1
CONNECT = 2
DISCONNECT = 3
RECONNECT = 4
@dataclass
class Login:
status: LoginStatus
user: Optional[User] = None
self_id: Optional[str] = None
platform: Optional[str] = None
@classmethod
def parse(cls, raw: dict):
data = raw.copy()
if "user" in raw:
data["user"] = User(**raw["user"])
data["status"] = LoginStatus(data["status"])
return cls(**data)
def dump(self):
res: Dict[str, Any] = {"status": self.status.value}
if self.user:
res["user"] = self.user.dump()
if self.self_id:
res["self_id"] = self.self_id
if self.platform:
res["platform"] = self.platform
return res
@dataclass
class ArgvInteraction:
name: str
arguments: list
options: Any
def dump(self):
return asdict(self)
@dataclass
class ButtonInteraction:
id: str
def dump(self):
return asdict(self)
class Opcode(IntEnum):
EVENT = 0
PING = 1
PONG = 2
IDENTIFY = 3
READY = 4
@dataclass
class Identify:
token: Optional[str] = None
sequence: Optional[int] = None
@dataclass
class Ready:
logins: List[Login]
@dataclass
class MessageObject:
id: str
content: List[Element]
channel: Optional[Channel] = None
guild: Optional[Guild] = None
member: Optional[Member] = None
user: Optional[User] = None
created_at: Optional[datetime] = None
updated_at: Optional[datetime] = None
@classmethod
def parse(cls, raw: dict):
data = {
"id": raw["id"], | "content": transform(parse(raw["content"])), | 1 | 2023-10-18 11:09:34+00:00 | 4k |
zju3dv/nr_in_a_room | tools/check_pose.py | [
{
"identifier": "create_sphere_lookat_poses",
"path": "data_gen/data_geo_utils.py",
"snippet": "def create_sphere_lookat_poses(\n radius: float, n_poses: int, n_circles: float, up_dir=\"y\", phi_begin=20, phi_end=90\n):\n deg2rad = np.pi / 180\n # y up\n phi_list = np.linspace(phi_begin * deg2rad, phi_end * deg2rad, n_poses)\n theta_list = np.linspace(0, 360 * deg2rad * n_circles, n_poses)\n poses = []\n eyes = []\n for phi, theta in zip(phi_list, theta_list):\n if up_dir == \"y\":\n eye = np.array(\n [\n radius * np.sin(phi) * np.sin(theta),\n radius * np.cos(phi),\n radius * np.sin(phi) * np.cos(theta),\n ]\n )\n pose = lookat(eye, target=[0, 0, 0], up=[0, 1, 0])\n elif up_dir == \"z\":\n eye = np.array(\n [\n radius * np.sin(phi) * np.sin(theta),\n radius * np.sin(phi) * np.cos(theta),\n radius * np.cos(phi),\n ]\n )\n pose = lookat(eye, target=[0, 0, 0], up=[0, 0, 1])\n pose = np.linalg.inv(pose)\n poses += [pose]\n eyes += [eye]\n return poses, eyes"
},
{
"identifier": "O3dVisualizer",
"path": "tools/O3dVisualizer.py",
"snippet": "class O3dVisualizer:\n def __init__(self):\n self.geometries = []\n\n def add_o3d_geometry(self, geometry):\n self.geometries.append(geometry)\n\n def add_line_set(self, points, lines, colors=None, radius=0.008):\n # line_set = o3d.geometry.LineSet(\n # points=o3d.utility.Vector3dVector(points),\n # lines=o3d.utility.Vector2iVector(lines)\n # )\n if colors is None:\n colors = [\n [random.uniform(0, 1), random.uniform(0, 1), random.uniform(0, 1)]\n for i in range(len(lines))\n ]\n # line_set.colors = o3d.utility.Vector3dVector(colors)\n # self.geometries.append(line_set)\n mesh = LineMesh(points, lines, colors, radius=radius)\n self.geometries.extend(mesh.cylinder_segments)\n\n def add_np_points(\n self, points, color=None, size=None, resolution=3, with_normal=False\n ):\n if size == None:\n pcd = o3d.geometry.PointCloud()\n pcd.points = o3d.utility.Vector3dVector(points[:, :3])\n pcd = colorize_open3d_pcd(pcd)\n self.geometries.append(pcd)\n else:\n points = points[:, :3]\n mesh = o3d.geometry.TriangleMesh()\n for idx, pt in enumerate(points):\n mesh_sphere = o3d.geometry.TriangleMesh.create_sphere(\n radius=size, resolution=resolution\n )\n if with_normal:\n mesh_sphere.compute_vertex_normals()\n transform = np.eye(4)\n transform[0:3, 3] = pt\n mesh_sphere.transform(transform)\n if type(color) == np.ndarray:\n if color.size == 3:\n mesh_sphere.paint_uniform_color(color)\n else:\n mesh_sphere.paint_uniform_color(color[idx, :])\n else:\n mesh_sphere.paint_uniform_color([1.0, 0.0, 0.0])\n mesh += mesh_sphere\n self.geometries.append(mesh)\n\n def text_3d(\n self,\n text,\n pos,\n direction=None,\n degree=0.0,\n font=\"DejaVu Sans Mono for Powerline.ttf\",\n font_size=16,\n ):\n \"\"\"\n Generate a 3D text point cloud used for visualization.\n :param text: content of the text\n :param pos: 3D xyz position of the text upper left corner\n :param direction: 3D normalized direction of where the text faces\n :param degree: in plane rotation of text\n :param font: Name of the font - change it according to your system\n :param font_size: size of the font\n :return: o3d.geoemtry.PointCloud object\n \"\"\"\n if direction is None:\n direction = (0.0, 0.0, 1.0)\n\n from PIL import Image, ImageFont, ImageDraw\n from pyquaternion import Quaternion\n\n font_obj = ImageFont.truetype(font, font_size)\n font_dim = font_obj.getsize(text)\n\n img = Image.new(\"RGB\", font_dim, color=(255, 255, 255))\n draw = ImageDraw.Draw(img)\n draw.text((0, 0), text, font=font_obj, fill=(0, 0, 0))\n img = np.asarray(img)\n img_mask = img[:, :, 0] < 128\n indices = np.indices([*img.shape[0:2], 1])[:, img_mask, 0].reshape(3, -1).T\n\n pcd = o3d.geometry.PointCloud()\n pcd.colors = o3d.utility.Vector3dVector(img[img_mask, :].astype(float) / 255.0)\n pcd.points = o3d.utility.Vector3dVector(indices / 100.0)\n\n raxis = np.cross([0.0, 0.0, 1.0], direction)\n if np.linalg.norm(raxis) < 1e-6:\n raxis = (0.0, 0.0, 1.0)\n trans = (\n Quaternion(axis=raxis, radians=np.arccos(direction[2]))\n * Quaternion(axis=direction, degrees=degree)\n ).transformation_matrix\n trans[0:3, 3] = np.asarray(pos)\n pcd.transform(trans)\n return pcd\n\n def run_visualize(self):\n o3d.visualization.draw_geometries(self.geometries)"
}
] | import numpy as np
import argparse
import sys
import os
import open3d as o3d
import matplotlib.pyplot as plt
from data_gen.data_geo_utils import create_sphere_lookat_poses
from tools.O3dVisualizer import O3dVisualizer
from utils.util import * | 2,689 |
sys.path.append(os.getcwd()) # noqa
# from datasets.geo_utils import observe_angle_distance
# from render_tools.render_utils import *
def spheric_pose(theta, phi, radius, height):
trans_t = lambda t: np.array(
[
[1, 0, 0, 0],
[0, 1, 0, -0.9 * t],
[0, 0, 1, t],
[0, 0, 0, 1],
]
)
rot_phi = lambda phi: np.array(
[
[1, 0, 0, 0],
[0, np.cos(phi), -np.sin(phi), 0],
[0, np.sin(phi), np.cos(phi), 0],
[0, 0, 0, 1],
]
)
rot_theta = lambda th: np.array(
[
[np.cos(th), 0, -np.sin(th), 0],
[0, 1, 0, 0],
[np.sin(th), 0, np.cos(th), 0],
[0, 0, 0, 1],
]
)
c2w = rot_theta(theta) @ rot_phi(phi) @ trans_t(radius)
c2w = np.array([[-1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]]) @ c2w
c2w[2, 3] += height
return c2w[:3]
def create_spheric_poses(radius, downward_deg, height, n_poses):
"""
Create circular poses around z axis.
Inputs:
radius: the (negative) height and the radius of the circle.
Outputs:
spheric_poses: (n_poses, 3, 4) the poses in the circular path
"""
spheric_poses = []
for th in np.linspace(0, 2 * np.pi, n_poses + 1)[:-1]:
pose = np.eye(4)
pose[:3, :4] = spheric_pose(th, -(downward_deg * np.pi / 180), radius, height)
fix_rot = np.eye(4)
fix_rot[:3, :3] = np.array([1, 0, 0, 0, 0, 1, 0, -1, 0]).reshape(3, 3)
pose = fix_rot @ pose
spheric_poses += [pose] # 36 degree view downwards
return np.stack(spheric_poses, 0)
def draw_poses(visualizer, poses):
camera_centers = []
lines_pt, lines_idx, lines_color = [], [], []
idx = 0
for frame_id, pose in enumerate(poses):
Twc = pose
# for nerf_synthetic, we need some transformation
fix_rot = np.array([1, 0, 0, 0, -1, 0, 0, 0, -1]).reshape(3, 3)
Twc[:3, :3] = Twc[:3, :3] @ fix_rot
center = Twc[:3, 3]
camera_centers.append(center)
# draw axis
# RGB -> right, down, forward
axis_size = 0.1
# for .T, you can follow https://stackoverflow.com/questions/12148351/
axis_pts = (Twc[:3, :3] @ (np.eye(3) * axis_size)).T + center
lines_pt += [center, axis_pts[0, :], axis_pts[1, :], axis_pts[2, :]]
lines_idx += [
[idx * 4 + 0, idx * 4 + 1],
[idx * 4 + 0, idx * 4 + 2],
[idx * 4 + 0, idx * 4 + 3],
]
lines_color += [(1, 0, 0), (0, 1, 0), (0, 0, 1)]
idx += 1
# draw line via cylinder, which we can control the line thickness
visualizer.add_line_set(lines_pt, lines_idx, colors=lines_color, radius=0.003)
# draw line via LineSet
# line_set = o3d.geometry.LineSet(
# points=o3d.utility.Vector3dVector(np.array(lines_pt)),
# lines=o3d.utility.Vector2iVector(np.array(lines_idx)),
# )
# line_set.colors = o3d.utility.Vector3dVector(lines_color)
# visualizer.add_o3d_geometry(line_set)
camera_centers = np.array(camera_centers)
visualizer.add_np_points(
camera_centers,
color=map_to_color(np.arange(0, len(poses)), cmap="plasma"),
size=0.01,
)
if __name__ == "__main__":
# params
parser = argparse.ArgumentParser()
# data paths
parser.add_argument("--pcd", default=None)
args = parser.parse_args()
|
sys.path.append(os.getcwd()) # noqa
# from datasets.geo_utils import observe_angle_distance
# from render_tools.render_utils import *
def spheric_pose(theta, phi, radius, height):
trans_t = lambda t: np.array(
[
[1, 0, 0, 0],
[0, 1, 0, -0.9 * t],
[0, 0, 1, t],
[0, 0, 0, 1],
]
)
rot_phi = lambda phi: np.array(
[
[1, 0, 0, 0],
[0, np.cos(phi), -np.sin(phi), 0],
[0, np.sin(phi), np.cos(phi), 0],
[0, 0, 0, 1],
]
)
rot_theta = lambda th: np.array(
[
[np.cos(th), 0, -np.sin(th), 0],
[0, 1, 0, 0],
[np.sin(th), 0, np.cos(th), 0],
[0, 0, 0, 1],
]
)
c2w = rot_theta(theta) @ rot_phi(phi) @ trans_t(radius)
c2w = np.array([[-1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]]) @ c2w
c2w[2, 3] += height
return c2w[:3]
def create_spheric_poses(radius, downward_deg, height, n_poses):
"""
Create circular poses around z axis.
Inputs:
radius: the (negative) height and the radius of the circle.
Outputs:
spheric_poses: (n_poses, 3, 4) the poses in the circular path
"""
spheric_poses = []
for th in np.linspace(0, 2 * np.pi, n_poses + 1)[:-1]:
pose = np.eye(4)
pose[:3, :4] = spheric_pose(th, -(downward_deg * np.pi / 180), radius, height)
fix_rot = np.eye(4)
fix_rot[:3, :3] = np.array([1, 0, 0, 0, 0, 1, 0, -1, 0]).reshape(3, 3)
pose = fix_rot @ pose
spheric_poses += [pose] # 36 degree view downwards
return np.stack(spheric_poses, 0)
def draw_poses(visualizer, poses):
camera_centers = []
lines_pt, lines_idx, lines_color = [], [], []
idx = 0
for frame_id, pose in enumerate(poses):
Twc = pose
# for nerf_synthetic, we need some transformation
fix_rot = np.array([1, 0, 0, 0, -1, 0, 0, 0, -1]).reshape(3, 3)
Twc[:3, :3] = Twc[:3, :3] @ fix_rot
center = Twc[:3, 3]
camera_centers.append(center)
# draw axis
# RGB -> right, down, forward
axis_size = 0.1
# for .T, you can follow https://stackoverflow.com/questions/12148351/
axis_pts = (Twc[:3, :3] @ (np.eye(3) * axis_size)).T + center
lines_pt += [center, axis_pts[0, :], axis_pts[1, :], axis_pts[2, :]]
lines_idx += [
[idx * 4 + 0, idx * 4 + 1],
[idx * 4 + 0, idx * 4 + 2],
[idx * 4 + 0, idx * 4 + 3],
]
lines_color += [(1, 0, 0), (0, 1, 0), (0, 0, 1)]
idx += 1
# draw line via cylinder, which we can control the line thickness
visualizer.add_line_set(lines_pt, lines_idx, colors=lines_color, radius=0.003)
# draw line via LineSet
# line_set = o3d.geometry.LineSet(
# points=o3d.utility.Vector3dVector(np.array(lines_pt)),
# lines=o3d.utility.Vector2iVector(np.array(lines_idx)),
# )
# line_set.colors = o3d.utility.Vector3dVector(lines_color)
# visualizer.add_o3d_geometry(line_set)
camera_centers = np.array(camera_centers)
visualizer.add_np_points(
camera_centers,
color=map_to_color(np.arange(0, len(poses)), cmap="plasma"),
size=0.01,
)
if __name__ == "__main__":
# params
parser = argparse.ArgumentParser()
# data paths
parser.add_argument("--pcd", default=None)
args = parser.parse_args()
| visualizer = O3dVisualizer() | 1 | 2023-10-15 08:41:29+00:00 | 4k |
ShramanPramanick/VoLTA | Multimodal_Fine_Grained/maskrcnn_benchmark/modeling/rpn/fcos.py | [
{
"identifier": "make_fcos_loss_evaluator",
"path": "Multimodal_Fine_Grained/maskrcnn_benchmark/modeling/rpn/loss.py",
"snippet": "def make_fcos_loss_evaluator(cfg):\n loss_evaluator = FCOSLossComputation(cfg)\n return loss_evaluator"
},
{
"identifier": "make_center_anchor_generator",
"path": "Multimodal_Fine_Grained/maskrcnn_benchmark/modeling/rpn/anchor_generator.py",
"snippet": "def make_center_anchor_generator(config):\n anchor_sizes = config.MODEL.RPN.ANCHOR_SIZES\n aspect_ratios = config.MODEL.RPN.ASPECT_RATIOS\n anchor_strides = config.MODEL.RPN.ANCHOR_STRIDE\n straddle_thresh = config.MODEL.RPN.STRADDLE_THRESH\n octave = config.MODEL.RPN.OCTAVE\n scales_per_octave = config.MODEL.RPN.SCALES_PER_OCTAVE\n anchor_shift = config.MODEL.RPN.ANCHOR_SHIFT\n use_relative = config.MODEL.RPN.USE_RELATIVE_SIZE\n\n if config.MODEL.RPN.USE_FPN:\n assert len(anchor_strides) == len(anchor_sizes), \"Only support FPN now\"\n new_anchor_sizes = []\n for size in anchor_sizes:\n per_layer_anchor_sizes = []\n for scale_per_octave in range(scales_per_octave):\n octave_scale = octave ** (scale_per_octave / float(scales_per_octave))\n per_layer_anchor_sizes.append(octave_scale * size)\n new_anchor_sizes.append(tuple(per_layer_anchor_sizes))\n else:\n assert len(anchor_strides) == 1, \"Non-FPN should have a single ANCHOR_STRIDE\"\n new_anchor_sizes = anchor_sizes\n\n anchor_generator = CenterAnchorGenerator(\n tuple(new_anchor_sizes), aspect_ratios, anchor_strides, straddle_thresh, anchor_shift, use_relative\n )\n return anchor_generator"
},
{
"identifier": "make_fcos_postprocessor",
"path": "Multimodal_Fine_Grained/maskrcnn_benchmark/modeling/rpn/inference.py",
"snippet": "def make_fcos_postprocessor(config, is_train=False):\n pre_nms_thresh = config.MODEL.FCOS.INFERENCE_TH\n if is_train:\n pre_nms_thresh = config.MODEL.FCOS.INFERENCE_TH_TRAIN\n pre_nms_top_n = config.MODEL.FCOS.PRE_NMS_TOP_N\n fpn_post_nms_top_n = config.MODEL.FCOS.DETECTIONS_PER_IMG\n if is_train:\n pre_nms_top_n = config.MODEL.FCOS.PRE_NMS_TOP_N_TRAIN\n fpn_post_nms_top_n = config.MODEL.FCOS.POST_NMS_TOP_N_TRAIN\n nms_thresh = config.MODEL.FCOS.NMS_TH\n\n box_selector = FCOSPostProcessor(\n pre_nms_thresh=pre_nms_thresh,\n pre_nms_top_n=pre_nms_top_n,\n nms_thresh=nms_thresh,\n fpn_post_nms_top_n=fpn_post_nms_top_n,\n min_size=0,\n num_classes=config.MODEL.FCOS.NUM_CLASSES,\n )\n\n return box_selector"
}
] | import math
import torch
import torch.nn.functional as F
from torch import nn
from maskrcnn_benchmark.modeling import registry
from maskrcnn_benchmark.layers import Scale, DFConv2d
from .loss import make_fcos_loss_evaluator
from .anchor_generator import make_center_anchor_generator
from .inference import make_fcos_postprocessor | 1,809 |
@registry.RPN_HEADS.register("FCOSHead")
class FCOSHead(torch.nn.Module):
def __init__(self, cfg):
super(FCOSHead, self).__init__()
# TODO: Implement the sigmoid version first.
num_classes = cfg.MODEL.FCOS.NUM_CLASSES - 1
in_channels = cfg.MODEL.BACKBONE.OUT_CHANNELS
use_gn = cfg.MODEL.FCOS.USE_GN
use_bn = cfg.MODEL.FCOS.USE_BN
use_dcn_in_tower = cfg.MODEL.FCOS.USE_DFCONV
self.fpn_strides = cfg.MODEL.FCOS.FPN_STRIDES
self.norm_reg_targets = cfg.MODEL.FCOS.NORM_REG_TARGETS
self.centerness_on_reg = cfg.MODEL.FCOS.CENTERNESS_ON_REG
cls_tower = []
bbox_tower = []
for i in range(cfg.MODEL.FCOS.NUM_CONVS):
if use_dcn_in_tower and i == cfg.MODEL.FCOS.NUM_CONVS - 1:
conv_func = DFConv2d
else:
conv_func = nn.Conv2d
cls_tower.append(conv_func(in_channels, in_channels, kernel_size=3, stride=1, padding=1, bias=True))
if use_gn:
cls_tower.append(nn.GroupNorm(32, in_channels))
if use_bn:
cls_tower.append(nn.BatchNorm2d(in_channels))
cls_tower.append(nn.ReLU())
bbox_tower.append(conv_func(in_channels, in_channels, kernel_size=3, stride=1, padding=1, bias=True))
if use_gn:
bbox_tower.append(nn.GroupNorm(32, in_channels))
if use_bn:
bbox_tower.append(nn.BatchNorm2d(in_channels))
bbox_tower.append(nn.ReLU())
self.add_module("cls_tower", nn.Sequential(*cls_tower))
self.add_module("bbox_tower", nn.Sequential(*bbox_tower))
self.cls_logits = nn.Conv2d(in_channels, num_classes, kernel_size=3, stride=1, padding=1)
self.bbox_pred = nn.Conv2d(in_channels, 4, kernel_size=3, stride=1, padding=1)
self.centerness = nn.Conv2d(in_channels, 1, kernel_size=3, stride=1, padding=1)
# initialization
for modules in [self.cls_tower, self.bbox_tower, self.cls_logits, self.bbox_pred, self.centerness]:
for l in modules.modules():
if isinstance(l, nn.Conv2d):
torch.nn.init.normal_(l.weight, std=0.01)
torch.nn.init.constant_(l.bias, 0)
# initialize the bias for focal loss
prior_prob = cfg.MODEL.FCOS.PRIOR_PROB
bias_value = -math.log((1 - prior_prob) / prior_prob)
torch.nn.init.constant_(self.cls_logits.bias, bias_value)
self.scales = nn.ModuleList([Scale(init_value=1.0) for _ in range(5)])
def forward(self, x):
logits = []
bbox_reg = []
centerness = []
for l, feature in enumerate(x):
cls_tower = self.cls_tower(feature)
box_tower = self.bbox_tower(feature)
logits.append(self.cls_logits(cls_tower))
if self.centerness_on_reg:
centerness.append(self.centerness(box_tower))
else:
centerness.append(self.centerness(cls_tower))
bbox_pred = self.scales[l](self.bbox_pred(box_tower))
if self.norm_reg_targets:
bbox_pred = F.relu(bbox_pred)
if self.training:
bbox_reg.append(bbox_pred)
else:
bbox_reg.append(bbox_pred * self.fpn_strides[l])
else:
bbox_reg.append(torch.exp(bbox_pred))
return logits, bbox_reg, centerness
class FCOSModule(torch.nn.Module):
"""
Module for FCOS computation. Takes feature maps from the backbone and
FCOS outputs and losses. Only Test on FPN now.
"""
def __init__(self, cfg):
super(FCOSModule, self).__init__()
head = FCOSHead(cfg)
box_selector_train = make_fcos_postprocessor(cfg, is_train=True)
box_selector_test = make_fcos_postprocessor(cfg, is_train=False)
loss_evaluator = make_fcos_loss_evaluator(cfg)
self.cfg = cfg
self.head = head
self.box_selector_train = box_selector_train
self.box_selector_test = box_selector_test
self.loss_evaluator = loss_evaluator
self.fpn_strides = cfg.MODEL.FCOS.FPN_STRIDES
if not cfg.MODEL.RPN_ONLY:
|
@registry.RPN_HEADS.register("FCOSHead")
class FCOSHead(torch.nn.Module):
def __init__(self, cfg):
super(FCOSHead, self).__init__()
# TODO: Implement the sigmoid version first.
num_classes = cfg.MODEL.FCOS.NUM_CLASSES - 1
in_channels = cfg.MODEL.BACKBONE.OUT_CHANNELS
use_gn = cfg.MODEL.FCOS.USE_GN
use_bn = cfg.MODEL.FCOS.USE_BN
use_dcn_in_tower = cfg.MODEL.FCOS.USE_DFCONV
self.fpn_strides = cfg.MODEL.FCOS.FPN_STRIDES
self.norm_reg_targets = cfg.MODEL.FCOS.NORM_REG_TARGETS
self.centerness_on_reg = cfg.MODEL.FCOS.CENTERNESS_ON_REG
cls_tower = []
bbox_tower = []
for i in range(cfg.MODEL.FCOS.NUM_CONVS):
if use_dcn_in_tower and i == cfg.MODEL.FCOS.NUM_CONVS - 1:
conv_func = DFConv2d
else:
conv_func = nn.Conv2d
cls_tower.append(conv_func(in_channels, in_channels, kernel_size=3, stride=1, padding=1, bias=True))
if use_gn:
cls_tower.append(nn.GroupNorm(32, in_channels))
if use_bn:
cls_tower.append(nn.BatchNorm2d(in_channels))
cls_tower.append(nn.ReLU())
bbox_tower.append(conv_func(in_channels, in_channels, kernel_size=3, stride=1, padding=1, bias=True))
if use_gn:
bbox_tower.append(nn.GroupNorm(32, in_channels))
if use_bn:
bbox_tower.append(nn.BatchNorm2d(in_channels))
bbox_tower.append(nn.ReLU())
self.add_module("cls_tower", nn.Sequential(*cls_tower))
self.add_module("bbox_tower", nn.Sequential(*bbox_tower))
self.cls_logits = nn.Conv2d(in_channels, num_classes, kernel_size=3, stride=1, padding=1)
self.bbox_pred = nn.Conv2d(in_channels, 4, kernel_size=3, stride=1, padding=1)
self.centerness = nn.Conv2d(in_channels, 1, kernel_size=3, stride=1, padding=1)
# initialization
for modules in [self.cls_tower, self.bbox_tower, self.cls_logits, self.bbox_pred, self.centerness]:
for l in modules.modules():
if isinstance(l, nn.Conv2d):
torch.nn.init.normal_(l.weight, std=0.01)
torch.nn.init.constant_(l.bias, 0)
# initialize the bias for focal loss
prior_prob = cfg.MODEL.FCOS.PRIOR_PROB
bias_value = -math.log((1 - prior_prob) / prior_prob)
torch.nn.init.constant_(self.cls_logits.bias, bias_value)
self.scales = nn.ModuleList([Scale(init_value=1.0) for _ in range(5)])
def forward(self, x):
logits = []
bbox_reg = []
centerness = []
for l, feature in enumerate(x):
cls_tower = self.cls_tower(feature)
box_tower = self.bbox_tower(feature)
logits.append(self.cls_logits(cls_tower))
if self.centerness_on_reg:
centerness.append(self.centerness(box_tower))
else:
centerness.append(self.centerness(cls_tower))
bbox_pred = self.scales[l](self.bbox_pred(box_tower))
if self.norm_reg_targets:
bbox_pred = F.relu(bbox_pred)
if self.training:
bbox_reg.append(bbox_pred)
else:
bbox_reg.append(bbox_pred * self.fpn_strides[l])
else:
bbox_reg.append(torch.exp(bbox_pred))
return logits, bbox_reg, centerness
class FCOSModule(torch.nn.Module):
"""
Module for FCOS computation. Takes feature maps from the backbone and
FCOS outputs and losses. Only Test on FPN now.
"""
def __init__(self, cfg):
super(FCOSModule, self).__init__()
head = FCOSHead(cfg)
box_selector_train = make_fcos_postprocessor(cfg, is_train=True)
box_selector_test = make_fcos_postprocessor(cfg, is_train=False)
loss_evaluator = make_fcos_loss_evaluator(cfg)
self.cfg = cfg
self.head = head
self.box_selector_train = box_selector_train
self.box_selector_test = box_selector_test
self.loss_evaluator = loss_evaluator
self.fpn_strides = cfg.MODEL.FCOS.FPN_STRIDES
if not cfg.MODEL.RPN_ONLY: | self.anchor_generator = make_center_anchor_generator(cfg) | 1 | 2023-10-23 04:07:08+00:00 | 4k |
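FCOSHead above multiplies each FPN level's regression output by a learnable per-level Scale before the optional ReLU/exp. A minimal sketch of such a layer, written from its usage here (a single learnable scalar per level), not copied from maskrcnn_benchmark.

import torch
import torch.nn as nn

class Scale(nn.Module):
    def __init__(self, init_value: float = 1.0):
        super().__init__()
        self.scale = nn.Parameter(torch.tensor(init_value, dtype=torch.float32))

    def forward(self, x):
        return x * self.scale  # one scalar rescales the whole level

scales = nn.ModuleList([Scale(init_value=1.0) for _ in range(5)])
bbox_pred = scales[0](torch.randn(1, 4, 8, 8))  # level-0 ltrb offsets, rescaled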
earthcube-lab/textnoisr | scripts/generate_figures.py | [
{
"identifier": "CharNoiseAugmenter",
"path": "textnoisr/noise.py",
"snippet": "class CharNoiseAugmenter:\n r\"\"\"Add noise into text according to a noise level measured between 0 and 1.\n\n It will add noise to a string by modifying each character\n according to a probability and a list of actions.\n Possible actions are `insert`, `swap`, `substitute` and `delete`.\n\n For actions `insert` and `substitute`, new characters are drawn from `character_set`\n which is the set of ascii letters (lower and upper) by default.\n The `swap` action swaps 2 consecutive characters,\n but **one character can not be swapped twice**.\n So if a pair of characters has been swapped,\n we move to the next pair of characters.\n\n With enough samples, the CER of the output tends to the noise level\n (terms and conditions may apply,\n see details in [docs/how_this_works.md](how_this_works.md)).\n\n Args:\n noise_level: between 0 and 1, it corresponds to the level of noise to add to the\n text. In most cases (see details above for caveats),\n the Character Error Rate of the output will converge to this value.\n For `swap` actions, it is impossible to have a CER greater than 0.54214,\n so an exception is raised in this case.\n actions: list of actions to use to add noise. Available actions are *insert*,\n *swap*, *substitute* and *delete*.\n Defaults to `[insert, swap, substitute, delete]`.\n character_set: set of characters from which character will be drawn for\n *insert* or *substitute* actions. Defaults to string.ascii_letters.\n seed: A seed to ensure reproducibility.\n Defaults to `None`.\n natural_language_swap_correction: A correction factor to take into account the\n fact that natural language is not random.\n Defaults to 1.052, which is the correction factor for English.\n\n Raises:\n ValueError: If the action is not one of the available actions.\n \"\"\"\n\n _AVAILABLE_ACTIONS = (\"insert\", \"swap\", \"substitute\", \"delete\")\n\n def __init__(\n self,\n noise_level: float,\n actions: tuple[str, ...] = _AVAILABLE_ACTIONS,\n character_set: tuple[str, ...] = tuple(string.ascii_letters),\n seed: int | None = None,\n natural_language_swap_correction: float = 1.052,\n ) -> None:\n self.actions = [\n x for i, x in enumerate(actions) if x not in actions[:i]\n ] # To avoid using list(set(actions))\n self.character_set = character_set\n self.noise_level = noise_level\n self.random = random.Random(seed) # nosec\n self.natural_language_swap_correction = natural_language_swap_correction\n\n # checks\n unsupported_actions = [\n a for a in self.actions if a not in CharNoiseAugmenter._AVAILABLE_ACTIONS\n ]\n if unsupported_actions:\n raise ValueError(\n f\"You provide unsupported actions: {unsupported_actions}. 
Available\"\n f\" actions are {CharNoiseAugmenter._AVAILABLE_ACTIONS}\"\n )\n if not 0 <= self.noise_level <= 1:\n raise ValueError(\n \"Noise level must be between 0 and 1 (included), you provide\"\n f\" {self.noise_level}\"\n )\n if (\n self.noise_level\n > unbias.MAX_SWAP_LEVEL / self.natural_language_swap_correction\n ) & (\"swap\" in self.actions):\n raise ValueError(\n \"You cannot have a CER greater than\"\n f\" {unbias.MAX_SWAP_LEVEL / self.natural_language_swap_correction} when\"\n \" using action `swap`\"\n )\n\n def _random_success(self, p: float) -> bool:\n \"\"\"Determine whether a random event is successful based on a probability value.\n\n Args:\n p: The probability value for the random event (must be between 0 and 1).\n\n Returns:\n True with probability `p`, False otherwise.\n \"\"\"\n return self.random.random() < p # nosec\n\n def _random_char(self, p: float, character_set: tuple[str, ...]) -> str:\n \"\"\"Return a random character with probability `p`, or an empty string.\n\n Args:\n p: A value between 0 and 1 representing the probability to return a random\n character\n character_set: A character set, for `random.choice()` to choose from.\n\n Returns:\n A random character with probability `p`, or an empty string.\n \"\"\"\n return self._random_success(p) * self.random.choice(character_set) # nosec\n\n def insert_random_chars(self, text: str, p: float) -> str:\n \"\"\"Insert random characters into a string.\n\n For each character in the input string, a random character is inserted after it\n with probability `p`. The random characters are chosen from self.character_set.\n\n Args:\n text: The input string to be modified.\n p: probability to insert random char\n\n Returns:\n A string derived from `text` with random characters potentially inserted\n after each character.\n \"\"\"\n return \"\".join(char + self._random_char(p, self.character_set) for char in text)\n\n def _choose_another_character(self, char):\n other_char = self.random.choice(self.character_set)\n while other_char == char:\n other_char = self.random.choice(self.character_set)\n return other_char\n\n def substitute_random_chars(self, text: str, p: float) -> str:\n \"\"\"Substitute random characters of a string.\n\n Each character of the input string is substituted with another random one with\n probability `p`. The random characters are chosen from the self.character_set.\n\n Args:\n text: The input string to be modified.\n p: probability to substitute a character\n\n Returns:\n A string derived from `text` with potentially substituted characters.\n \"\"\"\n return \"\".join(\n self._choose_another_character(char) if self._random_success(p) else char\n for char in text\n )\n\n def delete_random_chars(self, text: str, p: float) -> str:\n \"\"\"Delete random characters of a string.\n\n Each character of the input string is deleted with another random one with\n probability `p`.\n\n Args:\n text: The input string to be modified.\n p: probability to delete random character\n\n Returns:\n A string derived from `text` with potentially deleted characters.\n \"\"\"\n return \"\".join([\"\" if self._random_success(p) else char for char in text])\n\n def consecutive_swap_random_chars(self, text: str, p: float) -> str:\n \"\"\"Swap random consecutive characters of a string.\n\n Each character of the input string is swapped with the next one with\n a probability linked to `p` (i.e. 
after a unbiasing).\n Notice that a character can only be swapped once.\n\n Args:\n text: The input string to be modified.\n p: probability for a character to be swapped.\n It is modified for the CER of the result to converge to this value.\n\n Returns:\n A string derived from `text` with potentially swapped characters.\n \"\"\"\n p = unbias.unbias_swap(p, len(text), self.natural_language_swap_correction)\n\n result = []\n was_swapped = False\n for current_char, next_char in zip_longest(text, text[1:], fillvalue=\"\"):\n if not was_swapped:\n if self._random_success(p):\n result.extend([next_char, current_char])\n was_swapped = True\n continue\n result.append(current_char)\n was_swapped = False\n return \"\".join(result)\n\n def add_noise(self, text: str | list[str]) -> str | list[str]:\n \"\"\"Add noise to a text. The text can be splitted into words.\n\n Args:\n text: The text on which to add noise.\n\n Returns:\n The text with noise.\n \"\"\"\n if isinstance(text, list):\n p = unbias.unbias_split_into_words(self.noise_level, text)\n else:\n p = self.noise_level\n\n p_effective = unbias.unbias_several_action(p, len(self.actions))\n\n for action in self.actions:\n match action:\n case \"insert\":\n action_function = self.insert_random_chars\n case \"swap\":\n action_function = self.consecutive_swap_random_chars\n case \"substitute\":\n action_function = self.substitute_random_chars\n case \"delete\":\n action_function = self.delete_random_chars\n case _:\n raise ValueError(\n \"Action should be one of\"\n f\" {CharNoiseAugmenter._AVAILABLE_ACTIONS!r}\"\n )\n\n if isinstance(text, list):\n text = [action_function(word, p_effective) for word in text]\n else:\n text = action_function(text, p_effective)\n return text"
},
{
"identifier": "MAX_SWAP_LEVEL",
"path": "textnoisr/noise_unbiasing.py",
"snippet": "MAX_SWAP_LEVEL = 4 - 2 * math.sqrt(3) - sys.float_info.epsilon"
}
] | import argparse
import logging
import sys
import time
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
import numpy as np
import pandas as pd
from pathlib import Path
from datasets import load_dataset
from evaluate import load
from nlpaug.augmenter.char import RandomCharAug
from textnoisr.noise import CharNoiseAugmenter
from textnoisr.noise_unbiasing import MAX_SWAP_LEVEL | 2,552 | """Generate figures for the documentation.
## Pre-requisites
You'll need to install the following packages:
```sh
pip install matplotlib nlpaug
```
If you don't have [Roboto](https://fonts.google.com/specimen/Roboto) installed, the
default font will be used.
## Usage
From the root of the project, run:
```sh
python scripts/generate_figures.py
```
It will download the `rotten_tomatoes` dataset and generate the figures in
`docs/images/`. For more options, see:
```sh
python scripts/generate_figures.py --help
```
"""
logging.basicConfig(level=logging.INFO, stream=sys.stdout)
logger = logging.getLogger(__name__)
cer = load("cer")
ACTIONS = ("delete", "insert", "substitute", "swap")
STYLE_DIR = Path(__file__).parent.parent / "styles"
OUTPUT_DIR = Path(__file__).parent.parent / "docs" / "images"
DEFAULT_DATASET = "rotten_tomatoes"
DEFAULT_SPLIT = "train"
DEFAULT_N_SAMPLES = 17
def max_level(actions):
if "swap" in actions:
| """Generate figures for the documentation.
## Pre-requisites
You'll need to install the following packages:
```sh
pip install matplotlib nlpaug
```
If you don't have [Roboto](https://fonts.google.com/specimen/Roboto) installed, the
default font will be used.
## Usage
From the root of the project, run:
```sh
python scripts/generate_figures.py
```
It will download the `rotten_tomatoes` dataset and generate the figures in
`docs/images/`. For more options, see:
```sh
python scripts/generate_figures.py --help
```
"""
logging.basicConfig(level=logging.INFO, stream=sys.stdout)
logger = logging.getLogger(__name__)
cer = load("cer")
ACTIONS = ("delete", "insert", "substitute", "swap")
STYLE_DIR = Path(__file__).parent.parent / "styles"
OUTPUT_DIR = Path(__file__).parent.parent / "docs" / "images"
DEFAULT_DATASET = "rotten_tomatoes"
DEFAULT_SPLIT = "train"
DEFAULT_N_SAMPLES = 17
def max_level(actions):
if "swap" in actions: | return MAX_SWAP_LEVEL / 1.052 - 0.005 | 1 | 2023-10-18 19:28:34+00:00 | 4k |
WenzhengZhang/Seq2seqCoref | check_align.py | [
{
"identifier": "CorefAllMetrics",
"path": "metrics.py",
"snippet": "class CorefAllMetrics(object):\n \"\"\"\n Wrapper for coreference resolution metrics.\n \"\"\"\n\n @staticmethod\n def _get_mention_to_x(clusters: List[list]) -> dict:\n mention_to_x = {}\n for cluster in clusters:\n for m in cluster:\n mention_to_x[m] = tuple(cluster)\n return mention_to_x\n\n def _compute_mention_detect_metrics(self, gold_clusters: List[list],\n predicted_clusters: List[list]):\n # mention detection evaluation\n mention_evaluator = MentionEvaluator()\n results = {}\n predicted_mentions = list(self._get_mention_to_x(\n predicted_clusters).keys())\n gold_mentions = list(self._get_mention_to_x(gold_clusters).keys())\n mention_evaluator.update(predicted_mentions, gold_mentions)\n mention_precision, mention_recall, mention_f1 = \\\n mention_evaluator.get_prf()\n results['precision'] = mention_precision\n results['recall'] = mention_recall\n results['f1'] = mention_f1\n return results\n\n def _compute_coref_metrics(self, gold_clusters: List[list],\n predicted_clusters: List[list]) \\\n -> Dict[str, Dict[str, float]]:\n \"\"\"\n Compute all coreference metrics given a list of gold cluster and a list of predicted clusters.\n \"\"\"\n mention_to_predicted = self._get_mention_to_x(predicted_clusters)\n mention_to_gold = self._get_mention_to_x(gold_clusters)\n result = {}\n metric_name_evals = [('muc', Evaluator(muc)),\n ('b_cubed', Evaluator(b_cubed)),\n ('ceaf', Evaluator(ceafe))]\n\n for name, evaluator in metric_name_evals:\n evaluator.update(predicted_clusters, gold_clusters,\n mention_to_predicted, mention_to_gold)\n result[name] = {\n 'precision': evaluator.get_precision(),\n 'recall': evaluator.get_recall(),\n 'f1': evaluator.get_f1()\n }\n\n result['average'] = {\n 'precision': sum(\n [result[k]['precision'] for k, _ in metric_name_evals]) / len(\n metric_name_evals),\n 'recall': sum(\n [result[k]['recall'] for k, _ in metric_name_evals]) / len(\n metric_name_evals),\n 'f1': sum([result[k]['f1'] for k, _ in metric_name_evals]) / len(\n metric_name_evals)\n }\n\n return result\n\n @staticmethod\n def _average_nested_dict(\n list_nested_dict: List[Dict[str, Dict[str, float]]]) -> Dict[\n str, Dict[str, float]]:\n \"\"\"\n Given a list of 2-level nested dict, compute the average.\n \"\"\"\n result_dict = {}\n\n # sum up all values\n for outer_dict in list_nested_dict:\n for key_outer, value_outer in outer_dict.items():\n if key_outer not in result_dict:\n result_dict[key_outer] = {}\n for key_inner, value_inner in value_outer.items():\n result_dict[key_outer][key_inner] = result_dict[\n key_outer].get(\n key_inner, 0.0) + value_inner\n\n # take the average\n for key_outer, value_outer in result_dict.items():\n for key_inner, value_inner in value_outer.items():\n result_dict[key_outer][key_inner] = result_dict[key_outer][\n key_inner] / len(\n list_nested_dict)\n\n return result_dict\n\n def get_all_metrics(self, labels: List[List[List[Tuple[int, int]]]],\n preds: List[List[List[Tuple[int, int]]]]) \\\n -> Dict[str, Dict[str, Dict[str, float]]]:\n \"\"\"\n Compute all metrics for coreference resolution.\n In input are given two list of mention groups, for example:\n [ # this is the corpus level, with a list of documents\n [ # this is the document level, with a list of mention clusters\n [ # this is the cluster level, with a list of spans\n (5, 7),\n (11, 19),\n ...\n ],\n ...\n ]\n ]\n \"\"\"\n assert len(labels) == len(preds)\n result = {}\n\n # compute micro-averaged scores (treat all clusters from all docs as a single list of clusters)\n gold_clusters = [\n 
[(i,) + span for span in cluster] for i, clusters in\n enumerate(labels) for cluster in clusters\n ]\n predicted_clusters = [\n [(i,) + span for span in cluster] for i, clusters in\n enumerate(preds) for cluster in clusters\n ]\n coref_ment_results = self._compute_coref_metrics(gold_clusters,\n predicted_clusters)\n ment_results = self._compute_mention_detect_metrics(gold_clusters,\n predicted_clusters)\n coref_ment_results['mention_detect'] = ment_results\n result['micro'] = coref_ment_results\n\n # compute macro-averaged scores (compute p/r/f1 for each doc first, then take average per doc)\n doc_metrics = []\n for gold_clusters, predicted_clusters in zip(labels, preds):\n doc_metrics.append(self._compute_coref_metrics(\n gold_clusters, predicted_clusters\n ))\n result['macro'] = self._average_nested_dict(doc_metrics)\n\n return result"
},
{
"identifier": "get_document_predicts",
"path": "data.py",
"snippet": "class JointDataset(Dataset):\nclass CorefDataset(Dataset):\nclass ConstrainedDataCollator:\n def __init__(self, tokenizer,\n data_args, train_args, split):\n def __len__(self):\n def set_samples(self, epoch):\n def _load_single_data(self, data_dir,\n data_name,\n max_len,\n thred):\n def load_dataset(self):\n def __getitem__(self, index):\n def __init__(self, tokenizer,\n data_args, train_args, split):\n def __len__(self):\n def load_dataset(self):\n def __getitem__(self, index):\ndef get_document_predicts(doc_preds: List[List]) -> List[\ndef normalize_word(word, use_br_dict=False):\ndef parse_int_output_tokens(input_ids, output_ids,\n special_ids, subtoken_map, tokenizer,\n thred, is_tagging):\ndef parse_short_target_tokens(input_ids, output_ids,\n special_ids, subtoken_map, tokenizer,\n align_mode, thred, split_sentence):\ndef parse_nonint_output_tokens(input_ids, output_ids,\n special_ids, subtoken_map,\n tokenizer,\n add_mention_end,\n thred):\n def __call__(self, features, return_tensors=None):"
}
] | import os
import json
import re
import argparse
from collections import defaultdict
from metrics import CorefAllMetrics
from typing import Dict
from data import get_document_predicts, SPECIAL_IDS, parse_short_target_tokens
from transformers import T5Tokenizer
from preprocess import SPEAKER_START, SPEAKER_END, MENTION_START, \
MENTION_END, COPY
from preprocess_mark_sentence import SPECIAL_IDS as MARK_SPECIAL_IDS
from preprocess_mark_sentence import SENTENCE_START, SENTENCE_END | 2,416 |
def load_data(data_dir, tokenizer):
def load_split(split):
max_len = 4096
data_path = os.path.join(
data_dir,
f'{split}.t5-small.english.{max_len}.jsonlines')
samples = []
doc_labels = {}
with open(data_path, 'r') as f:
for line in f:
item = json.loads(line)
doc_key = item['doc_key']
doc_id = re.sub(r'_\d+$', '', doc_key)
target_seq = tokenizer.convert_tokens_to_ids(
item['target_short_sentence'])
sample = {'doc_key': doc_key,
'sentence': tokenizer.convert_tokens_to_ids(
item['sentence']),
'target_seq': target_seq,
'subtoken_map': item['subtoken_map'],
'seg_clusters': [[tuple(m) for m in c] for c in item[
'seg_clusters'] if len(c) >= 2],
'offset': item['offset']
}
doc_labels[doc_id] = [[tuple(m) for m in c] for c in item[
'gold_clusters']]
samples.append(sample)
return samples, doc_labels
samples_dev, dev_labels = load_split('dev')
samples_test, test_labels = load_split('test')
return samples_dev, samples_test, dev_labels, test_labels
def oracle_align(doc_labels, samples, tokenizer, align_mode, mark_sentence) -> \
Dict:
documents_to_chunk_data = defaultdict(list)
documents_to_chunk_gold = defaultdict(list)
predictions = {}
golds = {}
last_doc_id = re.sub(r'_\d+$', '', samples[0]['doc_key'])
for sample in samples:
doc_key = sample['doc_key']
doc_id = re.sub(r'_\d+$', '', doc_key)
        # requires converting tokens to ids first
input_ids = sample['sentence']
subtoken_map = sample['subtoken_map']
offset = sample['offset']
# remove bos
predict_ids = sample['target_seq']
gold_data = sample['seg_clusters']
special_ids = MARK_SPECIAL_IDS if mark_sentence else SPECIAL_IDS
pred_data, aligned_input_ids, aligned_pred_ids = \
parse_short_target_tokens(input_ids, predict_ids,
special_ids, subtoken_map,
tokenizer,
align_mode, 2, mark_sentence)
# list of (m1,m2)
documents_to_chunk_data[doc_id].extend(pred_data)
documents_to_chunk_gold[doc_id].extend(gold_data)
if doc_id != last_doc_id:
predictions[last_doc_id] = get_document_predicts(
documents_to_chunk_data[
last_doc_id])
golds[last_doc_id] = get_document_predicts(
documents_to_chunk_gold[
last_doc_id])
last_doc_id = doc_id
# final one
predictions[last_doc_id] = get_document_predicts(
documents_to_chunk_data[last_doc_id]
)
golds[last_doc_id] = get_document_predicts(
documents_to_chunk_gold[last_doc_id]
)
# print(predictions)
predictions_list = []
labels_list = []
golds_list = []
for document_id, doc_label in doc_labels.items():
predictions_list.append(predictions[document_id])
labels_list.append(doc_label)
golds_list.append(golds[document_id])
|
def load_data(data_dir, tokenizer):
def load_split(split):
max_len = 4096
data_path = os.path.join(
data_dir,
f'{split}.t5-small.english.{max_len}.jsonlines')
samples = []
doc_labels = {}
with open(data_path, 'r') as f:
for line in f:
item = json.loads(line)
doc_key = item['doc_key']
doc_id = re.sub(r'_\d+$', '', doc_key)
target_seq = tokenizer.convert_tokens_to_ids(
item['target_short_sentence'])
sample = {'doc_key': doc_key,
'sentence': tokenizer.convert_tokens_to_ids(
item['sentence']),
'target_seq': target_seq,
'subtoken_map': item['subtoken_map'],
'seg_clusters': [[tuple(m) for m in c] for c in item[
'seg_clusters'] if len(c) >= 2],
'offset': item['offset']
}
doc_labels[doc_id] = [[tuple(m) for m in c] for c in item[
'gold_clusters']]
samples.append(sample)
return samples, doc_labels
samples_dev, dev_labels = load_split('dev')
samples_test, test_labels = load_split('test')
return samples_dev, samples_test, dev_labels, test_labels
def oracle_align(doc_labels, samples, tokenizer, align_mode, mark_sentence) -> \
Dict:
documents_to_chunk_data = defaultdict(list)
documents_to_chunk_gold = defaultdict(list)
predictions = {}
golds = {}
last_doc_id = re.sub(r'_\d+$', '', samples[0]['doc_key'])
for sample in samples:
doc_key = sample['doc_key']
doc_id = re.sub(r'_\d+$', '', doc_key)
        # requires converting tokens to ids first
input_ids = sample['sentence']
subtoken_map = sample['subtoken_map']
offset = sample['offset']
# remove bos
predict_ids = sample['target_seq']
gold_data = sample['seg_clusters']
special_ids = MARK_SPECIAL_IDS if mark_sentence else SPECIAL_IDS
pred_data, aligned_input_ids, aligned_pred_ids = \
parse_short_target_tokens(input_ids, predict_ids,
special_ids, subtoken_map,
tokenizer,
align_mode, 2, mark_sentence)
# list of (m1,m2)
documents_to_chunk_data[doc_id].extend(pred_data)
documents_to_chunk_gold[doc_id].extend(gold_data)
if doc_id != last_doc_id:
predictions[last_doc_id] = get_document_predicts(
documents_to_chunk_data[
last_doc_id])
golds[last_doc_id] = get_document_predicts(
documents_to_chunk_gold[
last_doc_id])
last_doc_id = doc_id
# final one
predictions[last_doc_id] = get_document_predicts(
documents_to_chunk_data[last_doc_id]
)
golds[last_doc_id] = get_document_predicts(
documents_to_chunk_gold[last_doc_id]
)
# print(predictions)
predictions_list = []
labels_list = []
golds_list = []
for document_id, doc_label in doc_labels.items():
predictions_list.append(predictions[document_id])
labels_list.append(doc_label)
golds_list.append(golds[document_id])
| metrics = CorefAllMetrics().get_all_metrics(labels_list, | 0 | 2023-10-17 17:39:16+00:00 | 4k |
oven-lab/tuya_cloud_map_extractor | custom_components/tuya_cloud_map_extractor/config_flow.py | [
{
"identifier": "get_map",
"path": "custom_components/tuya_cloud_map_extractor/tuya_vacuum_map_extractor/main.py",
"snippet": "def get_map(\n server: str, client_id: str, secret_key: str, device_id: str, colors={}, settings={}, urls={}\n) -> Image:\n \"\"\"Downloads and parses vacuum map from tuya cloud.\"\"\"\n render_path = settings[\"path_enabled\"]\n last = settings[\"last\"]\n if urls != {}:\n time = datetime.strptime(urls[\"time\"], \"%H:%M:%S\")\n now = datetime.now().strftime(\"%H:%M:%S\")\n now = datetime.strptime(now, \"%H:%M:%S\")\n delta = now-time\n minutes_delta = math.ceil(delta.total_seconds() / 60)\n if minutes_delta < 59:\n link = {}\n link[\"result\"] = urls[\"links\"]\n else:\n link = get_download_link(server, client_id, secret_key, device_id)\n else:\n link = get_download_link(server, client_id, secret_key, device_id)\n\n try:\n map_link = link[\"result\"][0][\"map_url\"]\n response = download(map_link)\n except Exception as e:\n _LOGGER.error(\"Encountered an error, please include the following data in your github issue: \" + str(base64.b64encode(json.dumps(link).encode())))\n raise e\n\n if response.status_code != 200:\n _LOGGER.warning(\"Got \" + str(response.status_code) + \" from server while downloading map.\")\n\n _LOGGER.debug(\n \"Response: \"\n + str(response.status_code)\n + str(base64.b64encode(response.content))\n + str(base64.b64encode(bytes(str(link), \"utf-8\")))\n )\n\n try:\n header, mapDataArr = parse_map(response)\n image = render_layout(raw_map=mapDataArr, header=header, colors=colors)\n except Exception as e:\n _LOGGER.error(\n \"Unsupported data type. Include the following data in a github issue to request the data format to be added: \"\n + str(response.status_code)\n + str(base64.b64encode(response.content))\n + str(base64.b64encode(bytes(str(link), \"utf-8\")))\n + \" Thank you!\"\n )\n raise e\n\n if urls == {}:\n header[\"urls\"] = {\n \"links\": link[\"result\"],\n \"time\": datetime.now().strftime(\"%H:%M:%S\"),\n }\n else:\n header[\"urls\"] = urls\n\n if render_path:\n _LOGGER.debug(\"Rendering path\")\n try:\n path_link = link[\"result\"][1][\"map_url\"]\n except:\n _LOGGER.error(\"Your vacuum doesn't return a path\")\n return flip(header, image, settings)\n\n if \"path_color\" not in colors:\n colors[\"path_color\"] = [0, 255, 0]\n \n scale = int(1080/image.size[0])\n image = image.resize((image.size[0]*scale, image.size[1]*scale), resample=Image.BOX)\n response = download(path_link)\n if response.status_code != 200:\n _LOGGER.warning(\"Got \" + str(response.status_code) + \" from server while downloading path.\")\n raise FileNotFoundError\n \n _LOGGER.debug(\n \"Response path: \"\n + str(response.status_code)\n + str(base64.b64encode(response.content))\n )\n\n try:\n path = parse_path(response, scale=scale, header=header)\n except Exception as e:\n _LOGGER.error(\"Failed to parse path: \" + str(base64.b64encode(response.content)))\n raise e\n \n draw = ImageDraw.Draw(image, 'RGBA')\n draw.line(path, fill=tuple(colors[\"path_color\"]), width=2)\n\n x, y = header[\"pileX\"], header[\"pileY\"]\n if header[\"version\"] in [[0], [1]]:\n point = _format_path_point({'x': x, 'y': y}, False)\n elif header[\"version\"] == \"custom0\":\n point = map_to_image([x, y], header[\"mapResolution\"], header[\"x_min\"], header[\"y_min\"])\n\n x = point[0]*scale\n y = point[1]*scale\n \n draw.ellipse([(x-10, y-10), (x+10, y+10)], outline=(255, 255, 255), fill=(0, 255, 0), width=2)\n\n if last:\n x, y = path[-2], path[-1]\n else:\n x, y = path[0], path[1]\n\n draw.ellipse([(x-7, y-7), (x+7, y+7)], outline=(255, 255, 255), fill=(0, 0, 255), width=2)\n\n if \"area\" in 
header and header[\"version\"] == \"custom0\":\n for area in header[\"area\"]:\n coords = []\n for i in area[\"vertexs\"]:\n coords.append(i[0]*scale)\n coords.append(i[1]*scale)\n if area[\"type\"] == \"forbid\":\n draw.polygon(coords, outline=(255,255,255), width=1, fill=(255, 210, 0, 128))\n else:\n draw.polygon(coords, outline=(255,255,255), width=1, fill=(255, 255, 255, 64))\n\n return flip(header, image, settings)\n \n return flip(header, image, settings)"
},
{
"identifier": "ClientIDError",
"path": "custom_components/tuya_cloud_map_extractor/tuya_vacuum_map_extractor/const.py",
"snippet": "class ClientIDError(Exception):\n pass"
},
{
"identifier": "ClientSecretError",
"path": "custom_components/tuya_cloud_map_extractor/tuya_vacuum_map_extractor/const.py",
"snippet": "class ClientSecretError(Exception):\n pass"
},
{
"identifier": "DeviceIDError",
"path": "custom_components/tuya_cloud_map_extractor/tuya_vacuum_map_extractor/const.py",
"snippet": "class DeviceIDError(Exception):\n pass"
},
{
"identifier": "ServerError",
"path": "custom_components/tuya_cloud_map_extractor/tuya_vacuum_map_extractor/const.py",
"snippet": "class ServerError(Exception):\n pass"
}
] | import logging
import voluptuous as vol
from typing import Any
from .tuya_vacuum_map_extractor import (
get_map,
ClientIDError,
ClientSecretError,
DeviceIDError,
ServerError,
)
from homeassistant import config_entries
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.selector import selector
from homeassistant.const import (
CONF_NAME,
CONF_CLIENT_ID,
CONF_CLIENT_SECRET,
CONF_DEVICE_ID,
)
from .const import * | 1,925 | from __future__ import annotations
CONF_SERVERS = {
CONF_SERVER_CHINA: "China",
CONF_SERVER_WEST_AMERICA: "Western America",
CONF_SERVER_EAST_AMERICA: "Eastern America",
CONF_SERVER_CENTRAL_EUROPE: "Central Europe",
CONF_SERVER_WEST_EUROPE: "Western Europe",
CONF_SERVER_INDIA: "India"
}
_LOGGER = logging.getLogger(__name__)
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
VERSION = 3
def __init__(self) -> None:
super().__init__()
self.map_header = {}
self._config_data = {}
async def async_step_user(self, user_input=None):
default_server = CONF_SERVER_CENTRAL_EUROPE
default_name = "Vacuum map"
default_client_id = ""
default_client_secret = ""
default_device_id = ""
errors = {}
if user_input is not None:
try:
headers, image = await validate(self.hass, user_input)
self.map_header = headers
if user_input[CONF_COLORS]:
del user_input[CONF_COLORS]
self._config_data.update(user_input)
return await self.async_step_colorconf()
del user_input[CONF_COLORS]
self._config_data.update(user_input)
data = create_entry_data(
self._config_data.copy(), self.map_header.copy()
)
return self.async_create_entry(title=data.pop(CONF_NAME), data=data)
except ClientIDError:
errors[CONF_CLIENT_ID] = "client_id"
except ClientSecretError:
errors[CONF_CLIENT_SECRET] = "client_secret"
except DeviceIDError:
errors[CONF_DEVICE_ID] = "device_id"
| from __future__ import annotations
CONF_SERVERS = {
CONF_SERVER_CHINA: "China",
CONF_SERVER_WEST_AMERICA: "Western America",
CONF_SERVER_EAST_AMERICA: "Eastern America",
CONF_SERVER_CENTRAL_EUROPE: "Central Europe",
CONF_SERVER_WEST_EUROPE: "Western Europe",
CONF_SERVER_INDIA: "India"
}
_LOGGER = logging.getLogger(__name__)
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
VERSION = 3
def __init__(self) -> None:
super().__init__()
self.map_header = {}
self._config_data = {}
async def async_step_user(self, user_input=None):
default_server = CONF_SERVER_CENTRAL_EUROPE
default_name = "Vacuum map"
default_client_id = ""
default_client_secret = ""
default_device_id = ""
errors = {}
if user_input is not None:
try:
headers, image = await validate(self.hass, user_input)
self.map_header = headers
if user_input[CONF_COLORS]:
del user_input[CONF_COLORS]
self._config_data.update(user_input)
return await self.async_step_colorconf()
del user_input[CONF_COLORS]
self._config_data.update(user_input)
data = create_entry_data(
self._config_data.copy(), self.map_header.copy()
)
return self.async_create_entry(title=data.pop(CONF_NAME), data=data)
except ClientIDError:
errors[CONF_CLIENT_ID] = "client_id"
except ClientSecretError:
errors[CONF_CLIENT_SECRET] = "client_secret"
except DeviceIDError:
errors[CONF_DEVICE_ID] = "device_id" | except ServerError: | 4 | 2023-10-22 10:48:25+00:00 | 4k |
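This row ends inside async_step_user's exception chain; the validate helper it awaits is not shown. A hedged sketch of what such a helper could look like: get_map performs blocking HTTP calls, so it is dispatched to Home Assistant's executor. The CONF_SERVER key and the settings defaults are illustrative assumptions, not confirmed by the source.

async def validate(hass: HomeAssistant, user_input: dict):
    # run the blocking map download off the event loop
    headers, image = await hass.async_add_executor_job(
        lambda: get_map(
            user_input[CONF_SERVER],                  # assumed config key
            user_input[CONF_CLIENT_ID],
            user_input[CONF_CLIENT_SECRET],
            user_input[CONF_DEVICE_ID],
            {},                                       # colors: library defaults
            {"path_enabled": False, "last": False},   # minimal settings
        )
    )
    return headers, image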
mlbio-epfl/hume | hume.py | [
{
"identifier": "parse_args",
"path": "argparser.py",
"snippet": "def parse_args(args):\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--phi1_path', \n type=str,\n required=True,\n help=\"Path to the embeddings in first representation space\")\n\n parser.add_argument('--phi2_path',\n type=str,\n required=True,\n help=\"Path to the embeddings in second representation space\")\n\n parser.add_argument('--phi1_path_val',\n type=str,\n help=\"Path to the embeddings in first representation space to compute metrics.\"\n \" If not provided phi1_path will be also used for evaluation.\")\n\n parser.add_argument('--phi2_path_val',\n type=str,\n help=\"Path to the embeddings in second representation space to compute metrics.\"\n \" If not provided phi2_path will be also used for evaluation.\")\n\n parser.add_argument('--gt_labels_path',\n type=str,\n required=True,\n help=\"Path to ground truth labeling to compute metrics\")\n\n parser.add_argument('--k',\n type=int,\n default=10,\n help=\"Number of classes\")\n\n parser.add_argument('--inner_lr',\n type=float,\n default=0.001,\n help=\"Step size for the inner optimization\")\n\n parser.add_argument('--outer_lr',\n type=float,\n default=0.001,\n help=\"Step size for the task encoder's updates\")\n\n parser.add_argument('--tau',\n type=float,\n default=0.1,\n help=\"Temperature hyperparameter\")\n\n parser.add_argument('--H_reg',\n type=float,\n default=10.,\n help=\"Entropy regularization coefficient\")\n\n parser.add_argument('--num_iters',\n type=int,\n default=1000,\n help=\"Number of training iterations\")\n\n parser.add_argument('--adaptation_steps',\n type=int,\n default=300,\n help=\"Number of inner iterations to fit linear model\")\n\n parser.add_argument('--num_subsets',\n type=int,\n default=20,\n help=\"Number of (Xtr, Xte) subsets for averaging HUME's loss\")\n\n parser.add_argument('--subset_size',\n type=int,\n default=10000,\n help=\"Size of union of each (Xtr, Xte) subset\")\n\n parser.add_argument('--train_fraction',\n type=float,\n default=0.9,\n help=\"Fraction of args.subset_size to define size of Xtr\")\n\n parser.add_argument('--no_anneal',\n dest='anneal',\n action='store_false',\n help=\"Turn off temperature and learning rate annealing\")\n\n parser.add_argument('--no_rand_init',\n dest='rand_init',\n action='store_false',\n help=\"Start from random inner w0 at each outer iter or generate random w0 once\")\n\n parser.add_argument('--device',\n type=str,\n default=\"cuda\",\n help=\"Use cuda or cpu\")\n\n parser.add_argument('--exp_path',\n type=str,\n default=\"./linear_tasks/\",\n help=\"Path to save experiment's results\")\n\n parser.add_argument('--save_all',\n action='store_true',\n help=\"If used then task_encoder is saved at each iteration\")\n\n parser.add_argument('--seed',\n type=int,\n default=42,\n help='Random seed')\n \n return parser.parse_args(args)"
},
{
"identifier": "Sparsemax",
"path": "activations.py",
"snippet": "class Sparsemax(torch.nn.Module):\n\n def __init__(self, dim=0):\n self.dim = dim\n super(Sparsemax, self).__init__()\n\n def forward(self, input):\n return sparsemax(input, self.dim)"
},
{
"identifier": "fix_seed",
"path": "utils.py",
"snippet": "def fix_seed(seed):\n torch.manual_seed(seed)\n random.seed(seed)\n np.random.seed(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False"
},
{
"identifier": "get_cv_score",
"path": "utils.py",
"snippet": "def get_cv_score(X, y):\n cv = KFold(n_splits=10, random_state=1, shuffle=True)\n clf = LogisticRegression(penalty=None)\n scores = cross_val_score(clf, X, y, scoring='accuracy', cv=cv, n_jobs=-1)\n return np.mean(scores)"
},
{
"identifier": "check_both_none_or_not_none",
"path": "utils.py",
"snippet": "def check_both_none_or_not_none(arg1, arg2):\n return (arg1 is None and arg2 is None) or (arg1 is not None and arg2 is not None)"
},
{
"identifier": "cluster_acc",
"path": "metrics.py",
"snippet": "def cluster_acc(y_pred, y_true, return_matched=False):\n \"\"\"\n Calculate clustering accuracy. Require scipy installed\n # Arguments\n y: true labels, numpy.array with shape `(n_samples,)`\n y_pred: predicted labels, numpy.array with shape `(n_samples,)`\n # Return\n accuracy, in [0,1]\n \"\"\"\n y_true = y_true.astype(np.int64)\n assert y_pred.size == y_true.size\n D = max(y_pred.max(), y_true.max()) + 1\n w = np.zeros((D, D), dtype=np.int64)\n for i in range(y_pred.size):\n w[y_pred[i], y_true[i]] += 1\n row_ind, col_ind = linear_sum_assignment(w.max() - w)\n\n if return_matched:\n matched = np.array(list(map(lambda i: col_ind[i], y_pred)))\n return w[row_ind, col_ind].sum() / y_pred.size, matched\n else:\n return w[row_ind, col_ind].sum() / y_pred.size"
},
{
"identifier": "cluster_ari",
"path": "metrics.py",
"snippet": "def cluster_ari(y_pred, y_true):\n \"\"\"\n Calculate adjusted rand index. Require scikit-learn installed\n # Arguments\n y: true labels, numpy.array with shape `(n_samples,)`\n y_pred: predicted labels, numpy.array with shape `(n_samples,)`\n # Return\n ARI, in [0,1]\n \"\"\"\n return adjusted_rand_score(y_true, y_pred)"
}
] | import os
import pickle
import torch
import torch.nn as nn
import torch.nn.functional as F
import learn2learn as l2l
import numpy as np
from tqdm import tqdm
from argparser import parse_args
from activations import Sparsemax
from utils import fix_seed, get_cv_score, check_both_none_or_not_none
from metrics import cluster_acc, cluster_ari | 2,848 | else:
phi1_val = np.copy(phi1)
phi2_val = np.copy(phi2)
y_true_val = np.load(args.gt_labels_path)
assert phi1.shape[0] == phi2.shape[0]
assert phi1_val.shape[0] == phi2_val.shape[0]
assert phi1_val.shape[0] == y_true_val.shape[0]
n_train = phi1.shape[0]
d1, d2 = phi2.shape[1], phi1.shape[1]
subset_size = min(n_train, args.subset_size)
# Instantiate linear layer for the inner optimization (Equation 5)
inner_linear = nn.Linear(d1, args.k, bias=True).to(device)
inner_linear = l2l.algorithms.MAML(inner_linear, lr=args.inner_lr)
# Instantiate task encoder with orthogonal weights parametrization (Equation 3)
task_encoder = nn.Linear(d2, args.k, bias=False).to(device)
task_encoder = nn.utils.parametrizations.orthogonal(task_encoder)
all_parameters = list(task_encoder.parameters())
optimizer = torch.optim.Adam(all_parameters, lr=args.outer_lr)
scheduler = torch.optim.lr_scheduler.MultiStepLR(
optimizer, milestones=[100, 200],
gamma=0.1 if args.anneal else 1.0
)
old_lr = args.outer_lr
tau = args.tau
sparsemax_act = Sparsemax(dim=1)
for i in tqdm(range(args.num_iters)):
optimizer.zero_grad()
mean_train_error = 0.0
mean_valid_error = 0.0
mean_valid_acc = 0.0
mean_train_acc = 0.0
mean_label_dist = 0.0
mean_sparsity = 0.0
for j in range(args.num_subsets):
# Sample X_tr and X_te
subset = np.random.choice(n_train, size=subset_size, replace=False)
subset_tr = subset[:int(subset_size * args.train_fraction)]
subset_te = subset[int(subset_size * args.train_fraction):]
phi1_tr = torch.from_numpy(phi1[subset_tr]).to(device)
phi1_te = torch.from_numpy(phi1[subset_te]).to(device)
phi2_tr = torch.from_numpy(phi2[subset_tr]).to(device)
phi2_te = torch.from_numpy(phi2[subset_te]).to(device)
# Get labels using current task encoder
task_labels_tr = sparsemax_act(task_encoder(phi1_tr) / tau)
task_labels_te = sparsemax_act(task_encoder(phi1_te) / tau)
task_labels_all = torch.cat((task_labels_tr, task_labels_te))
"""
Perform inner optimization from the random initialization or
from fixed w0 (corresponds to Cold Start BLO for Equation 5)
"""
if args.rand_init:
inner_linear.reset_parameters()
learner = inner_linear.clone()
for step in range(args.adaptation_steps):
train_error = F.cross_entropy(learner(phi2_tr), task_labels_tr)
learner.adapt(train_error)
# Compute HUME's objective (Equation 7)
label_dist = task_labels_all.mean(0)
entr = torch.special.entr(label_dist)
valid_error = F.cross_entropy(learner(phi2_te), task_labels_te)
# Accumulate gradients across args.num_subsets
(valid_error - args.H_reg * entr.sum()).backward()
# Compute training stats
mean_train_error += train_error.item()
mean_train_acc += torch.eq(
learner(phi2_tr).argmax(1),
task_labels_tr.argmax(1)
).float().mean().item()
mean_valid_error += valid_error.item()
mean_valid_acc += torch.eq(
learner(phi2_te).argmax(1),
task_labels_te.argmax(1)
).float().mean().item()
mean_label_dist += label_dist.detach().cpu().numpy()
mean_sparsity += task_labels_all[torch.arange(task_labels_all.shape[0]),
task_labels_all.argmax(1)].mean().item()
# Average gradients over args.num_subsets and update the task encoder parameters
for p in all_parameters:
p.grad.data.mul_(1.0 / args.num_subsets)
print(f"Grad norm: {torch.norm(p.grad.data).item()}")
nn.utils.clip_grad_norm_(task_encoder.parameters(), 1.0)
optimizer.step()
scheduler.step()
# Anneal step size and temperature
if scheduler.get_last_lr()[0] != old_lr:
print("Annealed Learning rate")
old_lr = scheduler.get_last_lr()[0]
print("Annealed Temperature")
tau = tau / 10
print()
# Print train stats
print("Train stats:")
print(f"Mean TrainError {mean_train_error / args.num_subsets}")
print(f"Mean ValidError {mean_valid_error / args.num_subsets}")
print(f"Mean TrainAcc {mean_train_acc / args.num_subsets}")
print(f"Mean ValidAcc {mean_valid_acc / args.num_subsets}")
print(f"Mean Sparsity {mean_sparsity / args.num_subsets}")
print("Mean Label Dist:", mean_label_dist / args.num_subsets)
print()
# Print val stats
out_all_val = task_encoder(torch.from_numpy(phi1_val).to(device))
preds_all_val = torch.argmax(out_all_val, dim=1).detach().cpu().numpy()
print("Val metrics:")
print("Num found clusters:", len(np.unique(preds_all_val)))
|
def run(args=None):
args = parse_args(args)
device = torch.device(args.device)
fix_seed(args.seed)
if not os.path.exists(args.exp_path):
os.makedirs(args.exp_path)
phi1 = np.load(args.phi1_path).astype(np.float32)
phi2 = np.load(args.phi2_path).astype(np.float32)
assert check_both_none_or_not_none(args.phi1_path_val, args.phi2_path_val)
if args.phi1_path_val is not None:
phi1_val = np.load(args.phi1_path_val).astype(np.float32)
phi2_val = np.load(args.phi2_path_val).astype(np.float32)
else:
phi1_val = np.copy(phi1)
phi2_val = np.copy(phi2)
y_true_val = np.load(args.gt_labels_path)
assert phi1.shape[0] == phi2.shape[0]
assert phi1_val.shape[0] == phi2_val.shape[0]
assert phi1_val.shape[0] == y_true_val.shape[0]
n_train = phi1.shape[0]
d1, d2 = phi2.shape[1], phi1.shape[1]
subset_size = min(n_train, args.subset_size)
# Instantiate linear layer for the inner optimization (Equation 5)
inner_linear = nn.Linear(d1, args.k, bias=True).to(device)
inner_linear = l2l.algorithms.MAML(inner_linear, lr=args.inner_lr)
# Instantiate task encoder with orthogonal weights parametrization (Equation 3)
task_encoder = nn.Linear(d2, args.k, bias=False).to(device)
task_encoder = nn.utils.parametrizations.orthogonal(task_encoder)
all_parameters = list(task_encoder.parameters())
optimizer = torch.optim.Adam(all_parameters, lr=args.outer_lr)
scheduler = torch.optim.lr_scheduler.MultiStepLR(
optimizer, milestones=[100, 200],
gamma=0.1 if args.anneal else 1.0
)
old_lr = args.outer_lr
tau = args.tau
sparsemax_act = Sparsemax(dim=1)
for i in tqdm(range(args.num_iters)):
optimizer.zero_grad()
mean_train_error = 0.0
mean_valid_error = 0.0
mean_valid_acc = 0.0
mean_train_acc = 0.0
mean_label_dist = 0.0
mean_sparsity = 0.0
for j in range(args.num_subsets):
# Sample X_tr and X_te
subset = np.random.choice(n_train, size=subset_size, replace=False)
subset_tr = subset[:int(subset_size * args.train_fraction)]
subset_te = subset[int(subset_size * args.train_fraction):]
phi1_tr = torch.from_numpy(phi1[subset_tr]).to(device)
phi1_te = torch.from_numpy(phi1[subset_te]).to(device)
phi2_tr = torch.from_numpy(phi2[subset_tr]).to(device)
phi2_te = torch.from_numpy(phi2[subset_te]).to(device)
# Get labels using current task encoder
task_labels_tr = sparsemax_act(task_encoder(phi1_tr) / tau)
task_labels_te = sparsemax_act(task_encoder(phi1_te) / tau)
task_labels_all = torch.cat((task_labels_tr, task_labels_te))
"""
Perform inner optimization from the random initialization or
from fixed w0 (corresponds to Cold Start BLO for Equation 5)
"""
if args.rand_init:
inner_linear.reset_parameters()
learner = inner_linear.clone()
for step in range(args.adaptation_steps):
train_error = F.cross_entropy(learner(phi2_tr), task_labels_tr)
learner.adapt(train_error)
# Compute HUME's objective (Equation 7)
label_dist = task_labels_all.mean(0)
entr = torch.special.entr(label_dist)
valid_error = F.cross_entropy(learner(phi2_te), task_labels_te)
# Accumulate gradients across args.num_subsets
(valid_error - args.H_reg * entr.sum()).backward()
# Compute training stats
mean_train_error += train_error.item()
mean_train_acc += torch.eq(
learner(phi2_tr).argmax(1),
task_labels_tr.argmax(1)
).float().mean().item()
mean_valid_error += valid_error.item()
mean_valid_acc += torch.eq(
learner(phi2_te).argmax(1),
task_labels_te.argmax(1)
).float().mean().item()
mean_label_dist += label_dist.detach().cpu().numpy()
mean_sparsity += task_labels_all[torch.arange(task_labels_all.shape[0]),
task_labels_all.argmax(1)].mean().item()
# Average gradients over args.num_subsets and update the task encoder parameters
for p in all_parameters:
p.grad.data.mul_(1.0 / args.num_subsets)
print(f"Grad norm: {torch.norm(p.grad.data).item()}")
nn.utils.clip_grad_norm_(task_encoder.parameters(), 1.0)
optimizer.step()
scheduler.step()
# Anneal step size and temperature
if scheduler.get_last_lr()[0] != old_lr:
print("Annealed Learning rate")
old_lr = scheduler.get_last_lr()[0]
print("Annealed Temperature")
tau = tau / 10
print()
# Print train stats
print("Train stats:")
print(f"Mean TrainError {mean_train_error / args.num_subsets}")
print(f"Mean ValidError {mean_valid_error / args.num_subsets}")
print(f"Mean TrainAcc {mean_train_acc / args.num_subsets}")
print(f"Mean ValidAcc {mean_valid_acc / args.num_subsets}")
print(f"Mean Sparsity {mean_sparsity / args.num_subsets}")
print("Mean Label Dist:", mean_label_dist / args.num_subsets)
print()
# Print val stats
out_all_val = task_encoder(torch.from_numpy(phi1_val).to(device))
preds_all_val = torch.argmax(out_all_val, dim=1).detach().cpu().numpy()
print("Val metrics:")
print("Num found clusters:", len(np.unique(preds_all_val))) | print(f"Cluster ACC epoch {i}:", cluster_acc(preds_all_val, y_true_val)) | 5 | 2023-10-20 15:32:06+00:00 | 4k |
lwaekfjlk/TRAMS | utils/src.py | [
{
"identifier": "TransfoXLLMHeadModel",
"path": "utils/modeling_transfo_xl.py",
"snippet": "_CHECKPOINT_FOR_DOC = \"transfo-xl-wt103\"\n_CONFIG_FOR_DOC = \"TransfoXLConfig\"\n_TOKENIZER_FOR_DOC = \"TransfoXLTokenizer\"\nTRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"transfo-xl-wt103\",\n # See all Transformer XL models at https://huggingface.co/models?filter=transfo-xl\n]\n AC = torch.einsum(\"ibnd,jbnd->ijbn\", (rw_head_q, w_head_k)) # qlen x klen x bsz x n_head\n Q, K, V = torch.chunk(self.qkv_net.weight, 3, dim=0)\n AC = torch.einsum(\"ibk, jbnk->ijbn\", (w, QKk)) + torch.einsum(\"nd,jbnd->jbn\", (self.r_w_bias, w_head_k)).unsqueeze(0)\n BD = torch.einsum(\"ibnd,jnd->ijbn\", (rr_head_q, r_head_k)) # qlen x klen x bsz x n_head\n BD = self._rel_shift(BD)\nTRANSFO_XL_START_DOCSTRING = r\"\"\"\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n Parameters:\n config ([`TransfoXLConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n\"\"\"\nTRANSFO_XL_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n Indices can be obtained using [`TransfoXLTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n [What are input IDs?](../glossary#input-ids)\n mems (`List[torch.FloatTensor]` of length `config.n_layers`):\n Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see\n `mems` output below). Can be used to speed up sequential decoding. The token ids which have their mems\n given to this model should not be passed as `input_ids` as they have already been computed.\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n\"\"\"\ndef build_tf_to_pytorch_map(model, config):\ndef load_tf_weights_in_transfo_xl(model, config, tf_path):\n def __init__(self, demb):\n def forward(self, pos_seq, bsz=None):\n def __init__(self, d_model, d_inner, dropout, pre_lnorm=False, layer_norm_epsilon=1e-5):\n def forward(self, inp):\n def __init__(\n self,\n n_head,\n d_model,\n d_head,\n dropout,\n dropatt=0,\n pre_lnorm=False,\n r_r_bias=None,\n r_w_bias=None,\n layer_norm_epsilon=1e-5,\n ):\n def _rel_shift(self, x):\n def trams(\n self, \n QKk, \n w_head_k, \n w_head_v, \n r_head_k, \n attn_mask, \n topk_num=None, \n remain_mem_num=None\n ):\n def forward(self, w, r, attn_mask=None, mems=None, head_mask=None, output_attentions=False, topk_num=None, remain_mem_num=None):\n def __init__(self, n_head, d_model, d_head, d_inner, dropout, layer_norm_epsilon=1e-5, **kwargs):\n def forward(self, dec_inp, r, dec_attn_mask=None, mems=None, head_mask=None, output_attentions=False, topk_num=None, remain_mem_num=None):\n def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, sample_softmax=False):\n def forward(self, inp):\n def _init_weight(self, weight):\n def _init_bias(self, bias):\n def _init_weights(self, m):\n def resize_token_embeddings(self, new_num_tokens: Optional[int] = None, layer: Optional[int] = -1):\n def _get_new_num_tokens_layer(self, new_num_tokens, layer):\n def _get_embedding_shapes(self):\n def _resize_token_embeddings(self, new_num_tokens, layer=-1):\n def _resize_cutoffs(self, new_num_tokens, new_emb_size, new_embedding_shapes, layer):\n def logits(self):\n def __init__(self, config, args):\n def get_input_embeddings(self):\n def set_input_embeddings(self, new_embeddings):\n def backward_compatible(self):\n def reset_memory_length(self, mem_len):\n def reset_length(self, tgt_len, ext_len, mem_len):\n def _prune_heads(self, heads):\n def init_mems(self, bsz):\n def _update_mems(self, hids, mems, mlen, qlen):\n def forward(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n mems: Optional[List[torch.FloatTensor]] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, TransfoXLModelOutput]:\n def __init__(self, config, args):\n def tie_weights(self):\n def reset_memory_length(self, mem_len):\n def reset_length(self, tgt_len, ext_len, mem_len):\n def init_mems(self, bsz):\n def forward(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n mems: Optional[List[torch.FloatTensor]] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, TransfoXLLMHeadModelOutput]:\n def get_output_embeddings(self):\n def prepare_inputs_for_generation(self, input_ids, past=None, **model_kwargs):\n def _resize_cutoffs(self, new_num_tokens, new_emb_size, new_embedding_shapes, layer):\n def _reorder_cache(mems: List[torch.Tensor], beam_idx: torch.Tensor) -> List[torch.Tensor]:\n def __init__(self, config):\n def forward(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n mems: 
Optional[List[torch.FloatTensor]] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, TransfoXLSequenceClassifierOutputWithPast]:\nclass PositionalEmbedding(nn.Module):\nclass PositionwiseFF(nn.Module):\nclass RelPartialLearnableMultiHeadAttn(nn.Module):\nclass RelPartialLearnableDecoderLayer(nn.Module):\nclass AdaptiveEmbedding(nn.Module):\nclass TransfoXLPreTrainedModel(PreTrainedModel):\nclass TransfoXLModelOutput(ModelOutput):\nclass TransfoXLSequenceClassifierOutputWithPast(ModelOutput):\nclass TransfoXLLMHeadModelOutput(ModelOutput):\nclass TransfoXLModel(TransfoXLPreTrainedModel):\nclass TransfoXLLMHeadModel(TransfoXLPreTrainedModel):\nclass TransfoXLForSequenceClassification(TransfoXLPreTrainedModel):"
},
{
"identifier": "get_lm_corpus",
"path": "data_utils.py",
"snippet": "def get_lm_corpus(datadir, dataset):\n fn = os.path.join(datadir, dataset, 'cache.pt')\n if os.path.exists(fn):\n print('Loading cached dataset...')\n corpus = torch.load(fn)\n print('Finish loading cached dataset...')\n else:\n print('Producing dataset {}...'.format(dataset))\n kwargs = {}\n if dataset in ['wt103', 'wt2']:\n kwargs['special'] = ['<eos>']\n kwargs['lower_case'] = False\n elif dataset == 'ptb':\n kwargs['special'] = ['<eos>']\n kwargs['lower_case'] = True\n elif dataset in ['enwik8', 'text8']:\n pass\n\n corpus = Corpus(datadir, dataset, **kwargs)\n torch.save(corpus, fn)\n\n return corpus"
}
] | import os
import logging
import wandb
import torch
import sys
from torch.nn.parallel import DistributedDataParallel
from torch.optim import Adam
from utils.modeling_transfo_xl import TransfoXLLMHeadModel, TransfoXLConfig
from torch.optim.lr_scheduler import ExponentialLR, LambdaLR
from transformers import get_linear_schedule_with_warmup, get_cosine_schedule_with_warmup
from data_utils import get_lm_corpus
from earlystopping import EarlyStopper | 3,306 |
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
class Trainer(object):
def __init__(self, args):
super().__init__()
self.args = args
self.set_tool()
self.set_dist()
self.set_seed()
self.train_iter, self.valid_iter, self.test_iter = self.prepare_data()
self.model = self.get_model(use_checkpoint=self.args.use_checkpoint)
self.optimizer = Adam(params=self.model.parameters(), lr=self.args.lr)
self.scheduler = self.get_scheduler()
self.earlystopper = EarlyStopper(args, self.logger)
def avg_rank(self, scalar):
if self.args.local_rank == -1:
return scalar
scalar_t = torch.tensor(
scalar,
dtype=torch.float,
device=self.device
) / torch.distributed.get_world_size()
torch.distributed.all_reduce(
scalar_t,
op=torch.distributed.ReduceOp.SUM
)
return scalar_t.item()
def set_tool(self):
if self.args.local_rank in [-1, 0]:
os.environ['WANDB_API_KEY'] = '972035264241fb0f6cc3cab51a5d82f47ca713db'
#wandb.init(project="LTDecoder", name=self.args.timestamp, config=self.args, dir='./tmp')
wandb.init(mode='disabled')
self.logger = logging.getLogger(__file__)
def set_dist(self):
self.args.distributed = self.args.local_rank != -1
logging.basicConfig(
level=logging.INFO
if self.args.local_rank in [-1, 0]
else logging.WARN
)
if self.args.distributed:
self.device = torch.device("cuda", self.args.local_rank)
torch.distributed.init_process_group(
backend="nccl",
init_method="env://"
)
else:
self.device = torch.device(
'cuda' if torch.cuda.is_available() else 'cpu'
)
def set_seed(self):
if self.args.distributed:
rank = torch.distributed.get_rank()
            torch.manual_seed(self.args.seed_id + rank)
            torch.cuda.manual_seed(self.args.seed_id + rank)
            torch.cuda.manual_seed_all(self.args.seed_id + rank)
else:
torch.manual_seed(self.args.seed_id)
torch.cuda.manual_seed(self.args.seed_id)
torch.cuda.manual_seed_all(self.args.seed_id)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def log(self, str):
if self.args.local_rank in [-1, 0]:
self.logger.info(str)
def wandb_log(self, dict):
if self.args.local_rank in [-1, 0]:
wandb.log(dict)
def judge_earlystopping(self, metric, model, optimizer, metric_direction='small'):
if self.args.local_rank in [-1, 0]:
self.earlystopper(metric, model, optimizer, metric_direction)
return self.earlystopper.early_stop
else:
return
def get_config(self):
# adaptive softmax / embedding
cutoffs, tie_projs = [], [False]
if self.args.adaptive:
assert self.args.dataset in ['wt103']
if self.args.dataset == 'wt103':
cutoffs = [20000, 40000, 200000]
tie_projs += [True] * len(cutoffs)
|
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
class Trainer(object):
def __init__(self, args):
super().__init__()
self.args = args
self.set_tool()
self.set_dist()
self.set_seed()
self.train_iter, self.valid_iter, self.test_iter = self.prepare_data()
self.model = self.get_model(use_checkpoint=self.args.use_checkpoint)
self.optimizer = Adam(params=self.model.parameters(), lr=self.args.lr)
self.scheduler = self.get_scheduler()
self.earlystopper = EarlyStopper(args, self.logger)
def avg_rank(self, scalar):
if self.args.local_rank == -1:
return scalar
scalar_t = torch.tensor(
scalar,
dtype=torch.float,
device=self.device
) / torch.distributed.get_world_size()
torch.distributed.all_reduce(
scalar_t,
op=torch.distributed.ReduceOp.SUM
)
return scalar_t.item()
def set_tool(self):
if self.args.local_rank in [-1, 0]:
os.environ['WANDB_API_KEY'] = '972035264241fb0f6cc3cab51a5d82f47ca713db'
#wandb.init(project="LTDecoder", name=self.args.timestamp, config=self.args, dir='./tmp')
wandb.init(mode='disabled')
self.logger = logging.getLogger(__file__)
def set_dist(self):
self.args.distributed = self.args.local_rank != -1
logging.basicConfig(
level=logging.INFO
if self.args.local_rank in [-1, 0]
else logging.WARN
)
if self.args.distributed:
self.device = torch.device("cuda", self.args.local_rank)
torch.distributed.init_process_group(
backend="nccl",
init_method="env://"
)
else:
self.device = torch.device(
'cuda' if torch.cuda.is_available() else 'cpu'
)
def set_seed(self):
if self.args.distributed:
rank = torch.distributed.get_rank()
            torch.manual_seed(self.args.seed_id + rank)
            torch.cuda.manual_seed(self.args.seed_id + rank)
            torch.cuda.manual_seed_all(self.args.seed_id + rank)
else:
torch.manual_seed(self.args.seed_id)
torch.cuda.manual_seed(self.args.seed_id)
torch.cuda.manual_seed_all(self.args.seed_id)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def log(self, str):
if self.args.local_rank in [-1, 0]:
self.logger.info(str)
def wandb_log(self, dict):
if self.args.local_rank in [-1, 0]:
wandb.log(dict)
def judge_earlystopping(self, metric, model, optimizer, metric_direction='small'):
if self.args.local_rank in [-1, 0]:
self.earlystopper(metric, model, optimizer, metric_direction)
return self.earlystopper.early_stop
else:
return
def get_config(self):
# adaptive softmax / embedding
cutoffs, tie_projs = [], [False]
if self.args.adaptive:
assert self.args.dataset in ['wt103']
if self.args.dataset == 'wt103':
cutoffs = [20000, 40000, 200000]
tie_projs += [True] * len(cutoffs)
| config = TransfoXLConfig( | 0 | 2023-10-19 00:49:29+00:00 | 4k |
npgrosser/autowired | autowired/_container.py | [
{
"identifier": "component_scan",
"path": "autowired/_component_scan.py",
"snippet": "def component_scan(root_module: ModuleType) -> Iterable[ClassComponentInfo]:\n scanner = ClassScanner(root_module)\n component_infos = (get_component_info(cls) for cls in scanner.get_classes())\n return (c for c in component_infos if c is not None)"
},
{
"identifier": "MissingTypeAnnotation",
"path": "autowired/_exceptions.py",
"snippet": "class MissingTypeAnnotation(AutowiredException):\n \"\"\"\n Raised when a field or property is not annotated with a type hint.\n \"\"\"\n\n pass"
},
{
"identifier": "AmbiguousDependencyException",
"path": "autowired/_exceptions.py",
"snippet": "class AmbiguousDependencyException(AutowiredException):\n \"\"\"\n Raised when a dependency cannot be resolved because multiple candidates are found\n and none of them matches the name of the dependency.\n \"\"\"\n\n pass"
},
{
"identifier": "IllegalAutoWireType",
"path": "autowired/_exceptions.py",
"snippet": "class IllegalAutoWireType(AutowiredException):\n \"\"\"\n Raised when an object of a type that is not allowed to be auto-wired is auto-wired.\n \"\"\"\n\n pass"
},
{
"identifier": "InstantiationError",
"path": "autowired/_exceptions.py",
"snippet": "class InstantiationError(AutowiredException):\n \"\"\"\n Raised when an object cannot be instantiated.\n \"\"\"\n\n pass"
},
{
"identifier": "UnresolvableDependencyException",
"path": "autowired/_exceptions.py",
"snippet": "class UnresolvableDependencyException(AutowiredException):\n \"\"\"\n Raised when a dependency cannot be resolved.\n \"\"\"\n\n pass"
},
{
"identifier": "AutowiredException",
"path": "autowired/_exceptions.py",
"snippet": "class AutowiredException(Exception, ABC):\n \"\"\"\n Base class for all library exceptions.\n \"\"\"\n\n pass"
},
{
"identifier": "logger",
"path": "autowired/_logging.py",
"snippet": "class _SimpleLogger:\n def trace(self, msg: str):"
},
{
"identifier": "is_subtype",
"path": "autowired/_typing_utils.py",
"snippet": "def is_subtype(t1: Type, t2: Type) -> bool:\n \"\"\"\n Checks if t1 is a subtype of t2 (instances of t1 can be used where instances of t2 are expected).\n Similar to issubclass, but also works for generic types.\n\n Note that this is a simple implementation that does not take invariant type arguments into account.\n Meaning is_subtype(List[int], List[object]) will return True, although strictly speaking\n List[int] is not a subtype of List[object], since it is a mutable container and therefore invariant.\n\n :param t1:\n :param t2:\n :return:\n \"\"\"\n\n if t1 is t2:\n return True\n\n # region union type support\n # union type similarity check rule: all types of t1 must be subtypes of at least one type of t2\n t1_union_types = _as_union_types(t1)\n t2_union_types = _as_union_types(t2)\n\n if len(t1_union_types) > 1 or len(t2_union_types) > 1:\n return all(\n any(is_subtype(t1_arg, t2_arg) for t2_arg in t2_union_types)\n for t1_arg in t1_union_types\n )\n # endregion\n\n if t1 is Any or t2 is Any:\n return True\n\n # both types are not generic -> we can use issubclass\n if get_origin(t1) is None and get_origin(t2) is None:\n return issubclass(t1, t2)\n\n origin1 = get_origin(t1) or t1\n origin2 = get_origin(t2) or t2\n\n # base condition: t1 must be a subclass of t2, otherwise we can already return False\n if not issubclass(origin1, origin2):\n return False\n\n # from now on t1 is a subclass of t2\n # -> we only need to check type arguments now\n\n # only the one type is generic -> we consider the argument to be Any\n # -> t1 = t1[Any, Any, ...] and t2 = t2[x, y, ...]\n # or t1 = t1[x, y, ...] and t2 = t2[Any, Any, ...]\n if get_origin(t1) is None or get_origin(t2) is None:\n return True\n\n args1 = get_args(t1)\n args2 = get_args(t2)\n\n # if one of the types has no type arguments, same as above\n if not args1 or not args2:\n return True\n\n # compare each of the type arguments recursively\n # as above,\n for arg1, arg2 in zip(args1, args2):\n if arg1 is Ellipsis or arg2 is Ellipsis:\n # again, handle as Any\n continue\n if not is_subtype(arg1, arg2):\n return False\n\n return True"
},
{
"identifier": "get_sequence_type",
"path": "autowired/_typing_utils.py",
"snippet": "def get_sequence_type(t: Type) -> Union[Tuple[Type, Type], Tuple[None, None]]:\n \"\"\"\n Returns the type of the elements of a list type, or None if t is not a list type.\n \"\"\"\n origin = get_origin(t)\n if origin is list or origin is List:\n args = get_args(t)\n if args:\n return list, args[0]\n\n if origin is tuple or origin is Tuple:\n args = get_args(t)\n if len(args) == 2 and args[1] is Ellipsis:\n return tuple, args[0]\n\n return None, None"
}
] | import dataclasses
import inspect
import re
from abc import ABC, abstractmethod
from dataclasses import dataclass
from types import FunctionType, ModuleType
from typing import (
Type,
Callable,
Any,
List,
Optional,
Union,
Generic,
Dict,
TypeVar,
)
from ._component_scan import component_scan
from ._exceptions import (
MissingTypeAnnotation,
AmbiguousDependencyException,
IllegalAutoWireType,
InstantiationError,
UnresolvableDependencyException,
AutowiredException,
)
from ._logging import logger
from ._typing_utils import is_subtype, get_sequence_type | 2,707 | @staticmethod
def from_supplier(
supplier: Callable[[], _T],
type: Optional[Type[_T]] = None,
name: Optional[str] = None,
) -> "Provider[_T]":
"""
Creates a provider from the given supplier function.
:param supplier: The supplier function. Will be called every time self.get_instance(...) is called.
:param type: The type of the component this provider provides.
If None, the return type of the supplier function is used, or if supplier is a class,
the class itself is used.
:param name: The name of the provider. If None, the type name of the supplier is used (snake case).
:return: The newly created provider
"""
if type is None:
# if getter is a class, use the class as a type
if inspect.isclass(supplier):
type = supplier
else:
type = inspect.signature(supplier).return_annotation
if type == inspect.Signature.empty:
raise MissingTypeAnnotation(
f"Failed to determine type of {supplier.__name__}. "
)
if name is None:
name = _camel_to_snake(type.__name__)
return _SimpleProvider(name, type, supplier)
@staticmethod
def from_class(cls, container: "Container", transient: bool) -> "Provider[_T]":
def supplier():
return container.autowire(cls)
if not transient:
supplier = _cached(supplier)
return _SimpleProvider(_camel_to_snake(cls.__name__), cls, supplier)
def _cached(supplier: Callable[[], _T]) -> Callable[[], _T]:
cached = False
result = None
def wrapper():
nonlocal cached
nonlocal result
if not cached:
result = supplier()
cached = True
return result
return wrapper
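# --- editor's note: illustrative sketch, not part of the original source ---
# `_cached` above memoizes the first supplier result, which is how non-transient
# (singleton) providers are built in Provider.from_class. The names below are invented.
def _demo_cached():
    calls = []

    def supplier():
        calls.append(1)
        return object()

    cached_supplier = _cached(supplier)
    first, second = cached_supplier(), cached_supplier()
    assert first is second  # the same instance is returned on every call
    assert len(calls) == 1  # the underlying supplier ran exactly once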
@dataclass(frozen=True)
class _SimpleProvider(Provider[_T]):
name: str
type: Type[_T]
getter: Callable[[], _T] = dataclasses.field(repr=False)
def get_instance(self, dependency: Dependency, container: "Container") -> _T:
return self.getter()
def get_name(self) -> str:
return self.name
def satisfies(self, dependency: Dependency) -> bool:
return is_subtype(self.type, dependency.type)
_illegal_autowiredType_modules = ["builtins", "typing", "dataclasses", "abc", "object"]
def _is_illegal_type(t: Type[_T]) -> bool:
return t.__module__.split(".")[0] in _illegal_autowiredType_modules
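# --- editor's note: illustrative checks, not part of the original source ---
# These mirror the behaviour documented in the is_subtype docstring quoted in the
# context above (union handling, bare generics, deliberately ignored invariance).
def _demo_is_subtype():
    assert is_subtype(bool, int)                 # plain issubclass path
    assert is_subtype(List[int], List[object])   # invariance is intentionally ignored
    assert is_subtype(List[int], list)           # a bare generic is treated like List[Any]
    assert is_subtype(Union[int, str], object)   # every union member must match
    assert not is_subtype(Union[int, str], int)  # str is not a subtype of int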
class Container:
"""
A container for resolving and storing dependencies.
"""
_providers: List[Provider]
def __init__(self):
self._providers = []
def get_providers(self, dependency: Optional[Dependency] = None) -> List[Provider]:
"""
Returns all providers that match the given dependency specification.
:param dependency: Optional dependency specification, if None, all providers are returned
:return:
"""
if dependency is None:
return list(self._providers)
else:
return [p for p in self._providers if p.satisfies(dependency)]
def get_provider(self, dependency: Dependency) -> Optional[Provider]:
"""
Returns an existing provider that matches the given dependency specification.
:param dependency:
:return:
:raises AmbiguousDependencyException: If multiple matching providers are found and there is no name match
"""
candidates = self.get_providers(dependency)
if len(candidates) == 1:
return candidates[0]
if len(candidates) > 1:
by_name = _group_by(lambda obj: obj.name, candidates)
if dependency.name in by_name and len(by_name[dependency.name]) == 1:
return by_name[dependency.name][0]
else:
|
_T = TypeVar("_T")
@dataclass(frozen=True)
class Dependency(Generic[_T]):
"""
A dependency specification.
"""
name: str
type: Type[_T]
required: bool = True
default_factory: Optional[Callable[[], _T]] = None
class Provider(ABC, Generic[_T]):
@abstractmethod
def get_instance(
self, dependency: Dependency, container: "Container"
) -> _T: # pragma: no cover
"""
Returns an instance that satisfies the given dependency specification.
:param dependency: The dependency specification.
:param container: The container that is currently resolving the dependency.
:return: An instance that satisfies the given dependency specification
"""
...
@abstractmethod
def get_name(self) -> str: # pragma: no cover
"""
Returns the name of the provider.
Used by the container to resolve ambiguous dependencies.
If a container contains multiple dependencies that satisfy the same dependency specification,
the name of the dependency is compared to the provider name to try to resolve the ambiguity.
:return: The name of the provider
"""
...
@abstractmethod
def satisfies(self, dependency: Dependency) -> bool: # pragma: no cover
"""
Returns whether this provider satisfies the given dependency specification.
:param dependency: The dependency specification.
:return: Whether this provider satisfies the given dependency specification
"""
...
@staticmethod
def from_instance(instance: _T, name: Optional[str] = None) -> "Provider[_T]":
"""
Creates a singleton provider from the given instance.
:param instance: The instance. Will always be returned by self.get_instance(...)
:param name: The name of the provider. If None, the type name of the instance is used (snake case).
:return: The newly created provider
"""
if name is None:
name = _camel_to_snake(type(instance).__name__)
return _SimpleProvider(name, type(instance), lambda: instance)
# noinspection PyShadowingBuiltins
@staticmethod
def from_supplier(
supplier: Callable[[], _T],
type: Optional[Type[_T]] = None,
name: Optional[str] = None,
) -> "Provider[_T]":
"""
Creates a provider from the given supplier function.
:param supplier: The supplier function. Will be called every time self.get_instance(...) is called.
:param type: The type of the component this provider provides.
If None, the return type of the supplier function is used, or if supplier is a class,
the class itself is used.
:param name: The name of the provider. If None, the type name of the supplier is used (snake case).
:return: The newly created provider
"""
if type is None:
# if getter is a class, use the class as a type
if inspect.isclass(supplier):
type = supplier
else:
type = inspect.signature(supplier).return_annotation
if type == inspect.Signature.empty:
raise MissingTypeAnnotation(
f"Failed to determine type of {supplier.__name__}. "
)
if name is None:
name = _camel_to_snake(type.__name__)
return _SimpleProvider(name, type, supplier)
@staticmethod
def from_class(cls, container: "Container", transient: bool) -> "Provider[_T]":
def supplier():
return container.autowire(cls)
if not transient:
supplier = _cached(supplier)
return _SimpleProvider(_camel_to_snake(cls.__name__), cls, supplier)
def _cached(supplier: Callable[[], _T]) -> Callable[[], _T]:
cached = False
result = None
def wrapper():
nonlocal cached
nonlocal result
if not cached:
result = supplier()
cached = True
return result
return wrapper
@dataclass(frozen=True)
class _SimpleProvider(Provider[_T]):
name: str
type: Type[_T]
getter: Callable[[], _T] = dataclasses.field(repr=False)
def get_instance(self, dependency: Dependency, container: "Container") -> _T:
return self.getter()
def get_name(self) -> str:
return self.name
def satisfies(self, dependency: Dependency) -> bool:
return is_subtype(self.type, dependency.type)
_illegal_autowiredType_modules = ["builtins", "typing", "dataclasses", "abc", "object"]
def _is_illegal_type(t: Type[_T]) -> bool:
return t.__module__.split(".")[0] in _illegal_autowiredType_modules
class Container:
"""
A container for resolving and storing dependencies.
"""
_providers: List[Provider]
def __init__(self):
self._providers = []
def get_providers(self, dependency: Optional[Dependency] = None) -> List[Provider]:
"""
Returns all providers that match the given dependency specification.
:param dependency: Optional dependency specification, if None, all providers are returned
:return:
"""
if dependency is None:
return list(self._providers)
else:
return [p for p in self._providers if p.satisfies(dependency)]
def get_provider(self, dependency: Dependency) -> Optional[Provider]:
"""
Returns an existing provider that matches the given dependency specification.
:param dependency:
:return:
:raises AmbiguousDependencyException: If multiple matching providers are found and there is no name match
"""
candidates = self.get_providers(dependency)
if len(candidates) == 1:
return candidates[0]
if len(candidates) > 1:
by_name = _group_by(lambda obj: obj.name, candidates)
if dependency.name in by_name and len(by_name[dependency.name]) == 1:
return by_name[dependency.name][0]
else: | raise AmbiguousDependencyException( | 2 | 2023-10-16 09:22:20+00:00 | 4k |
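# --- editor's note: hedged usage sketch, not part of the original dataset row ---
# It shows the name-based tie-breaking implemented in Container.get_provider above.
# The public registration API is truncated in this excerpt, so providers are
# attached to the private list directly, purely for illustration; `_group_by` is
# assumed to be defined elsewhere in the same module.
def _demo_disambiguation():
    container = Container()
    container._providers = [
        Provider.from_instance("primary", name="primary_url"),
        Provider.from_instance("fallback", name="fallback_url"),
    ]
    dep = Dependency(name="fallback_url", type=str)  # two providers satisfy str
    assert container.get_provider(dep).get_instance(dep, container) == "fallback"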
chenxn2020/GOSE | GOSEfinetune/data/datasets/xfun.py | [
{
"identifier": "load_image",
"path": "GOSEfinetune/data/utils.py",
"snippet": "def load_image(image_path):\n image = read_image(image_path, format=\"BGR\")\n h = image.shape[0]\n w = image.shape[1]\n img_trans = TransformList([ResizeTransform(h=h, w=w, new_h=224, new_w=224)])\n image = torch.tensor(img_trans.apply_image(image).copy()).permute(2, 0, 1) # copy to make it writeable\n return image, (w, h)"
},
{
"identifier": "merge_bbox",
"path": "GOSEfinetune/data/utils.py",
"snippet": "def merge_bbox(bbox_list):\n x0, y0, x1, y1 = list(zip(*bbox_list))\n return [min(x0), min(y0), max(x1), max(y1)]"
},
{
"identifier": "normalize_bbox",
"path": "GOSEfinetune/data/utils.py",
"snippet": "def normalize_bbox(bbox, size):\n return [\n int(1000 * bbox[0] / size[0]),\n int(1000 * bbox[1] / size[1]),\n int(1000 * bbox[2] / size[0]),\n int(1000 * bbox[3] / size[1]),\n ]"
},
{
"identifier": "simplify_bbox",
"path": "GOSEfinetune/data/utils.py",
"snippet": "def simplify_bbox(bbox):\n return [\n min(bbox[0::2]),\n min(bbox[1::2]),\n max(bbox[2::2]),\n max(bbox[3::2]),\n ]"
}
] | import json
import logging
import os
import datasets
from GOSEfinetune.data.utils import load_image, merge_bbox, normalize_bbox, simplify_bbox
from transformers import AutoTokenizer | 2,050 | "relations": datasets.Sequence(
{
"head": datasets.Value("int64"),
"tail": datasets.Value("int64"),
"start_index": datasets.Value("int64"),
"end_index": datasets.Value("int64"),
}
),
}
),
supervised_keys=None,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# urls_to_download = {
# "train": [f"{_URL}{self.config.lang}.train.json", f"{_URL}{self.config.lang}.train.zip"],
# "val": [f"{_URL}{self.config.lang}.val.json", f"{_URL}{self.config.lang}.val.zip"],
# # "test": [f"{_URL}{self.config.lang}.test.json", f"{_URL}{self.config.lang}.test.zip"],
# }
# downloaded_files = dl_manager.download_and_extract(urls_to_download)
# train_files_for_many_langs = [downloaded_files["train"]]
# val_files_for_many_langs = [downloaded_files["val"]]
# # test_files_for_many_langs = [downloaded_files["test"]]
self.fewshot = False
self.fewshot_num = 32
self.fewshot_time = 5 #32 1
print(f'=========================fewshot_num {self.fewshot_num}========================')
print(f'=========================fewshot_time {self.fewshot_time}========================')
if not self.fewshot:
file_dir = 'xfund&funsd/'
else:
file_dir = f'fewshot_dataset/xfund&funsd_fewshot{self.fewshot_num}_{self.fewshot_time}/'
print('asdfasdasdff')
print(file_dir)
train_files_for_many_langs = [[file_dir+f"{self.config.lang}.train.json", file_dir+f"{self.config.lang}"]]
val_files_for_many_langs = [[file_dir+f"{self.config.lang}.val.json", file_dir+f"{self.config.lang}"]]
if self.config.additional_langs:
additional_langs = self.config.additional_langs.split("+")
if "all" in additional_langs:
additional_langs = [lang for lang in _LANG if lang != self.config.lang]
for lang in additional_langs:
# urls_to_download = {"train": [f"{_URL}{lang}.train.json", f"{_URL}{lang}.train.zip"]}
# additional_downloaded_files = dl_manager.download_and_extract(urls_to_download)
# train_files_for_many_langs.append(additional_downloaded_files["train"])
train_files_for_many_langs.append([file_dir+f"{lang}.train.json", file_dir+f"{lang}"])
logger.info(f"Training on {self.config.lang} with additional langs({self.config.additional_langs})")
logger.info(f"Evaluating on {self.config.lang}")
logger.info(f"Testing on {self.config.lang}")
print(f"Training on {self.config.lang} with additional langs({self.config.additional_langs})")
print(f"Evaluating on {self.config.lang}")
print(f"Testing on {self.config.lang}")
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": train_files_for_many_langs}),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION, gen_kwargs={"filepaths": val_files_for_many_langs}
),
# datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepaths": test_files_for_many_langs}),
]
def _generate_examples(self, filepaths):
for filepath in filepaths:
logger.info("Generating examples from = %s", filepath)
with open(filepath[0], "r") as f:
data = json.load(f)
for doc in data["documents"]:
doc["img"]["fpath"] = os.path.join(filepath[1], doc["img"]["fname"])
image, size = load_image(doc["img"]["fpath"])
document = doc["document"]
tokenized_doc = {"input_ids": [], "bbox": [], "labels": []}
entities = []
relations = []
id2label = {}
entity_id_to_index_map = {}
empty_entity = set()
for line in document:
if len(line["text"]) == 0:
empty_entity.add(line["id"])
continue
id2label[line["id"]] = line["label"]
relations.extend([tuple(sorted(l)) for l in line["linking"]])
if '/en' in filepath[0]:
tokenized_inputs = self.tokenizer(
' '.join([q['text'].replace(u'\uf703','') for q in line['words']]),
add_special_tokens=False,
return_offsets_mapping=True,
return_attention_mask=False,
)
else:
tokenized_inputs = self.tokenizer(
line["text"],
add_special_tokens=False,
return_offsets_mapping=True,
return_attention_mask=False,
)
text_length = 0
ocr_length = 0
bbox = []
last_box = None
for token_id, offset in zip(tokenized_inputs["input_ids"], tokenized_inputs["offset_mapping"]):
if token_id == 6:
bbox.append(None)
continue
text_length += offset[1] - offset[0]
tmp_box = []
while ocr_length < text_length:
ocr_word = line["words"].pop(0)
ocr_length += len(
self.tokenizer._tokenizer.normalizer.normalize_str(ocr_word["text"].strip())
)
tmp_box.append(simplify_bbox(line["box"]))
if len(tmp_box) == 0:
tmp_box = last_box
| # Lint as: python3
_URL = "https://github.com/doc-analysis/XFUN/releases/download/v1.0/"
_LANG = ["zh", "de", "es", "fr", "en", "it", "ja", "pt"]
logger = logging.getLogger(__name__)
class XFUNConfig(datasets.BuilderConfig):
"""BuilderConfig for XFUN."""
def __init__(self, lang, additional_langs=None, **kwargs):
"""
Args:
lang: string, language for the input text
**kwargs: keyword arguments forwarded to super.
"""
super(XFUNConfig, self).__init__(**kwargs)
self.lang = lang
self.additional_langs = additional_langs
class XFUN(datasets.GeneratorBasedBuilder):
"""XFUN dataset."""
BUILDER_CONFIGS = [XFUNConfig(name=f"xfun.{lang}", lang=lang) for lang in _LANG]
tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
def __init__(self, data_dir, **kwargs):
super(XFUN, self).__init__(**kwargs)
def _info(self):
return datasets.DatasetInfo(
features=datasets.Features(
{
"id": datasets.Value("string"),
"input_ids": datasets.Sequence(datasets.Value("int64")),
"bbox": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
"labels": datasets.Sequence(
datasets.ClassLabel(
names=["O", "B-QUESTION", "B-ANSWER", "B-HEADER", "I-ANSWER", "I-QUESTION", "I-HEADER"]
)
),
"image": datasets.Array3D(shape=(3, 224, 224), dtype="uint8"),
"entities": datasets.Sequence(
{
"start": datasets.Value("int64"),
"end": datasets.Value("int64"),
"label": datasets.ClassLabel(names=["HEADER", "QUESTION", "ANSWER"]),
}
),
"relations": datasets.Sequence(
{
"head": datasets.Value("int64"),
"tail": datasets.Value("int64"),
"start_index": datasets.Value("int64"),
"end_index": datasets.Value("int64"),
}
),
}
),
supervised_keys=None,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# urls_to_download = {
# "train": [f"{_URL}{self.config.lang}.train.json", f"{_URL}{self.config.lang}.train.zip"],
# "val": [f"{_URL}{self.config.lang}.val.json", f"{_URL}{self.config.lang}.val.zip"],
# # "test": [f"{_URL}{self.config.lang}.test.json", f"{_URL}{self.config.lang}.test.zip"],
# }
# downloaded_files = dl_manager.download_and_extract(urls_to_download)
# train_files_for_many_langs = [downloaded_files["train"]]
# val_files_for_many_langs = [downloaded_files["val"]]
# # test_files_for_many_langs = [downloaded_files["test"]]
self.fewshot = False
self.fewshot_num = 32
self.fewshot_time = 5 #32 1
print(f'=========================fewshot_num {self.fewshot_num}========================')
print(f'=========================fewshot_time {self.fewshot_time}========================')
if not self.fewshot:
file_dir = 'xfund&funsd/'
else:
file_dir = f'fewshot_dataset/xfund&funsd_fewshot{self.fewshot_num}_{self.fewshot_time}/'
print('asdfasdasdff')
print(file_dir)
train_files_for_many_langs = [[file_dir+f"{self.config.lang}.train.json", file_dir+f"{self.config.lang}"]]
val_files_for_many_langs = [[file_dir+f"{self.config.lang}.val.json", file_dir+f"{self.config.lang}"]]
if self.config.additional_langs:
additional_langs = self.config.additional_langs.split("+")
if "all" in additional_langs:
additional_langs = [lang for lang in _LANG if lang != self.config.lang]
for lang in additional_langs:
# urls_to_download = {"train": [f"{_URL}{lang}.train.json", f"{_URL}{lang}.train.zip"]}
# additional_downloaded_files = dl_manager.download_and_extract(urls_to_download)
# train_files_for_many_langs.append(additional_downloaded_files["train"])
train_files_for_many_langs.append([file_dir+f"{lang}.train.json", file_dir+f"{lang}"])
logger.info(f"Training on {self.config.lang} with additional langs({self.config.additional_langs})")
logger.info(f"Evaluating on {self.config.lang}")
logger.info(f"Testing on {self.config.lang}")
print(f"Training on {self.config.lang} with additional langs({self.config.additional_langs})")
print(f"Evaluating on {self.config.lang}")
print(f"Testing on {self.config.lang}")
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": train_files_for_many_langs}),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION, gen_kwargs={"filepaths": val_files_for_many_langs}
),
# datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepaths": test_files_for_many_langs}),
]
def _generate_examples(self, filepaths):
for filepath in filepaths:
logger.info("Generating examples from = %s", filepath)
with open(filepath[0], "r") as f:
data = json.load(f)
for doc in data["documents"]:
doc["img"]["fpath"] = os.path.join(filepath[1], doc["img"]["fname"])
image, size = load_image(doc["img"]["fpath"])
document = doc["document"]
tokenized_doc = {"input_ids": [], "bbox": [], "labels": []}
entities = []
relations = []
id2label = {}
entity_id_to_index_map = {}
empty_entity = set()
for line in document:
if len(line["text"]) == 0:
empty_entity.add(line["id"])
continue
id2label[line["id"]] = line["label"]
relations.extend([tuple(sorted(l)) for l in line["linking"]])
if '/en' in filepath[0]:
tokenized_inputs = self.tokenizer(
' '.join([q['text'].replace(u'\uf703','') for q in line['words']]),
add_special_tokens=False,
return_offsets_mapping=True,
return_attention_mask=False,
)
else:
tokenized_inputs = self.tokenizer(
line["text"],
add_special_tokens=False,
return_offsets_mapping=True,
return_attention_mask=False,
)
text_length = 0
ocr_length = 0
bbox = []
last_box = None
for token_id, offset in zip(tokenized_inputs["input_ids"], tokenized_inputs["offset_mapping"]):
if token_id == 6:
bbox.append(None)
continue
text_length += offset[1] - offset[0]
tmp_box = []
while ocr_length < text_length:
ocr_word = line["words"].pop(0)
ocr_length += len(
self.tokenizer._tokenizer.normalizer.normalize_str(ocr_word["text"].strip())
)
tmp_box.append(simplify_bbox(line["box"]))
if len(tmp_box) == 0:
tmp_box = last_box | bbox.append(normalize_bbox(merge_bbox(tmp_box), size)) | 1 | 2023-10-19 14:36:32+00:00 | 4k |
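# --- editor's note: illustrative sketch, not part of the original dataset row ---
# It traces the bbox pipeline used in _generate_examples above: word quads are
# flattened to axis-aligned rectangles, merged per token, then scaled into the
# 0-1000 LayoutLM coordinate space. The numbers are made up.
def _demo_bbox_pipeline():
    size = (800, 600)  # (width, height), as returned by load_image
    quads = [[10, 20, 50, 22, 50, 40, 10, 38],
             [55, 18, 90, 20, 90, 42, 55, 40]]
    rects = [simplify_bbox(q) for q in quads]  # -> [10, 20, 50, 40] and [55, 18, 90, 42]
    merged = merge_bbox(rects)                 # -> [10, 18, 90, 42]
    assert normalize_bbox(merged, size) == [12, 30, 112, 70]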
mklissa/dceo | dopamine/jax/agents/rainbow/rainbow_agent.py | [
{
"identifier": "losses",
"path": "dopamine/jax/losses.py",
"snippet": "def huber_loss(targets: jnp.array,\n predictions: jnp.array,\n delta: float = 1.0) -> jnp.ndarray:\ndef mse_loss(targets: jnp.array, predictions: jnp.array) -> jnp.ndarray:\ndef softmax_cross_entropy_loss_with_logits(labels: jnp.array,\n logits: jnp.array) -> jnp.ndarray:"
},
{
"identifier": "networks",
"path": "dopamine/jax/networks.py",
"snippet": "def preprocess_atari_inputs(x):\n def __call__(self, x):\n def setup(self):\n def __call__(self, x):\n def __init__(self,\n nvars: int,\n min_vals: Union[float, Sequence[float]] = 0.0,\n max_vals: Optional[Union[float, Sequence[float]]] = None,\n order: int = 3):\n def scale(self, values):\n def compute_features(self, features):\n def __call__(self, x):\n def __call__(self, x, support):\n def setup(self):\n def __call__(self, x, support):\n def __call__(self, x, num_quantiles, rng):\n def __call__(self, x):\n def sample_noise(key, shape):\n def f(x):\n def __call__(self, x, features, bias=True, kernel_init=None):\n def mu_init(key, shape):\n def sigma_init(key, shape, dtype=jnp.float32): # pylint: disable=unused-argument\ndef feature_layer(key, noisy, eval_mode=False):\n def noisy_net(x, features):\n def dense_net(x, features):\n def __call__(self, x, support, eval_mode=False, key=None):\nclass NatureDQNNetwork(nn.Module):\nclass ClassicControlDQNNetwork(nn.Module):\nclass FourierBasis(object):\nclass JaxFourierDQNNetwork(nn.Module):\nclass RainbowNetwork(nn.Module):\nclass ClassicControlRainbowNetwork(nn.Module):\nclass ImplicitQuantileNetwork(nn.Module):\nclass QuantileNetwork(nn.Module):\nclass NoisyNetwork(nn.Module):\nclass FullRainbowNetwork(nn.Module):"
},
{
"identifier": "dqn_agent",
"path": "dopamine/jax/agents/dqn/dqn_agent.py",
"snippet": "NATURE_DQN_OBSERVATION_SHAPE = dqn_agent.NATURE_DQN_OBSERVATION_SHAPE\nNATURE_DQN_DTYPE = jnp.uint8\nNATURE_DQN_STACK_SIZE = dqn_agent.NATURE_DQN_STACK_SIZE\ndef create_optimizer(name='adam', learning_rate=6.25e-5, beta1=0.9, beta2=0.999,\n eps=1.5e-4, centered=False):\ndef train(network_def, online_params, target_params, optimizer, optimizer_state,\n states, actions, next_states, rewards, terminals, cumulative_gamma,\n loss_type='mse'):\n def loss_fn(params, target):\n def q_online(state):\n def q_target(state):\ndef target_q(target_network, next_states, rewards, terminals, cumulative_gamma):\ndef linearly_decaying_epsilon(decay_period, step, warmup_steps, epsilon):\ndef select_action(network_def, params, state, rng, num_actions, eval_mode,\n epsilon_eval, epsilon_train, epsilon_decay_period,\n training_steps, min_replay_history, epsilon_fn):\n def __init__(self,\n num_actions,\n observation_shape=NATURE_DQN_OBSERVATION_SHAPE,\n observation_dtype=NATURE_DQN_DTYPE,\n stack_size=NATURE_DQN_STACK_SIZE,\n network=networks.NatureDQNNetwork,\n gamma=0.99,\n update_horizon=1,\n min_replay_history=20000,\n update_period=4,\n target_update_period=8000,\n epsilon_fn=linearly_decaying_epsilon,\n epsilon_train=0.01,\n epsilon_eval=0.001,\n epsilon_decay_period=250000,\n eval_mode=False,\n optimizer='adam',\n summary_writer=None,\n summary_writing_frequency=500,\n allow_partial_reload=False,\n seed=None,\n loss_type='mse',\n preprocess_fn=None,\n collector_allowlist=('tensorboard',)):\n def _build_networks_and_optimizer(self):\n def _build_replay_buffer(self):\n def _sample_from_replay_buffer(self):\n def _sync_weights(self):\n def _reset_state(self):\n def _record_observation(self, observation):\n def begin_episode(self, observation):\n def step(self, reward, observation):\n def end_episode(self, reward, terminal=True):\n def _train_step(self):\n def _store_transition(self,\n last_observation,\n action,\n reward,\n is_terminal,\n *args,\n priority=None,\n episode_end=False):\n def bundle_and_checkpoint(self, checkpoint_dir, iteration_number):\n def unbundle(self, checkpoint_dir, iteration_number, bundle_dictionary):\n def set_collector_dispatcher(self, collector_dispatcher):\nclass JaxDQNAgent(object):"
},
{
"identifier": "statistics_instance",
"path": "dopamine/metrics/statistics_instance.py",
"snippet": "class StatisticsInstance:"
},
{
"identifier": "prioritized_replay_buffer",
"path": "dopamine/replay_memory/prioritized_replay_buffer.py",
"snippet": "class OutOfGraphPrioritizedReplayBuffer(\n circular_replay_buffer.OutOfGraphReplayBuffer):\nclass WrappedPrioritizedReplayBuffer(\n circular_replay_buffer.WrappedReplayBuffer):\n def __init__(self,\n observation_shape,\n stack_size,\n replay_capacity,\n batch_size,\n update_horizon=1,\n gamma=0.99,\n max_sample_attempts=1000,\n extra_storage_types=None,\n observation_dtype=np.uint8,\n terminal_dtype=np.uint8,\n action_shape=(),\n action_dtype=np.int32,\n reward_shape=(),\n reward_dtype=np.float32):\n def get_add_args_signature(self):\n def _add(self, *args):\n def sample_index_batch(self, batch_size):\n def sample_transition_batch(self, batch_size=None, indices=None):\n def set_priority(self, indices, priorities):\n def get_priority(self, indices):\n def get_transition_elements(self, batch_size=None):\n def __init__(self,\n observation_shape,\n stack_size,\n use_staging=False,\n replay_capacity=1000000,\n batch_size=32,\n update_horizon=1,\n gamma=0.99,\n wrapped_memory=None,\n max_sample_attempts=1000,\n extra_storage_types=None,\n observation_dtype=np.uint8,\n terminal_dtype=np.uint8,\n action_shape=(),\n action_dtype=np.int32,\n reward_shape=(),\n reward_dtype=np.float32):\n def tf_set_priority(self, indices, priorities):\n def tf_get_priority(self, indices):"
}
] | import functools
import gin
import jax
import jax.numpy as jnp
import numpy as onp
import optax
import tensorflow as tf
from dopamine.jax import losses
from dopamine.jax import networks
from dopamine.jax.agents.dqn import dqn_agent
from dopamine.metrics import statistics_instance
from dopamine.replay_memory import prioritized_replay_buffer | 3,135 | def loss_fn(params, target, loss_multipliers):
def q_online(state):
return network_def.apply(params, state, support)
logits = jax.vmap(q_online)(states).logits
# Fetch the logits for its selected action. We use vmap to perform this
# indexing across the batch.
chosen_action_logits = jax.vmap(lambda x, y: x[y])(logits, actions)
loss = jax.vmap(losses.softmax_cross_entropy_loss_with_logits)(
target,
chosen_action_logits)
mean_loss = jnp.mean(loss_multipliers * loss)
return mean_loss, loss
def q_target(state):
return network_def.apply(target_params, state, support)
grad_fn = jax.value_and_grad(loss_fn, has_aux=True)
target = target_distribution(q_target,
next_states,
rewards,
terminals,
support,
cumulative_gamma)
# Get the unweighted loss without taking its mean for updating priorities.
(mean_loss, loss), grad = grad_fn(online_params, target, loss_weights)
updates, optimizer_state = optimizer.update(grad, optimizer_state,
params=online_params)
online_params = optax.apply_updates(online_params, updates)
return optimizer_state, online_params, loss, mean_loss
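# --- editor's note: illustrative sketch, not part of the original source ---
# It isolates the importance-weighted reduction performed inside `train` above:
# per-sample cross-entropy losses are rescaled by prioritized-replay weights
# before averaging. The numbers are made up.
def _demo_weighted_loss():
    per_sample_loss = jnp.array([0.5, 2.0, 1.0])
    loss_weights = jnp.array([1.0, 0.25, 1.0])  # e.g. normalized (1/p_i)**beta terms
    mean_loss = jnp.mean(loss_weights * per_sample_loss)
    assert jnp.isclose(mean_loss, (0.5 + 0.5 + 1.0) / 3)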
@functools.partial(jax.vmap, in_axes=(None, 0, 0, 0, None, None))
def target_distribution(target_network, next_states, rewards, terminals,
support, cumulative_gamma):
"""Builds the C51 target distribution as per Bellemare et al. (2017).
  First, we compute the support of the Bellman target, r + gamma Z', where Z'
is the support of the next state distribution:
* Evenly spaced in [-vmax, vmax] if the current state is nonterminal;
* 0 otherwise (duplicated num_atoms times).
Second, we compute the next-state probabilities, corresponding to the action
with highest expected value.
Finally we project the Bellman target (support + probabilities) onto the
original support.
Args:
target_network: Jax Module used for the target network.
next_states: numpy array of batched next states.
rewards: numpy array of batched rewards.
terminals: numpy array of batched terminals.
support: support for the distribution.
cumulative_gamma: float, cumulative gamma to use.
Returns:
The target distribution from the replay.
"""
is_terminal_multiplier = 1. - terminals.astype(jnp.float32)
# Incorporate terminal state to discount factor.
gamma_with_terminal = cumulative_gamma * is_terminal_multiplier
target_support = rewards + gamma_with_terminal * support
next_state_target_outputs = target_network(next_states)
q_values = jnp.squeeze(next_state_target_outputs.q_values)
next_qt_argmax = jnp.argmax(q_values)
probabilities = jnp.squeeze(next_state_target_outputs.probabilities)
next_probabilities = probabilities[next_qt_argmax]
return jax.lax.stop_gradient(
project_distribution(target_support, next_probabilities, support))
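# --- editor's note: illustrative sketch, not part of the original source ---
# A numeric view of the target-support computation in target_distribution above,
# before the categorical projection step. The values are made up.
def _demo_target_support():
    support = jnp.linspace(-10.0, 10.0, 5)  # [-10, -5, 0, 5, 10]
    cumulative_gamma = 0.99 ** 3            # e.g. a 3-step update horizon
    reward, terminal = 1.0, 0.0
    gamma_with_terminal = cumulative_gamma * (1.0 - terminal)
    target_support = reward + gamma_with_terminal * support  # shifted/scaled atoms
    # For a terminal transition the support collapses onto the reward alone:
    terminal_support = reward + cumulative_gamma * (1.0 - 1.0) * support
    assert jnp.allclose(terminal_support, reward)
    return target_support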
@functools.partial(jax.jit, static_argnums=(0, 4, 5, 6, 7, 8, 10, 11))
def select_action(network_def, params, state, rng, num_actions, eval_mode,
epsilon_eval, epsilon_train, epsilon_decay_period,
training_steps, min_replay_history, epsilon_fn, support):
"""Select an action from the set of available actions.
Chooses an action randomly with probability self._calculate_epsilon(), and
otherwise acts greedily according to the current Q-value estimates.
Args:
network_def: Linen Module to use for inference.
params: Linen params (frozen dict) to use for inference.
state: input state to use for inference.
rng: Jax random number generator.
num_actions: int, number of actions (static_argnum).
eval_mode: bool, whether we are in eval mode (static_argnum).
epsilon_eval: float, epsilon value to use in eval mode (static_argnum).
epsilon_train: float, epsilon value to use in train mode (static_argnum).
epsilon_decay_period: float, decay period for epsilon value for certain
epsilon functions, such as linearly_decaying_epsilon, (static_argnum).
training_steps: int, number of training steps so far.
min_replay_history: int, minimum number of steps in replay buffer
(static_argnum).
epsilon_fn: function used to calculate epsilon value (static_argnum).
support: support for the distribution.
Returns:
rng: Jax random number generator.
action: int, the selected action.
"""
epsilon = jnp.where(eval_mode,
epsilon_eval,
epsilon_fn(epsilon_decay_period,
training_steps,
min_replay_history,
epsilon_train))
rng, rng1, rng2 = jax.random.split(rng, num=3)
p = jax.random.uniform(rng1)
return rng, jnp.where(
p <= epsilon,
jax.random.randint(rng2, (), 0, num_actions),
jnp.argmax(network_def.apply(params, state, support).q_values))
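# --- editor's note: illustrative sketch, not part of the original source ---
# The default `epsilon_fn` is dqn_agent.linearly_decaying_epsilon (see the context
# snippet above); under that assumption it holds epsilon at 1.0 for `warmup_steps`,
# then decays it linearly to its final value over `decay_period` steps.
def _demo_epsilon_schedule(decay_period=250000, warmup_steps=20000, epsilon=0.01):
    for step in (0, warmup_steps, warmup_steps + decay_period // 2,
                 warmup_steps + decay_period):
        steps_left = decay_period + warmup_steps - step
        bonus = onp.clip((1.0 - epsilon) * steps_left / decay_period,
                         0.0, 1.0 - epsilon)
        print(step, epsilon + bonus)  # -> 1.0, 1.0, ~0.505, 0.01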
@gin.configurable
| # coding=utf-8
# Copyright 2018 The Dopamine Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Compact implementation of a simplified Rainbow agent in Jax.
Specifically, we implement the following components from Rainbow:
* n-step updates;
* prioritized replay; and
* distributional RL.
These three components were found to significantly impact the performance of
the Atari game-playing agent.
Furthermore, our implementation does away with some minor hyperparameter
choices. Specifically, we
* keep the beta exponent fixed at beta=0.5, rather than increase it linearly;
* remove the alpha parameter, which was set to alpha=0.5 throughout the paper.
Details in "Rainbow: Combining Improvements in Deep Reinforcement Learning" by
Hessel et al. (2018).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
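# --- editor's note: illustrative sketch, not part of the original source ---
# The `support` argument threaded through the functions below is the fixed C51
# atom grid (Bellemare et al., 2017): num_atoms values evenly spaced on
# [-vmax, vmax], matching the docstring of target_distribution.
def _make_support(num_atoms=51, vmax=10.0):
    return jnp.linspace(-vmax, vmax, num_atoms)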
@functools.partial(jax.jit, static_argnums=(0, 3, 12))
def train(network_def, online_params, target_params, optimizer, optimizer_state,
states, actions, next_states, rewards, terminals, loss_weights,
support, cumulative_gamma):
"""Run a training step."""
def loss_fn(params, target, loss_multipliers):
def q_online(state):
return network_def.apply(params, state, support)
logits = jax.vmap(q_online)(states).logits
# Fetch the logits for its selected action. We use vmap to perform this
# indexing across the batch.
chosen_action_logits = jax.vmap(lambda x, y: x[y])(logits, actions)
loss = jax.vmap(losses.softmax_cross_entropy_loss_with_logits)(
target,
chosen_action_logits)
mean_loss = jnp.mean(loss_multipliers * loss)
return mean_loss, loss
def q_target(state):
return network_def.apply(target_params, state, support)
grad_fn = jax.value_and_grad(loss_fn, has_aux=True)
target = target_distribution(q_target,
next_states,
rewards,
terminals,
support,
cumulative_gamma)
# Get the unweighted loss without taking its mean for updating priorities.
(mean_loss, loss), grad = grad_fn(online_params, target, loss_weights)
updates, optimizer_state = optimizer.update(grad, optimizer_state,
params=online_params)
online_params = optax.apply_updates(online_params, updates)
return optimizer_state, online_params, loss, mean_loss
@functools.partial(jax.vmap, in_axes=(None, 0, 0, 0, None, None))
def target_distribution(target_network, next_states, rewards, terminals,
support, cumulative_gamma):
"""Builds the C51 target distribution as per Bellemare et al. (2017).
First, we compute the support of the Bellman target, r + gamma Z'. Where Z'
is the support of the next state distribution:
* Evenly spaced in [-vmax, vmax] if the current state is nonterminal;
* 0 otherwise (duplicated num_atoms times).
Second, we compute the next-state probabilities, corresponding to the action
with highest expected value.
Finally we project the Bellman target (support + probabilities) onto the
original support.
Args:
target_network: Jax Module used for the target network.
next_states: numpy array of batched next states.
rewards: numpy array of batched rewards.
terminals: numpy array of batched terminals.
support: support for the distribution.
cumulative_gamma: float, cumulative gamma to use.
Returns:
The target distribution from the replay.
"""
is_terminal_multiplier = 1. - terminals.astype(jnp.float32)
# Incorporate terminal state to discount factor.
gamma_with_terminal = cumulative_gamma * is_terminal_multiplier
target_support = rewards + gamma_with_terminal * support
next_state_target_outputs = target_network(next_states)
q_values = jnp.squeeze(next_state_target_outputs.q_values)
next_qt_argmax = jnp.argmax(q_values)
probabilities = jnp.squeeze(next_state_target_outputs.probabilities)
next_probabilities = probabilities[next_qt_argmax]
return jax.lax.stop_gradient(
project_distribution(target_support, next_probabilities, support))
@functools.partial(jax.jit, static_argnums=(0, 4, 5, 6, 7, 8, 10, 11))
def select_action(network_def, params, state, rng, num_actions, eval_mode,
epsilon_eval, epsilon_train, epsilon_decay_period,
training_steps, min_replay_history, epsilon_fn, support):
"""Select an action from the set of available actions.
Chooses an action randomly with probability self._calculate_epsilon(), and
otherwise acts greedily according to the current Q-value estimates.
Args:
network_def: Linen Module to use for inference.
params: Linen params (frozen dict) to use for inference.
state: input state to use for inference.
rng: Jax random number generator.
num_actions: int, number of actions (static_argnum).
eval_mode: bool, whether we are in eval mode (static_argnum).
epsilon_eval: float, epsilon value to use in eval mode (static_argnum).
epsilon_train: float, epsilon value to use in train mode (static_argnum).
epsilon_decay_period: float, decay period for epsilon value for certain
epsilon functions, such as linearly_decaying_epsilon, (static_argnum).
training_steps: int, number of training steps so far.
min_replay_history: int, minimum number of steps in replay buffer
(static_argnum).
epsilon_fn: function used to calculate epsilon value (static_argnum).
support: support for the distribution.
Returns:
rng: Jax random number generator.
action: int, the selected action.
"""
epsilon = jnp.where(eval_mode,
epsilon_eval,
epsilon_fn(epsilon_decay_period,
training_steps,
min_replay_history,
epsilon_train))
rng, rng1, rng2 = jax.random.split(rng, num=3)
p = jax.random.uniform(rng1)
return rng, jnp.where(
p <= epsilon,
jax.random.randint(rng2, (), 0, num_actions),
jnp.argmax(network_def.apply(params, state, support).q_values))
@gin.configurable | class JaxRainbowAgent(dqn_agent.JaxDQNAgent): | 2 | 2023-10-15 22:14:16+00:00 | 4k |
keepfoolisher/My-DocTr-Plus | GeoTr.py | [
{
"identifier": "BasicEncoder",
"path": "extractor.py",
"snippet": "class BasicEncoder(nn.Module):\n def __init__(self, output_dim=128, norm_fn='batch'):\n super(BasicEncoder, self).__init__()\n self.norm_fn = norm_fn\n\n if self.norm_fn == 'group':\n self.norm1 = nn.GroupNorm(num_groups=8, num_channels=64)\n\n elif self.norm_fn == 'batch':\n self.norm1 = nn.BatchNorm2d(64)\n\n elif self.norm_fn == 'instance':\n self.norm1 = nn.InstanceNorm2d(64)\n\n elif self.norm_fn == 'none':\n self.norm1 = nn.Sequential()\n\n self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3)\n self.relu1 = nn.ReLU(inplace=True)\n\n self.in_planes = 64\n self.layer1 = self._make_layer(64, stride=1)\n self.layer2 = self._make_layer(128, stride=2)\n self.layer3 = self._make_layer(192, stride=2)\n\n # output convolution\n self.conv2 = nn.Conv2d(192, output_dim, kernel_size=1)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, (nn.BatchNorm2d, nn.InstanceNorm2d, nn.GroupNorm)):\n if m.weight is not None:\n nn.init.constant_(m.weight, 1)\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n\n def _make_layer(self, dim, stride=1):\n layer1 = ResidualBlock(self.in_planes, dim, self.norm_fn, stride=stride)\n layer2 = ResidualBlock(dim, dim, self.norm_fn, stride=1)\n layers = (layer1, layer2)\n\n self.in_planes = dim\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.norm1(x)\n x = self.relu1(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n\n x = self.conv2(x)\n\n return x"
},
{
"identifier": "build_position_encoding",
"path": "position_encoding.py",
"snippet": "def build_position_encoding(hidden_dim=512, position_embedding='sine'):\n N_steps = hidden_dim // 2\n if position_embedding in ('v2', 'sine'):\n position_embedding = PositionEmbeddingSine(N_steps, normalize=True)\n elif position_embedding in ('v3', 'learned'):\n position_embedding = PositionEmbeddingLearned(N_steps)\n else:\n raise ValueError(f\"not supported {position_embedding}\")\n\n return position_embedding"
}
] | from extractor import BasicEncoder
from position_encoding import build_position_encoding
from torch import nn, Tensor
from typing import Optional
import argparse
import numpy as np
import torch
import torch.nn.functional as F
import math  # editor's note: required by OverlapPatchEmbed._init_weights below (math.sqrt); missing from the original imports
from timm.models.layers import to_2tuple, trunc_normal_  # editor's note: assumed origin of these helpers used in OverlapPatchEmbed
import copy | 3,037 | bs, c, h, w = imgf.shape
imgf = imgf.flatten(2).permute(2, 0, 1)
# query_embed = query_embed.unsqueeze(1).repeat(1, bs, 1)
pos = pos.flatten(2).permute(2, 0, 1)
for layer in self.layers:
query_embed = layer(query_embed, [imgf], pos=pos, memory_pos=[pos, pos])
query_embed = query_embed.permute(1, 2, 0).reshape(bs, c, h, w)
return query_embed
class TransEncoder(nn.Module):
def __init__(self, num_attn_layers, hidden_dim=128):
super(TransEncoder, self).__init__()
attn_layer = attnLayer(hidden_dim)
self.layers = _get_clones(attn_layer, num_attn_layers)
self.position_embedding = build_position_encoding(hidden_dim)
def forward(self, imgf):
pos = self.position_embedding(torch.ones(imgf.shape[0], imgf.shape[2], imgf.shape[3]).bool().cuda()) # torch.Size([1, 128, 36, 36])
bs, c, h, w = imgf.shape
imgf = imgf.flatten(2).permute(2, 0, 1)
pos = pos.flatten(2).permute(2, 0, 1)
for layer in self.layers:
imgf = layer(imgf, [imgf], pos=pos, memory_pos=[pos, pos])
imgf = imgf.permute(1, 2, 0).reshape(bs, c, h, w)
return imgf
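# --- editor's note: shape-only sketch, not part of the original source ---
# TransEncoder/TransDecoder above convert (B, C, H, W) feature maps into
# (H*W, B, C) token sequences for nn.MultiheadAttention and back again.
def _demo_seq_roundtrip():
    bs, c, h, w = 2, 128, 36, 36
    imgf = torch.zeros(bs, c, h, w)
    seq = imgf.flatten(2).permute(2, 0, 1)  # -> (h*w, bs, c)
    assert seq.shape == (h * w, bs, c)
    back = seq.permute(1, 2, 0).reshape(bs, c, h, w)
    assert back.shape == (bs, c, h, w)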
class FlowHead(nn.Module):
def __init__(self, input_dim=128, hidden_dim=256):
super(FlowHead, self).__init__()
self.conv1 = nn.Conv2d(input_dim, hidden_dim, 3, padding=1)
self.conv2 = nn.Conv2d(hidden_dim, 2, 3, padding=1)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
return self.conv2(self.relu(self.conv1(x)))
class UpdateBlock(nn.Module):
def __init__(self, hidden_dim=128):
super(UpdateBlock, self).__init__()
self.flow_head = FlowHead(hidden_dim, hidden_dim=256)
self.mask = nn.Sequential(
nn.Conv2d(hidden_dim, 256, 3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(256, 64*9, 1, padding=0))
def forward(self, imgf, coords1):
        mask = .25 * self.mask(imgf) # scale mask to balance gradients
dflow = self.flow_head(imgf)
coords1 = coords1 + dflow
return mask, coords1
def coords_grid(batch, ht, wd):
coords = torch.meshgrid(torch.arange(ht), torch.arange(wd))
coords = torch.stack(coords[::-1], dim=0).float()
return coords[None].repeat(batch, 1, 1, 1)
def upflow8(flow, mode='bilinear'):
new_size = (8 * flow.shape[2], 8 * flow.shape[3])
return 8 * F.interpolate(flow, size=new_size, mode=mode, align_corners=True)
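# --- editor's note: illustrative sketch, not part of the original source ---
# coords_grid above builds per-pixel (x, y) coordinates at reduced resolution
# (channel 0 is x because of the [::-1] reversal); upflow8 rescales a flow
# field to 8x resolution, multiplying its values by 8 as well.
def _demo_flow_upsampling():
    coords0 = coords_grid(batch=1, ht=36, wd=36)  # (1, 2, 36, 36)
    coords1 = coords0 + 1.0                       # pretend every pixel moved by one
    flow = coords1 - coords0
    full = upflow8(flow)
    assert full.shape == (1, 2, 288, 288)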
class OverlapPatchEmbed(nn.Module):
""" Image to Patch Embedding
"""
def __init__(self, img_size=224, patch_size=7, stride=4, in_chans=3, embed_dim=768):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
self.img_size = img_size
self.patch_size = patch_size
self.H, self.W = img_size[0] // patch_size[0], img_size[1] // patch_size[1]
self.num_patches = self.H * self.W
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=stride,
padding=(patch_size[0] // 2, patch_size[1] // 2))
self.norm = nn.LayerNorm(embed_dim)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x):
x = self.proj(x)
_, _, H, W = x.shape
x = x.flatten(2).transpose(1, 2)
x = self.norm(x)
return x, H, W
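# --- editor's note: shape sketch, not part of the original source ---
# OverlapPatchEmbed above downsamples with an overlapping strided conv and emits
# a flattened, LayerNorm-ed token sequence. With the defaults used here,
# 224 / stride = 56 tokens per side thanks to the kernel_size // 2 padding.
def _demo_patch_embed():
    embed = OverlapPatchEmbed(img_size=224, patch_size=7, stride=4,
                              in_chans=3, embed_dim=768)
    x, H, W = embed(torch.zeros(1, 3, 224, 224))
    assert (H, W) == (56, 56)
    assert x.shape == (1, 56 * 56, 768)  # (batch, num_tokens, embed_dim)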
class GeoTr(nn.Module):
def __init__(self):
super(GeoTr, self).__init__()
self.hidden_dim = hdim = 256
|
class attnLayer(nn.Module):
def __init__(self, d_model, nhead=8, dim_feedforward=2048, dropout=0.1,
activation="relu", normalize_before=False):
super().__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
self.multihead_attn_list = nn.ModuleList([copy.deepcopy(nn.MultiheadAttention(d_model, nhead, dropout=dropout)) for i in range(2)])
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2_list = nn.ModuleList([copy.deepcopy(nn.LayerNorm(d_model)) for i in range(2)])
self.norm3 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2_list = nn.ModuleList([copy.deepcopy(nn.Dropout(dropout)) for i in range(2)])
self.dropout3 = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(self, tgt, memory_list, tgt_mask=None, memory_mask=None,
tgt_key_padding_mask=None, memory_key_padding_mask=None,
pos=None, memory_pos=None):
q = k = self.with_pos_embed(tgt, pos)
tgt2 = self.self_attn(q, k, value=tgt, attn_mask=tgt_mask,
key_padding_mask=tgt_key_padding_mask)[0]
tgt = tgt + self.dropout1(tgt2)
tgt = self.norm1(tgt)
for memory, multihead_attn, norm2, dropout2, m_pos in zip(memory_list, self.multihead_attn_list, self.norm2_list, self.dropout2_list, memory_pos):
tgt2 = multihead_attn(query=self.with_pos_embed(tgt, pos),
key=self.with_pos_embed(memory, m_pos),
value=memory, attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask)[0]
tgt = tgt + dropout2(tgt2)
tgt = norm2(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
tgt = tgt + self.dropout3(tgt2)
tgt = self.norm3(tgt)
return tgt
def forward_pre(self, tgt, memory, tgt_mask=None, memory_mask=None,
tgt_key_padding_mask=None, memory_key_padding_mask=None,
pos=None, memory_pos=None):
tgt2 = self.norm1(tgt)
q = k = self.with_pos_embed(tgt2, pos)
tgt2 = self.self_attn(q, k, value=tgt2, attn_mask=tgt_mask,
key_padding_mask=tgt_key_padding_mask)[0]
tgt = tgt + self.dropout1(tgt2)
tgt2 = self.norm2(tgt)
tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt2, pos),
key=self.with_pos_embed(memory, memory_pos),
value=memory, attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask)[0]
tgt = tgt + self.dropout2(tgt2)
tgt2 = self.norm3(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
tgt = tgt + self.dropout3(tgt2)
return tgt
def forward(self, tgt, memory_list, tgt_mask=None, memory_mask=None,
tgt_key_padding_mask=None, memory_key_padding_mask=None,
pos=None, memory_pos=None):
if self.normalize_before:
return self.forward_pre(tgt, memory_list, tgt_mask, memory_mask,
tgt_key_padding_mask, memory_key_padding_mask, pos, memory_pos)
return self.forward_post(tgt, memory_list, tgt_mask, memory_mask,
tgt_key_padding_mask, memory_key_padding_mask, pos, memory_pos)
def _get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
def _get_activation_fn(activation):
"""Return an activation function given a string"""
if activation == "relu":
return F.relu
if activation == "gelu":
return F.gelu
if activation == "glu":
return F.glu
raise RuntimeError(F"activation should be relu/gelu, not {activation}.")
class TransDecoder(nn.Module):
def __init__(self, num_attn_layers, hidden_dim=128):
super(TransDecoder, self).__init__()
attn_layer = attnLayer(hidden_dim)
self.layers = _get_clones(attn_layer, num_attn_layers)
self.position_embedding = build_position_encoding(hidden_dim)
def forward(self, imgf, query_embed):
pos = self.position_embedding(torch.ones(imgf.shape[0], imgf.shape[2], imgf.shape[3]).bool().cuda()) # torch.Size([1, 128, 36, 36])
bs, c, h, w = imgf.shape
imgf = imgf.flatten(2).permute(2, 0, 1)
# query_embed = query_embed.unsqueeze(1).repeat(1, bs, 1)
pos = pos.flatten(2).permute(2, 0, 1)
for layer in self.layers:
query_embed = layer(query_embed, [imgf], pos=pos, memory_pos=[pos, pos])
query_embed = query_embed.permute(1, 2, 0).reshape(bs, c, h, w)
return query_embed
class TransEncoder(nn.Module):
def __init__(self, num_attn_layers, hidden_dim=128):
super(TransEncoder, self).__init__()
attn_layer = attnLayer(hidden_dim)
self.layers = _get_clones(attn_layer, num_attn_layers)
self.position_embedding = build_position_encoding(hidden_dim)
def forward(self, imgf):
pos = self.position_embedding(torch.ones(imgf.shape[0], imgf.shape[2], imgf.shape[3]).bool().cuda()) # torch.Size([1, 128, 36, 36])
bs, c, h, w = imgf.shape
imgf = imgf.flatten(2).permute(2, 0, 1)
pos = pos.flatten(2).permute(2, 0, 1)
for layer in self.layers:
imgf = layer(imgf, [imgf], pos=pos, memory_pos=[pos, pos])
imgf = imgf.permute(1, 2, 0).reshape(bs, c, h, w)
return imgf
class FlowHead(nn.Module):
def __init__(self, input_dim=128, hidden_dim=256):
super(FlowHead, self).__init__()
self.conv1 = nn.Conv2d(input_dim, hidden_dim, 3, padding=1)
self.conv2 = nn.Conv2d(hidden_dim, 2, 3, padding=1)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
return self.conv2(self.relu(self.conv1(x)))
class UpdateBlock(nn.Module):
def __init__(self, hidden_dim=128):
super(UpdateBlock, self).__init__()
self.flow_head = FlowHead(hidden_dim, hidden_dim=256)
self.mask = nn.Sequential(
nn.Conv2d(hidden_dim, 256, 3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(256, 64*9, 1, padding=0))
def forward(self, imgf, coords1):
        mask = .25 * self.mask(imgf) # scale mask to balance gradients
dflow = self.flow_head(imgf)
coords1 = coords1 + dflow
return mask, coords1
def coords_grid(batch, ht, wd):
coords = torch.meshgrid(torch.arange(ht), torch.arange(wd))
coords = torch.stack(coords[::-1], dim=0).float()
return coords[None].repeat(batch, 1, 1, 1)
def upflow8(flow, mode='bilinear'):
new_size = (8 * flow.shape[2], 8 * flow.shape[3])
return 8 * F.interpolate(flow, size=new_size, mode=mode, align_corners=True)
class OverlapPatchEmbed(nn.Module):
""" Image to Patch Embedding
"""
def __init__(self, img_size=224, patch_size=7, stride=4, in_chans=3, embed_dim=768):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
self.img_size = img_size
self.patch_size = patch_size
self.H, self.W = img_size[0] // patch_size[0], img_size[1] // patch_size[1]
self.num_patches = self.H * self.W
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=stride,
padding=(patch_size[0] // 2, patch_size[1] // 2))
self.norm = nn.LayerNorm(embed_dim)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x):
x = self.proj(x)
_, _, H, W = x.shape
x = x.flatten(2).transpose(1, 2)
x = self.norm(x)
return x, H, W
class GeoTr(nn.Module):
def __init__(self):
super(GeoTr, self).__init__()
self.hidden_dim = hdim = 256
| self.fnet = BasicEncoder(output_dim=hdim, norm_fn='instance') | 0 | 2023-10-17 11:06:30+00:00 | 4k |
zzbuzzard/stable-diffusion-infinite-scroll | sd_scroll.py | [
{
"identifier": "next_image",
"path": "util.py",
"snippet": "def next_image(pipe, image, base_size, prompt, shiftx, shifty, pipe_args):\n \"\"\"Given an image, uses inpainting to produce the next image (which overlaps with the previous image)\"\"\"\n assert image.size == (base_size, base_size)\n\n image_n = np.array(image)\n image_n = np.concatenate((image_n[:, shiftx:],\n np.zeros((base_size, shiftx, 3), dtype=np.uint8)),\n axis=1)\n image_n = np.concatenate((image_n[shifty:],\n np.zeros((shifty, base_size, 3), dtype=np.uint8)),\n axis=0)\n image = Image.fromarray(image_n)\n\n mask = np.zeros((base_size, base_size, 3), dtype=np.uint8)\n mask[:, base_size - shiftx:] = 255\n mask[base_size - shifty:, :] = 255\n mask = Image.fromarray(mask)\n\n image = pipe(prompt=prompt,\n image=image,\n mask_image=mask,\n **pipe_args).images[0]\n return image"
},
{
"identifier": "Slider",
"path": "slider.py",
"snippet": "class Slider:\n def __init__(self, canvas, start_image, base_size, screen_width, screen_height, mode='H'):\n \"\"\"\n :param canvas: Tk canvas\n :param start_image: Init image (must have shape base_size x base_size x 3)\n :param base_size: The size of the generated images (which must be square)\n :param screen_width: Width of screen in pixels\n :param screen_height: Height of screen in pixels\n :param mode: 'H' for horizontal, 'V' for vertical. Diagonal movement not currently supported.\n \"\"\"\n assert mode in ['H', 'V'], f\"Mode must be either 'H' or 'V', but found '{mode}'.\"\n if start_image is None:\n start_image = np.zeros((base_size, base_size, 3))\n start_image = np.array(start_image)\n assert start_image.shape == (base_size, base_size, 3), \\\n f\"Start image shape was {start_image.shape}, expected {(base_size, base_size, 3)}\"\n self.canvas = canvas\n self.screen_width = screen_width\n self.screen_height = screen_height\n self.mode = mode\n self.base_size = base_size\n\n # Size of the slider is bigger than the screen width by this much\n size_offset = 1024\n\n if mode == 'H':\n self.width = int(screen_width / screen_height * base_size) + size_offset\n self.height = base_size\n self.display_multiplier = screen_height / base_size # from image pix to display pix\n else:\n self.width = base_size\n self.height = int(screen_height / screen_width * base_size) + size_offset\n self.display_multiplier = screen_width / base_size # from image pix to display pix\n self.display_width = int(self.width * self.display_multiplier) + 1\n self.display_height = int(self.height * self.display_multiplier) + 1\n\n self.img_np = np.zeros((self.height, self.width, 3), dtype=np.uint8)\n self.img_np[-base_size:, -base_size:] = np.array(start_image)\n\n # Start 256 away from the end\n if self.mode == 'H':\n self.offset = screen_width - self.display_width - base_size // 2\n else:\n self.offset = screen_height - self.display_height - base_size // 2\n\n self.img = Image.fromarray(self.img_np).resize((self.display_width, self.display_height),\n Image.Resampling.LANCZOS)\n self.obj = ImageTk.PhotoImage(self.img)\n self.id = canvas.create_image((self.offset if self.mode == 'H' else 0,\n self.offset if self.mode == 'V' else 0), image=self.obj)\n\n self.speed = 0\n\n def move(self, deltatime):\n self.offset -= deltatime * self.speed\n\n if self.offset > 0:\n print(\"Gap on left / top\")\n self.offset = 0\n self.speed *= 1.25 # just until the next generation...\n bot = self.screen_width - self.display_width if self.mode == 'H' else self.screen_height - self.display_height\n if self.offset < bot:\n print(\"Gap on right / bottom\")\n self.offset = bot\n self.speed /= 1.25 # just until the next generation...\n\n self.canvas.moveto(self.id,\n self.offset if self.mode == 'H' else 0,\n self.offset if self.mode == 'V' else 0)\n\n def update(self, new_img, shiftx, shifty, speed):\n new_img = np.array(new_img)\n if self.mode == 'H':\n self.img_np = np.concatenate((self.img_np[:, shiftx:], new_img[:, self.base_size - shiftx:]), axis=1)\n shift = shiftx\n else:\n self.img_np = np.concatenate((self.img_np[shifty:], new_img[self.base_size - shifty:]), axis=0)\n shift = shifty\n\n # Delete old object\n self.canvas.delete(self.id)\n\n # Create new object\n self.img = Image.fromarray(self.img_np). 
\\\n resize((self.display_width, self.display_height), Image.Resampling.LANCZOS)\n self.obj = ImageTk.PhotoImage(self.img)\n\n self.offset += shift * self.display_multiplier\n\n self.id = self.canvas.create_image((self.offset if self.mode == 'H' else 0,\n self.offset if self.mode == 'V' else 0), image=self.obj)\n\n # Set speed\n pix_per_sec = shift / max(0.1, speed)\n display_pix_per_sec = pix_per_sec * self.display_multiplier\n ema_factor = 0.8 # higher = adapts faster to changes in speed (but less smooth)\n if self.speed == 0: # first run\n self.speed = display_pix_per_sec\n else:\n self.speed = display_pix_per_sec * ema_factor + self.speed * (1 - ema_factor)"
}
] | import torch
import numpy as np
import tkinter as tk
import time
import random
import argparse
import util
from diffusers import StableDiffusionInpaintPipeline
from PIL import Image
from multiprocessing import Process, Queue
from util import next_image
from slider import Slider | 2,006 |
parser = util.get_argparser()
parser.add_argument("-spd", "--speed", default=1., type=float,
help="Speed multiplier (between 0 and 1). A value of 1 causes images to be generated as fast as "
"possible. A value less than 1 leads to intentional breaks between generations to stop your "
"GPU exploding")
def draw_loop(queue, shiftx, shifty):
"""Repeatedly tells the slider to move, and notifies it when new images become available."""
    queue.get() # wait for the start signal from generate_loop
print("Starting draw")
start = time.time()
prev = start
while True:
if not queue.empty():
image, speed = queue.get()
slider.update(image, shiftx, shifty, speed)
t = time.time()
slider.move(t - prev)
prev = t
root.update()
def generate_loop(queue, start_image, prompts, pipe_args, shiftx, shifty, model, base_size, attn_slicing, speed_mul=1):
"""
Repeatedly computes new images to display using SD, and adds them to the queue.
If speed_mul < 1, we wait between generations to reduce GPU usage intensity.
"""
assert 0 < speed_mul <= 1
print("Loading SD...")
pipe = StableDiffusionInpaintPipeline.from_pretrained(
model,
revision="fp16",
torch_dtype=torch.float16,
)
pipe.safety_checker = None # A single black image causes a lot of problems for this scroller
pipe = pipe.to("cuda")
if attn_slicing:
pipe.enable_attention_slicing()
print("Loaded.")
if start_image is None:
prompt = random.choice(prompts)
print(f"No init image provided: generating from prompt '{prompt}'")
start_image = util.generate_image_with_inpainting_pipeline(pipe, prompt, base_size, pipe_args)
    queue.put(0) # draw_loop waits for this to signal it should begin
front = start_image
while True:
prompt = random.choice(prompts)
print(f"Using prompt '{prompt}'")
start = time.time()
|
parser = util.get_argparser()
parser.add_argument("-spd", "--speed", default=1., type=float,
help="Speed multiplier (between 0 and 1). A value of 1 causes images to be generated as fast as "
"possible. A value less than 1 leads to intentional breaks between generations to stop your "
"GPU exploding")
def draw_loop(queue, shiftx, shifty):
"""Repeatedly tells the slider to move, and notifies it when new images become available."""
    queue.get() # wait for the signal from update_loop to start
print("Starting draw")
start = time.time()
prev = start
while True:
if not queue.empty():
image, speed = queue.get()
slider.update(image, shiftx, shifty, speed)
t = time.time()
slider.move(t - prev)
prev = t
root.update()
def generate_loop(queue, start_image, prompts, pipe_args, shiftx, shifty, model, base_size, attn_slicing, speed_mul=1):
"""
Repeatedly computes new images to display using SD, and adds them to the queue.
If speed_mul < 1, we wait between generations to reduce GPU usage intensity.
"""
assert 0 < speed_mul <= 1
print("Loading SD...")
pipe = StableDiffusionInpaintPipeline.from_pretrained(
model,
revision="fp16",
torch_dtype=torch.float16,
)
pipe.safety_checker = None # A single black image causes a lot of problems for this scroller
pipe = pipe.to("cuda")
if attn_slicing:
pipe.enable_attention_slicing()
print("Loaded.")
if start_image is None:
prompt = random.choice(prompts)
print(f"No init image provided: generating from prompt '{prompt}'")
start_image = util.generate_image_with_inpainting_pipeline(pipe, prompt, base_size, pipe_args)
    queue.put(0) # draw_loop waits for this to signal it should begin
front = start_image
while True:
prompt = random.choice(prompts)
print(f"Using prompt '{prompt}'")
start = time.time() | front = next_image(pipe, image=front, base_size=base_size, prompt=prompt, shiftx=shiftx, shifty=shifty, | 0 | 2023-10-15 14:43:52+00:00 | 4k |
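The record above ends with Slider.update smoothing the scroll speed through an exponential moving average, so jitter in per-image generation time does not become visible stutter. A minimal sketch of that smoothing rule (editor's illustration; the function and sample values below are assumptions, not part of the record):

def ema_update(current: float, new_sample: float, factor: float = 0.8) -> float:
    # factor near 1 adapts quickly but is less smooth; near 0 it is smoother
    # but lags behind real speed changes, matching the comment in Slider.
    if current == 0:  # first sample: nothing to blend with yet
        return new_sample
    return new_sample * factor + current * (1 - factor)

speed = 0.0
for sample in [120.0, 110.0, 180.0, 90.0]:  # display pixels per second
    speed = ema_update(speed, sample)
    print(round(speed, 1))  # 120.0, 112.0, 166.4, 105.3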
MaxDude132/django-register-field | tests/models.py | [
{
"identifier": "Register",
"path": "django_register/base.py",
"snippet": "class Register:\n def __init__(self):\n self._key_to_class = {}\n self._class_to_key = {}\n\n def register(self, klass, db_key=None):\n if db_key is None:\n try:\n db_key = klass.label\n except AttributeError:\n raise ValueError(\n _(\n \"The class {klass} does not have a label. Define \"\n \"one or pass a db_key to be used as database value.\"\n ).format(klass=klass)\n )\n\n if db_key in self._key_to_class:\n raise ValueError(_(\"Key {key} already registered.\").format(key=db_key))\n\n if klass in self._class_to_key:\n raise ValueError(_(\"Class {klass} already registered.\").format(klass=klass))\n\n self._key_to_class[db_key] = klass\n self._class_to_key[klass] = db_key\n\n return klass\n\n def from_key(self, value):\n try:\n return self._key_to_class[value]\n except (KeyError, TypeError):\n raise ValidationError(\n _(\"Value {value} not a registered key.\").format(value=value)\n )\n\n def from_class(self, value):\n try:\n return self._class_to_key[value]\n except KeyError:\n raise ValidationError(\n _(\"Value {value} not a registered class.\").format(value=value)\n )\n\n def get_key(self, value):\n try:\n self.from_key(value)\n except ValidationError:\n return self.from_class(value)\n\n return value\n\n def get_class(self, value):\n try:\n self.from_class(value)\n except ValidationError:\n return self.from_key(value)\n\n return value\n\n @property\n def max_length(self):\n if self._key_to_class:\n return max(len(key) for key in self._key_to_class)\n\n @property\n def choices(self):\n return [\n (k, self._get_verbose_name(v, k)) for k, v in self._key_to_class.items()\n ]\n\n @property\n def flatchoices(self):\n return [\n (v, self._get_verbose_name(v, k)) for k, v in self._key_to_class.items()\n ]\n\n def _get_verbose_name(self, klass, key):\n return getattr(klass, \"verbose_name\", key.replace(\"_\", \" \").title())\n\n def __iter__(self):\n return iter(self._key_to_class.values())"
},
{
"identifier": "RegisterChoices",
"path": "django_register/base.py",
"snippet": "class RegisterChoices(metaclass=RegisterChoicesMeta):\n def __new__(cls, klass):\n return cls.register.get_class(klass)"
},
{
"identifier": "RegisterField",
"path": "django_register/base.py",
"snippet": "class RegisterField(models.CharField):\n description = _(\"Store a string, return the associated class\")\n\n def __init__(self, *args, **kwargs):\n if \"register\" not in kwargs and \"choices\" not in kwargs:\n raise ValueError(_(\"You must provide choices to the RegisterField.\"))\n\n if \"register\" not in kwargs and not hasattr(kwargs[\"choices\"], \"register\"):\n raise ValueError(_(\"Choices must be a RegisterChoices instance.\"))\n\n # When building the migrations, the register cannot be in the choices.\n # It will be passed individually, so we take it from there.\n self.register: Register = (\n kwargs.pop(\"register\")\n if \"register\" in kwargs\n else kwargs[\"choices\"].register\n )\n\n if \"choices\" not in kwargs:\n kwargs[\"choices\"] = self.register.choices\n\n if \"max_length\" not in kwargs and (max_length := self.register.max_length):\n kwargs[\"max_length\"] = max_length\n\n if \"default\" in kwargs:\n try:\n kwargs[\"default\"] = self.register.get_key(kwargs[\"default\"])\n except ValidationError:\n pass\n\n super().__init__(*args, **kwargs)\n\n def from_db_value(self, value, expression, connection):\n if not value:\n return value\n\n return self.register.get_class(value)\n\n def get_default(self):\n default = super().get_default()\n\n if default:\n return self.register.get_class(default)\n\n return default\n\n def to_python(self, value):\n if not value:\n return value\n\n return self.register.get_class(value)\n\n def get_prep_value(self, value):\n if not value:\n return value\n\n return self.register.get_key(value)\n\n def value_from_object(self, obj):\n value = super().value_from_object(obj)\n return self.get_prep_value(value)\n\n def deconstruct(self):\n name, path, args, kwargs = super().deconstruct()\n kwargs.pop(\"choices\", None)\n kwargs[\"register\"] = self.register\n return name, path, args, kwargs\n\n def clean(self, value, model_instance):\n \"\"\"\n We need to override clean because it runs the validations on the\n Python object instead of on the database string.\n \"\"\"\n value = self.get_prep_value(value)\n self.validate(value, model_instance)\n self.run_validators(value)\n return self.to_python(value)\n\n def _get_flatchoices(self):\n return self.register.flatchoices\n\n flatchoices = property(_get_flatchoices)\n\n def _register_choices(self):\n return self.register.choices\n\n def _register_choices_set(self, value):\n return\n\n choices = property(_register_choices, _register_choices_set)\n _choices = property(_register_choices, _register_choices_set)"
}
] | from dataclasses import dataclass
from django.db import models
from django_register import Register, RegisterChoices, RegisterField | 1,625 | # Standard libraries
# Django
# django_register
@dataclass(unsafe_hash=True)
class CountryInfo:
population: int
capital: str
class CountryChoices(RegisterChoices):
CANADA = CountryInfo(population=37_742_154, capital="Ottawa")
FRANCE = CountryInfo(population=65_273_511, capital="Paris")
GERMANY = CountryInfo(population=83_783_942, capital="Berlin")
UNITED_STATES = CountryInfo(population=331_900_000, capital="Washington")
@dataclass(unsafe_hash=True)
class ContinentInfo:
label: str
@dataclass(unsafe_hash=True)
class FoodInfo:
verbose_name: str
food_register = Register()
food_register.register(FoodInfo("Pizza"), db_key="pizza")
@dataclass(unsafe_hash=True)
class CarCompanies:
verbose_name: str
cars_register = Register()
class ContinentChoices(RegisterChoices):
AMERICA = ContinentInfo(label="America")
EUROPE = ContinentInfo(label="Europe")
class City(models.Model):
label = models.CharField(max_length=50)
| # Standard libraries
# Django
# django_register
@dataclass(unsafe_hash=True)
class CountryInfo:
population: int
capital: str
class CountryChoices(RegisterChoices):
CANADA = CountryInfo(population=37_742_154, capital="Ottawa")
FRANCE = CountryInfo(population=65_273_511, capital="Paris")
GERMANY = CountryInfo(population=83_783_942, capital="Berlin")
UNITED_STATES = CountryInfo(population=331_900_000, capital="Washington")
@dataclass(unsafe_hash=True)
class ContinentInfo:
label: str
@dataclass(unsafe_hash=True)
class FoodInfo:
verbose_name: str
food_register = Register()
food_register.register(FoodInfo("Pizza"), db_key="pizza")
@dataclass(unsafe_hash=True)
class CarCompanies:
verbose_name: str
cars_register = Register()
class ContinentChoices(RegisterChoices):
AMERICA = ContinentInfo(label="America")
EUROPE = ContinentInfo(label="Europe")
class City(models.Model):
label = models.CharField(max_length=50) | country = RegisterField( | 2 | 2023-10-23 18:11:08+00:00 | 4k |
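The test models above exercise django-register-field's Register, which keeps a bijective mapping between short database keys and registered Python objects. A framework-free sketch of that registry pattern (editor's illustration; the class and method names here are assumptions, not the library's API):

class Registry:
    def __init__(self):
        self._key_to_obj = {}
        self._obj_to_key = {}

    def register(self, obj, db_key):
        # Reject duplicates in either direction so the mapping stays bijective.
        if db_key in self._key_to_obj or obj in self._obj_to_key:
            raise ValueError(f"{db_key!r} or {obj!r} already registered")
        self._key_to_obj[db_key] = obj
        self._obj_to_key[obj] = db_key
        return obj

    def to_python(self, db_key):
        return self._key_to_obj[db_key]

    def to_db(self, obj):
        return self._obj_to_key[obj]

registry = Registry()
CANADA = registry.register(("Canada", 37_742_154), db_key="canada")
assert registry.to_python("canada") == CANADA
assert registry.to_db(CANADA) == "canada"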
hsouri/bob-classification | timm_dataset.py | [
{
"identifier": "INAT2019",
"path": "datasets/inat_loader.py",
"snippet": "class INAT2019(data.Dataset):\n def __init__(self, root, mode='train', year=\"2019\", transform=None):\n # load annotations\n ann_file = os.path.join(root, f\"{mode}{year}.json\")\n with open(ann_file) as data_file:\n ann_data = json.load(data_file)\n\n # set up the filenames and annotations\n self.imgs = [aa['file_name'] for aa in ann_data['images']]\n self.ids = [aa['id'] for aa in ann_data['images']]\n\n # if we dont have class labels set them to '0'\n if 'annotations' in ann_data.keys():\n self.classes = []\n for i, aa in enumerate(ann_data['annotations']):\n assert aa['image_id'] == self.ids[i]\n self.classes.append(aa['category_id'])\n else:\n self.classes = [0]*len(self.imgs)\n\n # load taxonomy\n # self.tax_levels = ['id', 'genus', 'family', 'order', 'class', 'phylum', 'kingdom']\n #8142, 4412, 1120, 273, 57, 25, 6\n # self.taxonomy, self.classes_taxonomic = load_taxonomy(ann_data, self.tax_levels, self.classes)\n\n # print out some stats\n print('\\t' + str(len(self.imgs)) + ' images')\n print('\\t' + str(len(set(self.classes))) + ' classes')\n\n self.root = root\n self.loader = default_loader\n\n self.transform = transform\n # # augmentation params\n # self.im_size = [299, 299] # can change this to train on higher res\n # self.mu_data = [0.485, 0.456, 0.406]\n # self.std_data = [0.229, 0.224, 0.225]\n # self.brightness = 0.4\n # self.contrast = 0.4\n # self.saturation = 0.4\n # self.hue = 0.25\n\n # # augmentations\n # self.center_crop = transforms.CenterCrop((self.im_size[0], self.im_size[1]))\n # self.scale_aug = transforms.RandomResizedCrop(size=self.im_size[0])\n # self.flip_aug = transforms.RandomHorizontalFlip()\n # self.color_aug = transforms.ColorJitter(self.brightness, self.contrast, self.saturation, self.hue)\n # self.tensor_aug = transforms.ToTensor()\n # self.norm_aug = transforms.Normalize(mean=self.mu_data, std=self.std_data)\n\n def __getitem__(self, index):\n path = os.path.join(self.root, self.imgs[index])\n img = self.loader(path)\n species_id = self.classes[index]\n # tax_ids = self.classes_taxonomic[species_id]\n\n # if self.is_train:\n # img = self.scale_aug(img)\n # img = self.flip_aug(img)\n # img = self.color_aug(img)\n # else:\n # img = self.center_crop(img)\n\n # img = self.tensor_aug(img)\n # img = self.norm_aug(img)\n if self.transform:\n img = self.transform(img)\n\n return img, torch.tensor(species_id).long()\n\n def __len__(self):\n return len(self.imgs)"
},
{
"identifier": "INAT2021",
"path": "datasets/inat_loader.py",
"snippet": "class INAT2021(torchvision.datasets.VisionDataset):\n \"\"\"\n modified from torchvision.datasets.INaturalist() to work with vulcan inat2021\n \"\"\"\n\n def __init__(\n self,\n root: str,\n version: str = \"train\",\n target_type: Union[List[str], str] = \"full\",\n transform: Optional[Callable] = None,\n target_transform: Optional[Callable] = None,\n ) -> None:\n self.version = version\n super().__init__(os.path.join(root, version), transform=transform, target_transform=target_transform)\n\n if not self._check_integrity():\n raise RuntimeError(\"Dataset not found or corrupted. You can use download=True to download it\")\n\n self.all_categories: List[str] = []\n\n # map: category type -> name of category -> index\n self.categories_index: Dict[str, Dict[str, int]] = {}\n\n # list indexed by category id, containing mapping from category type -> index\n self.categories_map: List[Dict[str, int]] = []\n\n if not isinstance(target_type, list):\n target_type = [target_type]\n\n self.target_type = target_type\n self._init_2021()\n\n\n # index of all files: (full category id, filename)\n self.index: List[Tuple[int, str]] = []\n\n for dir_index, dir_name in enumerate(self.all_categories):\n files = os.listdir(os.path.join(self.root, dir_name))\n for fname in files:\n self.index.append((dir_index, fname))\n\n def _init_2021(self) -> None:\n \"\"\"Initialize based on 2021 layout\"\"\"\n\n self.all_categories = sorted(os.listdir(self.root))\n\n # map: category type -> name of category -> index\n self.categories_index = {k: {} for k in CATEGORIES_2021}\n\n for dir_index, dir_name in enumerate(self.all_categories):\n pieces = dir_name.split(\"_\")\n if len(pieces) != 8:\n raise RuntimeError(f\"Unexpected category name {dir_name}, wrong number of pieces\")\n if pieces[0] != f\"{dir_index:05d}\":\n raise RuntimeError(f\"Unexpected category id {pieces[0]}, expecting {dir_index:05d}\")\n cat_map = {}\n for cat, name in zip(CATEGORIES_2021, pieces[1:7]):\n if name in self.categories_index[cat]:\n cat_id = self.categories_index[cat][name]\n else:\n cat_id = len(self.categories_index[cat])\n self.categories_index[cat][name] = cat_id\n cat_map[cat] = cat_id\n self.categories_map.append(cat_map)\n\n def __getitem__(self, index: int) -> Tuple[Any, Any]:\n \"\"\"\n Args:\n index (int): Index\n\n Returns:\n tuple: (image, target) where the type of target specified by target_type.\n \"\"\"\n\n cat_id, fname = self.index[index]\n img = Image.open(os.path.join(self.root, self.all_categories[cat_id], fname))\n\n target: Any = []\n for t in self.target_type:\n if t == \"full\":\n target.append(cat_id)\n else:\n target.append(self.categories_map[cat_id][t])\n target = tuple(target) if len(target) > 1 else target[0]\n\n if self.transform is not None:\n img = self.transform(img)\n\n if self.target_transform is not None:\n target = self.target_transform(target)\n\n return img, target\n\n def __len__(self) -> int:\n return len(self.index)\n\n def category_name(self, category_type: str, category_id: int) -> str:\n \"\"\"\n Args:\n category_type(str): one of \"full\", \"kingdom\", \"phylum\", \"class\", \"order\", \"family\", \"genus\" or \"super\"\n category_id(int): an index (class id) from this category\n\n Returns:\n the name of the category\n \"\"\"\n if category_type == \"full\":\n return self.all_categories[category_id]\n else:\n if category_type not in self.categories_index:\n raise ValueError(f\"Invalid category type '{category_type}'\")\n else:\n for name, id in 
self.categories_index[category_type].items():\n if id == category_id:\n return name\n raise ValueError(f\"Invalid category id {category_id} for {category_type}\")\n\n def _check_integrity(self) -> bool:\n return os.path.exists(self.root) and len(os.listdir(self.root)) > 0"
}
] | from datasets.transfer_cls_datasets import *
from timm.data import create_dataset, create_loader, resolve_data_config, Mixup, FastCollateMixup, AugMixDataset
from wilds import get_dataset
from datasets.inat_loader import INAT2019, INAT2021
import wilds
import torchvision.transforms as transforms | 2,833 |
transfer_datasets = {
'flower102': 'Flower102',
'aircraft': 'Aircraft',
# 'birdsnap': 'Birdsnap',
'dtd': 'DTD',
'voc2007': 'VOC2007',
'pets': 'Pets',
'sun397': 'SUN397',
'cars': 'Cars',
'food101': 'Food101',
'caltech101': 'Caltech101',
'cifar10': 'Cifar10',
'cifar100': 'Cifar100',
'eurosat': 'eurosat'
}
wilds_group_list = {
'iwildcam': 'location',
'fmow': 'region',
'globalwheat': 'location',
'camelyon17': 'hospital',
'poverty': 'batch',
}
num_classes = {
"inat2021": 10000,
"inat2019": 1010,
"imagenet": 1000,
"cifar10": 10,
"cifar100": 100,
"flower102": 102,
"aircraft": 100,
"eurosat": 10,
"semiimagenet": 1000,
}
def create_other_dataset(
name,
root,
split='validation',
search_split=True,
class_map=None,
load_bytes=False,
is_training=False,
download=False,
batch_size=None,
seed=42,
repeats=0,
group=None,
domain=None,
json_path=None,
n_shot=None,
**kwargs
):
""" Dataset for transfer learning and wilds
Args:
name: dataset name, empty is okay for folder based datasets
root: root folder of dataset (all)
split: dataset split (all)
        search_split: search for a split-specific child folder from root so one can specify
`imagenet/` instead of `/imagenet/val`, etc on cmd line / config. (folder, torch/folder)
class_map: specify class -> index mapping via text file or dict (folder)
load_bytes: load data, return images as undecoded bytes (folder)
download: download dataset if not present and supported (HFDS, TFDS, torch)
is_training: create dataset in train mode, this is different from the split.
            For Iterable / TFDS it enables shuffle, ignored for other datasets. (TFDS, WDS)
batch_size: batch size hint for (TFDS, WDS)
seed: seed for iterable datasets (TFDS, WDS)
repeats: dataset repeats per iteration i.e. epoch (TFDS, WDS)
        group: whether to specify a domain in wilds
domain: which specific domain is used in wilds
json_path: json file (of train/val split)
        n_shot: if an integer, take n_shot samples per class; if a float, treat it as a percentage
**kwargs: other args to pass to dataset
Returns:
Dataset object
"""
name = name.lower()
if name in transfer_datasets:
ds = get_transfer_datasets(name, root, 'train' if split=='train' else 'val', n_shot=n_shot, json_path=json_path)
elif name in wilds.supported_datasets:
dataset = get_dataset(dataset=name, download=False, root_dir=root, debug=True)
if group is not None:
group = wilds_group_list[name]
ds = dataset.get_subset(split, group=group, domain=domain)
elif name == 'inat2019':
ds = INAT2019(root, mode='train' if split=='train' else 'val')
elif name == 'inat2021':
# root: /fs/vulcan-datasets/inat_comp_2021
|
transfer_datasets = {
'flower102': 'Flower102',
'aircraft': 'Aircraft',
# 'birdsnap': 'Birdsnap',
'dtd': 'DTD',
'voc2007': 'VOC2007',
'pets': 'Pets',
'sun397': 'SUN397',
'cars': 'Cars',
'food101': 'Food101',
'caltech101': 'Caltech101',
'cifar10': 'Cifar10',
'cifar100': 'Cifar100',
'eurosat': 'eurosat'
}
wilds_group_list = {
'iwildcam': 'location',
'fmow': 'region',
'globalwheat': 'location',
'camelyon17': 'hospital',
'poverty': 'batch',
}
num_classes = {
"inat2021": 10000,
"inat2019": 1010,
"imagenet": 1000,
"cifar10": 10,
"cifar100": 100,
"flower102": 102,
"aircraft": 100,
"eurosat": 10,
"semiimagenet": 1000,
}
def create_other_dataset(
name,
root,
split='validation',
search_split=True,
class_map=None,
load_bytes=False,
is_training=False,
download=False,
batch_size=None,
seed=42,
repeats=0,
group=None,
domain=None,
json_path=None,
n_shot=None,
**kwargs
):
""" Dataset for transfer learning and wilds
Args:
name: dataset name, empty is okay for folder based datasets
root: root folder of dataset (all)
split: dataset split (all)
        search_split: search for a split-specific child folder from root so one can specify
`imagenet/` instead of `/imagenet/val`, etc on cmd line / config. (folder, torch/folder)
class_map: specify class -> index mapping via text file or dict (folder)
load_bytes: load data, return images as undecoded bytes (folder)
download: download dataset if not present and supported (HFDS, TFDS, torch)
is_training: create dataset in train mode, this is different from the split.
            For Iterable / TFDS it enables shuffle, ignored for other datasets. (TFDS, WDS)
batch_size: batch size hint for (TFDS, WDS)
seed: seed for iterable datasets (TFDS, WDS)
repeats: dataset repeats per iteration i.e. epoch (TFDS, WDS)
        group: whether to specify a domain in wilds
domain: which specific domain is used in wilds
json_path: json file (of train/val split)
        n_shot: if an integer, take n_shot samples per class; if a float, treat it as a percentage
**kwargs: other args to pass to dataset
Returns:
Dataset object
"""
name = name.lower()
if name in transfer_datasets:
ds = get_transfer_datasets(name, root, 'train' if split=='train' else 'val', n_shot=n_shot, json_path=json_path)
elif name in wilds.supported_datasets:
dataset = get_dataset(dataset=name, download=False, root_dir=root, debug=True)
if group is not None:
group = wilds_group_list[name]
ds = dataset.get_subset(split, group=group, domain=domain)
elif name == 'inat2019':
ds = INAT2019(root, mode='train' if split=='train' else 'val')
elif name == 'inat2021':
# root: /fs/vulcan-datasets/inat_comp_2021 | ds = INAT2021(root, | 1 | 2023-10-20 16:28:17+00:00 | 4k |
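The create_other_dataset docstring above says n_shot is either a per-class sample count (int) or a percentage (float). A self-contained sketch of per-class subsampling along those lines (editor's illustration; this is not the repo's actual implementation):

import random
from collections import defaultdict

def subsample_per_class(samples, labels, n_shot, seed=42):
    # Keep n_shot items per class (int) or that fraction of each class (float).
    rng = random.Random(seed)
    by_class = defaultdict(list)
    for sample, label in zip(samples, labels):
        by_class[label].append(sample)
    kept = []
    for label, items in by_class.items():
        rng.shuffle(items)
        k = n_shot if isinstance(n_shot, int) else max(1, int(len(items) * n_shot))
        kept.extend((s, label) for s in items[:k])
    return kept

data = subsample_per_class(list(range(100)), [i % 4 for i in range(100)], n_shot=5)
assert len(data) == 20  # 5 shots x 4 classes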
Salz0/telegram_flea | main.py | [
{
"identifier": "User",
"path": "models.py",
"snippet": "class User(BaseModel):\n \"\"\"\n The model for the Telegram user.\n\n This model stores all the information about the user.\n It is also used to store all the authentication-related information.\n \"\"\"\n\n id = fields.BigIntField(pk=True, generated=False)\n\n username = fields.CharField(max_length=32, null=True)\n\n first_name = fields.TextField(null=True)\n last_name = fields.TextField(null=True)\n\n phone_number = fields.CharField(max_length=14, null=True)\n language_code = fields.CharField(max_length=2, null=True)\n is_bot = fields.BooleanField(default=False)\n\n start_payload = fields.TextField(null=True)\n\n is_active = fields.BooleanField(default=True)\n has_bot_blocked = fields.BooleanField(default=False)\n is_beta = fields.BooleanField(default=False)\n is_deleted = fields.BooleanField(default=False)\n\n is_admin = fields.BooleanField(default=False)\n is_staff_member = fields.BooleanField(default=False)\n\n messages: fields.ReverseRelation[Message]\n\n @property\n def full_name(self):\n \"\"\"Get the full name of the user.\"\"\"\n if not self.last_name:\n return self.first_name\n\n return f\"{self.first_name} {self.last_name}\""
},
{
"identifier": "Message",
"path": "models.py",
"snippet": "class Message(BaseModel):\n \"\"\"The model for the Telegram message.\"\"\"\n\n from_user: fields.ForeignKeyRelation[User] = fields.ForeignKeyField(\n \"bot.User\", related_name=\"messages\"\n )\n id = fields.IntField(pk=True, generated=True)\n\n # In Telegram, `message_id` is unique only **within a chat**.\n message_id = fields.BigIntField() # for the sake of safety, this is a `BigIntField`\n\n # TODO: [3/20/2023 by Mykola] Make this a foreign key to the Chat model\n chat_id = fields.BigIntField()\n\n reply_to_message: fields.ForeignKeyRelation[Message] = fields.ForeignKeyField(\n \"bot.Message\", related_name=\"replies\", null=True\n )\n\n content_type = fields.TextField(null=True)\n text = fields.TextField(null=True)\n\n date = fields.DatetimeField()\n is_handled = fields.BooleanField(default=False)\n content = fields.BinaryField(null=True)\n status = fields.CharField(max_length=32, null=True)\n\n complete_message_json = fields.JSONField(null=True)\n\n replies: fields.BackwardFKRelation[Message]"
},
{
"identifier": "compile_all_languages",
"path": "po_compile.py",
"snippet": "def compile_all_languages(base_locales_path=\"locales\"):\n for lang in os.listdir(base_locales_path):\n lang_path = os.path.join(base_locales_path, lang)\n if os.path.isdir(lang_path):\n lc_messages_path = os.path.join(lang_path, \"LC_MESSAGES\")\n for file_name in os.listdir(lc_messages_path):\n if file_name.endswith(\".po\"):\n po_path = os.path.join(lc_messages_path, file_name)\n mo_path = os.path.join(lc_messages_path, file_name.replace(\".po\", \".mo\"))\n compile_po_to_mo(po_path, mo_path)"
},
{
"identifier": "tortoise_orm",
"path": "utils/tortoise_orm.py",
"snippet": "class ModelMeta(tortoise.ModelMeta):\nclass Model(tortoise.Model, metaclass=ModelMeta):\n def __new__(mcs, name, bases, attrs):\ndef get_tortoise_config():\nasync def init():\nasync def shutdown():\ndef flatten_tortoise_model(\n model: tortoise.Model, separator: str | None = \".\", prefix: str | None = None\n) -> dict:\n DATABASE_URL = f\"postgres://{pg_user}:{pg_pass}@{pg_host}:{pg_port}/{pg_db}\"\nTORTOISE_ORM_CONFIG = get_tortoise_config()"
},
{
"identifier": "validate_photo_as_document",
"path": "utils/data_validation.py",
"snippet": "def validate_photo_as_document(file: Document) -> bool:\n \"\"\"Validation of a photo uploaded as a document\"\"\"\n\n # checking the file extension\n photo_type = mimetypes.guess_type(file.file_name)\n if photo_type[0] is None:\n return False\n return photo_type[0].startswith(\"image\")"
},
{
"identifier": "create_message_instance",
"path": "utils/generalization.py",
"snippet": "async def create_message_instance(message: types.Message, **extra_fields) -> Message:\n # Create a dictionary with the common fields.\n message_data = {\n \"message_id\": message.message_id,\n \"from_user_id\": message.from_user.id,\n \"chat_id\": message.chat.id, # This assumes that chat ID is directly accessible.\n \"text\": message.text,\n \"date\": message.date,\n \"is_handled\": True,\n \"complete_message_json\": message.to_python(),\n \"content_type\": message.content_type,\n }\n message_data.update(extra_fields)\n return await Message.create(**message_data)\n\n # TODO: Add replied message relations to the database [04/11/2023 by Vladyslav Bilyk]\n # Create the Message instance with the combined data.\n # if message.reply_to_message:\n # reply_to_message: tuple[Message, bool] = await create_message_instance(message.reply_to_message)\n # message_data['reply_to_message_id'] = reply_to_message[0].message_id\n # message_data.update(extra_fields)\n # Add any additional fields that were passed in.\n # try:\n # return await Message.get_or_create(**message_data)\n # except tortoise.exceptions.IntegrityError as e:\n # logger.exception(e)"
},
{
"identifier": "logger",
"path": "utils/loguru_logging.py",
"snippet": "class InterceptHandler(logging.Handler):\n def emit(self, record):"
},
{
"identifier": "redis_storage",
"path": "utils/redis_storage.py",
"snippet": "def parse_config(config_to_parse: dict[str, typing.Any]) -> dict[str, typing.Any]:"
},
{
"identifier": "start_keyboard",
"path": "keyboards.py",
"snippet": "BASE_DIR = Path(__file__).parent\nLOCALES_DIR = BASE_DIR / \"locales\"\nBOT_LANGUAGE = os.environ.get(\"BOT_LANGUAGE\")\ndef moderator_keyboard(userid, msg_id):\ndef cancel_listing_keyboard(channel_message_id, msg_id):"
}
] | import os
import aiogram
from asyncio import gather
from pathlib import Path
from aiogram import types
from aiogram.contrib.fsm_storage.memory import MemoryStorage
from aiogram.contrib.middlewares.i18n import I18nMiddleware
from aiogram.dispatcher import FSMContext
from aiogram.dispatcher.filters import CommandStart
from aiogram.dispatcher.filters.state import StatesGroup, State
from aiogram.types.callback_query import CallbackQuery
from dotenv import load_dotenv
from models import User, Message
from po_compile import compile_all_languages
from utils import tortoise_orm
from utils.data_validation import validate_photo_as_document
from utils.generalization import create_message_instance
from utils.loguru_logging import logger
from utils.redis_storage import redis_storage
from keyboards import (
start_keyboard,
sell_keyboard,
cancel_listing_keyboard,
moderator_keyboard,
empty_inline_keyboard,
) | 2,177 |
load_dotenv()
compile_all_languages()
bot = aiogram.Bot(os.environ["TELEGRAM_BOT_TOKEN"])
dp = aiogram.Dispatcher(bot, storage=MemoryStorage())
BASE_DIR = Path(__file__).parent
LOCALES_DIR = BASE_DIR / "locales"
BOT_LANGUAGE = os.environ.get("BOT_LANGUAGE")
i18n = I18nMiddleware("bot", LOCALES_DIR, default="en")
dp.middleware.setup(i18n)
if BOT_LANGUAGE not in i18n.locales:
logger.warning("language is not supported")
BOT_LANGUAGE = "en"
# Define states
class SellItem(StatesGroup):
waiting_description = State()
waiting_for_price = State()
waiting_for_photo = State()
@dp.message_handler(CommandStart(), state="*")
async def start(message: types.Message):
user_dict = message.from_user.to_python()
await User.get_or_create(
id=message.from_user.id,
username=user_dict.get("username"),
first_name=user_dict.get("first_name"),
last_name=user_dict.get("last_name"),
is_bot=message.from_user.is_bot,
phone_number=user_dict.get("phone_number"),
language_code=message.from_user.language_code,
start_payload=message.get_args(),
)
await message.answer(
i18n.gettext("bot.start_message", locale=BOT_LANGUAGE),
reply_markup=start_keyboard, # Attach the reply keyboard here
)
@dp.message_handler(
lambda message: message.text.lower()
== i18n.gettext("bot.sell_keyboard_cancel", locale=BOT_LANGUAGE).lower(),
state="*",
)
async def cancel(message: types.Message, state: FSMContext):
await gather(
state.finish(),
create_message_instance(message),
message.reply(
i18n.gettext("bot.sell_keyboard_canceled", locale=BOT_LANGUAGE),
reply_markup=start_keyboard, # Switch back to the start keyboard
),
)
@dp.message_handler(
lambda message: message.text == i18n.gettext("bot.start_keyboard_help", locale=BOT_LANGUAGE),
state="*",
)
async def help_command(message: aiogram.types.Message):
support_username = os.environ.get("SUPPORT_USERNAME")
# Assuming `get_or_create_user` is a function that handles User instances.
help_text = i18n.gettext("bot.help_message", locale=BOT_LANGUAGE).format(
support_username=support_username
)
await gather(
create_message_instance(message),
message.reply(help_text, reply_markup=start_keyboard),
)
@dp.message_handler(
lambda message: message.text == i18n.gettext("bot.start_keyboard_sell", locale=BOT_LANGUAGE),
state="*",
)
async def enter_sell(message: aiogram.types.Message):
await SellItem.waiting_description.set(),
await gather(
create_message_instance(message),
message.reply(
i18n.gettext("bot.enter_sell_description", locale=BOT_LANGUAGE),
|
load_dotenv()
compile_all_languages()
bot = aiogram.Bot(os.environ["TELEGRAM_BOT_TOKEN"])
dp = aiogram.Dispatcher(bot, storage=MemoryStorage())
BASE_DIR = Path(__file__).parent
LOCALES_DIR = BASE_DIR / "locales"
BOT_LANGUAGE = os.environ.get("BOT_LANGUAGE")
i18n = I18nMiddleware("bot", LOCALES_DIR, default="en")
dp.middleware.setup(i18n)
if BOT_LANGUAGE not in i18n.locales:
logger.warning("language is not supported")
BOT_LANGUAGE = "en"
# Define states
class SellItem(StatesGroup):
waiting_description = State()
waiting_for_price = State()
waiting_for_photo = State()
@dp.message_handler(CommandStart(), state="*")
async def start(message: types.Message):
user_dict = message.from_user.to_python()
await User.get_or_create(
id=message.from_user.id,
username=user_dict.get("username"),
first_name=user_dict.get("first_name"),
last_name=user_dict.get("last_name"),
is_bot=message.from_user.is_bot,
phone_number=user_dict.get("phone_number"),
language_code=message.from_user.language_code,
start_payload=message.get_args(),
)
await message.answer(
i18n.gettext("bot.start_message", locale=BOT_LANGUAGE),
reply_markup=start_keyboard, # Attach the reply keyboard here
)
@dp.message_handler(
lambda message: message.text.lower()
== i18n.gettext("bot.sell_keyboard_cancel", locale=BOT_LANGUAGE).lower(),
state="*",
)
async def cancel(message: types.Message, state: FSMContext):
await gather(
state.finish(),
create_message_instance(message),
message.reply(
i18n.gettext("bot.sell_keyboard_canceled", locale=BOT_LANGUAGE),
reply_markup=start_keyboard, # Switch back to the start keyboard
),
)
@dp.message_handler(
lambda message: message.text == i18n.gettext("bot.start_keyboard_help", locale=BOT_LANGUAGE),
state="*",
)
async def help_command(message: aiogram.types.Message):
support_username = os.environ.get("SUPPORT_USERNAME")
# Assuming `get_or_create_user` is a function that handles User instances.
help_text = i18n.gettext("bot.help_message", locale=BOT_LANGUAGE).format(
support_username=support_username
)
await gather(
create_message_instance(message),
message.reply(help_text, reply_markup=start_keyboard),
)
@dp.message_handler(
lambda message: message.text == i18n.gettext("bot.start_keyboard_sell", locale=BOT_LANGUAGE),
state="*",
)
async def enter_sell(message: aiogram.types.Message):
await SellItem.waiting_description.set(),
await gather(
create_message_instance(message),
message.reply(
i18n.gettext("bot.enter_sell_description", locale=BOT_LANGUAGE), | reply_markup=sell_keyboard, | 8 | 2023-10-19 17:28:55+00:00 | 4k |
RobertCsordas/moe_layer | triton_src/moe_layer/moe_layer_simple.py | [
{
"identifier": "cvmm",
"path": "triton_src/moe_layer/cvmm.py",
"snippet": "def cvmm(x: torch.Tensor, sel: Union[torch.Tensor, CVMMSel], keys: torch.Tensor):\n if not isinstance(sel, CVMMSel):\n sel = cvmm_prepare_sel(sel, keys.shape[0])\n\n return CVMM.apply(x, sel.sel_index, sel.sel, keys, sel.out_index, sel.reduction_weight)"
},
{
"identifier": "cvmm_prepare_sel2",
"path": "triton_src/moe_layer/cvmm.py",
"snippet": "def cvmm_prepare_sel2(sel: torch.Tensor, w: Optional[torch.Tensor] = None) -> CVMMSel:\n # Has multiple selections for each batch element\n n_per_batch = sel.shape[-1]\n\n # indices = torch.arange(sel.nelement() // n_per_batch, device=sel.device, dtype=torch.int32)\n # indices = indices.repeat_interleave(n_per_batch).flatten()\n\n fsel = sel.flatten()\n ssel, sel_index = fsel.sort()\n\n # in_index = indices[sel_index]\n in_index = sel_index // n_per_batch\n\n return CVMMSel(sel, ssel.view_as(sel), in_index, sel_index, w)"
},
{
"identifier": "CVMMSel",
"path": "triton_src/moe_layer/cvmm.py",
"snippet": "class CVMMSel:\n raw_sel: torch.Tensor\n sel: torch.Tensor\n sel_index: torch.Tensor\n out_index: Optional[torch.Tensor] = None\n reduction_weight: Optional[torch.Tensor] = None\n\n def clone(self) -> 'CVMMSel':\n return CVMMSel(self.raw_sel, self.sel, self.sel_index, self.out_index, self.reduction_weight)"
}
] | import torch
import torch.distributed
import torch.nn.functional as F
import math
from typing import Tuple, List, Optional
from .cvmm import cvmm, cvmm_prepare_sel2, CVMMSel | 1,944 | activation_after_topk: bool = False,
activation=F.relu,
bias: bool = False, v_dim: Optional[int] = None,
sinkhorn_n_iters: int = 3, expert_dropout: float = 0.0,
weight_std_scale: float = 1.0):
super().__init__()
self.k_dim = dmodel
self.v_dim = v_dim if v_dim is not None else dmodel
self.n_experts = n_experts
self.expert_size = expert_size
self.size = self.n_experts * self.expert_size
self.dropout = dropout
self.selection_mode = selection_mode
self.k_vec_dim = self.k_dim
self.n_heads = k
self.activation_after_topk = activation_after_topk
self.activation = activation
self.sinkhorn_n_iters = sinkhorn_n_iters
self.expert_dropout = expert_dropout
if self.selection_mode not in {"softmax", "sigmoid", "sinkmoid"}:
raise ValueError(f"Unknown selection mode {self.selection_mode}")
self.keys = torch.nn.Parameter(torch.empty(self.n_experts, self.k_vec_dim, self.expert_size))
self.values = torch.nn.Parameter(torch.empty(self.n_experts, self.expert_size, self.v_dim))
self.expert_sel = torch.nn.Parameter(torch.empty(self.n_experts, self.k_vec_dim))
torch.nn.init.normal_(self.keys, std=dmodel ** -0.5 * weight_std_scale)
torch.nn.init.normal_(self.values, std=self.size ** -0.5 * weight_std_scale)
torch.nn.init.normal_(self.expert_sel, std=self.k_vec_dim ** -0.5 * weight_std_scale)
if bias:
self.bias = torch.nn.Parameter(torch.zeros(self.n_experts, self.expert_size))
self.o_bias = torch.nn.Parameter(torch.zeros(self.v_dim))
else:
self.bias = None
self.o_bias = None
self.renorm_keep_std(self.expert_sel, dim=1)
def renorm_keep_std(self, weight: torch.Tensor, dim: int = 0):
with torch.no_grad():
std = weight.std()
weight.div_(weight.norm(dim=dim, keepdim=True))
weight.mul_(std / weight.std())
def entropy_reg(self, sel: torch.Tensor) -> float:
# Everything is done in log scale
sel = sel.flatten(0, -2)
sel = F.log_softmax(sel, dim=-1)
sel = log_mean(sel, -2)
return - entropy_l(sel).mean()
def compute_scores(self, input: torch.Tensor, index: CVMMSel) -> torch.Tensor:
scores = cvmm(input, index, self.keys)
if self.bias is not None:
scores = scores + self.bias[index.raw_sel]
scores = self.activation(scores)
if self.dropout > 0:
# Standard dropout on the "up-projected scores"
scores = F.dropout(scores, self.dropout, training=self.training)
return scores
def sel_activation(self, sel: torch.Tensor) -> torch.Tensor:
if self.selection_mode == "sinkmoid":
if self.training:
with torch.no_grad():
sel = self.sinkhorn_unnorm(sel)
else:
sel = torch.sigmoid(sel)
elif self.selection_mode == "sigmoid":
sel = torch.sigmoid(sel)
elif self.selection_mode == "softmax":
sel = F.softmax(sel, dim=-1)
else:
assert False
return sel
def sinkhorn_unnorm(self, x: torch.Tensor) -> torch.Tensor:
        # Based on https://arxiv.org/abs/2202.01169. Unnormalized version
A, B = x.shape[-2:]
a = torch.zeros_like(x[..., 0, :])
b = torch.zeros_like(x[..., 0])
for _ in range(self.sinkhorn_n_iters):
b = math.log(A) - (x - a[..., None, :]).logsumexp(-1)
if torch.distributed.is_initialized():
a = math.log(B) - dist_logsumexp(x - b[..., None], -2)
else:
a = math.log(B) - (x - b[..., None]).logsumexp(-2)
return (a[..., None, :] + b[..., None] + x).exp()
def forward(self, input: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
# Selection score calculation
sel = sel_raw = F.linear(input, self.expert_sel, None)
reg_loss = self.entropy_reg(sel_raw)
# Selection activation and topk
if (not self.activation_after_topk) or (self.selection_mode == "sinkmoid"):
# Sinkhorn should be always applied before top-k
sel = self.sel_activation(sel)
if self.training and self.expert_dropout > 0:
mask = torch.rand_like(sel) < self.expert_dropout
sel = sel.masked_fill(mask, float("-inf"))
sel_val, sel_index = sel.topk(self.n_heads, dim=-1, sorted=False)
if self.activation_after_topk or (self.selection_mode == "sinkmoid"):
sel_val = torch.gather(sel_raw, -1, sel_index)
# for sinkmoid, the score is always calculated by a sigmoid
sel_val = torch.sigmoid(sel_val) if self.selection_mode == "sinkmoid" else self.sel_activation(sel_val)
# Preprocess the selection indices. They will be needed for both layers and save some time
|
def dist_logsumexp(x: torch.Tensor, dim: int, keepdim: bool = False) -> torch.Tensor:
# Calculate numerically stable distributed logsumexp
xmax = x.max(dim=dim, keepdim=True).values
torch.distributed.all_reduce(xmax, op=torch.distributed.ReduceOp.MAX)
xe = (x - xmax).exp().sum(dim=dim, keepdim=True)
torch.distributed.all_reduce(xe, op=torch.distributed.ReduceOp.SUM)
res = (xmax + xe.log())
if not keepdim:
res = res.squeeze(dim)
return res
def log_mean(x: torch.Tensor, dim: int = 0):
if torch.distributed.is_initialized():
xlse = dist_logsumexp(x, dim=dim)
# Normalize
n = torch.tensor(x.shape[dim]).to(x.device)
torch.distributed.all_reduce(n, op=torch.distributed.ReduceOp.SUM)
return xlse - n.log()
else:
return x.logsumexp(dim) - math.log(x.shape[dim])
def entropy_l(l: torch.Tensor) -> torch.Tensor:
return - (l * l.exp()).sum(-1)
class MoE(torch.nn.Module):
def __init__(self, dmodel: int, n_experts: int, expert_size: int, k: int,
dropout: float = 0, selection_mode: str = "sigmoid",
activation_after_topk: bool = False,
activation=F.relu,
bias: bool = False, v_dim: Optional[int] = None,
sinkhorn_n_iters: int = 3, expert_dropout: float = 0.0,
weight_std_scale: float = 1.0):
super().__init__()
self.k_dim = dmodel
self.v_dim = v_dim if v_dim is not None else dmodel
self.n_experts = n_experts
self.expert_size = expert_size
self.size = self.n_experts * self.expert_size
self.dropout = dropout
self.selection_mode = selection_mode
self.k_vec_dim = self.k_dim
self.n_heads = k
self.activation_after_topk = activation_after_topk
self.activation = activation
self.sinkhorn_n_iters = sinkhorn_n_iters
self.expert_dropout = expert_dropout
if self.selection_mode not in {"softmax", "sigmoid", "sinkmoid"}:
raise ValueError(f"Unknown selection mode {self.selection_mode}")
self.keys = torch.nn.Parameter(torch.empty(self.n_experts, self.k_vec_dim, self.expert_size))
self.values = torch.nn.Parameter(torch.empty(self.n_experts, self.expert_size, self.v_dim))
self.expert_sel = torch.nn.Parameter(torch.empty(self.n_experts, self.k_vec_dim))
torch.nn.init.normal_(self.keys, std=dmodel ** -0.5 * weight_std_scale)
torch.nn.init.normal_(self.values, std=self.size ** -0.5 * weight_std_scale)
torch.nn.init.normal_(self.expert_sel, std=self.k_vec_dim ** -0.5 * weight_std_scale)
if bias:
self.bias = torch.nn.Parameter(torch.zeros(self.n_experts, self.expert_size))
self.o_bias = torch.nn.Parameter(torch.zeros(self.v_dim))
else:
self.bias = None
self.o_bias = None
self.renorm_keep_std(self.expert_sel, dim=1)
def renorm_keep_std(self, weight: torch.Tensor, dim: int = 0):
with torch.no_grad():
std = weight.std()
weight.div_(weight.norm(dim=dim, keepdim=True))
weight.mul_(std / weight.std())
def entropy_reg(self, sel: torch.Tensor) -> float:
# Everything is done in log scale
sel = sel.flatten(0, -2)
sel = F.log_softmax(sel, dim=-1)
sel = log_mean(sel, -2)
return - entropy_l(sel).mean()
def compute_scores(self, input: torch.Tensor, index: CVMMSel) -> torch.Tensor:
scores = cvmm(input, index, self.keys)
if self.bias is not None:
scores = scores + self.bias[index.raw_sel]
scores = self.activation(scores)
if self.dropout > 0:
# Standard dropout on the "up-projected scores"
scores = F.dropout(scores, self.dropout, training=self.training)
return scores
def sel_activation(self, sel: torch.Tensor) -> torch.Tensor:
if self.selection_mode == "sinkmoid":
if self.training:
with torch.no_grad():
sel = self.sinkhorn_unnorm(sel)
else:
sel = torch.sigmoid(sel)
elif self.selection_mode == "sigmoid":
sel = torch.sigmoid(sel)
elif self.selection_mode == "softmax":
sel = F.softmax(sel, dim=-1)
else:
assert False
return sel
def sinkhorn_unnorm(self, x: torch.Tensor) -> torch.Tensor:
        # Based on https://arxiv.org/abs/2202.01169. Unnormalized version
A, B = x.shape[-2:]
a = torch.zeros_like(x[..., 0, :])
b = torch.zeros_like(x[..., 0])
for _ in range(self.sinkhorn_n_iters):
b = math.log(A) - (x - a[..., None, :]).logsumexp(-1)
if torch.distributed.is_initialized():
a = math.log(B) - dist_logsumexp(x - b[..., None], -2)
else:
a = math.log(B) - (x - b[..., None]).logsumexp(-2)
return (a[..., None, :] + b[..., None] + x).exp()
def forward(self, input: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
# Selection score calculation
sel = sel_raw = F.linear(input, self.expert_sel, None)
reg_loss = self.entropy_reg(sel_raw)
# Selection activation and topk
if (not self.activation_after_topk) or (self.selection_mode == "sinkmoid"):
# Sinkhorn should be always applied before top-k
sel = self.sel_activation(sel)
if self.training and self.expert_dropout > 0:
mask = torch.rand_like(sel) < self.expert_dropout
sel = sel.masked_fill(mask, float("-inf"))
sel_val, sel_index = sel.topk(self.n_heads, dim=-1, sorted=False)
if self.activation_after_topk or (self.selection_mode == "sinkmoid"):
sel_val = torch.gather(sel_raw, -1, sel_index)
# for sinkmoid, the score is always calculated by a sigmoid
sel_val = torch.sigmoid(sel_val) if self.selection_mode == "sinkmoid" else self.sel_activation(sel_val)
# Preprocess the selection indices. They will be needed for both layers and save some time | sel_indices = cvmm_prepare_sel2(sel_index.int()) | 1 | 2023-10-16 11:00:47+00:00 | 4k |
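The MoE.forward above activates the router logits, keeps the top-k experts per token, and mixes the chosen experts' outputs with the kept scores. A toy PyTorch sketch of that routing step (editor's illustration; shapes and tensor names are assumptions):

import torch

torch.manual_seed(0)
tokens, n_experts, k = 4, 8, 2
sel = torch.randn(tokens, n_experts)        # router logits, one row per token
probs = torch.sigmoid(sel)                  # "sigmoid" selection mode
sel_val, sel_index = probs.topk(k, dim=-1)  # keep k experts per token

# Each token's output is a score-weighted mix of its k expert outputs.
expert_out = torch.randn(n_experts, tokens, 16)  # toy per-expert outputs
picked = expert_out[sel_index, torch.arange(tokens)[:, None]]  # (tokens, k, 16)
mixed = (sel_val.unsqueeze(-1) * picked).sum(dim=1)            # (tokens, 16)
print(mixed.shape)  # torch.Size([4, 16])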
BurgerBurgerBurger/AA | model.py | [
{
"identifier": "process_long_input",
"path": "long_seq.py",
"snippet": "def process_long_input(model, input_ids, attention_mask, start_tokens, end_tokens):\n # Split the input to 2 overlapping chunks. Now BERT can encode inputs of which the length are up to 1024.\n n, c = input_ids.size()\n start_tokens = torch.tensor(start_tokens).to(input_ids)\n end_tokens = torch.tensor(end_tokens).to(input_ids)\n len_start = start_tokens.size(0)\n len_end = end_tokens.size(0)\n if c <= 512:\n # if document can fit into the encoder\n output = model(\n input_ids=input_ids,\n attention_mask=attention_mask,\n output_attentions=True,\n output_hidden_states=True,\n )\n sequence_outputs = torch.stack(output[-2][-3:], dim=1)\n sequence_output = sequence_outputs.mean(dim=1)\n attentions = torch.stack(output[-1][-3:],dim=1)\n attention = attentions.mean(dim=1)\n \n else:\n new_input_ids, new_attention_mask, num_seg = [], [], []\n seq_len = attention_mask.sum(1).cpu().numpy().astype(np.int32).tolist()\n for i, l_i in enumerate(seq_len): # for each batch\n if l_i <= 512:\n new_input_ids.append(input_ids[i, :512])\n new_attention_mask.append(attention_mask[i, :512])\n num_seg.append(1)\n else: # split the input into two parts: (0, 512) and (end - 512, end)\n input_ids1 = torch.cat([input_ids[i, :512 - len_end], end_tokens], dim=-1)\n input_ids2 = torch.cat([start_tokens, input_ids[i, (l_i - 512 + len_start): l_i]], dim=-1)\n attention_mask1 = attention_mask[i, :512]\n attention_mask2 = attention_mask[i, (l_i - 512): l_i]\n new_input_ids.extend([input_ids1, input_ids2])\n new_attention_mask.extend([attention_mask1, attention_mask2])\n num_seg.append(2)\n \n input_ids = torch.stack(new_input_ids, dim=0)\n attention_mask = torch.stack(new_attention_mask, dim=0)\n \n output = model(\n input_ids=input_ids,\n attention_mask=attention_mask,\n output_attentions=True,\n output_hidden_states=True,\n )\n \n sequence_outputs = torch.stack(output[-2][-3:], dim=1)\n sequence_output = sequence_outputs.mean(dim=1)\n attentions = torch.stack(output[-1][-3:],dim=1)\n attention = attentions.mean(dim=1)\n\n i = 0\n new_output, new_attention = [], []\n for (n_s, l_i) in zip(num_seg, seq_len):\n if n_s == 1: # 1 segment (no split)\n output = F.pad(sequence_output[i], (0, 0, 0, c - 512))\n att = F.pad(attention[i], (0, c - 512, 0, c - 512))\n new_output.append(output)\n new_attention.append(att)\n elif n_s == 2: # 2 segments (splitted)\n \n # first half\n output1 = sequence_output[i][:512 - len_end]\n mask1 = attention_mask[i][:512 - len_end]\n att1 = attention[i][:, :512 - len_end, :512 - len_end]\n # pad to reserve space for the second half\n output1 = F.pad(output1, (0, 0, 0, c - 512 + len_end))\n mask1 = F.pad(mask1, (0, c - 512 + len_end))\n att1 = F.pad(att1, (0, c - 512 + len_end, 0, c - 512 + len_end))\n\n # second half\n output2 = sequence_output[i + 1][len_start:]\n mask2 = attention_mask[i + 1][len_start:]\n att2 = attention[i + 1][:, len_start:, len_start:]\n # pad to reserve space for the first half\n output2 = F.pad(output2, (0, 0, l_i - 512 + len_start, c - l_i))\n mask2 = F.pad(mask2, (l_i - 512 + len_start, c - l_i))\n att2 = F.pad(att2, [l_i - 512 + len_start, c - l_i, l_i - 512 + len_start, c - l_i])\n \n # combine first half and second half\n mask = mask1 + mask2 + 1e-10\n output = (output1 + output2) / mask.unsqueeze(-1)\n att = (att1 + att2)\n att = att / (att.sum(-1, keepdim=True) + 1e-10)\n new_output.append(output)\n new_attention.append(att)\n i += n_s\n \n sequence_output = torch.stack(new_output, dim=0)\n attention = torch.stack(new_attention, dim=0)\n\n return 
sequence_output, attention"
},
{
"identifier": "ATLoss",
"path": "losses.py",
"snippet": "class ATLoss(nn.Module):\n \n def __init__(self):\n super().__init__()\n\n def forward(self, logits, labels):\n # TH label\n th_label = torch.zeros_like(labels, dtype=torch.float).to(labels)\n th_label[:, 0] = 1.0\n labels[:, 0] = 0.0\n\n p_mask = labels + th_label\n n_mask = 1 - labels\n\n # Rank positive classes to TH\n logit1 = logits - (1 - p_mask) * 1e30\n loss1 = -(F.log_softmax(logit1, dim=-1) * labels).sum(1)\n # Rank TH to negative classes\n logit2 = logits - (1 - n_mask) * 1e30\n loss2 = -(F.log_softmax(logit2, dim=-1) * th_label).sum(1)\n # Sum two parts\n loss = loss1 + loss2\n loss = loss.mean()\n return loss\n\n def get_label(self, logits, num_labels=-1):\n\n th_logit = logits[:, 0].unsqueeze(1) # theshold is norelation\n output = torch.zeros_like(logits).to(logits)\n mask = (logits > th_logit)\n if num_labels > 0:\n top_v, _ = torch.topk(logits, num_labels, dim=1)\n top_v = top_v[:, -1] # smallest logits among the num_labels\n # predictions are those logits > thresh and logits >= smallest\n mask = (logits >= top_v.unsqueeze(1)) & mask\n output[mask] = 1.0\n # if no such relation label exist: set its label to 'Nolabel'\n output[:, 0] = (output.sum(1) == 0.).to(logits)\n return output\n\n def get_score(self, logits, num_labels=-1):\n\n if num_labels > 0:\n return torch.topk(logits, num_labels, dim=1)\n else:\n return logits[:,1] - logits[:,0], 0"
},
{
"identifier": "AttentionGCNLayer",
"path": "graph.py",
"snippet": "class AttentionGCNLayer(nn.Module):\n def __init__(self, edges, input_size, nhead=2, graph_drop=0.0, iters=2, attn_drop=0.0):\n super(AttentionGCNLayer, self).__init__()\n self.nhead = nhead\n self.graph_attention = MultiHeadDotProductAttention(edges, input_size, input_size, self.nhead, attn_drop)\n self.gcn_layers = nn.Sequential(\n *[GraphConvolutionLayer(input_size, input_size, graph_drop) for _ in range(iters)])\n self.blocks = nn.ModuleList([self.gcn_layers for _ in range(self.nhead)])\n\n self.aggregate_W = nn.Linear(input_size * nhead, input_size)\n\n def forward(self, nodes_embed, node_adj):\n output = []\n graph_attention = self.graph_attention(nodes_embed, node_adj)\n for cnt in range(0, self.nhead):\n hi, _ = self.blocks[cnt]((nodes_embed, graph_attention[cnt]))\n output.append(hi)\n output = torch.cat(output, dim=-1)\n return self.aggregate_W(output), graph_attention"
}
] | import torch
import torch.nn as nn
import torch.nn.functional as F
from opt_einsum import contract
from long_seq import process_long_input
from losses import ATLoss
from graph import AttentionGCNLayer | 2,125 |
class DocREModel(nn.Module):
def __init__(self, args, config, model, tokenizer,
emb_size=768, block_size=64, num_labels=-1,
max_sent_num=25, evi_thresh=0.2):
super().__init__()
self.config = config
self.model = model
self.tokenizer = tokenizer
self.hidden_size = config.hidden_size
|
class DocREModel(nn.Module):
def __init__(self, args, config, model, tokenizer,
emb_size=768, block_size=64, num_labels=-1,
max_sent_num=25, evi_thresh=0.2):
super().__init__()
self.config = config
self.model = model
self.tokenizer = tokenizer
self.hidden_size = config.hidden_size
| self.loss_fnt = ATLoss() | 1 | 2023-10-20 05:53:25+00:00 | 4k |
hnesk/flipper-raw-rfid | tests/test_rifl_file.py | [
{
"identifier": "Rifl",
"path": "flipper_raw_rfid/rifl.py",
"snippet": "class Rifl:\n \"\"\"\n A raw rfid file from flipper (xyz.ask.raw or xyz.psk.raw)\n\n \"\"\"\n header: RiflHeader\n \"\"\" The header of the file \"\"\"\n\n pulse_and_durations: npt.NDArray[numpy.int64] = None\n \"\"\"\n a nx2 numpy array with:\n column 0: pulse - (number of µs while output high) and\n column 1: duration - (number of µs till next signal)\n\n Diagram:\n\n _____________ _____\n ______ _______________ .......\n\n ^ - pulse - ^\n\n ^ - duration -^\n\n\n \"\"\"\n\n @staticmethod\n def load(path: Path | str) -> Rifl:\n path = Path(path)\n with path.open('rb') as f:\n return Rifl.from_io(f)\n\n @staticmethod\n def from_io(io: BinaryIO) -> Rifl:\n header = RiflHeader.from_io(io)\n pads = numpy.array(list(Rifl._pulse_and_durations(io, header.max_buffer_size)), dtype=numpy.int64)\n return Rifl(header, pads)\n\n def save(self, path: Path | str) -> None:\n path = Path(path)\n with path.open('wb') as f:\n self.to_io(f)\n\n def to_io(self, io: BinaryIO) -> None:\n\n def write(b: BytesIO) -> None:\n io.write(pack('I', b.getbuffer().nbytes))\n io.write(b.getvalue())\n\n def write_pair(b: BytesIO, pair: BytesIO) -> BytesIO:\n if b.getbuffer().nbytes + pair.getbuffer().nbytes > self.header.max_buffer_size:\n write(b)\n b = BytesIO()\n b.write(pair.getvalue())\n return b\n\n io.write(self.header.to_bytes())\n\n buffer = BytesIO()\n for pulse, duration in self.pulse_and_durations:\n pair_buffer = BytesIO()\n Rifl.write_varint(pair_buffer, pulse)\n Rifl.write_varint(pair_buffer, duration)\n buffer = write_pair(buffer, pair_buffer)\n\n write(buffer)\n\n @staticmethod\n def _buffers(io: BinaryIO, max_buffer_size: int) -> Generator[BinaryIO, None, None]:\n \"\"\"\n Read raw binary buffers and loop through them\n\n Each buffer holds varint (https://github.com/flipperdevices/flipperzero-firmware/blob/dev/lib/toolbox/varint.c#L13) encoded pairs\n \"\"\"\n while True:\n try:\n buffer_size, = unpack('I', io.read(4))\n except struct_error:\n # No more bytes left, EOF\n break\n if buffer_size > max_buffer_size:\n raise RiflError(f'read pair: buffer size is too big {buffer_size} > {max_buffer_size}', io)\n buffer = io.read(buffer_size)\n if len(buffer) != buffer_size:\n raise RiflError(f'Tried to read {buffer_size} bytes got only {len(buffer)}', io)\n yield BytesIO(buffer)\n\n @staticmethod\n def _pulse_and_durations(io: BinaryIO, max_buffer_size: int) -> Generator[tuple[int, int], None, None]:\n \"\"\"\n loop through buffers and yield a pulse and duration tuple\n \"\"\"\n for buffer in Rifl._buffers(io, max_buffer_size):\n for pulse, duration in batched(Rifl.read_varint(buffer), 2):\n yield pulse, duration\n\n @staticmethod\n def read_varint(buffer: BinaryIO) -> Generator[int, None, None]:\n \"\"\"\n Read one varint from buffer\n\n Python implementation of https://github.com/flipperdevices/flipperzero-firmware/blob/dev/lib/toolbox/varint.c#L13\n\n \"\"\"\n res = 0\n i = 1\n while (vs := buffer.read(1)) != b'':\n v = vs[0]\n # the low 7 bits are the value\n res = res | (v & 0x7F) * i\n i = i << 7\n # yield when continue bit (bit 8) is not set\n if v & 0x80 == 0:\n yield res\n res = 0\n i = 1\n\n @staticmethod\n def write_varint(buffer: BinaryIO, value: int) -> int:\n \"\"\"\n Write one varint to buffer\n \"\"\"\n i = 1\n while value > 0x80:\n buffer.write(bytes([value & 0x7F | 0x80]))\n value >>= 7\n i += 1\n\n buffer.write(bytes([value & 0x7F]))\n return i"
},
{
"identifier": "RiflHeader",
"path": "flipper_raw_rfid/rifl.py",
"snippet": "class RiflHeader:\n \"\"\"\n Rifl Header data structure\n \"\"\"\n version: int\n \"\"\" Version of the rifl file format: 1 supported \"\"\"\n frequency: float\n \"\"\" Frequency of the signal in Hz \"\"\"\n duty_cycle: float\n \"\"\" Duty cycle of the signal\"\"\"\n max_buffer_size: int\n \"\"\" Maximum buffer size in bytes\"\"\"\n\n @staticmethod\n def from_io(io: BinaryIO) -> RiflHeader:\n try:\n return RiflHeader.from_bytes(io.read(20))\n except RiflError as e:\n e.file = io\n raise e\n\n @staticmethod\n def from_bytes(f: bytes) -> RiflHeader:\n try:\n magic, version, frequency, duty_cycle, max_buffer_size = unpack('IIffI', f)\n except struct_error:\n raise RiflError('Not a RIFL file')\n if magic != LFRFID_RAW_FILE_MAGIC:\n raise RiflError('Not a RIFL file')\n if version != LFRFID_RAW_FILE_VERSION:\n raise RiflError(f'Unsupported RIFL Version {version}')\n\n return RiflHeader(version, frequency, duty_cycle, max_buffer_size)\n\n def to_bytes(self) -> bytes:\n return pack('IIffI', LFRFID_RAW_FILE_MAGIC, self.version, self.frequency, self.duty_cycle, self.max_buffer_size)"
}
] | from io import BytesIO
from pathlib import Path
from unittest import TestCase
from numpy.testing import assert_array_equal
from flipper_raw_rfid.rifl import Rifl, RiflHeader
import numpy | 1,690 |
TEST_BASE_PATH = Path(__file__).parent.absolute()
class RiflFileTest(TestCase):
example_bytes = bytes.fromhex('f101a903ae028506a604fb05bb028706ad04b90404c403')
example_ints = [241, 425, 302, 773, 550, 763, 315, 775, 557, 569, 4, 452]
def test_header_to_bytes_and_back(self):
|
TEST_BASE_PATH = Path(__file__).parent.absolute()
class RiflFileTest(TestCase):
example_bytes = bytes.fromhex('f101a903ae028506a604fb05bb028706ad04b90404c403')
example_ints = [241, 425, 302, 773, 550, 763, 315, 775, 557, 569, 4, 452]
def test_header_to_bytes_and_back(self): | header = RiflHeader(1, 125_000, 0.5, 2048) | 1 | 2023-10-20 13:06:00+00:00 | 4k |
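The test above pins a hex payload to its decoded integers. The encoding is the Flipper firmware's 7-bit varint: each byte carries seven payload bits, least-significant group first, and bit 8 flags continuation. A standalone round-trip sketch, checked against the record's example data (editor's illustration; the function names are assumptions):

def encode_varint(value: int) -> bytes:
    out = bytearray()
    while value > 0x7F:
        out.append(value & 0x7F | 0x80)  # set the continuation bit
        value >>= 7
    out.append(value)
    return bytes(out)

def decode_varints(data: bytes):
    result, shift = 0, 0
    for byte in data:
        result |= (byte & 0x7F) << shift
        shift += 7
        if not byte & 0x80:  # continuation bit clear: value is complete
            yield result
            result, shift = 0, 0

raw = bytes.fromhex('f101a903ae028506a604fb05bb028706ad04b90404c403')
values = list(decode_varints(raw))
assert values == [241, 425, 302, 773, 550, 763, 315, 775, 557, 569, 4, 452]
assert b''.join(encode_varint(v) for v in values) == raw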
xingchenshanyao/YOLOP-E | lib/dataset/DemoDataset.py | [
{
"identifier": "clean_str",
"path": "lib/utils/utils.py",
"snippet": "def clean_str(s):\n # Cleans a string by replacing special characters with underscore _\n return re.sub(pattern=\"[|@#!¡·$€%&()=?¿^*;:,¨´><+]\", repl=\"_\", string=s)"
},
{
"identifier": "letterbox_for_img",
"path": "lib/utils/augmentations.py",
"snippet": "def letterbox_for_img(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True):\n # Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232\n shape = img.shape[:2] # current shape [height, width]\n if isinstance(new_shape, int):\n new_shape = (new_shape, new_shape)\n\n # Scale ratio (new / old)\n r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])\n if not scaleup: # only scale down, do not scale up (for better test mAP)\n r = min(r, 1.0)\n\n # Compute padding\n ratio = r, r # width, height ratios\n new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))\n\n\n dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding\n\n if auto: # minimum rectangle\n dw, dh = np.mod(dw, 32), np.mod(dh, 32) # wh padding\n\n elif scaleFill: # stretch\n dw, dh = 0.0, 0.0\n new_unpad = (new_shape[1], new_shape[0])\n ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios\n\n dw /= 2 # divide padding into 2 sides\n dh /= 2\n if shape[::-1] != new_unpad: # resize\n img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_AREA)\n\n top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))\n left, right = int(round(dw - 0.1)), int(round(dw + 0.1))\n img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border\n return img, ratio, (dw, dh)"
}
] | import glob
import os
import random
import shutil
import time
import cv2
import math
import numpy as np
import torch
from pathlib import Path
from threading import Thread
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm
from ..utils import letterbox_for_img, clean_str | 1,794 |
img_formats = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.tiff', '.dng']
vid_formats = ['.mov', '.avi', '.mp4', '.mpg', '.mpeg', '.m4v', '.wmv', '.mkv']
class LoadImages: # for inference
def __init__(self, path, img_size=640):
p = str(Path(path)) # os-agnostic
p = os.path.abspath(p) # absolute path
if '*' in p:
files = sorted(glob.glob(p, recursive=True)) # glob
elif os.path.isdir(p):
files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir
elif os.path.isfile(p):
files = [p] # files
else:
raise Exception('ERROR: %s does not exist' % p)
images = [x for x in files if os.path.splitext(x)[-1].lower() in img_formats]
videos = [x for x in files if os.path.splitext(x)[-1].lower() in vid_formats]
ni, nv = len(images), len(videos)
self.img_size = img_size
self.files = images + videos
self.nf = ni + nv # number of files
self.video_flag = [False] * ni + [True] * nv
self.mode = 'images'
if any(videos):
self.new_video(videos[0]) # new video
else:
self.cap = None
assert self.nf > 0, 'No images or videos found in %s. Supported formats are:\nimages: %s\nvideos: %s' % \
(p, img_formats, vid_formats)
def __iter__(self):
self.count = 0
return self
def __next__(self):
if self.count == self.nf:
raise StopIteration
path = self.files[self.count]
if self.video_flag[self.count]:
# Read video
self.mode = 'video'
ret_val, img0 = self.cap.read()
if not ret_val:
self.count += 1
self.cap.release()
if self.count == self.nf: # last video
raise StopIteration
else:
path = self.files[self.count]
self.new_video(path)
ret_val, img0 = self.cap.read()
h0, w0 = img0.shape[:2]
self.frame += 1
print('\n video %g/%g (%g/%g) %s: ' % (self.count + 1, self.nf, self.frame, self.nframes, path), end='')
else:
# Read image
self.count += 1
img0 = cv2.imread(path, cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION) # BGR
#img0 = cv2.cvtColor(img0, cv2.COLOR_BGR2RGB)
assert img0 is not None, 'Image Not Found ' + path
print('image %g/%g %s: \n' % (self.count, self.nf, path), end='')
h0, w0 = img0.shape[:2]
        # Padded resize # padding size, 640*360*3 -> 640*384*3
img, ratio, pad = letterbox_for_img(img0, new_shape=self.img_size, auto=True)
h, w = img.shape[:2] # h = 384, w = 640
shapes = (h0, w0), ((h / h0, w / w0), pad)
# Convert
#img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
# cv2.imwrite(path + '.letterbox.jpg', 255 * img.transpose((1, 2, 0))[:, :, ::-1]) # save letterbox image
return path, img, img0, self.cap, shapes
def new_video(self, path):
self.frame = 0
self.cap = cv2.VideoCapture(path)
self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
def __len__(self):
return self.nf # number of files
class LoadStreams: # multiple IP or RTSP cameras
def __init__(self, sources='streams.txt', img_size=640, auto=True):
self.mode = 'stream'
self.img_size = img_size
if os.path.isfile(sources):
with open(sources, 'r') as f:
sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())]
else:
sources = [sources]
n = len(sources)
self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n
|
img_formats = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.tiff', '.dng']
vid_formats = ['.mov', '.avi', '.mp4', '.mpg', '.mpeg', '.m4v', '.wmv', '.mkv']
class LoadImages: # for inference
def __init__(self, path, img_size=640):
p = str(Path(path)) # os-agnostic
p = os.path.abspath(p) # absolute path
if '*' in p:
files = sorted(glob.glob(p, recursive=True)) # glob
elif os.path.isdir(p):
files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir
elif os.path.isfile(p):
files = [p] # files
else:
raise Exception('ERROR: %s does not exist' % p)
images = [x for x in files if os.path.splitext(x)[-1].lower() in img_formats]
videos = [x for x in files if os.path.splitext(x)[-1].lower() in vid_formats]
ni, nv = len(images), len(videos)
self.img_size = img_size
self.files = images + videos
self.nf = ni + nv # number of files
self.video_flag = [False] * ni + [True] * nv
self.mode = 'images'
if any(videos):
self.new_video(videos[0]) # new video
else:
self.cap = None
assert self.nf > 0, 'No images or videos found in %s. Supported formats are:\nimages: %s\nvideos: %s' % \
(p, img_formats, vid_formats)
def __iter__(self):
self.count = 0
return self
def __next__(self):
if self.count == self.nf:
raise StopIteration
path = self.files[self.count]
if self.video_flag[self.count]:
# Read video
self.mode = 'video'
ret_val, img0 = self.cap.read()
if not ret_val:
self.count += 1
self.cap.release()
if self.count == self.nf: # last video
raise StopIteration
else:
path = self.files[self.count]
self.new_video(path)
ret_val, img0 = self.cap.read()
h0, w0 = img0.shape[:2]
self.frame += 1
print('\n video %g/%g (%g/%g) %s: ' % (self.count + 1, self.nf, self.frame, self.nframes, path), end='')
else:
# Read image
self.count += 1
img0 = cv2.imread(path, cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION) # BGR
#img0 = cv2.cvtColor(img0, cv2.COLOR_BGR2RGB)
assert img0 is not None, 'Image Not Found ' + path
print('image %g/%g %s: \n' % (self.count, self.nf, path), end='')
h0, w0 = img0.shape[:2]
        # Padded resize # padding size, 640*360*3 -> 640*384*3
img, ratio, pad = letterbox_for_img(img0, new_shape=self.img_size, auto=True)
h, w = img.shape[:2] # h = 384, w = 640
shapes = (h0, w0), ((h / h0, w / w0), pad)
# Convert
#img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
# cv2.imwrite(path + '.letterbox.jpg', 255 * img.transpose((1, 2, 0))[:, :, ::-1]) # save letterbox image
return path, img, img0, self.cap, shapes
def new_video(self, path):
self.frame = 0
self.cap = cv2.VideoCapture(path)
self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
def __len__(self):
return self.nf # number of files
class LoadStreams: # multiple IP or RTSP cameras
def __init__(self, sources='streams.txt', img_size=640, auto=True):
self.mode = 'stream'
self.img_size = img_size
if os.path.isfile(sources):
with open(sources, 'r') as f:
sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())]
else:
sources = [sources]
n = len(sources)
self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n | self.sources = [clean_str(x) for x in sources] # clean source names for later | 0 | 2023-10-24 02:08:25+00:00 | 4k |
giulio98/functional-diffusion-processes | src/functional_diffusion_processes/models/base_maml.py | [
{
"identifier": "clip_learning_rates",
"path": "src/functional_diffusion_processes/utils/common.py",
"snippet": "def clip_learning_rates(params):\n \"\"\"Clip the learning rates to the range [0, 1].\n\n Args:\n params: A dictionary of parameters.\n\n Returns:\n A dictionary containing the clipped learning rates.\n \"\"\"\n params_true, learning_rates_true = separate_learning_rates(unfreeze(params))\n clipped_learning_rates = jax.tree_map(lambda lr: jnp.clip(lr, 0, 1), learning_rates_true)\n new_params = merge_learning_rates(unfreeze(params_true), unfreeze(clipped_learning_rates))\n return new_params"
},
{
"identifier": "make_coordinates",
"path": "src/functional_diffusion_processes/utils/common.py",
"snippet": "def make_coordinates(batch_size: int, shape: Any, num_channels: int = None) -> jnp.ndarray:\n \"\"\"Make coordinates for a given shape.\n\n Args:\n batch_size: The batch size.\n shape: The shape of the coordinates.\n num_channels: The number of channels.\n\n Returns:\n A Numpy Array of coordinates.\n \"\"\"\n x = jnp.stack(jnp.ones(shape).nonzero(), -1) * jnp.ones([batch_size, 1, 1])\n x = normalize_coordinates(x, max_coordinate=jnp.max(x))\n if len(shape) == 2:\n grid_size = shape[0] * shape[1]\n else:\n grid_size = shape[0]\n if num_channels is not None:\n y_aux = jnp.ones((batch_size, grid_size, num_channels))\n x = jnp.concatenate([x, y_aux], axis=-1)\n\n # initialize t to 1\n t = jnp.ones((batch_size, grid_size, 1))\n\n x = jnp.concatenate([x, t], axis=-1)\n return x"
},
{
"identifier": "merge_learning_rates",
"path": "src/functional_diffusion_processes/utils/common.py",
"snippet": "def merge_learning_rates(params, learning_rates):\n \"\"\"Merge the learning rates with the other parameters.\n\n Args:\n params: A dictionary of parameters.\n learning_rates: A dictionary of learning rates.\n\n Returns:\n A dictionary containing the merged parameters.\n \"\"\"\n new_params = {}\n\n for key, value in learning_rates[\"params\"].items():\n layer_name, param_id = key.split(\"_\", 1)\n new_layer_name = f\"{layer_name}LR_{param_id}\"\n if new_layer_name not in new_params:\n new_params[new_layer_name] = value\n\n new_params.update(params[\"params\"])\n\n return FrozenDict({\"params\": new_params})"
},
{
"identifier": "separate_learning_rates",
"path": "src/functional_diffusion_processes/utils/common.py",
"snippet": "def separate_learning_rates(params):\n \"\"\"Separate the learning rates from the other parameters.\n\n Args:\n params: A dictionary of parameters.\n\n Returns:\n A tuple containing the learning rates and the other parameters.\n \"\"\"\n learning_rates = {}\n other_params = {}\n for layer_name, layer_params in params[\"params\"].items():\n if \"lr\" in layer_name.lower():\n learning_rates[layer_name] = layer_params\n else:\n other_params[layer_name] = layer_params\n\n new_lr_params = {}\n for key, value in learning_rates.items():\n layer_name, param_id = key.split(\"_\", 1) # Split the key into layer_name and param_name\n clean_layer_name = layer_name.replace(\"LR\", \"\")\n combined_layer_name = f\"{clean_layer_name}_{param_id}\"\n\n if combined_layer_name not in new_lr_params:\n new_lr_params[combined_layer_name] = {}\n new_lr_params[combined_layer_name] = value\n\n return FrozenDict({\"params\": other_params}), FrozenDict({\"params\": new_lr_params})"
}
] | import abc
import logging
import flax.linen as nn
import hydra
import jax
import jax.numpy as jnp
import optax
from functools import partial
from typing import Any, Callable, Mapping, Optional, Tuple, TypeVar
from flax.core import FrozenDict, unfreeze
from omegaconf import DictConfig
from ..utils.common import clip_learning_rates, make_coordinates, merge_learning_rates, separate_learning_rates | 3,321 | ) -> Tuple[jax.random.PRNGKey, jnp.ndarray, jnp.ndarray]:
"""Apply the (outer) forward pass and update the model parameters.
Args:
rng (jax.random.PRNGKey): Random key.
params (Params): Initial model parameters.
batch_input (jnp.ndarray): Input tensor to the model.
batch_corrupted (jnp.ndarray): Corrupted version of the output tensor.
psm (jnp.ndarray): Power special matrix.
Returns:
Tuple[jax.random.PRNGKey, jnp.ndarray, jnp.ndarray]: A tuple containing a new random key, the model output, and the inner loss.
"""
params_adapted, loss_inner = update_inner_fn(params, batch_input, batch_corrupted, psm)
model_output = jax.vmap(self.apply)(params_adapted, batch_input)
return rng, model_output, loss_inner
return apply_forward
def make_update_inner_fn(
self, optimizer_inner: optax.GradientTransformation, n_steps: int
) -> Callable[[Params, jnp.ndarray, jnp.ndarray, jnp.ndarray], Tuple[jnp.ndarray, jnp.ndarray]]:
"""Create a function to update model parameters for inner optimization.
This method creates a function that performs the inner optimization updates
during the meta-training phase, which is a key component of the MAML algorithm.
Args:
optimizer_inner (optax.GradientTransformation): The optimizer used for inner optimization.
n_steps (int): The number of optimization steps.
Returns:
Callable[..., Tuple[jnp.ndarray, jnp.ndarray]]: Function to update model parameters for inner optimization.
"""
@partial(jax.vmap, in_axes=0)
@partial(jax.grad, has_aux=True)
def loss_inner_fn(params_i: Params, batch_input: T, y_corrupted: T, psm: T) -> T:
"""Computes the loss for inner optimization.
This inner method computes the loss for inner optimization by comparing
the model's output against the corrupted batch using mean square error.
The method is vectorized using JAX's vmap function for efficiency.
Args:
params_i (Params): Model parameters.
batch_input (T): Input batch.
y_corrupted (T): Corrupted batch.
psm (T): Power special matrix.
Returns:
T: Loss value.
"""
c = y_corrupted.shape[-1]
model_output = self.apply(params_i, batch_input)
if len(psm.shape) == 3:
model_output_freq = jnp.fft.fft2(model_output.reshape(*psm.shape[:-1], c), norm="ortho", axes=(0, 1))
y_corrupted_freq = jnp.fft.fft2(y_corrupted.reshape(*psm.shape[:-1], c), norm="ortho", axes=(0, 1))
else:
model_output_freq = jnp.fft.fft(model_output.reshape(*psm.shape[:-1], c), norm="ortho", axis=0)
y_corrupted_freq = jnp.fft.fft(y_corrupted.reshape(*psm.shape[:-1], c), norm="ortho", axis=0)
mse = mean_square_error(
y_corrupted_freq.reshape(-1, c),
model_output_freq.reshape(-1, c),
psm.reshape(-1, 1),
)
loss: jnp.ndarray = jnp.mean(mse)
return loss, loss
def apply_inner_forward(
params: Params, batch_input: jnp.ndarray, batch_corrupted: jnp.ndarray, psm: jnp.ndarray
):
"""Applies inner forward pass for updating model parameters.
Args:
params (Params): Model parameters.
batch_input (jnp.ndarray): Input batch.
batch_corrupted (jnp.ndarray): Corrupted batch.
psm (jnp.ndarray): Power special matrix.
Returns:
Tuple[jnp.ndarray, jnp.ndarray]: Updated model parameters and inner loss.
"""
def inner_opt_loop(
carry: Tuple[Params, jnp.ndarray, int, Any, jnp.ndarray], _: None
) -> Tuple[Tuple[Params, jnp.ndarray, int, Any, jnp.ndarray], None]:
"""Inner optimization loop for updating model parameters.
Args:
carry (Tuple[Params, jnp.ndarray, int, optax.OptState, jnp.ndarray]): Tuple containing model parameters,
loss vector, iteration index, optimizer state, and corrupted batch.
_ (None): A throwaway variable as no second argument is used in this function.
Returns:
Tuple[Params, jnp.ndarray, int, optax.OptState, jnp.ndarray]: Updated tuple with new model parameters,
updated loss vector, incremented iteration index, updated optimizer state, and corrupted batch.
"""
params_i, loss_inner_vec, it, opt_inner_state_params, batch_corrupted_i = carry
grad_params, (loss) = loss_inner_fn(params_i, batch_input, batch_corrupted_i, psm)
loss_inner_vec = loss_inner_vec.at[it].set(jnp.mean(loss))
if self.model_config.use_dense_lr:
# separate learning rates from grad_params
grad_params_true, _ = separate_learning_rates(unfreeze(grad_params))
# separate learning rates from params_i
params_i_true, learning_rates = separate_learning_rates(unfreeze(params_i))
# calculate updates using meta-sgd
updates_params = jax.tree_map(
lambda g, lr: -jnp.clip(lr, 0, 1) * g,
grad_params_true,
learning_rates,
)
# merge updates_params and learning_rates
|
Params = FrozenDict[str, Any]
T = TypeVar("T")
pylogger = logging.getLogger(__name__)
@partial(jax.vmap, in_axes=0)
def mean_square_error(y_corrupted: jnp.ndarray, y_reconstructed: jnp.ndarray, y_psm: jnp.ndarray) -> jnp.ndarray:
"""Calculate the mean squared error between the predicted and actual values of a batch.
Args:
y_corrupted (jnp.ndarray): The actual y perturbed, with shape (output_size,).
y_reconstructed (jnp.ndarray): The predicted y, with shape (output_size,).
y_psm (jnp.ndarray): The Power special matrix, with shape (1,).
Returns:
jnp.ndarray: The mean squared error for each y, with shape (1,).
"""
return jnp.sum(jnp.square(jnp.abs(y_corrupted * y_psm - y_reconstructed * y_psm)))
class BaseMAML(nn.Module, abc.ABC):
"""Abstract model class for implementing Model-Agnostic Meta-Learning (MAML).
The Model-Agnostic Meta-Learning (MAML) algorithm is designed to train models
in a manner that they can be fine-tuned for new tasks with a small number of examples.
This implementation is based on the MAML algorithm introduced in the paper
"Model-Agnostic Meta-Learning for Fast Adaptation of Deep Networks"
(https://arxiv.org/abs/1703.03400).
Attributes:
model_config (DictConfig): Configuration dictionary for the model.
optimizer_inner (optax.GradientTransformation): Inner optimizer configuration.
inner_steps (int): Number of inner optimization steps.
Methods:
__call__(self, inputs: jnp.ndarray) -> jnp.ndarray: Implement the forward pass of the model.
initialize_model(self, rng: jax.random.PRNGKey, batch_input: jnp.ndarray) -> FrozenDict[str, Mapping[str, Any]]: Initialize the model with dummy inputs.
initialize_input(self, shape: Tuple[int, ...]) -> jnp.ndarray: Create input tensor for the model based on the specified shape.
make_update_params_fn(self) -> Callable[..., Tuple[jax.random.PRNGKey, jnp.ndarray, jnp.ndarray]]: Create a function to update the model parameters.
make_update_inner_fn(self, optimizer_inner: optax.GradientTransformation, n_steps: int) -> Callable[..., Tuple[jnp.ndarray, jnp.ndarray]]: Create a function to update model parameters for inner optimization.
make_predict_fn(self) -> Callable[..., jnp.ndarray]: Creates a function for making predictions with the model.
"""
model_config: DictConfig
optimizer_inner: optax.GradientTransformation
inner_steps: int
@abc.abstractmethod
@nn.compact
def __call__(self, inputs: jnp.ndarray) -> jnp.ndarray:
"""Implement the forward pass of the model.
Args:
inputs (jnp.ndarray): Input tensor to the model.
Returns:
jnp.ndarray: Output tensor from the model.
Raises:
NotImplementedError: If this method is not overridden by a derived class.
"""
raise NotImplementedError(f"{self.__class__.__name__} must implement the __call__ method.")
def initialize_model(self, rng: jax.random.PRNGKey, batch_input: jnp.ndarray) -> FrozenDict[str, Mapping[str, Any]]:
"""Initialize the model with dummy inputs.
This method initializes the model parameters by passing a batch of dummy inputs
through the model. This is a common practice to infer the dimensions of the model's
parameters.
Args:
rng (jax.random.PRNGKey): A random key for generating initial model parameters.
batch_input (jnp.ndarray): A batch of dummy inputs for initializing the model.
Returns:
FrozenDict[str, Mapping[str, Any]]: The initialized model parameters.
"""
self.optimizer_inner = hydra.utils.instantiate(self.optimizer_inner)
return self.init(rng, batch_input)
def initialize_input(self, shape: Tuple[int, ...]) -> jnp.ndarray:
"""Create input tensor for the model based on the specified shape.
Args:
shape (Tuple[int, ...]): Shape of the input tensor.
Returns:
jnp.ndarray: Initialized input tensor.
"""
batch_size = shape[0]
num_channels = shape[-1]
grid_size = shape[1:-1]
if not self.model_config.y_input:
num_channels = None
coordinates = make_coordinates(batch_size, grid_size, num_channels)
return coordinates
def make_update_params_fn(self) -> Callable[..., Tuple[jax.random.PRNGKey, jnp.ndarray, jnp.ndarray]]:
"""Create a function to update the model parameters.
This method creates a function that performs the forward pass of the model
and updates the model parameters.
Returns:
Callable[..., Tuple[jax.random.PRNGKey, jnp.ndarray, jnp.ndarray]]: Function to update model parameters.
"""
update_inner_fn = self.make_update_inner_fn(
optimizer_inner=self.optimizer_inner,
n_steps=self.inner_steps,
)
def apply_forward(
rng: jax.random.PRNGKey,
params: Params,
batch_input: jnp.ndarray,
batch_corrupted: jnp.ndarray,
psm: jnp.ndarray,
) -> Tuple[jax.random.PRNGKey, jnp.ndarray, jnp.ndarray]:
"""Apply the (outer) forward pass and update the model parameters.
Args:
rng (jax.random.PRNGKey): Random key.
params (Params): Initial model parameters.
batch_input (jnp.ndarray): Input tensor to the model.
batch_corrupted (jnp.ndarray): Corrupted version of the output tensor.
psm (jnp.ndarray): Power special matrix.
Returns:
Tuple[jax.random.PRNGKey, jnp.ndarray, jnp.ndarray]: A tuple containing a new random key, the model output, and the inner loss.
"""
params_adapted, loss_inner = update_inner_fn(params, batch_input, batch_corrupted, psm)
model_output = jax.vmap(self.apply)(params_adapted, batch_input)
return rng, model_output, loss_inner
return apply_forward
def make_update_inner_fn(
self, optimizer_inner: optax.GradientTransformation, n_steps: int
) -> Callable[[Params, jnp.ndarray, jnp.ndarray, jnp.ndarray], Tuple[jnp.ndarray, jnp.ndarray]]:
"""Create a function to update model parameters for inner optimization.
This method creates a function that performs the inner optimization updates
during the meta-training phase, which is a key component of the MAML algorithm.
Args:
optimizer_inner (optax.GradientTransformation): The optimizer used for inner optimization.
n_steps (int): The number of optimization steps.
Returns:
Callable[..., Tuple[jnp.ndarray, jnp.ndarray]]: Function to update model parameters for inner optimization.
"""
@partial(jax.vmap, in_axes=0)
@partial(jax.grad, has_aux=True)
def loss_inner_fn(params_i: Params, batch_input: T, y_corrupted: T, psm: T) -> T:
"""Computes the loss for inner optimization.
This inner method computes the loss for inner optimization by comparing
the model's output against the corrupted batch using mean square error.
The method is vectorized using JAX's vmap function for efficiency.
Args:
params_i (Params): Model parameters.
batch_input (T): Input batch.
y_corrupted (T): Corrupted batch.
psm (T): Power special matrix.
Returns:
T: Loss value.
"""
c = y_corrupted.shape[-1]
model_output = self.apply(params_i, batch_input)
if len(psm.shape) == 3:
model_output_freq = jnp.fft.fft2(model_output.reshape(*psm.shape[:-1], c), norm="ortho", axes=(0, 1))
y_corrupted_freq = jnp.fft.fft2(y_corrupted.reshape(*psm.shape[:-1], c), norm="ortho", axes=(0, 1))
else:
model_output_freq = jnp.fft.fft(model_output.reshape(*psm.shape[:-1], c), norm="ortho", axis=0)
y_corrupted_freq = jnp.fft.fft(y_corrupted.reshape(*psm.shape[:-1], c), norm="ortho", axis=0)
mse = mean_square_error(
y_corrupted_freq.reshape(-1, c),
model_output_freq.reshape(-1, c),
psm.reshape(-1, 1),
)
loss: jnp.ndarray = jnp.mean(mse)
return loss, loss
def apply_inner_forward(
params: Params, batch_input: jnp.ndarray, batch_corrupted: jnp.ndarray, psm: jnp.ndarray
):
"""Applies inner forward pass for updating model parameters.
Args:
params (Params): Model parameters.
batch_input (jnp.ndarray): Input batch.
batch_corrupted (jnp.ndarray): Corrupted batch.
psm (jnp.ndarray): Power special matrix.
Returns:
Tuple[jnp.ndarray, jnp.ndarray]: Updated model parameters and inner loss.
"""
def inner_opt_loop(
carry: Tuple[Params, jnp.ndarray, int, Any, jnp.ndarray], _: None
) -> Tuple[Tuple[Params, jnp.ndarray, int, Any, jnp.ndarray], None]:
"""Inner optimization loop for updating model parameters.
Args:
carry (Tuple[Params, jnp.ndarray, int, optax.OptState, jnp.ndarray]): Tuple containing model parameters,
loss vector, iteration index, optimizer state, and corrupted batch.
_ (None): A throwaway variable as no second argument is used in this function.
Returns:
Tuple[Params, jnp.ndarray, int, optax.OptState, jnp.ndarray]: Updated tuple with new model parameters,
updated loss vector, incremented iteration index, updated optimizer state, and corrupted batch.
"""
params_i, loss_inner_vec, it, opt_inner_state_params, batch_corrupted_i = carry
grad_params, (loss) = loss_inner_fn(params_i, batch_input, batch_corrupted_i, psm)
loss_inner_vec = loss_inner_vec.at[it].set(jnp.mean(loss))
if self.model_config.use_dense_lr:
# separate learning rates from grad_params
grad_params_true, _ = separate_learning_rates(unfreeze(grad_params))
# separate learning rates from params_i
params_i_true, learning_rates = separate_learning_rates(unfreeze(params_i))
# calculate updates using meta-sgd
updates_params = jax.tree_map(
lambda g, lr: -jnp.clip(lr, 0, 1) * g,
grad_params_true,
learning_rates,
)
# merge updates_params and learning_rates | merged_updates = merge_learning_rates(unfreeze(updates_params), unfreeze(learning_rates)) | 2 | 2023-10-24 22:01:35+00:00 | 4k |
godisboy0/nonebot-adapter-wcf | adapters/wechatferry/eventconverter.py | [
{
"identifier": "Event",
"path": "adapters/wechatferry/event.py",
"snippet": "class Sender (OnebotSender):\nclass PrivateMessageEvent (OnebotPrivateMessageEvent):\nclass GroupMessageEvent (OnebotGroupMessageEvent):\nclass TTT(BaseModel):\nclass TTTB(TTT):"
},
{
"identifier": "MessageSegment",
"path": "adapters/wechatferry/message.py",
"snippet": ""
},
{
"identifier": "WxType",
"path": "adapters/wechatferry/type.py",
"snippet": "class WxType(IntEnum):\n \"\"\"微信原始类型枚举\"\"\"\n\n WX_MSG_TEXT = 1\n \"\"\"文本\"\"\"\n WX_MSG_PICTURE = 3\n \"\"\"图片\"\"\"\n WX_MSG_VOICE = 34\n \"\"\"语音\"\"\"\n WX_MSG_FRIEND = 37\n \"\"\"加好友请求\"\"\"\n WX_MSG_CARD = 42\n \"\"\"名片\"\"\"\n WX_MSG_VIDEO = 43\n \"\"\"视频\"\"\"\n WX_MSG_EMOJI = 47\n \"\"\"表情\"\"\"\n WX_MSG_LOCATION = 48\n \"\"\"位置\"\"\"\n WX_MSG_APP = 49\n \"\"\"应用类型,这也是个复杂的类型,类似发送链接,发送小程序,引用消息。都是这个type,需要内部拆分。\"\"\"\n WX_MSG_HEARTBEAT = 51\n \"\"\"看着跟心跳包似得。。也没啥用\"\"\"\n WX_MSG_SYSTEM = 10000\n \"\"\"系统消息\"\"\"\n WX_MSG_REVOKE = 10002\n \"\"\"撤回消息,准确的说,这是‘系统消息’,撤回是其中之一,只用来做撤回了,暂时就叫这名字吧。。\"\"\""
},
{
"identifier": "logger",
"path": "adapters/wechatferry/utils.py",
"snippet": "class Logger:\nclass downloader:\n def __init__(self) -> None:\n def info(self, msg: str, e: Exception=None) -> None:\n def error(self, msg: str, e: Exception=None) -> None:\n def debug(self, msg: str, e: Exception=None) -> None:\n def warning(self, msg: str, e: Exception=None) -> None:\ndef handle_api_result(result: Optional[Dict[str, Any]]) -> Any:\ndef file_md5(file_path) -> Optional[str]:\n def __init__(self, url, file_name, path: str, override: bool = True, chunk_size: int = 1024, headers={}) -> None:\n async def downloadAsync(self) -> str:\n def download(self) -> str:"
},
{
"identifier": "database",
"path": "adapters/wechatferry/sqldb.py",
"snippet": "class database:\n\n def __init__(self, file_path, db_name=\"wcf\") -> None:\n ## 如果同参数\n global singleton_dict\n if hasattr(singleton_dict, file_path):\n self.conn = getattr(singleton_dict, file_path)\n return\n \n if not file_path:\n raise ValueError(\"file_path can not be empty\")\n if not os.path.exists(file_path):\n os.makedirs(file_path, exist_ok=True)\n \n datafile = os.path.join(file_path, db_name)\n self.conn = sqlite3.connect(datafile)\n singleton_dict.file_path = self.conn\n\n def create_table(self, sql: str) -> None:\n cursor = self.conn.cursor()\n try:\n cursor.execute(sql)\n self.conn.commit()\n except Exception as e:\n logger.error(f\"Failed to create table: {e}\")\n raise e\n finally:\n cursor.close()\n\n def query(self, sql, *args) -> list:\n cursor = self.conn.cursor()\n try:\n cursor.execute(sql, args)\n return cursor.fetchall()\n except Exception as e:\n logger.error(f\"Failed to query: {e}\")\n raise e\n finally:\n cursor.close()\n\n def execute(self, sql: str, *args) -> None:\n cursor = self.conn.cursor()\n try:\n cursor.execute(sql, args)\n self.conn.commit()\n except Exception as e:\n logger.error(f\"Failed to execute: {e}\")\n raise e\n finally:\n cursor.close()\n\n def insert(self, sql: str, *args) -> None:\n cursor = self.conn.cursor()\n try:\n cursor.execute(sql, args)\n self.conn.commit()\n except Exception as e:\n logger.error(f\"Failed to insert: {e}\")\n raise e\n finally:\n cursor.close()\n\n def update(self, sql: str, *args) -> None:\n cursor = self.conn.cursor()\n try:\n cursor.execute(sql, args)\n self.conn.commit()\n except Exception as e:\n logger.error(f\"Failed to update: {e}\")\n raise e\n finally:\n cursor.close()\n\n def delete(self, sql: str, *args) -> None:\n cursor = self.conn.cursor()\n try:\n cursor.execute(sql, args)\n self.conn.commit()\n except Exception as e:\n logger.error(f\"Failed to delete: {e}\")\n raise e\n finally:\n cursor.close()\n\n def table_exists(self, table_name: str) -> bool:\n sql = f\"SELECT count(*) FROM sqlite_master WHERE type='table' AND name='{table_name}'\"\n cursor = self.conn.cursor()\n try:\n cursor.execute(sql)\n return cursor.fetchone()[0] == 1\n except Exception as e:\n logger.error(f\"Failed to check table {table_name} exists: {e}\")\n return False\n finally:\n cursor.close()"
},
{
"identifier": "convert_to_bot_msg",
"path": "adapters/wechatferry/msg_converters.py",
"snippet": "async def convert_to_bot_msg(msg: WxMsg, bot_wx_id: str, wcf: Wcf, db: database) -> Optional[Message]:\n \"\"\"\n 用于转换消息。转化为标准的 Message 类型。\n \"\"\"\n msg_conv = msg_conv_dict.get(msg.type)\n if msg_conv:\n return await msg_conv(msg, bot_wx_id, wcf, db)\n else:\n logger.warning(f\"Unknown msg type: {msg.type}\")\n return None"
},
{
"identifier": "AdapterConfig",
"path": "adapters/wechatferry/config.py",
"snippet": "class AdapterConfig(BaseModel):\n \"\"\"wechatferry 配置类\"\"\"\n\n root_user: str\n debug: bool = Field(default=True)\n \"\"\"是否开启调试模式\"\"\"\n db_path: str = Field(default=\"./data\")\n \"\"\"数据库路径,默认为当前运行路径下的 data 文件夹,该文件夹已经被 .gitignore 忽略\"\"\"\n echo_root_msg: bool = Field(default=False)\n \"\"\"是否将 root_user 的信息直接做成json回传给root_user\"\"\"\n \"\"\"在debug时非常有用,特别是你的开发机器和部署微信的机器不是同一台时。用过的都说好\"\"\"\n \n\n class Config:\n extra = \"ignore\""
},
{
"identifier": "send_to_root",
"path": "adapters/wechatferry/debug_helper.py",
"snippet": "def send_to_root(msg: Union[Any, WxMsg], wcf: Wcf = None, root_user: str = None):\n from nonebot import get_adapter, get_driver\n global _root_user\n global _wcf\n if wcf is None:\n if _wcf:\n wcf = _wcf\n else: \n wcf: Wcf = get_adapter('wechatferry').wcf\n _wcf = wcf\n if root_user is None:\n if _root_user:\n root_user = _root_user\n else:\n root_user = AdapterConfig.parse_obj(get_driver().config).root_user\n _root_user = root_user\n\n if isinstance(msg, WxMsg):\n file_str = json.dumps({\n 'is_self': msg._is_self,\n 'is_group': msg._is_group,\n 'type': msg.type,\n 'id': msg.id,\n 'ts': msg.ts,\n 'sign': msg.sign,\n 'xml': msg.xml.replace(\"\\n\", \"\").replace(\"\\t\", \"\") if msg.xml else None,\n 'sender': msg.sender,\n 'roomid': msg.roomid,\n 'content': msg.content.replace(\"\\n\", \"\").replace(\"\\t\", \"\") if msg.content else None,\n 'thumb': msg.thumb,\n 'extra': msg.extra\n }, ensure_ascii=False, indent=4)\n file_ext = \"json\"\n else:\n file_str = msg\n file_ext = \"txt\"\n if isinstance(msg, str) or isinstance(msg, WxMsg):\n file_path = os.path.join(\n echo_temp_dir, f'{int(time.time() * 1000)}.{file_ext}')\n with open(file_path, 'w', encoding='utf-8') as f:\n f.write(file_str)\n wcf.send_file(file_path, root_user)\n else:\n raise TypeError(f\"msg should be str or WxMsg, not {type(msg)}\")"
}
] | from wcferry import Wcf, WxMsg
from .event import Event, PrivateMessageEvent, GroupMessageEvent, Sender
from .message import MessageSegment, Message
from .type import WxType
from .utils import logger
from nonebot.utils import escape_tag
from .sqldb import database
from .msg_converters import convert_to_bot_msg
from .config import AdapterConfig
from nonebot import get_driver
from .debug_helper import send_to_root
import re
import os | 2,572 | """
onebot11 standard requirements: https://github.com/botuniverse/onebot-11/blob/master/README.md
onebot11 message segment types: https://github.com/botuniverse/onebot-11/blob/master/message/segment.md
"""
adapter_config = AdapterConfig.parse_obj(get_driver().config)
async def echo_root_msg_as_json_file(msg: WxMsg, wcf: Wcf = None):
root_user = adapter_config.root_user
echo_root_msg = adapter_config.echo_root_msg
if msg.sender != root_user or not echo_root_msg or msg._is_group:
return
send_to_root(msg, wcf, root_user)
def __get_mention_list(req: WxMsg) -> list[str]:
if req.xml is not None:
pattern = r'<atuserlist>(.*?)</atuserlist>'
match = re.search(pattern, req.xml)
if match:
atuserlist = match.group(1)
return [user_id for user_id in atuserlist.split(',')]
return []
async def convert_to_event(msg: WxMsg, login_wx_id: str, wcf: Wcf, db: database) -> Event:
"""Converts a wechatferry event to a nonebot event."""
logger.debug(f"Converting message to event: {escape_tag(str(msg))}")
if not msg or msg.type == WxType.WX_MSG_HEARTBEAT:
return None
await echo_root_msg_as_json_file(msg, wcf)
args = {}
| """
onebot11 standard requirements: https://github.com/botuniverse/onebot-11/blob/master/README.md
onebot11 message segment types: https://github.com/botuniverse/onebot-11/blob/master/message/segment.md
"""
adapter_config = AdapterConfig.parse_obj(get_driver().config)
async def echo_root_msg_as_json_file(msg: WxMsg, wcf: Wcf = None):
root_user = adapter_config.root_user
echo_root_msg = adapter_config.echo_root_msg
if msg.sender != root_user or not echo_root_msg or msg._is_group:
return
send_to_root(msg, wcf, root_user)
def __get_mention_list(req: WxMsg) -> list[str]:
if req.xml is not None:
pattern = r'<atuserlist>(.*?)</atuserlist>'
match = re.search(pattern, req.xml)
if match:
atuserlist = match.group(1)
return [user_id for user_id in atuserlist.split(',')]
return []
async def convert_to_event(msg: WxMsg, login_wx_id: str, wcf: Wcf, db: database) -> Event:
"""Converts a wechatferry event to a nonebot event."""
logger.debug(f"Converting message to event: {escape_tag(str(msg))}")
if not msg or msg.type == WxType.WX_MSG_HEARTBEAT:
return None
await echo_root_msg_as_json_file(msg, wcf)
args = {} | onebot_msg: Message = await convert_to_bot_msg(msg, login_wx_id, wcf, db) | 5 | 2023-10-22 10:52:27+00:00 | 4k |
R1999RC-official/Reverse1999ResonanceCalculator | python/python_env/Lib/site-packages/pip/_vendor/urllib3/util/retry.py | [
{
"identifier": "ConnectTimeoutError",
"path": "python/python_env/Lib/site-packages/pip/_vendor/urllib3/exceptions.py",
"snippet": "class ConnectTimeoutError(TimeoutError):\n \"\"\"Raised when a socket timeout occurs while connecting to a server\"\"\"\n\n pass"
},
{
"identifier": "InvalidHeader",
"path": "python/python_env/Lib/site-packages/pip/_vendor/urllib3/exceptions.py",
"snippet": "class InvalidHeader(HTTPError):\n \"\"\"The header provided was somehow invalid.\"\"\"\n\n pass"
},
{
"identifier": "MaxRetryError",
"path": "python/python_env/Lib/site-packages/pip/_vendor/urllib3/exceptions.py",
"snippet": "class MaxRetryError(RequestError):\n \"\"\"Raised when the maximum number of retries is exceeded.\n\n :param pool: The connection pool\n :type pool: :class:`~urllib3.connectionpool.HTTPConnectionPool`\n :param string url: The requested Url\n :param exceptions.Exception reason: The underlying error\n\n \"\"\"\n\n def __init__(self, pool, url, reason=None):\n self.reason = reason\n\n message = \"Max retries exceeded with url: %s (Caused by %r)\" % (url, reason)\n\n RequestError.__init__(self, pool, url, message)"
},
{
"identifier": "ProtocolError",
"path": "python/python_env/Lib/site-packages/pip/_vendor/urllib3/exceptions.py",
"snippet": "class ProtocolError(HTTPError):\n \"\"\"Raised when something unexpected happens mid-request/response.\"\"\"\n\n pass"
},
{
"identifier": "ProxyError",
"path": "python/python_env/Lib/site-packages/pip/_vendor/urllib3/exceptions.py",
"snippet": "class ProxyError(HTTPError):\n \"\"\"Raised when the connection to a proxy fails.\"\"\"\n\n def __init__(self, message, error, *args):\n super(ProxyError, self).__init__(message, error, *args)\n self.original_error = error"
},
{
"identifier": "ReadTimeoutError",
"path": "python/python_env/Lib/site-packages/pip/_vendor/urllib3/exceptions.py",
"snippet": "class ReadTimeoutError(TimeoutError, RequestError):\n \"\"\"Raised when a socket timeout occurs while receiving data from a server\"\"\"\n\n pass"
},
{
"identifier": "ResponseError",
"path": "python/python_env/Lib/site-packages/pip/_vendor/urllib3/exceptions.py",
"snippet": "class ResponseError(HTTPError):\n \"\"\"Used as a container for an error reason supplied in a MaxRetryError.\"\"\"\n\n GENERIC_ERROR = \"too many error responses\"\n SPECIFIC_ERROR = \"too many {status_code} error responses\""
},
{
"identifier": "six",
"path": "python/python_env/Lib/site-packages/pip/_vendor/urllib3/packages/six.py",
"snippet": "PY2 = sys.version_info[0] == 2\nPY3 = sys.version_info[0] == 3\nPY34 = sys.version_info[0:2] >= (3, 4)\n MAXSIZE = sys.maxsize\n MAXSIZE = int((1 << 31) - 1)\n MAXSIZE = int((1 << 31) - 1)\n MAXSIZE = int((1 << 63) - 1)\n class X(object):\nclass _LazyDescr(object):\nclass MovedModule(_LazyDescr):\nclass _LazyModule(types.ModuleType):\nclass MovedAttribute(_LazyDescr):\nclass _SixMetaPathImporter(object):\nclass _MovedItems(_LazyModule):\nclass Module_six_moves_urllib_parse(_LazyModule):\nclass Module_six_moves_urllib_error(_LazyModule):\nclass Module_six_moves_urllib_request(_LazyModule):\nclass Module_six_moves_urllib_response(_LazyModule):\nclass Module_six_moves_urllib_robotparser(_LazyModule):\nclass Module_six_moves_urllib(types.ModuleType):\n class Iterator(object):\n class metaclass(type):\n def __len__(self):\ndef _add_doc(func, doc):\ndef _import_module(name):\n def __init__(self, name):\n def __get__(self, obj, tp):\n def __init__(self, name, old, new=None):\n def _resolve(self):\n def __getattr__(self, attr):\n def __init__(self, name):\n def __dir__(self):\n def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):\n def _resolve(self):\n def __init__(self, six_module_name):\n def _add_module(self, mod, *fullnames):\n def _get_module(self, fullname):\n def find_module(self, fullname, path=None):\n def find_spec(self, fullname, path, target=None):\n def __get_module(self, fullname):\n def load_module(self, fullname):\n def is_package(self, fullname):\n def get_code(self, fullname):\n def create_module(self, spec):\n def exec_module(self, module):\n def __dir__(self):\ndef add_move(move):\ndef remove_move(name):\n def advance_iterator(it):\n def callable(obj):\n def get_unbound_function(unbound):\n def create_unbound_method(func, cls):\n def get_unbound_function(unbound):\n def create_bound_method(func, obj):\n def create_unbound_method(func, cls):\n def next(self):\n def iterkeys(d, **kw):\n def itervalues(d, **kw):\n def iteritems(d, **kw):\n def iterlists(d, **kw):\n def iterkeys(d, **kw):\n def itervalues(d, **kw):\n def iteritems(d, **kw):\n def iterlists(d, **kw):\n def b(s):\n def u(s):\n def b(s):\n def u(s):\n def byte2int(bs):\n def indexbytes(buf, i):\ndef assertCountEqual(self, *args, **kwargs):\ndef assertRaisesRegex(self, *args, **kwargs):\ndef assertRegex(self, *args, **kwargs):\ndef assertNotRegex(self, *args, **kwargs):\n def reraise(tp, value, tb=None):\n def exec_(_code_, _globs_=None, _locs_=None):\n def raise_from(value, from_value):\n def print_(*args, **kwargs):\n def write(data):\n def print_(*args, **kwargs):\n def _update_wrapper(\n wrapper,\n wrapped,\n assigned=functools.WRAPPER_ASSIGNMENTS,\n updated=functools.WRAPPER_UPDATES,\n ):\n def wraps(\n wrapped,\n assigned=functools.WRAPPER_ASSIGNMENTS,\n updated=functools.WRAPPER_UPDATES,\n ):\ndef with_metaclass(meta, *bases):\n def __new__(cls, name, this_bases, d):\n def __prepare__(cls, name, this_bases):\ndef add_metaclass(metaclass):\n def wrapper(cls):\ndef ensure_binary(s, encoding=\"utf-8\", errors=\"strict\"):\ndef ensure_str(s, encoding=\"utf-8\", errors=\"strict\"):\ndef ensure_text(s, encoding=\"utf-8\", errors=\"strict\"):\ndef python_2_unicode_compatible(klass):"
}
] | import email
import logging
import re
import time
import warnings
from collections import namedtuple
from itertools import takewhile
from ..exceptions import (
ConnectTimeoutError,
InvalidHeader,
MaxRetryError,
ProtocolError,
ProxyError,
ReadTimeoutError,
ResponseError,
)
from ..packages import six | 2,191 | from __future__ import absolute_import
log = logging.getLogger(__name__)
# Data structure for representing the metadata of requests that result in a retry.
RequestHistory = namedtuple(
"RequestHistory", ["method", "url", "error", "status", "redirect_location"]
)
# TODO: In v2 we can remove this sentinel and metaclass with deprecated options.
_Default = object()
class _RetryMeta(type):
@property
def DEFAULT_METHOD_WHITELIST(cls):
warnings.warn(
"Using 'Retry.DEFAULT_METHOD_WHITELIST' is deprecated and "
"will be removed in v2.0. Use 'Retry.DEFAULT_ALLOWED_METHODS' instead",
DeprecationWarning,
)
return cls.DEFAULT_ALLOWED_METHODS
@DEFAULT_METHOD_WHITELIST.setter
def DEFAULT_METHOD_WHITELIST(cls, value):
warnings.warn(
"Using 'Retry.DEFAULT_METHOD_WHITELIST' is deprecated and "
"will be removed in v2.0. Use 'Retry.DEFAULT_ALLOWED_METHODS' instead",
DeprecationWarning,
)
cls.DEFAULT_ALLOWED_METHODS = value
@property
def DEFAULT_REDIRECT_HEADERS_BLACKLIST(cls):
warnings.warn(
"Using 'Retry.DEFAULT_REDIRECT_HEADERS_BLACKLIST' is deprecated and "
"will be removed in v2.0. Use 'Retry.DEFAULT_REMOVE_HEADERS_ON_REDIRECT' instead",
DeprecationWarning,
)
return cls.DEFAULT_REMOVE_HEADERS_ON_REDIRECT
@DEFAULT_REDIRECT_HEADERS_BLACKLIST.setter
def DEFAULT_REDIRECT_HEADERS_BLACKLIST(cls, value):
warnings.warn(
"Using 'Retry.DEFAULT_REDIRECT_HEADERS_BLACKLIST' is deprecated and "
"will be removed in v2.0. Use 'Retry.DEFAULT_REMOVE_HEADERS_ON_REDIRECT' instead",
DeprecationWarning,
)
cls.DEFAULT_REMOVE_HEADERS_ON_REDIRECT = value
@property
def BACKOFF_MAX(cls):
warnings.warn(
"Using 'Retry.BACKOFF_MAX' is deprecated and "
"will be removed in v2.0. Use 'Retry.DEFAULT_BACKOFF_MAX' instead",
DeprecationWarning,
)
return cls.DEFAULT_BACKOFF_MAX
@BACKOFF_MAX.setter
def BACKOFF_MAX(cls, value):
warnings.warn(
"Using 'Retry.BACKOFF_MAX' is deprecated and "
"will be removed in v2.0. Use 'Retry.DEFAULT_BACKOFF_MAX' instead",
DeprecationWarning,
)
cls.DEFAULT_BACKOFF_MAX = value
| from __future__ import absolute_import
log = logging.getLogger(__name__)
# Data structure for representing the metadata of requests that result in a retry.
RequestHistory = namedtuple(
"RequestHistory", ["method", "url", "error", "status", "redirect_location"]
)
# TODO: In v2 we can remove this sentinel and metaclass with deprecated options.
_Default = object()
class _RetryMeta(type):
@property
def DEFAULT_METHOD_WHITELIST(cls):
warnings.warn(
"Using 'Retry.DEFAULT_METHOD_WHITELIST' is deprecated and "
"will be removed in v2.0. Use 'Retry.DEFAULT_ALLOWED_METHODS' instead",
DeprecationWarning,
)
return cls.DEFAULT_ALLOWED_METHODS
@DEFAULT_METHOD_WHITELIST.setter
def DEFAULT_METHOD_WHITELIST(cls, value):
warnings.warn(
"Using 'Retry.DEFAULT_METHOD_WHITELIST' is deprecated and "
"will be removed in v2.0. Use 'Retry.DEFAULT_ALLOWED_METHODS' instead",
DeprecationWarning,
)
cls.DEFAULT_ALLOWED_METHODS = value
@property
def DEFAULT_REDIRECT_HEADERS_BLACKLIST(cls):
warnings.warn(
"Using 'Retry.DEFAULT_REDIRECT_HEADERS_BLACKLIST' is deprecated and "
"will be removed in v2.0. Use 'Retry.DEFAULT_REMOVE_HEADERS_ON_REDIRECT' instead",
DeprecationWarning,
)
return cls.DEFAULT_REMOVE_HEADERS_ON_REDIRECT
@DEFAULT_REDIRECT_HEADERS_BLACKLIST.setter
def DEFAULT_REDIRECT_HEADERS_BLACKLIST(cls, value):
warnings.warn(
"Using 'Retry.DEFAULT_REDIRECT_HEADERS_BLACKLIST' is deprecated and "
"will be removed in v2.0. Use 'Retry.DEFAULT_REMOVE_HEADERS_ON_REDIRECT' instead",
DeprecationWarning,
)
cls.DEFAULT_REMOVE_HEADERS_ON_REDIRECT = value
@property
def BACKOFF_MAX(cls):
warnings.warn(
"Using 'Retry.BACKOFF_MAX' is deprecated and "
"will be removed in v2.0. Use 'Retry.DEFAULT_BACKOFF_MAX' instead",
DeprecationWarning,
)
return cls.DEFAULT_BACKOFF_MAX
@BACKOFF_MAX.setter
def BACKOFF_MAX(cls, value):
warnings.warn(
"Using 'Retry.BACKOFF_MAX' is deprecated and "
"will be removed in v2.0. Use 'Retry.DEFAULT_BACKOFF_MAX' instead",
DeprecationWarning,
)
cls.DEFAULT_BACKOFF_MAX = value
| @six.add_metaclass(_RetryMeta) | 7 | 2023-10-24 06:48:58+00:00 | 4k |
mentpy/mentpy | mentpy/operators/controlled_ment.py | [
{
"identifier": "PauliX",
"path": "mentpy/operators/gates.py",
"snippet": "CNOT = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]])\nSWAP = np.array([[1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]])\n U = unitary_group.rvs(2**n_qubits)\n U = U / np.power(detU, 1 / (2**n_qubits))\ndef random_su(n_qubits: int):\ndef swap_qubits(state_vector, i, j):\ndef arbitrary_qubit_gate(u, i, n):\ndef swap_ij(i, j, n):\ndef cnot_ij(i, j, n):\ndef controlled_z(i, j, n):"
},
{
"identifier": "Ment",
"path": "mentpy/operators/ment.py",
"snippet": "class Ment:\n \"\"\"Measurement operator.\n\n Args\n ----\n angle: float\n The angle of the measurement. Only used if plane is \"XY\", \"XZ\", \"YZ\", or \"XYZ\".\n If plane is \"XYZ\", the input should be a tuple of two angles.\n plane: str\n The plane of the measurement. Can be \"XY\", \"XZ\", \"YZ\", \"XYZ\", \"X\", \"Y\", \"Z\".\n \"\"\"\n\n def __init__(\n self,\n angle: Optional[Union[int, float, tuple, str]] = None,\n plane: Optional[str] = \"XY\",\n ):\n \"\"\"Measurement operator.\"\"\"\n\n if isinstance(angle, (int, float, tuple)) or angle is None:\n angle = angle if angle is not None else None\n plane = plane if plane is not None else \"XY\"\n elif isinstance(angle, str):\n temp_plane = angle\n if isinstance(plane, (int, float, tuple)):\n angle = plane\n else:\n angle = None\n plane = temp_plane\n else:\n raise TypeError(\n f\"Invalid argument type. Expected float or str but got {type(angle)}\"\n )\n\n plane = plane.upper()\n allowd_planes = [\"XY\", \"XZ\", \"YZ\", \"XYZ\", \"X\", \"Y\", \"Z\"]\n if plane not in allowd_planes:\n raise ValueError(f\"Plane {plane} is not supported.\")\n elif plane == \"XYZ\":\n warnings.warn(\"Plane XYZ might be unstable. Use at your own risk.\")\n\n if plane in [\"X\", \"Y\", \"Z\"]:\n if angle is not None and angle != 0:\n raise ValueError(f\"Plane {plane} does not support angle.\")\n else:\n angle = 0\n\n self._plane = plane\n self._angle = angle\n self._node_id = -1\n self._outcome = MentOutcome(lambda x: x[self._node_id])\n\n def __repr__(self):\n theta = round(self._angle, 4) if isinstance(self._angle, (int, float)) else \"θ\"\n theta = (\n (round(self._angle[0], 4), round(self._angle[1], 4))\n if isinstance(self._angle, tuple)\n else theta\n )\n return f\"Ment({theta}, {self._plane})\"\n\n @property\n def plane(self):\n return self._plane\n\n @property\n def angle(self):\n return self._angle\n\n @property\n def outcome(self) -> MentOutcome:\n return self._outcome\n\n @property\n def node_id(self) -> Any:\n return self._node_id\n\n @node_id.setter\n def node_id(self, node_id: Any):\n self._node_id = node_id\n self._outcome = MentOutcome(lambda x: x[self._node_id], self._node_id)\n\n def set_angle(self, angle):\n \"Sets the angle of the measurement.\"\n self._angle = angle\n return self\n\n def copy(self):\n \"Returns a copy of the measurement.\"\n return Ment(self._angle, self._plane)\n\n def is_trainable(self):\n \"Returns True if the measurement is trainable.\"\n return self._angle is None and self._plane in [\"XY\", \"XZ\", \"YZ\", \"XYZ\"]\n\n def matrix(self, angle: Optional[float] = None, *args, **kwargs):\n \"Returns the matrix representation of the measurement.\"\n if self._angle is None and angle is None:\n raise ValueError(\"Measurement is trainable, please provide an angle.\")\n elif self._angle is not None and angle is not None:\n if self._angle != angle:\n raise ValueError(\n f\"Measurement has a fixed angle of {round(self._angle, 4)}\"\n )\n elif self._angle is not None:\n angle = self._angle\n\n match self._plane:\n case \"XY\":\n matrix = np.cos(angle) * PauliX + np.sin(angle) * PauliY\n case \"X\" | \"Y\" | \"Z\":\n matrices = {\"X\": PauliX, \"Y\": PauliY, \"Z\": PauliZ}\n matrix = matrices[self._plane]\n case \"XZ\":\n matrix = np.cos(angle) * PauliX + np.sin(angle) * PauliZ\n case \"YZ\":\n matrix = np.cos(angle) * PauliY + np.sin(angle) * PauliZ\n case \"XYZ\":\n if isinstance(angle, tuple):\n angle1, angle2 = angle\n else:\n raise TypeError(\n f\"Invalid argument type. 
Expected tuple but got {type(angle)}\"\n )\n matrix = (\n np.cos(angle1) * np.cos(angle2) * PauliX\n + np.sin(angle1) * np.cos(angle2) * PauliY\n + np.sin(angle2) * PauliZ\n )\n\n return matrix\n\n def get_povm(self, angle: Optional[float] = None, *args, **kwargs):\n \"\"\"Returns the POVM representation of the measurement.\"\"\"\n mat = self.matrix(angle, *args, **kwargs)\n m0 = (np.eye(2) + mat) / 2\n m1 = (np.eye(2) - mat) / 2\n return m0, m1"
},
{
"identifier": "MentOutcome",
"path": "mentpy/operators/ment.py",
"snippet": "class MentOutcome:\n \"\"\"Measurement outcome class.\"\"\"\n\n def __init__(self, outcome: Callable[..., bool], node_id=None, cond_nodes=None):\n if isinstance(outcome, (bool, int)):\n outcome_value = bool(outcome % 2)\n outcome = lambda *args, **kwargs: outcome_value\n self._outcome = outcome\n self._node_id = node_id\n if isinstance(outcome, MentOutcome):\n self._cond_nodes = outcome.cond_nodes\n else:\n self._cond_nodes = (\n cond_nodes\n if cond_nodes is not None\n else (set([node_id]) if node_id is not None else set())\n )\n\n @property\n def node_id(self):\n return self._node_id\n\n @node_id.setter\n def node_id(self, node_id):\n self._node_id = node_id\n\n @property\n def cond_nodes(self):\n return self._cond_nodes\n\n def __repr__(self) -> str:\n return f\"Measurement Outcome\"\n\n def __call__(self, *args, **kwargs):\n try:\n return self._outcome(*args, **kwargs)\n except:\n raise UserWarning(\"Could not evaluate callable at given\")\n\n def _binary_operation(self, operation, other):\n if isinstance(other, (bool, int)):\n return MentOutcome(\n lambda x: bool(operation(self._outcome(x), other) % 2),\n cond_nodes=self._cond_nodes,\n )\n elif isinstance(other, MentOutcome):\n return MentOutcome(\n lambda x: bool(operation(self._outcome(x), other._outcome(x)) % 2),\n cond_nodes=self._cond_nodes | other._cond_nodes,\n )\n elif isinstance(other, Callable):\n return MentOutcome(\n lambda x: bool(operation(self._outcome(x), other(x)) % 2),\n cond_nodes=self._cond_nodes,\n )\n else:\n raise TypeError(f\"Invalid type {type(other)}\")\n\n def __mul__(self, other):\n return self._binary_operation(lambda x, y: x * y, other)\n\n def __add__(self, other):\n return self._binary_operation(lambda x, y: x + y, other)\n\n def __sub__(self, other):\n return self._binary_operation(lambda x, y: x - y, other)\n\n def __truediv__(self, other):\n return self._binary_operation(lambda x, y: x / y, other)\n\n def __floordiv__(self, other):\n return self._binary_operation(lambda x, y: x // y, other)\n\n def __mod__(self, other):\n return self._binary_operation(lambda x, y: x % y, other)\n\n def __pow__(self, other):\n return self._binary_operation(lambda x, y: x**y, other)\n\n def __eq__(self, other):\n return self._binary_operation(lambda x, y: x == y, other)\n\n def __ne__(self, other):\n return self._binary_operation(lambda x, y: x != y, other)\n\n def __lt__(self, other):\n return self._binary_operation(lambda x, y: x < y, other)\n\n def __le__(self, other):\n return self._binary_operation(lambda x, y: x <= y, other)\n\n def __gt__(self, other):\n return self._binary_operation(lambda x, y: x > y, other)\n\n def __ge__(self, other):\n return self._binary_operation(lambda x, y: x >= y, other)\n\n def __and__(self, other):\n return self._binary_operation(lambda x, y: x and y, other)\n\n def __or__(self, other):\n return self._binary_operation(lambda x, y: x or y, other)\n\n def __xor__(self, other):\n return self._binary_operation(lambda x, y: x ^ y, other)\n\n def __invert__(self):\n return MentOutcome(lambda x: not self._outcome(x))"
}
] | from typing import Optional, Union, Callable
from .gates import PauliX, PauliY, PauliZ
from .ment import Ment, MentOutcome
import numpy as np
import warnings | 2,653 | # Copyright 2023 Luis Mantilla
#
# Licensed under the Apache License, Version 2.0.
# See <http://www.apache.org/licenses/LICENSE-2.0> for details.
"""Controlled measurement operator."""
class ControlMent(Ment):
def __init__(
self,
| # Copyright 2023 Luis Mantilla
#
# Licensed under the Apache License, Version 2.0.
# See <http://www.apache.org/licenses/LICENSE-2.0> for details.
"""Controlled measurement operator."""
class ControlMent(Ment):
def __init__(
self, | condition: Optional[Union[bool, MentOutcome]] = None, | 2 | 2023-10-18 18:29:42+00:00 | 4k |
rnag/cert-hero | tests/integration/test_cert_hero.py | [
{
"identifier": "cert_please",
"path": "cert_hero/cert_hero.py",
"snippet": "def cert_please(hostname: str,\n context: ssl.SSLContext = None,\n user_agent: str | None = _DEFAULT_USER_AGENT,\n default_encoding='latin-1',\n ) -> CertHero[str, str | int | dict[str, str | bool]] | None:\n \"\"\"\n Retrieve the SSL certificate for a given ``hostname`` - works even\n in the case of expired or self-signed certificates.\n\n Usage:\n\n >>> import cert_hero\n >>> cert = cert_hero.cert_please('google.com')\n >>> cert.not_after_date\n datetime.date(2023, 10, 28)\n >>> f'Cert is Valid Till: {cert.not_after_date.isoformat()}'\n 'Cert is Valid Till: 2023-10-28'\n >>> cert\n CertHero(\n {\n \"Cert Status\": \"SUCCESS\",\n \"Serial\": \"753DD6FF20CB1B4510CB4C1EA27DA2EB\",\n \"Subject Name\": {\n \"Common Name\": \"*.google.com\"\n },\n \"Issuer Name\": {\n \"Country\": \"US\",\n \"State/Province\": \"California\",\n \"Organization\": \"Zscaler Inc.\",\n \"Organization Unit\": \"Zscaler Inc.\",\n \"Common Name\": \"Zscaler Intermediate Root CA (zscalerthree.net) (t) \"\n },\n \"Validity\": {\n \"Not After\": \"2023-10-28\",\n \"Not Before\": \"2023-10-14\"\n },\n \"Wildcard\": true,\n \"Signature Algorithm\": \"SHA256WITHRSA\",\n \"Key Algorithm\": \"RSA-2048\",\n \"Subject Alt Names\": [\n \"*.google.com\",\n \"*.appengine.google.com\",\n \"youtu.be\",\n \"*.youtube.com\",\n ...\n ],\n \"Location\": \"https://www.google.com/\",\n \"Status\": 301\n }\n )\n >>> cert_hero.set_expired(cert)\n >>> cert['Validity']\n {'Not After': '2023-10-28', 'Not Before': '2023-10-14', 'Expired': False}\n\n\n Rationale:\n\n The builtin Python module ``ssl`` can be used to retrieve a certificate from a server via ``getpeercert``,\n but it'll work only if the certificate of interest can be successfully verified (source_).\n\n If, for any reason, verification fails, like, for example, with expired or a `self-signed certificate`_,\n we'll get ``ssl.SSLCertVerificationError`` instead of the requested info.\n\n We can work around this by asking for the certificate in the binary form:\n\n getpeercert(binary_form=True)\n\n But now we have to convert it, and thus we can use a third party ``asn1crypto`` module, instead of\n the (bulkier) ``cryptography`` module.\n\n Additionally, if the host **redirects** the client to another URL, this info is\n captured in the ``Location`` and ``Status`` fields.\n\n .. _source: https://stackoverflow.com/a/74349032/10237506\n .. _self-signed certificate: https://stackoverflow.com/a/68889470/10237506\n\n :param hostname: Host (or server) to retrieve SSL Certificate for\n :param context: (Optional) Shared SSL Context\n :param user_agent: A custom *user agent* to use for the HTTP call to retrieve ``Location`` and ``Status``.\n Defaults to ``python-requests/{version}``, or a random *user agent* if the ``fake_useragent`` module\n is installed (via the ``fake-ua``\n `extra <https://packaging.python.org/en/latest/tutorials/installing-packages/#installing-extras>`__).\n :param default_encoding: Encoding used to decode bytes for the HTTP call to retrieve ``Location``\n and ``Status``. 
Defaults to ``latin-1`` (or ISO-8859-1).\n\n \"\"\"\n if context is None:\n context = create_ssl_context()\n\n # with socket.create_connection()\n try:\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:\n sock.settimeout(3)\n with context.wrap_socket(\n sock, server_hostname=hostname\n ) as wrap_socket:\n wrap_socket.setsockopt(\n socket.SOL_SOCKET, socket.SO_REUSEADDR, 1\n )\n\n wrap_socket.connect((hostname, 443))\n\n # get certificate\n cert_bin: bytes = wrap_socket.getpeercert(True) # type: ignore\n\n # use custom `user_agent` if passed in, else:\n # * use a random \"user agent\", if the `fake_useragent` module is installed,\n # else use the default \"user agent\" (python-requests)\n if not user_agent:\n user_agent = get_user_agent()\n\n LOG.debug('User Agent: %s', user_agent)\n\n headers = (\n f'GET / HTTP/1.0\\r\\n'\n f'Host: {hostname}\\r\\n'\n f'User-Agent: {user_agent}\\r\\n'\n 'Accept-Encoding: gzip, deflate\\r\\n'\n 'Accept: */*\\r\\n'\n '\\r\\n'\n )\n # print(\"\\n\\n\" + headers)\n\n wrap_socket.send(headers.encode()) # send request\n\n data = bytes()\n while True:\n this_data = wrap_socket.recv(512)\n if not this_data:\n break\n data += this_data\n\n # Latin-1 (or ISO-8859-1) is a safe default: it will always\n # decode any bytes (though the result may not be useful).\n response = data.decode(default_encoding)\n\n # Get the first line (the \"status line\")\n # Ref: https://developer.mozilla.org/en-US/docs/Web/HTTP/Messages\n status_line = response.split('\\n', 1)[0]\n\n # HTTP/1.1 301 Moved Permanently\n try:\n status_code = int(status_line.split(' ', 2)[1])\n except (ValueError, TypeError):\n status_code = None\n\n # print(response) # print receive response\n\n loc = None\n if (loc_start := response.find('\\nLocation: ')) != -1:\n loc = response[loc_start + 11:].split('\\r\\n', maxsplit=1)[\n 0\n ]\n except socket.gaierror as e:\n # curl: (6) Could not resolve host: <hostname>\n if e.errno == 8:\n # [Errno 8] nodename nor servname provided, or not known\n LOG.error(f'gaierror: could not resolve host. {hostname=}')\n ...\n else:\n LOG.error(f'{e.__class__.__name__}: {e}. {hostname=}')\n return None\n except ssl.SSLEOFError:\n # SSL/TLS connection terminated abruptly.\n # message: \"EOF occurred in violation of protocol\"\n # this could indicate bad cert or website is down\n LOG.error(f'SSLEOFError: bad cert. {hostname=}')\n return None\n except ssl.SSLError as e:\n #\n LOG.error(f'{e.__class__.__name__}: {e}. {hostname=}')\n return None\n # except socket.error as e:\n # print(f'{e.__class__.__name__}: Error for {hostname}: {e}')\n # return None\n except Exception as e:\n LOG.error(f'{e.__class__.__name__}: General Error - {e}. 
{hostname=}')\n return None\n else:\n _cert: Certificate = Certificate.load(cert_bin)\n\n # print(_cert)\n # print(dumps(_cert.native, default=str))\n # print(_cert.self_signed)\n\n # print(dict(_cert.subject.native))\n # print(dict(_cert.issuer.native))\n # pprint(_cert.native)\n # print(_cert.subject_alt_name_value.native)\n\n cert_info = CertHero(\n {\n 'Cert Status': 'SUCCESS',\n 'Serial': format(_cert.serial_number, 'X'),\n 'Subject Name': (\n subject := {\n KEY_MAP.get(k, k): v\n for k, v in _cert.subject.native.items()\n }\n ),\n 'Issuer Name': {\n KEY_MAP.get(k, k): v for k, v in _cert.issuer.native.items()\n },\n 'Validity': {\n 'Not After': (\n not_after_date := _cert.not_valid_after.date()\n ).isoformat(),\n 'Not Before': (\n not_before_date := _cert.not_valid_before.date()\n ).isoformat(),\n },\n 'Wildcard': subject.get('Common Name', '').startswith('*'),\n 'Signature Algorithm': _sig_algo(_cert),\n 'Key Algorithm': _key_algo(_cert),\n }\n )\n\n cert_info._not_after_date = not_after_date\n cert_info._not_before_date = not_before_date\n\n if subj_alt_names := _cert.subject_alt_name_value.native:\n cert_info['Subject Alt Names'] = subj_alt_names\n\n if loc:\n cert_info['Location'] = loc\n\n if status_code:\n cert_info['Status'] = status_code\n\n return cert_info"
},
{
"identifier": "certs_please",
"path": "cert_hero/cert_hero.py",
"snippet": "def certs_please(\n hostnames: list[str] | tuple[str] | set[str],\n context: ssl.SSLContext = None,\n num_threads: int = 25,\n user_agent: str | None = _DEFAULT_USER_AGENT,\n) -> dict[str, CertHero]:\n \"\"\"\n Retrieve (concurrently) the SSL certificate(s) for a list of ``hostnames`` - works\n even in the case of expired or self-signed certificates.\n\n Usage:\n\n >>> import cert_hero, json\n >>> host_to_cert = cert_hero.certs_please(['google.com', 'cnn.com', 'www.yahoo.co.in', 'youtu.be'])\n >>> cert_hero.set_expired(host_to_cert)\n >>> host_to_cert\n {'google.com': CertHero(\n {\n \"Cert Status\": \"SUCCESS\",\n \"Serial\": \"753DD6FF20CB1B4510CB4C1EA27DA2EB\",\n ...\n }\n ), 'cnn.com': CertHero(\n {\n \"Cert Status\": \"SUCCESS\",\n \"Serial\": \"7F2F3E5C350554D71A6784CCFE6E8315\",\n ...\n }\n ), ...\n }\n >>> json.dumps(host_to_cert)\n {\"google.com\": {\"Cert Status\": \"SUCCESS\", ...}, \"cnn.com\": {\"Cert Status\": \"SUCCESS\", ...}, ...}\n\n :param hostnames: List of hosts to retrieve SSL Certificate(s) for\n :param context: (Optional) Shared SSL Context\n :param num_threads: Max number of concurrent threads\n :param user_agent: A custom *user agent* to use for the HTTP call to retrieve ``Location`` and ``Status``.\n Defaults to ``python-requests/{version}``, or a random *user agent* if the ``fake_useragent`` module\n is installed (via the ``fake-ua``\n `extra <https://packaging.python.org/en/latest/tutorials/installing-packages/#installing-extras>`__).\n :return: A mapping of ``hostname`` to the SSL Certificate (e.g. :class:`CertHero`) for that host\n\n \"\"\"\n\n if context is None:\n context = create_ssl_context()\n\n if num_hosts := len(hostnames):\n # We can use a with statement to ensure threads are cleaned up promptly\n with ThreadPoolExecutor(\n max_workers=min(num_hosts, num_threads)\n ) as pool:\n _host_to_cert = {\n # TODO: Update to remove `or` once we finalize how to handle missing certs\n host: cert_info or _build_failed_cert('TIMED_OUT')\n for host, cert_info in zip(\n hostnames,\n pool.map(\n cert_please,\n hostnames,\n repeat(context),\n repeat(user_agent),\n ),\n )\n }\n else:\n _host_to_cert = {}\n\n return _host_to_cert"
},
{
"identifier": "set_expired",
"path": "cert_hero/cert_hero.py",
"snippet": "def set_expired(certs: CertHero\n | dict[str, str | int | dict[str, str | bool]]\n | dict[str, CertHero]\n | dict[str, dict[str, str | int | dict[str, str | bool]]]\n | Iterable[CertHero]\n | Iterable[dict[str, str | int | dict[str, str | bool]]]\n | None,\n _date_from_iso_str=date.fromisoformat) -> None:\n \"\"\"\n Set or update the value for ``Validity > Expired`` (:type:`bool`) on\n each cert in a response from :func:`cert_please()` or :func:`certs_please()`,\n or a serialized version thereof (e.g. ``json.dumps`` > ``json.loads``).\n\n Example Usage::\n\n >>> from cert_hero import cert_please, set_expired\n >>> cert = cert_please('google.com')\n >>> assert 'Expired' not in cert['Validity']\n >>> set_expired(cert)\n >>> assert 'Expired' in cert['Validity']\n\n \"\"\"\n if not certs:\n return\n\n # cert_please(): given a `CertHero` (or `CertHero`-like) object\n if 'Serial' in certs:\n certs = [certs]\n # certs_please(): given a mapping of `hostname` to `CertHero` (or `CertHero`-like) object\n elif values_fn := getattr(certs, 'values', None):\n certs = values_fn()\n\n today = datetime.utcnow().date()\n\n for _cert in certs:\n if _cert:\n if _validity := _cert.get('Validity'):\n # Use cached attribute `not_after_date` if available (CertHero),\n # else we calculate it on the fly in case of a `dict`.\n not_after_date: date = getattr(_cert, '_not_after_date', None) \\\n or _date_from_iso_str(_validity['Not After'])\n # Set the `Validity > Expired` value (bool)\n _validity['Expired'] = not_after_date < today"
}
] | import json
from cert_hero import cert_please, certs_please, set_expired | 3,566 |
def test_cert_please():
cert = cert_please('google.com')
print('Cert is Valid Till:', cert.not_after_date.isoformat())
# To get the output as a JSON string, use `str(cert)` or remove `!r` from below
print(f'Cert -> \n{cert!r}')
assert cert['Subject Name']['Common Name'] == '*.google.com'
|
def test_cert_please():
cert = cert_please('google.com')
print('Cert is Valid Till:', cert.not_after_date.isoformat())
# To get the output as a JSON string, use `str(cert)` or remove `!r` from below
print(f'Cert -> \n{cert!r}')
assert cert['Subject Name']['Common Name'] == '*.google.com'
| set_expired(cert) | 2 | 2023-10-16 19:02:05+00:00 | 4k |
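A minimal sketch of how the cropped test continues with the gold next line (`set_expired(cert)`): per the `set_expired` snippet above, the call derives a boolean `Expired` key inside `cert['Validity']` in place. Network access and the final assertion are assumptions for illustration, not part of the record:

from cert_hero import cert_please, set_expired

def test_cert_expired_flag():
    cert = cert_please('google.com')   # works even for expired/self-signed certs
    assert 'Expired' not in cert['Validity']
    set_expired(cert)                  # mutates the cert: compares 'Not After' to today's date
    assert isinstance(cert['Validity']['Expired'], bool)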
KosinskiLab/pyTME | tme/tests/test_parser.py | [
{
"identifier": "Parser",
"path": "tme/parser.py",
"snippet": "class Parser(ABC):\n \"\"\"\n Base class for structure file parsers.\n\n Classes inheriting from :py:class:`Parser` need to define\n a ``parse_input`` method that accepts a list of lines and returns a\n dictionary representation of the data.\n \"\"\"\n\n def __init__(self, filename: str, mode: str = \"r\") -> None:\n \"\"\"\n Initialize a Parser object.\n\n Parameters\n ----------\n filename : str\n File name to parse data from.\n\n mode : str, optional\n Mode to open the file. Default is 'r' for read.\n \"\"\"\n with open(filename, \"r\") as infile:\n data = infile.read()\n\n data = deque(filter(lambda line: line and line[0] != \"#\", data.split(\"\\n\")))\n self._data = self.parse_input(data)\n\n def __getitem__(self, key: str):\n \"\"\"\n Retrieve a value from the internal data using a given key.\n\n Parameters\n ----------\n key : str\n The key to use for retrieving the corresponding value from\n the internal data.\n\n Returns\n -------\n value\n The value associated with the provided key in the internal data.\n \"\"\"\n return self._data[key]\n\n def __contains__(self, key) -> bool:\n \"\"\"\n Check if a given key exists in the internal data.\n\n Parameters\n ----------\n key : str\n The key to check for in the internal data.\n\n Returns\n -------\n bool\n True if the key exists in the internal data, False otherwise.\n \"\"\"\n return key in self._data\n\n def get(self, key, default):\n \"\"\"\n Retrieve a value from the internal data using a given key. If the\n key does not exist, return a default value.\n\n Parameters\n ----------\n key : str\n The key to use for retrieving the corresponding value from\n the internal data.\n\n default : Any\n The value to return if the key does not exist in the internal data.\n\n Returns\n -------\n value\n The value associated with the provided key in the internal data,\n or the default value if the key does not exist.\n \"\"\"\n if key in self._data:\n return self[key]\n return default\n\n def keys(self):\n \"\"\"\n List keys available in internal dictionary.\n \"\"\"\n return self._data.keys()\n\n def values(self):\n \"\"\"\n List values available in internal dictionary.\n \"\"\"\n return self._data.values()\n\n def items(self):\n \"\"\"\n List items available in internal dictionary.\n \"\"\"\n return self._data.items()\n\n @abstractmethod\n def parse_input(self, lines: List[str]) -> Dict:\n \"\"\"\n Parse a list of lines from a file and convert the data into a dictionary.\n\n This function is not intended to be called directly, but should rather be\n defined by classes inheriting from :py:class:`Parser` to parse a given\n file format.\n\n Parameters\n ----------\n lines : list of str\n The lines of a structure file to parse.\n\n Returns\n -------\n dict\n A dictionary containing the parsed data.\n \"\"\""
},
{
"identifier": "PDBParser",
"path": "tme/parser.py",
"snippet": "class PDBParser(Parser):\n \"\"\"\n A Parser subclass for converting PDB file data into a dictionary representation.\n This class is specifically designed to work with PDB file format.\n\n References\n ----------\n .. [1] https://www.cgl.ucsf.edu/chimera/docs/UsersGuide/tutorials/pdbintro.html\n \"\"\"\n\n def parse_input(self, lines: List[str]) -> Dict:\n \"\"\"\n Parse a list of lines from a PDB file and convert the data into a dictionary.\n\n Parameters\n ----------\n lines : list of str\n The lines of a PDB file to parse.\n\n Returns\n -------\n dict\n A dictionary containing the parsed data from the PDB file.\n \"\"\"\n metadata = {\n \"resolution\": re.compile(\n r\"(.)+?(EFFECTIVE RESOLUTION\\s+\\(ANGSTROMS\\)){1}(.)+?(\\d+\\.\\d+)(\\s)*$\"\n ),\n \"reconstruction_method\": re.compile(\n r\"(.)+?(RECONSTRUCTION METHOD)+(.)+?(\\w+\\s*\\w+)(\\s)*$\"\n ),\n \"electron_source\": re.compile(r\"(.)+?(SOURCE)+(.)+?(\\w+\\s*\\w+)(\\s)*$\"),\n \"illumination_mode\": re.compile(\n r\"(.)+?(ILLUMINATION MODE)+(.)+?(\\w+\\s*\\w+)(\\s)*$\"\n ),\n \"microscope_mode\": re.compile(\n r\"(.)+?(IMAGING MODE)+(.)+?(\\w+\\s*\\w+)(\\s)*$\"\n ),\n \"microscope_model\": re.compile(\n r\"(.)+?(MICROSCOPE MODEL)+(.+?:\\s+)+?(.+)(\\s)*$\"\n ),\n }\n\n data = {\n \"record_type\": [],\n \"atom_serial_number\": [],\n \"atom_name\": [],\n \"alternate_location_indicator\": [],\n \"residue_name\": [],\n \"chain_identifier\": [],\n \"residue_sequence_number\": [],\n \"code_for_residue_insertion\": [],\n \"atom_coordinate\": [],\n \"occupancy\": [],\n \"temperature_factor\": [],\n \"segment_identifier\": [],\n \"element_symbol\": [],\n \"charge\": [],\n \"details\": {},\n }\n data[\"details\"][\"resolution\"] = np.nan\n\n for line in lines:\n if line.startswith(\"REMARK\"):\n matches = [(key, metadata[key].match(line)) for key in metadata]\n matches = [match for match in matches if match[1]]\n for key, match in matches:\n data[\"details\"][key] = match.group(4)\n _ = metadata.pop(key)\n elif line.startswith(\"ATOM\") or line.startswith(\"HETATM\"):\n data[\"record_type\"].append(line[0:6])\n data[\"atom_serial_number\"].append(line[6:11])\n data[\"atom_name\"].append(line[12:16])\n data[\"alternate_location_indicator\"].append(line[16])\n data[\"residue_name\"].append(line[17:20])\n\n data[\"chain_identifier\"].append(line[21])\n data[\"residue_sequence_number\"].append(line[22:26])\n data[\"code_for_residue_insertion\"].append(line[26])\n data[\"atom_coordinate\"].append((line[30:38], line[38:46], line[46:54]))\n data[\"occupancy\"].append(line[54:60])\n data[\"temperature_factor\"].append(line[60:66])\n data[\"segment_identifier\"].append(line[74:76])\n data[\"element_symbol\"].append(line[76:78])\n data[\"charge\"].append(line[78:80])\n\n data[\"details\"][\"resolution\"] = float(data[\"details\"][\"resolution\"])\n\n return data"
}
] | import pytest
from tme.parser import Parser, PDBParser | 1,807 |
class TestParser:
def setup_method(self):
self.pdb_file = "./tme/tests/data/Structures/5khe.pdb"
def teardown_method(self):
self.pdb_file = None
def test_initialize_parser_error(self):
with pytest.raises(TypeError):
|
class TestParser:
def setup_method(self):
self.pdb_file = "./tme/tests/data/Structures/5khe.pdb"
def teardown_method(self):
self.pdb_file = None
def test_initialize_parser_error(self):
with pytest.raises(TypeError): | _ = Parser(self.pdb_file) | 0 | 2023-10-20 13:46:01+00:00 | 4k |
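The `TypeError` this test expects comes from `Parser` being an abstract base class: `parse_input` is abstract, so `Parser` cannot be instantiated directly. A minimal sketch of a concrete subclass follows; the toy "key value" line format is an assumption for illustration, not part of pyTME:

from typing import Dict, List
from tme.parser import Parser

class KeyValueParser(Parser):
    def parse_input(self, lines: List[str]) -> Dict:
        # Parser.__init__ has already dropped blank lines and '#' comments;
        # assume every remaining line is "key value".
        return dict(line.split(maxsplit=1) for line in lines)

# KeyValueParser("settings.txt") would now parse instead of raising TypeError.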
hookla/DreamTeamGPT | dream_team_gpt/meeting.py | [
{
"identifier": "Chairman",
"path": "dream_team_gpt/agents/chairman.py",
"snippet": "class Chairman(Agent):\n def __init__(self, client_factory: Callable, executives: list[SME], name: str = \"Chairman\"):\n # Construct the user_prompt string with details of the executives\n self.user_prompt = self.update_user_prompt(executives)\n\n system_prompt = f\"Answer with only the name and nothing else.\"\n\n # Call the superclass constructor with the constructed user_prompt\n super().__init__(client_factory, name, self.user_prompt, system_prompt)\n\n self.executives = executives\n\n @staticmethod\n def update_user_prompt(SMEs: list[SME]) -> str:\n frequency_info_list = []\n for sme in SMEs:\n frequency_info_list.append(\n f\"{sme.name}: expertise: {sme.expertise}. \"\n f\"concerns: {', '.join(sme.concerns)}. spoken count: {sme.spoken_count}.\\n\"\n )\n\n return (\n f\"Your task is to read the transcript and decide who should speak next. \"\n f\"Do not choose the same person all of the time.\\n\"\n f\"Participants:\\n{''.join(frequency_info_list)} \"\n )\n\n def decide_if_meeting_over(self, transcript: str) -> bool:\n return False\n\n def decide_next_speaker(self, transcript: str) -> SME:\n while True:\n next_speaker = self.query_gpt(transcript).strip().rstrip(\".\")\n logger.info(f\"Chairman called speaker: {next_speaker}\")\n\n next_executive = next(\n (exec for exec in self.executives if exec.name == next_speaker), None\n )\n\n if next_executive is not None:\n next_executive.spoken_count += 1 # Update the frequency count\n self.user_prompt = self.update_user_prompt(self.executives)\n self.client.user_prompt = self.user_prompt\n return next_executive\n\n logger.info(f\"{next_speaker} is not a valid exec...\")"
},
{
"identifier": "SME",
"path": "dream_team_gpt/agents/sme.py",
"snippet": "class SME(Agent):\n def __init__(self, client_factory: Callable, name: str, expertise: str, concerns: list[str]):\n # Construct the user_prompt string\n user_prompt = USER_PROMPT_TEMPLATE.format(\n name=name, expertise=expertise, concerns=\", \".join(concerns)\n )\n\n # Call the superclass constructor with the constructed user_prompt\n super().__init__(client_factory, name, user_prompt)\n self.expertise = expertise\n self.concerns = concerns\n self.spoken_count = 0\n\n def opinion(self, transcript: str) -> str:\n return self.query_gpt(transcript)"
},
{
"identifier": "IdeaRefiner",
"path": "dream_team_gpt/agents/idea_refiner.py",
"snippet": "class IdeaRefiner(Agent):\n def __init__(self, client_factory: Callable, name: str = \"Refiner\"):\n # Call the superclass constructor with the constructed user_prompt\n super().__init__(client_factory, name, REFINER_PROMPT)\n\n def refine_idea(self, idea: str) -> str:\n return self.query_gpt(idea)"
},
{
"identifier": "AIClientConfig",
"path": "dream_team_gpt/clients/config.py",
"snippet": "class AIClientConfig:\n client_type: AIClientType\n api_key: str\n model: Models | None"
},
{
"identifier": "AIClientType",
"path": "dream_team_gpt/clients/config.py",
"snippet": "class AIClientType(str, Enum):\n ChatGPT = \"ChatGPT\""
},
{
"identifier": "ai_client_factory",
"path": "dream_team_gpt/clients/get_client.py",
"snippet": "def ai_client_factory(config: AIClientConfig) -> Callable[[Any], AIClient]:\n return lambda _: get_ai_client(config)"
},
{
"identifier": "Models",
"path": "dream_team_gpt/clients/gpt_client.py",
"snippet": "class Models(str, Enum):\n GPT3 = \"gpt-3.5-turbo\"\n GPT4 = \"gpt-4\""
},
{
"identifier": "DEFAULT_SME_DICT",
"path": "dream_team_gpt/constants/default_sme.py",
"snippet": "DEFAULT_SME_DICT = (\n {\n \"name\": \"CEO\",\n \"expertise\": \"Corporate Strategy\",\n \"concerns\": [\"Market Entry\", \"Competitive Positioning\"],\n },\n {\n \"name\": \"CFO\",\n \"expertise\": \"Financial Products\",\n \"concerns\": [\"Rate Management\", \"Regulatory Compliance\"],\n },\n {\n \"name\": \"COO\",\n \"expertise\": \"Operational Efficiency\",\n \"concerns\": [\"Scalability\", \"Cost Optimization\"],\n },\n {\n \"name\": \"CMO\",\n \"expertise\": \"Customer Acquisition\",\n \"concerns\": [\"Target Market\", \"Onboarding Experience\"],\n },\n {\n \"name\": \"CTO\",\n \"expertise\": \"Technical Infrastructure\",\n \"concerns\": [\"Data Security\", \"System Integration\"],\n },\n {\n \"name\": \"CRO\",\n \"expertise\": \"Risk Management\",\n \"concerns\": [\"Fraud Detection\", \"Compliance\"],\n },\n {\n \"name\": \"CCO\",\n \"expertise\": \"Customer Experience\",\n \"concerns\": [\"UX/UI Design\", \"Customer Support\"],\n },\n {\n \"name\": \"CPO\",\n \"expertise\": \"Product Management\",\n \"concerns\": [\"Feature Rollout\", \"Customer Feedback\"],\n },\n)"
},
{
"identifier": "NO_COMMENT",
"path": "dream_team_gpt/constants/strings.py",
"snippet": "NO_COMMENT = \"NO COMMENT\""
},
{
"identifier": "print_with_wrap",
"path": "dream_team_gpt/utils/print_with_wrap.py",
"snippet": "def print_with_wrap(text: str, wrap_length: int = 180) -> None:\n lines = text.split(\"\\n\")\n for line in lines:\n wrapped_text = textwrap.wrap(line, wrap_length)\n for segment in wrapped_text:\n print(segment)"
},
{
"identifier": "parse_yaml_config",
"path": "dream_team_gpt/utils/parse_config.py",
"snippet": "def parse_yaml_config(file_path: Path) -> list[dict]:\n logger.info(f\"Loading SMEs config file: {file_path}\")\n data = read_yaml(file_path)\n\n items = []\n for item in data:\n item_dict = {\n \"name\": item[\"name\"],\n \"expertise\": item[\"expertise\"],\n \"concerns\": item[\"concerns\"],\n }\n items.append(item_dict)\n\n return items"
}
] | from dataclasses import dataclass, field
from pathlib import Path
from textwrap import dedent
from loguru import logger
from dream_team_gpt.agents import SME, Chairman
from dream_team_gpt.agents.idea_refiner import IdeaRefiner
from dream_team_gpt.clients import AIClientConfig, AIClientType, Models, ai_client_factory
from dream_team_gpt.constants import DEFAULT_SME_DICT, NO_COMMENT
from dream_team_gpt.utils import parse_yaml_config, print_with_wrap
import os | 1,897 |
@dataclass
class Transcript(str):
idea: str
refined_idea: str = None
opinions: list[str] = field(default_factory=list)
def __str__(self) -> str:
opinions = "\n".join(opinion for opinion in self.opinions)
return dedent(
f"""\
We are here to discuss the following idea:
{self.refined_idea if self.refined_idea else self.idea}
{opinions if opinions else ""}"""
)
def add_opinion(self, opinion: str) -> None:
self.opinions.append(opinion)
def __add__(self, other: str) -> "Transcript":
if not isinstance(other, str):
raise ValueError("Only can add string opinion to Transcript")
self.add_opinion(other)
return self
@dataclass
class Meeting:
idea: str
config: Path = None
def __post_init__(self) -> None:
"""Create agents"""
client_factory = ai_client_factory(
AIClientConfig(
client_type=AIClientType.ChatGPT,
model=Models.GPT4,
api_key=os.environ["openai.api_key"],
)
)
if self.config:
sme_dict = parse_yaml_config(self.config)
else:
sme_dict = DEFAULT_SME_DICT
|
@dataclass
class Transcript(str):
idea: str
refined_idea: str = None
opinions: list[str] = field(default_factory=list)
def __str__(self) -> str:
opinions = "\n".join(opinion for opinion in self.opinions)
return dedent(
f"""\
We are here to discuss the following idea:
{self.refined_idea if self.refined_idea else self.idea}
{opinions if opinions else ""}"""
)
def add_opinion(self, opinion: str) -> None:
self.opinions.append(opinion)
def __add__(self, other: str) -> "Transcript":
if not isinstance(other, str):
raise ValueError("Only can add string opinion to Transcript")
self.add_opinion(other)
return self
@dataclass
class Meeting:
idea: str
config: Path = None
def __post_init__(self) -> None:
"""Create agents"""
client_factory = ai_client_factory(
AIClientConfig(
client_type=AIClientType.ChatGPT,
model=Models.GPT4,
api_key=os.environ["openai.api_key"],
)
)
if self.config:
sme_dict = parse_yaml_config(self.config)
else:
sme_dict = DEFAULT_SME_DICT | self.smes = [SME(client_factory=client_factory, **d) for d in sme_dict] | 1 | 2023-10-18 22:45:50+00:00 | 4k |
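A quick sketch of the `Transcript` behavior defined above, assuming the class is in scope: the str subclass is constructed positionally, `+=` routes through `__add__` (which appends the opinion and returns self), and non-string opinions raise `ValueError`. The idea and opinion texts are arbitrary examples:

transcript = Transcript("Launch a peer-to-peer savings product")
transcript += "CFO: rate management is the main compliance concern."
transcript += "CMO: onboarding friction will slow acquisition."
print(str(transcript))   # the idea followed by both opinions
try:
    transcript += 42     # only str opinions are accepted
except ValueError as exc:
    print(exc)           # "Only can add string opinion to Transcript"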
MeetingAgent/MeetingAgent-Core | meeting_buddy.py | [
{
"identifier": "MyTTS",
"path": "voice_cloning/clone.py",
"snippet": "class MyTTS:\n def __init__(self):\n # Get device\n self.device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n self.tts = TTS(\"tts_models/en/ljspeech/tacotron2-DDC\")\n self.use_default_speaker = False\n self.speaker_wav = self._get_speaker()\n\n def _get_speaker(self):\n # speaker audio file\n wav_files = glob.glob(\"voice_cloning/audio_samples/*.wav\")\n print(\"WAV FILES: \", wav_files)\n if wav_files:\n if self.use_default_speaker:\n wav_file = \"voice_cloning/audio_samples/default_audio.wav\"\n else: \n wav_file = wav_files[0] if wav_files[0] != \"default_audio.wav\" else FileNotFoundError(\"Add your audio.wav to /voice_cloning/audio_samples\")\n\n print(\"WAV FILE: \", wav_file)\n return wav_file\n\n def text_to_speech(self, text, output_file):\n self.tts.tts_with_vc_to_file(\n text,\n speaker_wav=self.speaker_wav,\n file_path=output_file\n )"
},
{
"identifier": "gpt_4_answer",
"path": "meeting_buddy_system/gpt_utils.py",
"snippet": "def gpt_4_answer(\n messages,\n model=\"gpt-4\",\n max_tokens=750,\n temperature=0.6,\n top_p=0.9,\n frequency_penalty=1.2,\n presence_penalty=0.5,\n):\n completion_params = {\n \"model\": model,\n \"messages\": messages,\n \"temperature\": temperature,\n \"top_p\": top_p,\n \"frequency_penalty\": frequency_penalty,\n \"presence_penalty\": presence_penalty, \n \"max_tokens\": max_tokens,\n }\n\n response = openai.ChatCompletion.create(**completion_params)\n\n return response[\"choices\"][0][\"message\"][\"content\"]"
},
{
"identifier": "gpt_3_5_turbo_16k_answer",
"path": "meeting_buddy_system/gpt_utils.py",
"snippet": "def gpt_3_5_turbo_16k_answer(\n messages,\n model=\"gpt-3.5-turbo-16k\",\n max_tokens=750,\n temperature=0.6,\n top_p=0.9,\n frequency_penalty=1.2,\n presence_penalty=0.5,\n):\n completion_params = {\n \"model\": model,\n \"messages\": messages,\n \"temperature\": temperature,\n \"top_p\": top_p,\n \"frequency_penalty\": frequency_penalty,\n \"presence_penalty\": presence_penalty, \n \"max_tokens\": max_tokens,\n }\n\n response = openai.ChatCompletion.create(**completion_params)\n\n return response[\"choices\"][0][\"message\"][\"content\"]"
},
{
"identifier": "MEETING_BUDDY_MAIN_PROMPT",
"path": "meeting_buddy_system/prompts.py",
"snippet": "MEETING_BUDDY_MAIN_PROMPT = \"\"\"\n<!-->IMPORTANT CONTEXT<--!>\nAn answer should be coherent and include some point form arguments.\n<!-->IMPORTANT CONTEXT<--!>\n\nHere is context for the meeting: {meeting_context}\n\nGiven a question, answer it coherently and several possible points that can be derived from the question.\nIf the question is simple, like an arithmetic question, no need to further explain any detail. Just give the result with a short explanation of how it was achieved it.\n\"\"\""
},
{
"identifier": "EXTRACT_QUERY_PROMPT",
"path": "meeting_buddy_system/prompts.py",
"snippet": "EXTRACT_QUERY_PROMPT = \"\"\"\nGiven some input text, extract a query from the text. You are to do this in the language of the text. \nIf no query exists, interpret the text as is and see if a question can be captured from it.\n\"\"\""
}
] | import pyaudio
import wave
import whisper
import threading
import time
import pygame
from kivy.app import App
from kivy.uix.button import Button
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.switch import Switch
from kivy.uix.label import Label
from kivy.clock import Clock
from kivy.uix.textinput import TextInput
from kivy.core.window import Window
from kivy.support import install_twisted_reactor
from gtts import gTTS
from pydub import AudioSegment
from ftlangdetect import detect
from voice_cloning.clone import MyTTS
from meeting_buddy_system.gpt_utils import gpt_4_answer, gpt_3_5_turbo_16k_answer
from meeting_buddy_system.prompts import MEETING_BUDDY_MAIN_PROMPT, EXTRACT_QUERY_PROMPT | 2,694 | play_audio('meeting_buddy_audio/output.mp3')
else:
# Update the answer text without text-to-speech
Clock.schedule_once(lambda dt: app.update_answer_text(aggregated_text))
return query, answer
def meeting_buddy(meeting_context: str) -> None:
global audio_thread
audio_thread = threading.Thread(target=get_audio)
audio_thread.start()
audio_thread.join()
input_text = whisper_process_audio("meeting_buddy_audio/audio.wav")
question, answer = gpt_pipeline(meeting_context=meeting_context, input_text=input_text)
print(f"Question: {question}")
print(f"Answer: {answer}")
Window.size = (800, 600)
class MeetingBuddyApp(App):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.audio_thread = None
self.context_input = TextInput(
hint_text='Paste your meeting notes here',
multiline=True,
size_hint=(1, 0.2),
font_size='20sp',
background_color=[0, 0, 0, 1],
foreground_color=[1, 1, 1, 1]
)
self.tts = None
def on_start(self):
self.load_tts_model()
def build(self):
self.answer_output = TextInput(
text='',
multiline=True,
size_hint=(1, 0.6),
font_size='20sp',
readonly=True,
background_color=[0, 0, 0, 1],
foreground_color=[1, 1, 1, 1]
)
start_button = Button(
text='Start Recording',
on_release=self.start_meeting_buddy,
size_hint=(1, 0.1),
font_size='20sp'
)
stop_button_layout = BoxLayout(orientation='vertical', spacing=10, size_hint=(1, 0.3))
stop_button = Button(
text='Stop Recording',
on_release=self.stop_recording,
size_hint=(1, 0.1),
font_size='20sp'
)
switch_layout = BoxLayout(
orientation='horizontal',
spacing=10,
size_hint=(None, None),
size=(200, 175),
pos_hint={'center_x': 0.5}
)
tts_label = Label(
text='Text to Speech:',
size_hint=(None, None),
size=(0, 200)
)
self.tts_switch = Switch(size_hint=(None, None), size=(400, 200))
switch_layout.add_widget(tts_label)
switch_layout.add_widget(self.tts_switch)
stop_button_layout.add_widget(stop_button)
stop_button_layout.add_widget(switch_layout)
layout = BoxLayout(orientation='vertical', spacing=10, padding=10)
layout.add_widget(self.context_input)
layout.add_widget(start_button)
layout.add_widget(stop_button_layout)
layout.add_widget(self.answer_output)
return layout
def update_answer_text(self, text):
self.answer_output.text = f'{text}'
def start_meeting_buddy(self, instance):
global app
app = self
meeting_context = self.context_input.text
global audio_thread
audio_thread = threading.Thread(target=meeting_buddy, args=(meeting_context,))
audio_thread.start()
stop_audio_playback()
def stop_recording(self, instance):
stop_audio()
if self.audio_thread is not None:
self.audio_thread.join()
Clock.schedule_once(self.delayed_update, 1)
def delayed_update(self, dt):
self.update_answer_text("Getting answer...")
def load_tts_model(self):
| # Audio Processing
# GUI
install_twisted_reactor()
# gtts text to speech
# personalized voice text to speech
# Local
recording = False
audio_thread = None
def get_audio() -> None:
global recording
recording = True
p = pyaudio.PyAudio()
stream = p.open(format=pyaudio.paInt16, channels=1, rate=44100, input=True, frames_per_buffer=1024)
frames = []
try:
print("Recording...")
while recording:
data = stream.read(1024)
frames.append(data)
print("Finished recording.")
finally:
stream.stop_stream()
stream.close()
p.terminate()
wf = wave.open('meeting_buddy_audio/input_audio.wav', 'wb')
wf.setnchannels(1)
wf.setsampwidth(p.get_sample_size(pyaudio.paInt16))
wf.setframerate(44100)
wf.writeframes(b''.join(frames))
wf.close()
def stop_audio() -> None:
global recording
recording = False
def whisper_process_audio(audio_file: str) -> str:
model = whisper.load_model("base") # for multilingual
result = model.transcribe(audio_file)
return result["text"]
def detect_language(text: str) -> str:
cleaned_text = text.replace('\n', ' ')
return detect(text=cleaned_text, low_memory=True)
def gtts_text_to_speech(text: str, output_file='meeting_buddy_audio/output.mp3') -> None:
language = detect_language(text=text)["lang"]
tts = gTTS(text=text, lang=language, slow=False)
tts.save(output_file)
print(f'Audio saved as {output_file}')
def voice_clone_text_to_speech(text: str, output_file='meeting_buddy_audio/output.wav') -> None:
app.tts.text_to_speech(text, output_file)
print(f'Audio saved as {output_file}')
# initialize mixer
pygame.mixer.init()
def play_audio(file_path):
pygame.mixer.music.load(file_path)
pygame.mixer.music.play()
def stop_audio_playback():
pygame.mixer.music.stop()
def gpt_pipeline(meeting_context: str, input_text: str) -> str:
"""
Extract query from text and produce the final answer to query.
"""
print("\n\n\n###### EXTRACTING QUERY FROM TEXT ######\n\n\n")
messages = [{"role": "system", "content": EXTRACT_QUERY_PROMPT}, {"role": "user", "content": input_text}]
query = gpt_3_5_turbo_16k_answer(messages=messages)
full_query_text = f"Extracted Query: {query}"
print("\n\n\n###### FINISHED EXTRACTING QUERY FROM TEXT ######\n\n\n")
print("\n\n\n###### RESPONDING TO QUERY ######\n\n\n")
messages = [{"role": "system", "content": MEETING_BUDDY_MAIN_PROMPT.format(meeting_context=meeting_context)}, {"role": "user", "content": query}]
answer = gpt_4_answer(messages=messages)
full_answer_text = f"Answer: {answer}"
print("\n\n\n###### RESPONDED TO QUERY ######\n\n\n")
aggregated_text = full_query_text + "\n\n" + full_answer_text
if app.tts_switch.active:
try:
print("\n\n###### GETTING TTS TEXT TO SPEECH RESPONSE ######\n\n")
# getting custom voice text to speech response
voice_clone_text_to_speech(answer)
Clock.schedule_once(lambda dt: app.update_answer_text(aggregated_text))
play_audio('meeting_buddy_audio/output.wav')
except:
print("\n\n###### GETTING GTTS TEXT TO SPEECH RESPONSE ######\n\n")
# getting gtts text to speech response
gtts_text_to_speech(answer)
Clock.schedule_once(lambda dt: app.update_answer_text(aggregated_text))
play_audio('meeting_buddy_audio/output.mp3')
else:
# Update the answer text without text-to-speech
Clock.schedule_once(lambda dt: app.update_answer_text(aggregated_text))
return query, answer
def meeting_buddy(meeting_context: str) -> None:
global audio_thread
audio_thread = threading.Thread(target=get_audio)
audio_thread.start()
audio_thread.join()
input_text = whisper_process_audio("meeting_buddy_audio/audio.wav")
question, answer = gpt_pipeline(meeting_context=meeting_context, input_text=input_text)
print(f"Question: {question}")
print(f"Answer: {answer}")
Window.size = (800, 600)
class MeetingBuddyApp(App):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.audio_thread = None
self.context_input = TextInput(
hint_text='Paste your meeting notes here',
multiline=True,
size_hint=(1, 0.2),
font_size='20sp',
background_color=[0, 0, 0, 1],
foreground_color=[1, 1, 1, 1]
)
self.tts = None
def on_start(self):
self.load_tts_model()
def build(self):
self.answer_output = TextInput(
text='',
multiline=True,
size_hint=(1, 0.6),
font_size='20sp',
readonly=True,
background_color=[0, 0, 0, 1],
foreground_color=[1, 1, 1, 1]
)
start_button = Button(
text='Start Recording',
on_release=self.start_meeting_buddy,
size_hint=(1, 0.1),
font_size='20sp'
)
stop_button_layout = BoxLayout(orientation='vertical', spacing=10, size_hint=(1, 0.3))
stop_button = Button(
text='Stop Recording',
on_release=self.stop_recording,
size_hint=(1, 0.1),
font_size='20sp'
)
switch_layout = BoxLayout(
orientation='horizontal',
spacing=10,
size_hint=(None, None),
size=(200, 175),
pos_hint={'center_x': 0.5}
)
tts_label = Label(
text='Text to Speech:',
size_hint=(None, None),
size=(0, 200)
)
self.tts_switch = Switch(size_hint=(None, None), size=(400, 200))
switch_layout.add_widget(tts_label)
switch_layout.add_widget(self.tts_switch)
stop_button_layout.add_widget(stop_button)
stop_button_layout.add_widget(switch_layout)
layout = BoxLayout(orientation='vertical', spacing=10, padding=10)
layout.add_widget(self.context_input)
layout.add_widget(start_button)
layout.add_widget(stop_button_layout)
layout.add_widget(self.answer_output)
return layout
def update_answer_text(self, text):
self.answer_output.text = f'{text}'
def start_meeting_buddy(self, instance):
global app
app = self
meeting_context = self.context_input.text
global audio_thread
audio_thread = threading.Thread(target=meeting_buddy, args=(meeting_context,))
audio_thread.start()
stop_audio_playback()
def stop_recording(self, instance):
stop_audio()
if self.audio_thread is not None:
self.audio_thread.join()
Clock.schedule_once(self.delayed_update, 1)
def delayed_update(self, dt):
self.update_answer_text("Getting answer...")
def load_tts_model(self): | self.tts = MyTTS() | 0 | 2023-10-18 06:50:56+00:00 | 4k |
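The two-stage prompt flow inside `gpt_pipeline` can be exercised in isolation. A hedged sketch — the spoken text and meeting context are arbitrary examples, and a configured OpenAI API key is assumed by the `gpt_utils` helpers:

from meeting_buddy_system.gpt_utils import gpt_3_5_turbo_16k_answer, gpt_4_answer
from meeting_buddy_system.prompts import EXTRACT_QUERY_PROMPT, MEETING_BUDDY_MAIN_PROMPT

spoken = "Um, so what were last quarter's onboarding numbers again?"
# stage 1: distill a clean query from the raw Whisper transcription
query = gpt_3_5_turbo_16k_answer(messages=[
    {"role": "system", "content": EXTRACT_QUERY_PROMPT},
    {"role": "user", "content": spoken},
])
# stage 2: answer the extracted query against the pasted meeting notes
answer = gpt_4_answer(messages=[
    {"role": "system", "content": MEETING_BUDDY_MAIN_PROMPT.format(meeting_context="Q3 review notes")},
    {"role": "user", "content": query},
])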
tonnetonne814/MB-iSTFT-BERT-VITS2-44100-Ja | modules.py | [
{
"identifier": "init_weights",
"path": "commons.py",
"snippet": "def init_weights(m, mean=0.0, std=0.01):\n classname = m.__class__.__name__\n if classname.find(\"Conv\") != -1:\n m.weight.data.normal_(mean, std)"
},
{
"identifier": "get_padding",
"path": "commons.py",
"snippet": "def get_padding(kernel_size, dilation=1):\n return int((kernel_size * dilation - dilation) / 2)"
},
{
"identifier": "piecewise_rational_quadratic_transform",
"path": "transforms.py",
"snippet": "def piecewise_rational_quadratic_transform(\n inputs,\n unnormalized_widths,\n unnormalized_heights,\n unnormalized_derivatives,\n inverse=False,\n tails=None,\n tail_bound=1.0,\n min_bin_width=DEFAULT_MIN_BIN_WIDTH,\n min_bin_height=DEFAULT_MIN_BIN_HEIGHT,\n min_derivative=DEFAULT_MIN_DERIVATIVE,\n):\n if tails is None:\n spline_fn = rational_quadratic_spline\n spline_kwargs = {}\n else:\n spline_fn = unconstrained_rational_quadratic_spline\n spline_kwargs = {\"tails\": tails, \"tail_bound\": tail_bound}\n\n outputs, logabsdet = spline_fn(\n inputs=inputs,\n unnormalized_widths=unnormalized_widths,\n unnormalized_heights=unnormalized_heights,\n unnormalized_derivatives=unnormalized_derivatives,\n inverse=inverse,\n min_bin_width=min_bin_width,\n min_bin_height=min_bin_height,\n min_derivative=min_derivative,\n **spline_kwargs\n )\n return outputs, logabsdet"
},
{
"identifier": "Encoder",
"path": "attentions.py",
"snippet": "class Encoder(nn.Module):\n def __init__(\n self,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size=1,\n p_dropout=0.0,\n window_size=4,\n isflow=True,\n **kwargs\n ):\n super().__init__()\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.window_size = window_size\n # if isflow:\n # cond_layer = torch.nn.Conv1d(256, 2*hidden_channels*n_layers, 1)\n # self.cond_pre = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, 1)\n # self.cond_layer = weight_norm(cond_layer, name='weight')\n # self.gin_channels = 256\n self.cond_layer_idx = self.n_layers\n if \"gin_channels\" in kwargs:\n self.gin_channels = kwargs[\"gin_channels\"]\n if self.gin_channels != 0:\n self.spk_emb_linear = nn.Linear(self.gin_channels, self.hidden_channels)\n # vits2 says 3rd block, so idx is 2 by default\n self.cond_layer_idx = (\n kwargs[\"cond_layer_idx\"] if \"cond_layer_idx\" in kwargs else 2\n )\n logging.debug(self.gin_channels, self.cond_layer_idx)\n assert (\n self.cond_layer_idx < self.n_layers\n ), \"cond_layer_idx should be less than n_layers\"\n self.drop = nn.Dropout(p_dropout)\n self.attn_layers = nn.ModuleList()\n self.norm_layers_1 = nn.ModuleList()\n self.ffn_layers = nn.ModuleList()\n self.norm_layers_2 = nn.ModuleList()\n for i in range(self.n_layers):\n self.attn_layers.append(\n MultiHeadAttention(\n hidden_channels,\n hidden_channels,\n n_heads,\n p_dropout=p_dropout,\n window_size=window_size,\n )\n )\n self.norm_layers_1.append(LayerNorm(hidden_channels))\n self.ffn_layers.append(\n FFN(\n hidden_channels,\n hidden_channels,\n filter_channels,\n kernel_size,\n p_dropout=p_dropout,\n )\n )\n self.norm_layers_2.append(LayerNorm(hidden_channels))\n\n def forward(self, x, x_mask, g=None):\n attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)\n x = x * x_mask\n for i in range(self.n_layers):\n if i == self.cond_layer_idx and g is not None:\n g = self.spk_emb_linear(g.transpose(1, 2))\n g = g.transpose(1, 2)\n x = x + g\n x = x * x_mask\n y = self.attn_layers[i](x, x, attn_mask)\n y = self.drop(y)\n x = self.norm_layers_1[i](x + y)\n\n y = self.ffn_layers[i](x, x_mask)\n y = self.drop(y)\n x = self.norm_layers_2[i](x + y)\n x = x * x_mask\n return x"
}
] | import math
import torch
import commons
from torch import nn
from torch.nn import functional as F
from torch.nn import Conv1d
from torch.nn.utils import weight_norm, remove_weight_norm
from commons import init_weights, get_padding
from transforms import piecewise_rational_quadratic_transform
from attentions import Encoder | 2,950 | dilation_rate,
n_layers,
gin_channels=0,
p_dropout=0,
):
super(WN, self).__init__()
assert kernel_size % 2 == 1
self.hidden_channels = hidden_channels
self.kernel_size = (kernel_size,)
self.dilation_rate = dilation_rate
self.n_layers = n_layers
self.gin_channels = gin_channels
self.p_dropout = p_dropout
self.in_layers = torch.nn.ModuleList()
self.res_skip_layers = torch.nn.ModuleList()
self.drop = nn.Dropout(p_dropout)
if gin_channels != 0:
cond_layer = torch.nn.Conv1d(
gin_channels, 2 * hidden_channels * n_layers, 1
)
self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight")
for i in range(n_layers):
dilation = dilation_rate**i
padding = int((kernel_size * dilation - dilation) / 2)
in_layer = torch.nn.Conv1d(
hidden_channels,
2 * hidden_channels,
kernel_size,
dilation=dilation,
padding=padding,
)
in_layer = torch.nn.utils.weight_norm(in_layer, name="weight")
self.in_layers.append(in_layer)
# last one is not necessary
if i < n_layers - 1:
res_skip_channels = 2 * hidden_channels
else:
res_skip_channels = hidden_channels
res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight")
self.res_skip_layers.append(res_skip_layer)
def forward(self, x, x_mask, g=None, **kwargs):
output = torch.zeros_like(x)
n_channels_tensor = torch.IntTensor([self.hidden_channels])
if g is not None:
g = self.cond_layer(g)
for i in range(self.n_layers):
x_in = self.in_layers[i](x)
if g is not None:
cond_offset = i * 2 * self.hidden_channels
g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :]
else:
g_l = torch.zeros_like(x_in)
acts = commons.fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor)
acts = self.drop(acts)
res_skip_acts = self.res_skip_layers[i](acts)
if i < self.n_layers - 1:
res_acts = res_skip_acts[:, : self.hidden_channels, :]
x = (x + res_acts) * x_mask
output = output + res_skip_acts[:, self.hidden_channels :, :]
else:
output = output + res_skip_acts
return output * x_mask
def remove_weight_norm(self):
if self.gin_channels != 0:
torch.nn.utils.remove_weight_norm(self.cond_layer)
for l in self.in_layers:
torch.nn.utils.remove_weight_norm(l)
for l in self.res_skip_layers:
torch.nn.utils.remove_weight_norm(l)
class ResBlock1(torch.nn.Module):
def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
super(ResBlock1, self).__init__()
self.convs1 = nn.ModuleList(
[
weight_norm(
Conv1d(
channels,
channels,
kernel_size,
1,
dilation=dilation[0],
padding=get_padding(kernel_size, dilation[0]),
)
),
weight_norm(
Conv1d(
channels,
channels,
kernel_size,
1,
dilation=dilation[1],
padding=get_padding(kernel_size, dilation[1]),
)
),
weight_norm(
Conv1d(
channels,
channels,
kernel_size,
1,
dilation=dilation[2],
padding=get_padding(kernel_size, dilation[2]),
)
),
]
)
|
LRELU_SLOPE = 0.1
class LayerNorm(nn.Module):
def __init__(self, channels, eps=1e-5):
super().__init__()
self.channels = channels
self.eps = eps
self.gamma = nn.Parameter(torch.ones(channels))
self.beta = nn.Parameter(torch.zeros(channels))
def forward(self, x):
x = x.transpose(1, -1)
x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
return x.transpose(1, -1)
class ConvReluNorm(nn.Module):
def __init__(
self,
in_channels,
hidden_channels,
out_channels,
kernel_size,
n_layers,
p_dropout,
):
super().__init__()
self.in_channels = in_channels
self.hidden_channels = hidden_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.n_layers = n_layers
self.p_dropout = p_dropout
assert n_layers > 1, "Number of layers should be larger than 0."
self.conv_layers = nn.ModuleList()
self.norm_layers = nn.ModuleList()
self.conv_layers.append(
nn.Conv1d(
in_channels, hidden_channels, kernel_size, padding=kernel_size // 2
)
)
self.norm_layers.append(LayerNorm(hidden_channels))
self.relu_drop = nn.Sequential(nn.ReLU(), nn.Dropout(p_dropout))
for _ in range(n_layers - 1):
self.conv_layers.append(
nn.Conv1d(
hidden_channels,
hidden_channels,
kernel_size,
padding=kernel_size // 2,
)
)
self.norm_layers.append(LayerNorm(hidden_channels))
self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
self.proj.weight.data.zero_()
self.proj.bias.data.zero_()
def forward(self, x, x_mask):
x_org = x
for i in range(self.n_layers):
x = self.conv_layers[i](x * x_mask)
x = self.norm_layers[i](x)
x = self.relu_drop(x)
x = x_org + self.proj(x)
return x * x_mask
class DDSConv(nn.Module):
"""
    Dilated and Depth-Separable Convolution
"""
def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0):
super().__init__()
self.channels = channels
self.kernel_size = kernel_size
self.n_layers = n_layers
self.p_dropout = p_dropout
self.drop = nn.Dropout(p_dropout)
self.convs_sep = nn.ModuleList()
self.convs_1x1 = nn.ModuleList()
self.norms_1 = nn.ModuleList()
self.norms_2 = nn.ModuleList()
for i in range(n_layers):
dilation = kernel_size**i
padding = (kernel_size * dilation - dilation) // 2
self.convs_sep.append(
nn.Conv1d(
channels,
channels,
kernel_size,
groups=channels,
dilation=dilation,
padding=padding,
)
)
self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
self.norms_1.append(LayerNorm(channels))
self.norms_2.append(LayerNorm(channels))
def forward(self, x, x_mask, g=None):
if g is not None:
x = x + g
for i in range(self.n_layers):
y = self.convs_sep[i](x * x_mask)
y = self.norms_1[i](y)
y = F.gelu(y)
y = self.convs_1x1[i](y)
y = self.norms_2[i](y)
y = F.gelu(y)
y = self.drop(y)
x = x + y
return x * x_mask
class WN(torch.nn.Module):
def __init__(
self,
hidden_channels,
kernel_size,
dilation_rate,
n_layers,
gin_channels=0,
p_dropout=0,
):
super(WN, self).__init__()
assert kernel_size % 2 == 1
self.hidden_channels = hidden_channels
self.kernel_size = (kernel_size,)
self.dilation_rate = dilation_rate
self.n_layers = n_layers
self.gin_channels = gin_channels
self.p_dropout = p_dropout
self.in_layers = torch.nn.ModuleList()
self.res_skip_layers = torch.nn.ModuleList()
self.drop = nn.Dropout(p_dropout)
if gin_channels != 0:
cond_layer = torch.nn.Conv1d(
gin_channels, 2 * hidden_channels * n_layers, 1
)
self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight")
for i in range(n_layers):
dilation = dilation_rate**i
padding = int((kernel_size * dilation - dilation) / 2)
in_layer = torch.nn.Conv1d(
hidden_channels,
2 * hidden_channels,
kernel_size,
dilation=dilation,
padding=padding,
)
in_layer = torch.nn.utils.weight_norm(in_layer, name="weight")
self.in_layers.append(in_layer)
# last one is not necessary
if i < n_layers - 1:
res_skip_channels = 2 * hidden_channels
else:
res_skip_channels = hidden_channels
res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight")
self.res_skip_layers.append(res_skip_layer)
def forward(self, x, x_mask, g=None, **kwargs):
output = torch.zeros_like(x)
n_channels_tensor = torch.IntTensor([self.hidden_channels])
if g is not None:
g = self.cond_layer(g)
for i in range(self.n_layers):
x_in = self.in_layers[i](x)
if g is not None:
cond_offset = i * 2 * self.hidden_channels
g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :]
else:
g_l = torch.zeros_like(x_in)
acts = commons.fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor)
acts = self.drop(acts)
res_skip_acts = self.res_skip_layers[i](acts)
if i < self.n_layers - 1:
res_acts = res_skip_acts[:, : self.hidden_channels, :]
x = (x + res_acts) * x_mask
output = output + res_skip_acts[:, self.hidden_channels :, :]
else:
output = output + res_skip_acts
return output * x_mask
def remove_weight_norm(self):
if self.gin_channels != 0:
torch.nn.utils.remove_weight_norm(self.cond_layer)
for l in self.in_layers:
torch.nn.utils.remove_weight_norm(l)
for l in self.res_skip_layers:
torch.nn.utils.remove_weight_norm(l)
class ResBlock1(torch.nn.Module):
def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
super(ResBlock1, self).__init__()
self.convs1 = nn.ModuleList(
[
weight_norm(
Conv1d(
channels,
channels,
kernel_size,
1,
dilation=dilation[0],
padding=get_padding(kernel_size, dilation[0]),
)
),
weight_norm(
Conv1d(
channels,
channels,
kernel_size,
1,
dilation=dilation[1],
padding=get_padding(kernel_size, dilation[1]),
)
),
weight_norm(
Conv1d(
channels,
channels,
kernel_size,
1,
dilation=dilation[2],
padding=get_padding(kernel_size, dilation[2]),
)
),
]
) | self.convs1.apply(init_weights) | 0 | 2023-10-16 10:04:32+00:00 | 4k |
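Why `WN` asserts `kernel_size % 2 == 1`: `get_padding` computes "same" padding, which preserves sequence length exactly only for odd kernels. A small sketch with arbitrary shapes:

import torch
from commons import get_padding

kernel_size, dilation = 3, 5
pad = get_padding(kernel_size, dilation)   # (3*5 - 5) // 2 = 5
conv = torch.nn.Conv1d(4, 4, kernel_size, dilation=dilation, padding=pad)
x = torch.randn(1, 4, 100)
assert conv(x).shape == x.shape            # sequence length preserved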
KaichengGroup/FUSE-Flow | test.py | [
{
"identifier": "NPZDataset",
"path": "data_modules/npz_dataset.py",
"snippet": "class NPZDataset(VisionDataset):\n \"\"\"Load datasets from NPZ files.\n NPZ files are assumed to have 2 files named \"x\" and \"y\"\n that represent the input and target, respectively.\n\n Parameters\n ----------\n root : str\n Root directory of dataset where directory.\n ``celebA.npz`` exists or will be saved to if download is set to True.\n transform : callable, optional\n A function/transform that takes in a Numpy array.\n \"\"\"\n\n def __init__(\n self,\n root: str,\n filename: str,\n transform: Optional[Callable] = None\n ):\n super().__init__(root, transform=transform)\n self.data, self.target = self._load_npz(os.path.join(root, f'{filename}.npz'))\n\n @staticmethod\n def _load_npz(npz_path):\n data = np.load(npz_path)\n input_array = data['x']\n output_array = data['y']\n return input_array, output_array\n\n def __getitem__(self, index: int) -> Tuple[Any, Any]:\n input_image = self.data[index]\n output_image = self.target[index]\n\n if self.transform is not None:\n input_image = self.transform(input_image)\n output_image = self.transform(output_image)\n\n return input_image, output_image\n\n def __len__(self) -> int:\n return len(self.data)"
},
{
"identifier": "load_config",
"path": "utils/utils.py",
"snippet": "def load_config(filename):\n with open(filename, 'r') as f:\n config_dict = yaml.safe_load(f)\n return config_dict"
},
{
"identifier": "create_subset",
"path": "utils/utils.py",
"snippet": "def create_subset(dataset, sample_size):\n \"\"\"Returns a subset of a PyTorch dataset.\n\n Parameters\n ----------\n dataset : torch.utils.data.Dataset or Union\n sample_size : float or int\n Represents number or proportion of original images (not patches) to sample.\n If float, assumes between 0 and 1, which represents proportion.\n\n Returns\n -------\n subset : torch.utils.data.Dataset\n \"\"\"\n if sample_size is None:\n subset = dataset\n else:\n if isinstance(sample_size, int):\n subset_size = min(sample_size, len(dataset))\n elif 0 < sample_size < 1:\n subset_size = int(sample_size*len(dataset))\n else:\n print(f'Indeterminate subset size. Using full dataset. '\n f'Got {str(sample_size)}. '\n f'Only accept integer [0,{str(len(dataset))}] or '\n f'float [0.0, 1.0].')\n subset_size = len(dataset)\n if isinstance(dataset, list):\n subset = dataset[:subset_size]\n else:\n subset = Subset(dataset, list(range(subset_size)))\n return subset"
},
{
"identifier": "save_train_results",
"path": "utils/utils.py",
"snippet": "def save_train_results(root):\n \"\"\"\n\n Parameters\n ----------\n root\n \"\"\"\n os.makedirs(root, exist_ok=True)\n _save_loss_curve(root)"
},
{
"identifier": "CONFIG_PATH",
"path": "utils/utils.py",
"snippet": "CONFIG_PATH = os.path.join('configurations.yaml')"
},
{
"identifier": "determine_version_name",
"path": "utils/utils.py",
"snippet": "def determine_version_name(dataset_dir, run_name, to_increment):\n if run_name is not None:\n version_name = str(run_name)\n else:\n os.makedirs(dataset_dir, exist_ok=True)\n past_runs = os.listdir(dataset_dir)\n current_versions = [int(folder_name) for folder_name in past_runs if str(folder_name).isnumeric()]\n if len(current_versions) > 0:\n version_name = str(max(current_versions) + int(to_increment))\n else:\n version_name = '0'\n return version_name"
},
{
"identifier": "get_latest_checkpoint_path",
"path": "utils/utils.py",
"snippet": "def get_latest_checkpoint_path(version_dir):\n checkpoint_dir = os.path.join(str(version_dir), 'checkpoints')\n latest_checkpoint = sorted(os.listdir(checkpoint_dir),\n key=lambda x: int(x.split('.')[0].split('-')[1].split('=')[1]))[-1]\n checkpoint_path = os.path.join(checkpoint_dir, latest_checkpoint)\n return checkpoint_path"
},
{
"identifier": "CONFIG_FILENAME",
"path": "utils/utils.py",
"snippet": "CONFIG_FILENAME = 'configurations.yaml'"
},
{
"identifier": "save_outputs",
"path": "utils/utils.py",
"snippet": "def save_outputs(dataset_dir, outputs, batch_size, npy_name):\n n_posterior = len(outputs[0])//batch_size\n output_shape = np.array(outputs[0].shape[1:]).squeeze()\n n_samples = (len(outputs) - 1) * batch_size + len(outputs[-1]) // n_posterior\n sr_array = np.empty((n_samples, n_posterior, *output_shape), dtype=np.uint8)\n\n for i, sr_batch in enumerate(outputs):\n curr_batch_size = len(sr_batch) // n_posterior\n for j in range(n_posterior):\n posterior_sample = sr_batch[(j * curr_batch_size):((j + 1) * curr_batch_size)].numpy().astype(np.uint8)\n sr_array[(i * batch_size):(i * batch_size + curr_batch_size), j] = posterior_sample\n\n np.save(os.path.join(dataset_dir, npy_name), sr_array)"
},
{
"identifier": "initialize_model",
"path": "utils/utils.py",
"snippet": "def initialize_model(config, model_config, temperature, version_name,\n checkpoint_path, input_shape, output_shape):\n \"\"\"Initializes trained model for testing.\n Uses code snapshot that should match weight structure if available, else uses master code.\n\n Parameters\n ----------\n config : dict\n model_config : dict\n temperature : float\n version_name : str\n checkpoint_path : str\n input_shape : tuple\n output_shape : tuple\n\n Returns\n -------\n model : pl.LightningModule\n \"\"\"\n try:\n # specify the module that needs to be imported relative to the path of the module\n spec = importlib.util.spec_from_file_location(\n 'FUSE_Flow',\n os.path.join(\n config['data']['log_dir'],\n config['data']['dataset'],\n version_name,\n 'FUSE_Flow',\n 'fuse_flow.py'\n )\n )\n # creates a new module based on spec\n snapshot_module = importlib.util.module_from_spec(spec)\n # executes the module in its own namespace when a module is imported or reloaded.\n spec.loader.exec_module(snapshot_module)\n\n model = snapshot_module.FUSEFlow.load_from_checkpoint(\n checkpoint_path,\n input_shape=input_shape,\n output_shape=output_shape,\n ablation=model_config['ablation'],\n hyper=model_config['hyper-parameters'],\n temperature=temperature,\n augmentations=model_config['augmentations'],\n sample_size=config['testing']['posterior_sample_size']\n )\n print('Model loaded from snapshot.')\n except FileNotFoundError:\n try:\n model = FUSEFlow.load_from_checkpoint(\n checkpoint_path,\n input_shape=input_shape,\n output_shape=output_shape,\n ablation=model_config['ablation'],\n hyper=model_config['hyper-parameters'],\n temperature=temperature,\n augmentations=model_config['augmentations'],\n sample_size=config['testing']['posterior_sample_size']\n )\n print('Code snapshot not found. Model loaded from master.')\n except KeyError:\n print('Code snapshot not found. Weights mismatch. Aborting training...')\n exit()\n except TypeError:\n print('Code snapshot found but weights mismatch. Aborting training...')\n exit()\n return model"
}
] | import os
import pytorch_lightning as pl
import torch
from pytorch_lightning import Trainer
from torch.utils.data import DataLoader
from torchvision.transforms import ToTensor, Compose
from data_modules.npz_dataset import NPZDataset
from utils.utils import load_config, create_subset, save_train_results, CONFIG_PATH, determine_version_name, \
get_latest_checkpoint_path, CONFIG_FILENAME, save_outputs, initialize_model | 1,937 |
if __name__ == '__main__':
pl.seed_everything(42)
# "highest" (default), float32 matrix multiplications use the float32 datatype for internal computations.
# "high", float32 matrix multiplications use the TensorFloat32 or bfloat16_3x
# "medium", float32 matrix multiplications use the bfloat16 datatype
torch.set_float32_matmul_precision('highest')
|
if __name__ == '__main__':
pl.seed_everything(42)
# "highest" (default), float32 matrix multiplications use the float32 datatype for internal computations.
# "high", float32 matrix multiplications use the TensorFloat32 or bfloat16_3x
# "medium", float32 matrix multiplications use the bfloat16 datatype
torch.set_float32_matmul_precision('highest')
| config = load_config(CONFIG_PATH) | 1 | 2023-10-19 06:49:31+00:00 | 4k |
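A quick sketch of the `create_subset` semantics used above: an int is an absolute sample count (capped at the dataset size), a float in (0, 1) is a proportion, and `None` returns the full dataset. The toy dataset is an arbitrary example:

import torch
from torch.utils.data import TensorDataset
from utils.utils import create_subset

ds = TensorDataset(torch.arange(100))
assert len(create_subset(ds, 10)) == 10      # int -> 10 samples
assert len(create_subset(ds, 0.25)) == 25    # float -> 25% of 100
assert len(create_subset(ds, None)) == 100   # None -> full dataset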
TheAcharya/Airlift | airlift/airtable_upload.py | [
{
"identifier": "new_client",
"path": "airlift/airtable_client.py",
"snippet": "class new_client:\n\n def __init__(self,token:str,base:str,table:str):\n\n self.api = token\n self.base = base\n self.table = table\n self.headers = {\n \"Authorization\": \"Bearer \" + self.api,\n \"Content-Type\": \"application/json\"\n }\n\n self.single_upload_url = f\"https://api.airtable.com/v0/{self.base}/{self.table}\"\n logger.debug(\"Airtable Client Created\")\n\n def single_upload(self,data:ATDATATYPE) -> None:\n\n data[\"typecast\"] = True\n response = requests.post(self.single_upload_url, headers=self.headers, data=json.dumps(data))\n\n if response.status_code == 200:\n pass\n #logger.debug(\"Request completed successfully!\")\n else:\n logger.warning(f\"Error creating records: {response}\")\n raise AirtableError(\"Unable to upload data!\")\n \n def missing_field_single(self,field:str):\n\n airtable_table_fields = []\n url = f\"https://api.airtable.com/v0/meta/bases/{self.base}/tables\"\n response = requests.get(url,headers=self.headers)\n tables = json.loads(response.text)\n\n for x in tables['tables']:\n if x['id'] == self.table or x['name'] == self.table:\n for fields in x['fields']:\n airtable_table_fields.append(fields['name'])\n \n if field in airtable_table_fields:\n return True\n \n return False\n\n\n def missing_fields_check(self,data:ATDATATYPE,disable_bypass:bool,ignore_columns:List[str]):\n \n airtable_table_fields = []\n user_csv_fields = []\n\n url = f\"https://api.airtable.com/v0/meta/bases/{self.base}/tables\"\n response = requests.get(url,headers=self.headers)\n tables = json.loads(response.text)\n\n for x in tables['tables']:\n if x['id'] == self.table or x['name'] == self.table:\n\n for fields in x['fields']:\n airtable_table_fields.append(fields['name'])\n\n if ignore_columns:\n for column in ignore_columns:\n airtable_table_fields.append(column)\n \n for csv_key,csv_value in data[0]['fields'].items():\n user_csv_fields.append(csv_key)\n\n missing_columns = list(set(user_csv_fields) - set(airtable_table_fields))\n\n if missing_columns:\n for column in missing_columns:\n if disable_bypass:\n self._create_new_field(column)\n else:\n logger.warning(f\"Column {column} would be skipped!\")\n for datas in data:\n try:\n del datas['fields'][column]\n except:\n logger.warning(f\"{column} not present in this row\")\n\n else:\n logger.info(\"All the columns are verified and present in both the file and Airtable!\")\n\n return data\n \n def _create_new_field(self,field_name:str) -> None:\n URL = f\"https://api.airtable.com/v0/meta/bases/{self.base}/tables/{self.table}/fields\"\n new_field = {\"name\":field_name,\"description\":\"This is a field created by Airtable\",\"type\":\"multilineText\"}\n\n response = requests.post(URL,headers=self.headers,data=json.dumps(new_field))\n\n if response.status_code == 200:\n logger.info(f\"Created new column {field_name} in Airtable\")\n elif response.status_code == 422:\n logger.warning(\"Encountered an 422 error in creating a new column in Airtable!\")\n \n else:\n logger.warning(f\"unknown error : {response.text}\")"
},
{
"identifier": "dropbox_client",
"path": "airlift/dropbox_client.py",
"snippet": "class dropbox_client:\n def __init__(self,access_token,md:bool):\n \n try:\n try:\n creds = self._get_tokens(access_token)\n self.dbx = dropbox.Dropbox(oauth2_refresh_token=creds[1],app_key=creds[0])\n logger.info(\"Created a Dropbox Client\")\n except:\n raise CriticalError('Failed to create the Dropbox client')\n\n if md:\n self.main_folder = \"/Marker Data\"\n try:\n self.dbx.files_create_folder(\"/Marker Data\")\n except Exception as e:\n logger.warning(f\"The folder Marker Data already exists.\")\n else:\n self.main_folder = \"/Airlift\"\n try:\n self.dbx.files_create_folder(\"/Airlift\")\n except Exception as e:\n logger.warning(f\"The folder Airlift already exists.\")\n\n c = datetime.now()\n self.sub_folder = f\"{self.main_folder}{self.main_folder} {c.strftime('%Y-%m-%d')} {c.strftime('%H-%M-%S')}\"\n\n try:\n self.dbx.files_create_folder(self.sub_folder)\n except dropbox.exceptions.ApiError as e:\n logger.warning(f\"The folder {self.sub_folder} already exists.\")\n except Exception as e:\n raise CriticalError(\"Error during Dropbox client creation\",e)\n\n\n def _get_tokens(self,access_token):\n with open(access_token,'r') as file:\n creds = json.load(file)\n \n try:\n app_key = creds['app_key']\n except:\n logger.warning(\"app_key not present in json file\")\n raise CriticalError(\"app_key not present in the json file, please check!\")\n \n try:\n refresh_token = creds['refresh_token']\n except:\n auth_flow = DropboxOAuth2FlowNoRedirect(app_key, use_pkce=True, token_access_type='offline')\n\n authorize_url = auth_flow.start()\n logger.warning(\"1. Go to: \" + authorize_url)\n logger.warning(\"2. Click \\\"Allow\\\" (you might have to log in first).\")\n logger.warning(\"3. Copy the authorization code.\")\n auth_code = input(\"Enter the authorization code here: \").strip()\n\n try:\n oauth_result = auth_flow.finish(auth_code)\n refresh_token = oauth_result.refresh_token\n with open(access_token,'r') as file:\n creds_data = json.load(file)\n \n creds_data['refresh_token'] = refresh_token\n\n with open(access_token,'w') as file:\n json.dump(creds_data,file,indent=2)\n\n except Exception as e:\n logger.warning(\"error during retreival of refresh token\")\n raise CriticalError(\"error during retreival of refresh token\")\n \n return (app_key,refresh_token)\n \n\n\n def upload_to_dropbox(self,filename):\n with open(filename, 'rb') as f:\n image_data = f.read()\n\n \n file_path = os.path.split(filename)\n filename = file_path[1]\n\n if file_path[0]:\n last_dir = os.path.split(file_path[0])\n\n if last_dir:\n if last_dir[0] is None:\n final_path = f'{filename}'\n else:\n final_path = f'{last_dir[1]}/{filename}'\n else:\n final_path = f'{filename}'\n \n dropbox_path = f\"{self.sub_folder}/{final_path}\"\n self.dbx.files_upload(image_data, dropbox_path)\n\n shared_link_metadata = self.dbx.sharing_create_shared_link(path=dropbox_path)\n shared_url = shared_link_metadata.url\n\n \n direct_download_url = shared_url.replace('www.dropbox.com', 'dl.dropboxusercontent.com').replace('?dl=0', '?dl=1')\n\n return direct_download_url"
},
{
"identifier": "dropbox_client",
"path": "airlift/dropbox_client.py",
"snippet": "class dropbox_client:\n def __init__(self,access_token,md:bool):\n \n try:\n try:\n creds = self._get_tokens(access_token)\n self.dbx = dropbox.Dropbox(oauth2_refresh_token=creds[1],app_key=creds[0])\n logger.info(\"Created a Dropbox Client\")\n except:\n raise CriticalError('Failed to create the Dropbox client')\n\n if md:\n self.main_folder = \"/Marker Data\"\n try:\n self.dbx.files_create_folder(\"/Marker Data\")\n except Exception as e:\n logger.warning(f\"The folder Marker Data already exists.\")\n else:\n self.main_folder = \"/Airlift\"\n try:\n self.dbx.files_create_folder(\"/Airlift\")\n except Exception as e:\n logger.warning(f\"The folder Airlift already exists.\")\n\n c = datetime.now()\n self.sub_folder = f\"{self.main_folder}{self.main_folder} {c.strftime('%Y-%m-%d')} {c.strftime('%H-%M-%S')}\"\n\n try:\n self.dbx.files_create_folder(self.sub_folder)\n except dropbox.exceptions.ApiError as e:\n logger.warning(f\"The folder {self.sub_folder} already exists.\")\n except Exception as e:\n raise CriticalError(\"Error during Dropbox client creation\",e)\n\n\n def _get_tokens(self,access_token):\n with open(access_token,'r') as file:\n creds = json.load(file)\n \n try:\n app_key = creds['app_key']\n except:\n logger.warning(\"app_key not present in json file\")\n raise CriticalError(\"app_key not present in the json file, please check!\")\n \n try:\n refresh_token = creds['refresh_token']\n except:\n auth_flow = DropboxOAuth2FlowNoRedirect(app_key, use_pkce=True, token_access_type='offline')\n\n authorize_url = auth_flow.start()\n logger.warning(\"1. Go to: \" + authorize_url)\n logger.warning(\"2. Click \\\"Allow\\\" (you might have to log in first).\")\n logger.warning(\"3. Copy the authorization code.\")\n auth_code = input(\"Enter the authorization code here: \").strip()\n\n try:\n oauth_result = auth_flow.finish(auth_code)\n refresh_token = oauth_result.refresh_token\n with open(access_token,'r') as file:\n creds_data = json.load(file)\n \n creds_data['refresh_token'] = refresh_token\n\n with open(access_token,'w') as file:\n json.dump(creds_data,file,indent=2)\n\n except Exception as e:\n logger.warning(\"error during retreival of refresh token\")\n raise CriticalError(\"error during retreival of refresh token\")\n \n return (app_key,refresh_token)\n \n\n\n def upload_to_dropbox(self,filename):\n with open(filename, 'rb') as f:\n image_data = f.read()\n\n \n file_path = os.path.split(filename)\n filename = file_path[1]\n\n if file_path[0]:\n last_dir = os.path.split(file_path[0])\n\n if last_dir:\n if last_dir[0] is None:\n final_path = f'{filename}'\n else:\n final_path = f'{last_dir[1]}/{filename}'\n else:\n final_path = f'{filename}'\n \n dropbox_path = f\"{self.sub_folder}/{final_path}\"\n self.dbx.files_upload(image_data, dropbox_path)\n\n shared_link_metadata = self.dbx.sharing_create_shared_link(path=dropbox_path)\n shared_url = shared_link_metadata.url\n\n \n direct_download_url = shared_url.replace('www.dropbox.com', 'dl.dropboxusercontent.com').replace('?dl=0', '?dl=1')\n\n return direct_download_url"
}
] | import logging
import concurrent.futures
import os
from airlift.airtable_client import new_client
from typing import Any, Dict, Iterable, Iterator, List, Optional
from queue import Queue, Empty
from airlift.dropbox_client import dropbox_client
from tqdm import tqdm
from icecream import ic
from airlift.dropbox_client import dropbox_client | 2,823 |
logger = logging.getLogger(__name__)
ATDATA = List[Dict[str, Dict[str, str]]]
class Upload:
|
logger = logging.getLogger(__name__)
ATDATA = List[Dict[str, Dict[str, str]]]
class Upload: | def __init__(self,client: new_client, new_data:ATDATA,dbx:dropbox_client,args:dict): | 0 | 2023-10-21 01:57:41+00:00 | 4k |
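As an aside on the record above: its `single_upload` follows the stock Airtable create-record call. A minimal, self-contained sketch of that pattern — the `TOKEN`, `BASE_ID` and `TABLE_ID` values are placeholders, not taken from this row:

import json

import requests

TOKEN, BASE_ID, TABLE_ID = "patXXXX", "appXXXX", "tblXXXX"  # hypothetical credentials
url = f"https://api.airtable.com/v0/{BASE_ID}/{TABLE_ID}"
headers = {"Authorization": f"Bearer {TOKEN}", "Content-Type": "application/json"}

# One record in the List[Dict[str, Dict[str, str]]] shape the ATDATA alias implies.
payload = {"fields": {"Name": "example"}, "typecast": True}
response = requests.post(url, headers=headers, data=json.dumps(payload))
response.raise_for_status()  # fail loudly instead of the snippet's warn-then-raise path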
zytedata/zyte-spider-templates | zyte_spider_templates/spiders/base.py | [
{
"identifier": "GEOLOCATION_OPTIONS_WITH_CODE",
"path": "zyte_spider_templates/_geolocations.py",
"snippet": "GEOLOCATION_OPTIONS_WITH_CODE = {\n code: f\"{name} ({code})\" for code, name in GEOLOCATION_OPTIONS.items()\n}"
},
{
"identifier": "Geolocation",
"path": "zyte_spider_templates/_geolocations.py",
"snippet": "class Geolocation(str, Enum):\n AF: str = \"AF\"\n AL: str = \"AL\"\n DZ: str = \"DZ\"\n AS: str = \"AS\"\n AD: str = \"AD\"\n AO: str = \"AO\"\n AI: str = \"AI\"\n AQ: str = \"AQ\"\n AG: str = \"AG\"\n AR: str = \"AR\"\n AM: str = \"AM\"\n AW: str = \"AW\"\n AU: str = \"AU\"\n AT: str = \"AT\"\n AZ: str = \"AZ\"\n BS: str = \"BS\"\n BH: str = \"BH\"\n BD: str = \"BD\"\n BB: str = \"BB\"\n BY: str = \"BY\"\n BE: str = \"BE\"\n BZ: str = \"BZ\"\n BJ: str = \"BJ\"\n BM: str = \"BM\"\n BT: str = \"BT\"\n BO: str = \"BO\"\n BQ: str = \"BQ\"\n BA: str = \"BA\"\n BW: str = \"BW\"\n BV: str = \"BV\"\n BR: str = \"BR\"\n IO: str = \"IO\"\n BN: str = \"BN\"\n BG: str = \"BG\"\n BF: str = \"BF\"\n BI: str = \"BI\"\n CV: str = \"CV\"\n KH: str = \"KH\"\n CM: str = \"CM\"\n CA: str = \"CA\"\n KY: str = \"KY\"\n CF: str = \"CF\"\n TD: str = \"TD\"\n CL: str = \"CL\"\n CN: str = \"CN\"\n CX: str = \"CX\"\n CC: str = \"CC\"\n CO: str = \"CO\"\n KM: str = \"KM\"\n CG: str = \"CG\"\n CD: str = \"CD\"\n CK: str = \"CK\"\n CR: str = \"CR\"\n HR: str = \"HR\"\n CU: str = \"CU\"\n CW: str = \"CW\"\n CY: str = \"CY\"\n CZ: str = \"CZ\"\n CI: str = \"CI\"\n DK: str = \"DK\"\n DJ: str = \"DJ\"\n DM: str = \"DM\"\n DO: str = \"DO\"\n EC: str = \"EC\"\n EG: str = \"EG\"\n SV: str = \"SV\"\n GQ: str = \"GQ\"\n ER: str = \"ER\"\n EE: str = \"EE\"\n SZ: str = \"SZ\"\n ET: str = \"ET\"\n FK: str = \"FK\"\n FO: str = \"FO\"\n FJ: str = \"FJ\"\n FI: str = \"FI\"\n FR: str = \"FR\"\n GF: str = \"GF\"\n PF: str = \"PF\"\n TF: str = \"TF\"\n GA: str = \"GA\"\n GM: str = \"GM\"\n GE: str = \"GE\"\n DE: str = \"DE\"\n GH: str = \"GH\"\n GI: str = \"GI\"\n GR: str = \"GR\"\n GL: str = \"GL\"\n GD: str = \"GD\"\n GP: str = \"GP\"\n GU: str = \"GU\"\n GT: str = \"GT\"\n GG: str = \"GG\"\n GN: str = \"GN\"\n GW: str = \"GW\"\n GY: str = \"GY\"\n HT: str = \"HT\"\n HM: str = \"HM\"\n VA: str = \"VA\"\n HN: str = \"HN\"\n HK: str = \"HK\"\n HU: str = \"HU\"\n IS: str = \"IS\"\n IN: str = \"IN\"\n ID: str = \"ID\"\n IR: str = \"IR\"\n IQ: str = \"IQ\"\n IE: str = \"IE\"\n IM: str = \"IM\"\n IL: str = \"IL\"\n IT: str = \"IT\"\n JM: str = \"JM\"\n JP: str = \"JP\"\n JE: str = \"JE\"\n JO: str = \"JO\"\n KZ: str = \"KZ\"\n KE: str = \"KE\"\n KI: str = \"KI\"\n KP: str = \"KP\"\n KR: str = \"KR\"\n KW: str = \"KW\"\n KG: str = \"KG\"\n LA: str = \"LA\"\n LV: str = \"LV\"\n LB: str = \"LB\"\n LS: str = \"LS\"\n LR: str = \"LR\"\n LY: str = \"LY\"\n LI: str = \"LI\"\n LT: str = \"LT\"\n LU: str = \"LU\"\n MO: str = \"MO\"\n MG: str = \"MG\"\n MW: str = \"MW\"\n MY: str = \"MY\"\n MV: str = \"MV\"\n ML: str = \"ML\"\n MT: str = \"MT\"\n MH: str = \"MH\"\n MQ: str = \"MQ\"\n MR: str = \"MR\"\n MU: str = \"MU\"\n YT: str = \"YT\"\n MX: str = \"MX\"\n FM: str = \"FM\"\n MD: str = \"MD\"\n MC: str = \"MC\"\n MN: str = \"MN\"\n ME: str = \"ME\"\n MS: str = \"MS\"\n MA: str = \"MA\"\n MZ: str = \"MZ\"\n MM: str = \"MM\"\n NA: str = \"NA\"\n NR: str = \"NR\"\n NP: str = \"NP\"\n NL: str = \"NL\"\n NC: str = \"NC\"\n NZ: str = \"NZ\"\n NI: str = \"NI\"\n NE: str = \"NE\"\n NG: str = \"NG\"\n NU: str = \"NU\"\n NF: str = \"NF\"\n MK: str = \"MK\"\n MP: str = \"MP\"\n NO: str = \"NO\"\n OM: str = \"OM\"\n PK: str = \"PK\"\n PW: str = \"PW\"\n PS: str = \"PS\"\n PA: str = \"PA\"\n PG: str = \"PG\"\n PY: str = \"PY\"\n PE: str = \"PE\"\n PH: str = \"PH\"\n PN: str = \"PN\"\n PL: str = \"PL\"\n PT: str = \"PT\"\n PR: str = \"PR\"\n QA: str = \"QA\"\n RO: str = \"RO\"\n RU: str = \"RU\"\n RW: str = \"RW\"\n RE: str = \"RE\"\n BL: str = 
\"BL\"\n SH: str = \"SH\"\n KN: str = \"KN\"\n LC: str = \"LC\"\n MF: str = \"MF\"\n PM: str = \"PM\"\n VC: str = \"VC\"\n WS: str = \"WS\"\n SM: str = \"SM\"\n ST: str = \"ST\"\n SA: str = \"SA\"\n SN: str = \"SN\"\n RS: str = \"RS\"\n SC: str = \"SC\"\n SL: str = \"SL\"\n SG: str = \"SG\"\n SX: str = \"SX\"\n SK: str = \"SK\"\n SI: str = \"SI\"\n SB: str = \"SB\"\n SO: str = \"SO\"\n ZA: str = \"ZA\"\n GS: str = \"GS\"\n SS: str = \"SS\"\n ES: str = \"ES\"\n LK: str = \"LK\"\n SD: str = \"SD\"\n SR: str = \"SR\"\n SJ: str = \"SJ\"\n SE: str = \"SE\"\n CH: str = \"CH\"\n SY: str = \"SY\"\n TW: str = \"TW\"\n TJ: str = \"TJ\"\n TZ: str = \"TZ\"\n TH: str = \"TH\"\n TL: str = \"TL\"\n TG: str = \"TG\"\n TK: str = \"TK\"\n TO: str = \"TO\"\n TT: str = \"TT\"\n TN: str = \"TN\"\n TM: str = \"TM\"\n TC: str = \"TC\"\n TV: str = \"TV\"\n TR: str = \"TR\"\n UG: str = \"UG\"\n UA: str = \"UA\"\n AE: str = \"AE\"\n GB: str = \"GB\"\n US: str = \"US\"\n UM: str = \"UM\"\n UY: str = \"UY\"\n UZ: str = \"UZ\"\n VU: str = \"VU\"\n VE: str = \"VE\"\n VN: str = \"VN\"\n VG: str = \"VG\"\n VI: str = \"VI\"\n WF: str = \"WF\"\n EH: str = \"EH\"\n YE: str = \"YE\"\n ZM: str = \"ZM\"\n ZW: str = \"ZW\"\n AX: str = \"AX\""
}
] | from importlib.metadata import version
from typing import Any, Dict, Optional
from pydantic import BaseModel, Field
from scrapy.crawler import Crawler
from scrapy.utils.url import parse_url
from zyte_spider_templates._geolocations import (
GEOLOCATION_OPTIONS_WITH_CODE,
Geolocation,
)
import scrapy | 2,504 |
# Higher priority than command-line-defined settings (40).
ARG_SETTING_PRIORITY: int = 50
class BaseSpiderParams(BaseModel):
url: str = Field(
title="URL",
description="Initial URL for the crawl.",
pattern=r"^https?:\/\/[^:\/\s]+(:\d{1,5})?(\/[^\s]*)*(#[^\s]*)?$",
)
|
# Higher priority than command-line-defined settings (40).
ARG_SETTING_PRIORITY: int = 50
class BaseSpiderParams(BaseModel):
url: str = Field(
title="URL",
description="Initial URL for the crawl.",
pattern=r"^https?:\/\/[^:\/\s]+(:\d{1,5})?(\/[^\s]*)*(#[^\s]*)?$",
) | geolocation: Optional[Geolocation] = Field( | 1 | 2023-10-18 10:58:44+00:00 | 4k |
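The completion target here is a pydantic field declaration. Since the row passes `pattern=` (pydantic v2 style) rather than v1's `regex=`, a reduced sketch of the same validation idea, with a hypothetical `Geo` enum standing in for the full `Geolocation` class, would be:

from enum import Enum
from typing import Optional

from pydantic import BaseModel, Field

class Geo(str, Enum):  # stand-in for the 200+ member Geolocation enum quoted above
    US = "US"
    DE = "DE"

class Params(BaseModel):
    url: str = Field(title="URL", pattern=r"^https?://")
    geolocation: Optional[Geo] = None

print(Params(url="https://example.com", geolocation="US"))  # "US" coerces to Geo.US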
DegangWang97/IEEE_TGRS_PDBSNet | main.py | [
{
"identifier": "PDBSNet",
"path": "model.py",
"snippet": "class PDBSNet(nn.Module):\n def __init__(self, nch_in=189, nch_out=189, nch_ker=64, nblk=9):\n super().__init__()\n\n ly = []\n ly += [ nn.Conv2d(nch_in, nch_ker, kernel_size=1) ]\n ly += [ nn.ReLU(inplace=True) ]\n self.head = nn.Sequential(*ly)\n\n self.branch1 = DC_branchl(2, nch_ker, nblk)\n self.branch2 = DC_branchl(3, nch_ker, nblk)\n\n ly = []\n ly += [ nn.Conv2d(nch_ker*2, nch_ker, kernel_size=1) ]\n ly += [ nn.ReLU(inplace=True) ]\n ly += [ nn.Conv2d(nch_ker, nch_ker//2, kernel_size=1) ]\n ly += [ nn.ReLU(inplace=True) ]\n ly += [ nn.Conv2d(nch_ker//2, nch_out, kernel_size=1) ]\n self.tail = nn.Sequential(*ly)\n\n def forward(self, x):\n x = self.head(x)\n\n br1 = self.branch1(x)\n br2 = self.branch2(x)\n\n x = torch.cat([br1, br2], dim=1)\n\n return self.tail(x)\n\n def _initialize_weights(self):\n # Liyong version\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n m.weight.data.normal_(0, (2 / (9.0 * 64)) ** 0.5)"
},
{
"identifier": "PDBSNetData",
"path": "dataset.py",
"snippet": "def PDBSNetData(opt):\r\n \r\n # train dataloader\r\n data_dir = './data/'\r\n image_file = data_dir + opt.dataset + '.mat'\r\n \r\n input_data = sio.loadmat(image_file)\r\n image = input_data['data']\r\n image = image.astype(np.float32)\r\n\r\n image = ((image - image.min()) / (image.max() - image.min()))\r\n band = image.shape[2]\r\n\r\n train_data = np.expand_dims(image, axis=0)\r\n loader_train = torch.from_numpy(train_data.transpose(0,3,1,2)).type(torch.FloatTensor)\r\n \r\n print(\"The training dataloader construction process is done\")\r\n print('-' * 50)\r\n return loader_train, band\r"
},
{
"identifier": "pixel_shuffle_up_sampling",
"path": "dataset.py",
"snippet": "def pixel_shuffle_up_sampling(x:torch.Tensor, f:int, pad:int=0):\r\n '''\r\n inverse of pixel-shuffle down-sampling (PD)\r\n see more details about PD in pixel_shuffle_down_sampling()\r\n Args:\r\n x (Tensor) : input tensor\r\n f (int) : factor of PD\r\n pad (int) : number of pad will be removed\r\n '''\r\n # single image tensor\r\n if len(x.shape) == 3:\r\n c,w,h = x.shape\r\n before_shuffle = x.view(c,f,w//f,f,h//f).permute(0,1,3,2,4).reshape(c*f*f,w//f,h//f)\r\n if pad != 0: before_shuffle = before_shuffle[..., pad:-pad, pad:-pad]\r\n return F.pixel_shuffle(before_shuffle, f)\r\n # batched image tensor\r\n else:\r\n b,c,w,h = x.shape\r\n before_shuffle = x.view(b,c,f,w//f,f,h//f).permute(0,1,2,4,3,5).reshape(b,c*f*f,w//f,h//f)\r\n if pad != 0: before_shuffle = before_shuffle[..., pad:-pad, pad:-pad]\r\n return F.pixel_shuffle(before_shuffle, f)"
},
{
"identifier": "pixel_shuffle_down_sampling",
"path": "dataset.py",
"snippet": "def pixel_shuffle_down_sampling(x:torch.Tensor, f:int, pad:int=0, pad_value:float=0.):\r\n '''\r\n pixel-shuffle down-sampling (PD) from \"When AWGN-denoiser meets real-world noise.\" (AAAI 2019)\r\n Args:\r\n x (Tensor) : input tensor\r\n f (int) : factor of PD\r\n pad (int) : number of pad between each down-sampled images\r\n pad_value (float) : padding value\r\n Return:\r\n pd_x (Tensor) : down-shuffled image tensor with pad or not\r\n '''\r\n # single image tensor\r\n if len(x.shape) == 3:\r\n c,w,h = x.shape\r\n unshuffled = F.pixel_unshuffle(x, f)\r\n if pad != 0: unshuffled = F.pad(unshuffled, (pad, pad, pad, pad), value=pad_value)\r\n return unshuffled.view(c,f,f,w//f+2*pad,h//f+2*pad).permute(0,1,3,2,4).reshape(c, w+2*f*pad, h+2*f*pad)\r\n # batched image tensor\r\n else:\r\n b,c,w,h = x.shape\r\n unshuffled = F.pixel_unshuffle(x, f)\r\n if pad != 0: unshuffled = F.pad(unshuffled, (pad, pad, pad, pad), value=pad_value)\r\n return unshuffled.view(b,c,f,f,w//f+2*pad,h//f+2*pad).permute(0,1,2,4,3,5).reshape(b,c,w+2*f*pad, h+2*f*pad)\r"
},
{
"identifier": "get_auc",
"path": "utils.py",
"snippet": "def get_auc(HSI_old, HSI_new, gt):\r\n n_row, n_col, n_band = HSI_old.shape\r\n n_pixels = n_row * n_col\r\n \r\n img_olds = np.reshape(HSI_old, (n_pixels, n_band), order='F')\r\n img_news = np.reshape(HSI_new, (n_pixels, n_band), order='F') \r\n sub_img = img_olds - img_news\r\n\r\n detectmap = np.linalg.norm(sub_img, ord = 2, axis = 1, keepdims = True)**2\r\n detectmap = detectmap/n_band\r\n\r\n # nomalization\r\n detectmap = map01(detectmap)\r\n\r\n # get auc\r\n label = np.reshape(gt, (n_pixels,1), order='F')\r\n \r\n auc = roc_auc_score(label, detectmap)\r\n \r\n detectmap = np.reshape(detectmap, (n_row, n_col), order='F')\r\n \r\n return auc, detectmap\r"
},
{
"identifier": "setup_seed",
"path": "utils.py",
"snippet": "def setup_seed(seed):\r\n random.seed(seed)\r\n np.random.seed(seed)\r\n torch.manual_seed(seed)\r\n torch.cuda.manual_seed_all(seed)\r\n torch.backends.cudnn.deterministic = True\r\n torch.backends.cudnn.benchmark = False\r"
},
{
"identifier": "TensorToHSI",
"path": "utils.py",
"snippet": "def TensorToHSI(img):\r\n HSI = img.squeeze().cpu().data.numpy().transpose((1, 2, 0))\r\n return HSI\r"
}
] | import argparse
import torch
import torch.nn as nn
import scipy.io as sio
import os
import numpy as np
import time
from model import PDBSNet
from dataset import PDBSNetData, pixel_shuffle_up_sampling, pixel_shuffle_down_sampling
from utils import get_auc, setup_seed, TensorToHSI
from torch import optim
from torch.utils.tensorboard import SummaryWriter
| 3,107 | Trains a PyTorch `nn.Module` object provided in `model`
on training sets provided in `dataloader`
using `criterion` and `optimizer`.
Saves model weight snapshots every `save_freq` epochs and saves the
weights at the end of training.
Parameters
----------
model : torch model object, with callable `forward` method.
criterion : callable taking inputs and targets, returning loss.
optimizer : torch.optim optimizer.
dataloader : train dataloaders.
model_path : string. output path for model.
logs_path : string. output path for log.
save_freq : integer. Number of epochs between model checkpoints. Default = 50.
scheduler : learning rate scheduler.
'''
self.model = model
self.optimizer = optimizer
self.criterion = criterion
self.dataloader = dataloader
self.device = device
self.model_path = model_path
self.logs_path = logs_path
self.save_freq = save_freq
self.scheduler = scheduler
self.opt = opt
if not os.path.exists(self.model_path):
os.makedirs(self.model_path)
if not os.path.exists(self.logs_path):
os.makedirs(self.logs_path)
self.log_output = open(f"{self.logs_path}/log.txt", 'w')
self.writer = SummaryWriter(logs_path)
print(self.opt)
print(self.opt, file=self.log_output)
def train_epoch(self) -> None:
# Run a train phase for each epoch
self.model.train(True)
loss_train = []
train_data = pixel_shuffle_down_sampling(self.dataloader, self.opt.factor_train, pad=0)
loader_train = self.dataloader.to(self.device)
train_data = train_data.to(self.device)
# forward net
output = self.model(train_data)
# backward net
self.optimizer.zero_grad()
outputs = pixel_shuffle_up_sampling(output, self.opt.factor_train, pad=0)
loss = self.criterion(outputs, loader_train)
loss.backward()
self.optimizer.step()
# get losses
loss_train = loss.item()
print("Train Loss:" + str(round(loss_train, 4)))
print("Train Loss:" + str(round(loss_train, 4)), file = self.log_output)
# ============ TensorBoard logging ============#
# Log the scalar values
info = {
'Loss_train': np.mean(loss_train)
}
for tag, value in info.items():
self.writer.add_scalar(tag, value, self.epoch + 1)
# Saving model
if ((self.epoch + 1) % self.save_freq == 0):
torch.save(self.model.state_dict(), os.path.join(self.model_path, 'PDBSNet' + '_' + self.opt.dataset + '_' + str(self.epoch + 1) + '.pkl'))
def train(self) -> nn.Module:
for epoch in range(self.opt.epochs):
self.epoch = epoch
print('-' * 50)
print('Epoch {}/{}'.format(epoch + 1, self.opt.epochs))
print('Epoch {}/{}'.format(epoch + 1, self.opt.epochs), file = self.log_output)
print('-' * 50)
# run training epoch
self.train_epoch()
if self.scheduler is not None:
self.scheduler.step()
return self.model
def train_model(opt):
DB = opt.dataset
expr_dir = os.path.join('./checkpoints/', DB)
if not os.path.exists(expr_dir):
os.makedirs(expr_dir)
prefix = 'PDBSNet' + '_epoch_' + str(opt.epochs)+ '_learning_rate_' + str(opt.learning_rate) + '_factor_train_' + str(opt.factor_train) + '_gpu_ids_' + str(opt.gpu_ids)
trainfile = os.path.join(expr_dir, prefix)
if not os.path.exists(trainfile):
os.makedirs(trainfile)
# Device
device = torch.device('cuda:{}'.format(opt.gpu_ids)) if torch.cuda.is_available() else torch.device('cpu')
# Directories for storing model and output samples
model_path = os.path.join(trainfile, 'model')
logs_path = os.path.join(trainfile, './logs')
setup_seed(opt.seed)
loader_train, band = PDBSNetData(opt)
| """
See more details in papers:
[1] D. Wang, L. Zhuang, L. Gao, X. Sun, M. Huang, and A. Plaza,
“PDBSNet: Pixel-Shuffle Downsampling Blind-Spot Reconstruction Network
for Hyperspectral Anomaly Detection,” IEEE Trans. Geosci. Remote Sens.,
vol. 61, 2023, Art. no. 5511914. DOI: 10.1109/TGRS.2023.3276175
URL: https://ieeexplore.ieee.org/abstract/document/10124448
------------------------------------------------------------------------------
Copyright (May, 2023):
Degang Wang ([email protected])
Lina Zhuang ([email protected])
Lianru Gao ([email protected])
Xu Sun ([email protected])
Min Huang ([email protected])
Antonio Plaza ([email protected])
PDBSNet is distributed under the terms of the GNU General Public License 2.0.
Permission to use, copy, modify, and distribute this software for
any purpose without fee is hereby granted, provided that this entire
notice is included in all copies of any software which is or includes
a copy or modification of this software and in all copies of the
supporting documentation for such software.
This software is being provided "as is", without any express or
implied warranty. In particular, the authors do not make any
representation or warranty of any kind concerning the merchantability
of this software or its fitness for any particular purpose.
------------------------------------------------------------------------------
"""
class Trainer(object):
'''
Trains a model
'''
def __init__(self,
opt,
model,
criterion,
optimizer,
dataloader,
device,
model_path: str,
logs_path: str,
save_freq: int=50,
scheduler = None):
'''
Trains a PyTorch `nn.Module` object provided in `model`
on training sets provided in `dataloader`
using `criterion` and `optimizer`.
Saves model weight snapshots every `save_freq` epochs and saves the
weights at the end of training.
Parameters
----------
model : torch model object, with callable `forward` method.
criterion : callable taking inputs and targets, returning loss.
optimizer : torch.optim optimizer.
dataloader : train dataloaders.
model_path : string. output path for model.
logs_path : string. output path for log.
save_freq : integer. Number of epochs between model checkpoints. Default = 50.
scheduler : learning rate scheduler.
'''
self.model = model
self.optimizer = optimizer
self.criterion = criterion
self.dataloader = dataloader
self.device = device
self.model_path = model_path
self.logs_path = logs_path
self.save_freq = save_freq
self.scheduler = scheduler
self.opt = opt
if not os.path.exists(self.model_path):
os.makedirs(self.model_path)
if not os.path.exists(self.logs_path):
os.makedirs(self.logs_path)
self.log_output = open(f"{self.logs_path}/log.txt", 'w')
self.writer = SummaryWriter(logs_path)
print(self.opt)
print(self.opt, file=self.log_output)
def train_epoch(self) -> None:
# Run a train phase for each epoch
self.model.train(True)
loss_train = []
train_data = pixel_shuffle_down_sampling(self.dataloader, self.opt.factor_train, pad=0)
loader_train = self.dataloader.to(self.device)
train_data = train_data.to(self.device)
# forward net
output = self.model(train_data)
# backward net
self.optimizer.zero_grad()
outputs = pixel_shuffle_up_sampling(output, self.opt.factor_train, pad=0)
loss = self.criterion(outputs, loader_train)
loss.backward()
self.optimizer.step()
# get losses
loss_train = loss.item()
print("Train Loss:" + str(round(loss_train, 4)))
print("Train Loss:" + str(round(loss_train, 4)), file = self.log_output)
# ============ TensorBoard logging ============#
# Log the scalar values
info = {
'Loss_train': np.mean(loss_train)
}
for tag, value in info.items():
self.writer.add_scalar(tag, value, self.epoch + 1)
# Saving model
if ((self.epoch + 1) % self.save_freq == 0):
torch.save(self.model.state_dict(), os.path.join(self.model_path, 'PDBSNet' + '_' + self.opt.dataset + '_' + str(self.epoch + 1) + '.pkl'))
def train(self) -> nn.Module:
for epoch in range(self.opt.epochs):
self.epoch = epoch
print('-' * 50)
print('Epoch {}/{}'.format(epoch + 1, self.opt.epochs))
print('Epoch {}/{}'.format(epoch + 1, self.opt.epochs), file = self.log_output)
print('-' * 50)
# run training epoch
self.train_epoch()
if self.scheduler is not None:
self.scheduler.step()
return self.model
def train_model(opt):
DB = opt.dataset
expr_dir = os.path.join('./checkpoints/', DB)
if not os.path.exists(expr_dir):
os.makedirs(expr_dir)
prefix = 'PDBSNet' + '_epoch_' + str(opt.epochs)+ '_learning_rate_' + str(opt.learning_rate) + '_factor_train_' + str(opt.factor_train) + '_gpu_ids_' + str(opt.gpu_ids)
trainfile = os.path.join(expr_dir, prefix)
if not os.path.exists(trainfile):
os.makedirs(trainfile)
# Device
device = torch.device('cuda:{}'.format(opt.gpu_ids)) if torch.cuda.is_available() else torch.device('cpu')
# Directories for storing model and output samples
model_path = os.path.join(trainfile, 'model')
logs_path = os.path.join(trainfile, './logs')
setup_seed(opt.seed)
loader_train, band = PDBSNetData(opt)
| net = PDBSNet(band, band, nch_ker=opt.nch_ker, nblk=opt.nblk).to(device)
| 0 | 2023-10-16 08:28:56+00:00 | 4k |
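A quick way to sanity-check the `pixel_shuffle_down_sampling` / `pixel_shuffle_up_sampling` pair quoted in this row: with `pad=0` the batched branches reduce to the reshape/permute round trip below, which should reconstruct the input exactly.

import torch
import torch.nn.functional as F

f = 2
x = torch.arange(4 * 8 * 8, dtype=torch.float32).reshape(1, 4, 8, 8)
b, c, w, h = x.shape

# down: pad=0 batched branch of pixel_shuffle_down_sampling
down = (F.pixel_unshuffle(x, f)
        .view(b, c, f, f, w // f, h // f)
        .permute(0, 1, 2, 4, 3, 5)
        .reshape(b, c, w, h))

# up: pad=0 batched branch of pixel_shuffle_up_sampling
before = (down.view(b, c, f, w // f, f, h // f)
          .permute(0, 1, 2, 4, 3, 5)
          .reshape(b, c * f * f, w // f, h // f))
up = F.pixel_shuffle(before, f)

assert torch.equal(up, x)  # PD followed by its inverse is lossless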
AVAniketh0905/fluidspy | fluidspylib/fluidspy/tests/test_fdm.py | [
{
"identifier": "Bottom",
"path": "fluidspylib/fluidspy/numerical/boundary/direction.py",
"snippet": "class Bottom(Direction):\n \"\"\"Bottom direction.\"\"\"\n\n def __init__(\n self,\n initial_value: float,\n state: SimulationState,\n boundary_condition: BoundaryCondition,\n ):\n super().__init__(initial_value, state, boundary_condition)\n self.axis = 0\n\n if self.state.get_dimension() == 1:\n raise ValueError(\"Bottom direction is not available for 1D.\")\n\n def get_cell_indices(self):\n return np.array([[-1 for _ in range(self.state.get_state().shape[1])]])\n\n def get_cells(self):\n bottom_cells = np.take(self.state.get_state(), -1, axis=self.axis)\n bottom_cells_indices = self.get_cell_indices()\n adjacent_cells = np.take(self.state.get_state(), -2, axis=self.axis)\n return bottom_cells, bottom_cells_indices, adjacent_cells\n\n def get_new_cells(self, cells, adjacent_cells):\n return self.boundary_condition.apply(self.initial_value, cells, adjacent_cells)"
},
{
"identifier": "CompositeBoundary",
"path": "fluidspylib/fluidspy/numerical/boundary/composite.py",
"snippet": "class CompositeBoundary:\n children: List[Direction]\n\n def __init__(self, *args) -> None:\n self.children = list(args)\n\n def init_apply(self):\n for child in self.children:\n child.init_apply()\n\n def apply(self):\n for child in self.children:\n child.apply()"
},
{
"identifier": "Constant",
"path": "fluidspylib/fluidspy/numerical/boundary/conditions.py",
"snippet": "class Constant(BoundaryCondition):\n \"\"\"Constant boundary condition.\"\"\"\n\n def apply(\n self, initial_value: float, state: np.ndarray, adjacent_states: np.ndarray\n ):\n if isinstance(initial_value, float):\n state = initial_value\n else:\n state = np.full_like(state, initial_value, dtype=np.float64)\n\n return state"
},
{
"identifier": "Insulated",
"path": "fluidspylib/fluidspy/numerical/boundary/conditions.py",
"snippet": "class Insulated(BoundaryCondition):\n \"\"\"Insulated boundary condition.\"\"\"\n\n def apply(\n self, initial_value: float, state: np.ndarray, adjacent_states: np.ndarray\n ):\n state = np.copy(adjacent_states)\n return state"
},
{
"identifier": "Left",
"path": "fluidspylib/fluidspy/numerical/boundary/direction.py",
"snippet": "class Left(Direction):\n \"\"\"Left direction.\"\"\"\n\n def __init__(\n self,\n initial_value: float,\n state: SimulationState,\n boundary_condition: BoundaryCondition,\n ):\n super().__init__(initial_value, state, boundary_condition)\n self.axis = 0 if state.get_dimension() == 1 else 1\n\n def get_cell_indices(self):\n if self.axis == 1:\n return np.array([[0] for _ in range(self.state.get_state().shape[0])])\n\n return np.array([0])\n\n def get_cells(self):\n left_cells = np.take(self.state.get_state(), 0, axis=self.axis)\n left_cells_indices = self.get_cell_indices()\n adjacent_cells = np.take(self.state.get_state(), 1, axis=self.axis)\n\n return left_cells, left_cells_indices, adjacent_cells\n\n def get_new_cells(self, cells, adjacent_cells):\n new_cells = self.boundary_condition.apply(\n self.initial_value, cells, adjacent_cells\n )\n\n return [[new_cell] for new_cell in new_cells] if self.axis == 1 else [new_cells]"
},
{
"identifier": "Right",
"path": "fluidspylib/fluidspy/numerical/boundary/direction.py",
"snippet": "class Right(Direction):\n \"\"\"Right direction.\"\"\"\n\n def __init__(\n self,\n initial_value: float,\n state: SimulationState,\n boundary_condition: BoundaryCondition,\n ):\n super().__init__(initial_value, state, boundary_condition)\n self.axis = 0 if state.get_dimension() == 1 else 1\n\n def get_cell_indices(self):\n if self.axis == 1:\n return np.array([[-1] for _ in range(self.state.get_state().shape[0])])\n\n return np.array([-1])\n\n def get_cells(self):\n right_cells = np.take(self.state.get_state(), -1, axis=self.axis)\n right_cells_indices = self.get_cell_indices()\n adjacent_cells = np.take(self.state.get_state(), -2, axis=self.axis)\n return right_cells, right_cells_indices, adjacent_cells\n\n def get_new_cells(self, cells, adjacent_cells):\n new_cells = self.boundary_condition.apply(\n self.initial_value, cells, adjacent_cells\n )\n\n return [[new_cell] for new_cell in new_cells] if self.axis == 1 else [new_cells]"
},
{
"identifier": "Top",
"path": "fluidspylib/fluidspy/numerical/boundary/direction.py",
"snippet": "class Top(Direction):\n \"\"\"Top direction.\"\"\"\n\n def __init__(\n self,\n initial_value: float,\n state: SimulationState,\n boundary_condition: BoundaryCondition,\n ):\n super().__init__(initial_value, state, boundary_condition)\n self.axis = 0\n\n if self.state.get_dimension() == 1:\n raise ValueError(\"Top direction is not available for 1D.\")\n\n def get_cell_indices(self):\n return np.array([[0 for _ in range(self.state.get_state().shape[1])]])\n\n def get_cells(self):\n top_cells = np.take(self.state.get_state(), 0, axis=self.axis)\n top_cells_indices = self.get_cell_indices()\n adjacent_cells = np.take(self.state.get_state(), 1, axis=self.axis)\n return top_cells, top_cells_indices, adjacent_cells\n\n def get_new_cells(self, cells, adjacent_cells):\n return self.boundary_condition.apply(self.initial_value, cells, adjacent_cells)"
},
{
"identifier": "OneDimSpatial",
"path": "fluidspylib/fluidspy/numerical/dim/dimension.py",
"snippet": "class OneDimSpatial(Dimension):\n \"\"\"One dimensional spatial dimension.\n\n Args:\n initial_conditions (List[float]): Initial conditions for the simulation.\n \"\"\"\n\n initial_conditions: List[float] = None\n\n def create_grid(self, num_points: int, base_value: float = 0.0):\n \"\"\"Create a grid of num_points points with base_value(or 0).\n\n Args:\n num_points (int): Number of points.\n base_value (float): Base value for the grid. Defaults to 0.0.\n\n Returns:\n np.ndarray: Grid of num_points points with base_value. (1D)\n \"\"\"\n super().create_grid(num_points, base_value)\n\n @staticmethod\n def convolution(\n state_matrix: np.ndarray,\n parametric_matrix: np.ndarray,\n mode=\"same\",\n ):\n return convolve(\n state_matrix,\n parametric_matrix,\n mode=mode,\n )"
},
{
"identifier": "TwoDimSpatial",
"path": "fluidspylib/fluidspy/numerical/dim/dimension.py",
"snippet": "class TwoDimSpatial(Dimension):\n \"\"\"Two dimensional spatial dimension.\n\n Args:\n initial_conditions (List[List[float]]): Initial conditions for the simulation.\n \"\"\"\n\n initial_conditions: List[List[float]] = None\n\n def create_grid(self, num_points: Tuple[int, int], base_value: float = 0.0):\n \"\"\"Create a grid of i * j points with base_value.\n\n Args:\n num_points (Tuple[int, int]): Number of points in each dimension.\n base_value (float): Base value for the grid. Defaults to 0.0.\n\n Returns:\n np.ndarray: Grid of i * j points with base_value. (2D)\n \"\"\"\n super().create_grid(num_points, base_value)\n\n @staticmethod\n def convolution(\n state_matrix: np.ndarray,\n parametric_matrix: np.ndarray,\n mode=\"same\",\n boundary=\"wrap\",\n ):\n return convolve2d(\n state_matrix,\n parametric_matrix,\n mode=mode,\n boundary=boundary,\n fillvalue=0,\n )"
},
{
"identifier": "ThermalProperties",
"path": "fluidspylib/fluidspy/numerical/material_properties/material.py",
"snippet": "class ThermalProperties(MaterialProperties):\n \"\"\"Thermal properties.\n\n Args:\n name (str): Material name.\n density (float): Material density.(kg/m^3)\n specific_heat (float): Material specific heat.(J/kg.K)\n prandtl (float): Material Prandtl number.\n thermal_conductivity (float): Material thermal conductivity.(W/m.K)\n thermal_expansion_coefficient (float): Material thermal expansion coefficient.(1/K)\n \"\"\"\n\n thermal_conductivity: float\n thermal_expansion_coefficient: float"
},
{
"identifier": "FTCS",
"path": "fluidspylib/fluidspy/numerical/methods/finite_differential.py",
"snippet": "class FTCS(FiniteDifferentialMethod):\n F_o: Vector\n\n def __init__(\n self,\n state: SimulationState,\n dim: Dimension,\n properties: ThermalProperties,\n boundary_conditions: CompositeBoundary,\n step: Step,\n ):\n super().__init__(state, dim, properties, boundary_conditions, step)\n self.alpha = self.properties.thermal_expansion_coefficient\n self.get_fourier_number()\n self.stability()\n\n def get_fourier_number(self):\n self.F_o = Vector()\n self.F_o.x = self.alpha * self.step.time / (self.step.vec.x**2)\n self.F_o.y = self.alpha * self.step.time / (self.step.vec.y**2)\n self.F_o.z = self.alpha * self.step.time / (self.step.vec.z**2)\n\n def stability(self):\n if self.F_o > Vector(0.5, 0.5, 0.5):\n raise ValueError(\n \"The solution is unstable! Please choose a smaller time step.\"\n )\n\n def get_parametric(self):\n if self.state.get_dimension() == 1:\n return np.array([self.F_o.x, 1 - 2 * self.F_o.x, self.F_o.x])\n\n return np.array(\n [\n [0, self.F_o.y, 0],\n [self.F_o.x, -2 * (self.F_o.x + self.F_o.y), self.F_o.x],\n [0, self.F_o.y, 0],\n ]\n )"
},
{
"identifier": "SimulationState",
"path": "fluidspylib/fluidspy/numerical/state.py",
"snippet": "class SimulationState:\n state: np.ndarray | NoneType = None\n\n def get_state(self) -> np.ndarray:\n return self.state\n\n def set_state(self, value: np.ndarray):\n self.state = value\n\n def get_dimension(self):\n return self.state.ndim"
},
{
"identifier": "Step",
"path": "fluidspylib/fluidspy/numerical/step.py",
"snippet": "class Step:\n time: float\n vec: Vector\n\n def __init__(\n self,\n time: float,\n vec: Vector = Vector(),\n ):\n \"\"\"\n Create the time step and the spatial step.\n\n Args:\n time (float): The time step.\n vec (Vector): The spatial step. Defaults to (0, 0, 0).\n \"\"\"\n\n self.time = time\n self.vec = vec\n\n def __repr__(self) -> str:\n return f\"({self.time}, {self.vec})\""
},
{
"identifier": "Vector",
"path": "fluidspylib/fluidspy/numerical/step.py",
"snippet": "class Vector:\n x: float\n y: float\n z: float\n\n def __init__(self, x=inf, y=inf, z=inf) -> None:\n \"\"\"\n Create the spatial step.\n \"\"\"\n self.x = x\n self.y = y\n self.z = z\n\n def __repr__(self) -> str:\n return f\"({self.x}, {self.y}, {self.z})\""
}
] | import pytest
from ..numerical.boundary import Bottom
from ..numerical.boundary import CompositeBoundary
from ..numerical.boundary import Constant
from ..numerical.boundary import Insulated
from ..numerical.boundary import Left
from ..numerical.boundary import Right
from ..numerical.boundary import Top
from ..numerical.dim.dimension import OneDimSpatial
from ..numerical.dim.dimension import TwoDimSpatial
from ..numerical.material_properties import ThermalProperties
from ..numerical.methods.finite_differential import FTCS
from ..numerical.state import SimulationState
from ..numerical.step import Step
from ..numerical.step import Vector | 3,076 |
def create_state_dim(state, dim, shape):
dim = dim(state)
dim.create_grid(shape)
return dim
def test_ftcs():
state = SimulationState()
dim = create_state_dim(state, OneDimSpatial, 10)
boundary = CompositeBoundary(
Left(5, state, Constant()), Right(10, state, Insulated())
)
boundary.init_apply()
material = ThermalProperties("Copper", 8940, 385, 0.71, 401, 0.0016)
|
def create_state_dim(state, dim, shape):
dim = dim(state)
dim.create_grid(shape)
return dim
def test_ftcs():
state = SimulationState()
dim = create_state_dim(state, OneDimSpatial, 10)
boundary = CompositeBoundary(
Left(5, state, Constant()), Right(10, state, Insulated())
)
boundary.init_apply()
material = ThermalProperties("Copper", 8940, 385, 0.71, 401, 0.0016) | step = Step(0.1, Vector(0.1)) | 13 | 2023-10-21 06:55:58+00:00 | 4k |
jobless-devs/Jobhub | lambdas/packages/python/psycopg2/extras.py | [
{
"identifier": "PY2",
"path": "lambdas/packages/python/psycopg2/compat.py",
"snippet": "PY2 = True\nPY3 = False\nPY2 = False\nPY3 = True"
},
{
"identifier": "adapt",
"path": "lambdas/packages/python/psycopg2/extensions.py",
"snippet": "ISOLATION_LEVEL_AUTOCOMMIT = 0\nISOLATION_LEVEL_READ_UNCOMMITTED = 4\nISOLATION_LEVEL_READ_COMMITTED = 1\nISOLATION_LEVEL_REPEATABLE_READ = 2\nISOLATION_LEVEL_SERIALIZABLE = 3\nISOLATION_LEVEL_DEFAULT = None\nSTATUS_SETUP = 0\nSTATUS_READY = 1\nSTATUS_BEGIN = 2\nSTATUS_SYNC = 3 # currently unused\nSTATUS_ASYNC = 4 # currently unused\nSTATUS_PREPARED = 5\nSTATUS_IN_TRANSACTION = STATUS_BEGIN\nPOLL_OK = 0\nPOLL_READ = 1\nPOLL_WRITE = 2\nPOLL_ERROR = 3\nTRANSACTION_STATUS_IDLE = 0\nTRANSACTION_STATUS_ACTIVE = 1\nTRANSACTION_STATUS_INTRANS = 2\nTRANSACTION_STATUS_INERROR = 3\nTRANSACTION_STATUS_UNKNOWN = 4\n JSON, JSONARRAY = register_default_json()\n JSONB, JSONBARRAY = register_default_jsonb()\ndef register_adapter(typ, callable):\n def __init__(self, seq):\n def prepare(self, conn):\n def getquoted(self):\n def __str__(self):\n def __init__(self, obj):\n def getquoted(self, _null=b\"NULL\"):\ndef make_dsn(dsn=None, **kwargs):\ndef _param_escape(s,\n re_escape=_re.compile(r\"([\\\\'])\"),\n re_space=_re.compile(r'\\s')):\nclass SQL_IN(object):\nclass NoneAdapter(object):"
},
{
"identifier": "connection",
"path": "lambdas/packages/python/psycopg2/extensions.py",
"snippet": "ISOLATION_LEVEL_AUTOCOMMIT = 0\nISOLATION_LEVEL_READ_UNCOMMITTED = 4\nISOLATION_LEVEL_READ_COMMITTED = 1\nISOLATION_LEVEL_REPEATABLE_READ = 2\nISOLATION_LEVEL_SERIALIZABLE = 3\nISOLATION_LEVEL_DEFAULT = None\nSTATUS_SETUP = 0\nSTATUS_READY = 1\nSTATUS_BEGIN = 2\nSTATUS_SYNC = 3 # currently unused\nSTATUS_ASYNC = 4 # currently unused\nSTATUS_PREPARED = 5\nSTATUS_IN_TRANSACTION = STATUS_BEGIN\nPOLL_OK = 0\nPOLL_READ = 1\nPOLL_WRITE = 2\nPOLL_ERROR = 3\nTRANSACTION_STATUS_IDLE = 0\nTRANSACTION_STATUS_ACTIVE = 1\nTRANSACTION_STATUS_INTRANS = 2\nTRANSACTION_STATUS_INERROR = 3\nTRANSACTION_STATUS_UNKNOWN = 4\n JSON, JSONARRAY = register_default_json()\n JSONB, JSONBARRAY = register_default_jsonb()\ndef register_adapter(typ, callable):\n def __init__(self, seq):\n def prepare(self, conn):\n def getquoted(self):\n def __str__(self):\n def __init__(self, obj):\n def getquoted(self, _null=b\"NULL\"):\ndef make_dsn(dsn=None, **kwargs):\ndef _param_escape(s,\n re_escape=_re.compile(r\"([\\\\'])\"),\n re_space=_re.compile(r'\\s')):\nclass SQL_IN(object):\nclass NoneAdapter(object):"
},
{
"identifier": "cursor",
"path": "lambdas/packages/python/psycopg2/extensions.py",
"snippet": "ISOLATION_LEVEL_AUTOCOMMIT = 0\nISOLATION_LEVEL_READ_UNCOMMITTED = 4\nISOLATION_LEVEL_READ_COMMITTED = 1\nISOLATION_LEVEL_REPEATABLE_READ = 2\nISOLATION_LEVEL_SERIALIZABLE = 3\nISOLATION_LEVEL_DEFAULT = None\nSTATUS_SETUP = 0\nSTATUS_READY = 1\nSTATUS_BEGIN = 2\nSTATUS_SYNC = 3 # currently unused\nSTATUS_ASYNC = 4 # currently unused\nSTATUS_PREPARED = 5\nSTATUS_IN_TRANSACTION = STATUS_BEGIN\nPOLL_OK = 0\nPOLL_READ = 1\nPOLL_WRITE = 2\nPOLL_ERROR = 3\nTRANSACTION_STATUS_IDLE = 0\nTRANSACTION_STATUS_ACTIVE = 1\nTRANSACTION_STATUS_INTRANS = 2\nTRANSACTION_STATUS_INERROR = 3\nTRANSACTION_STATUS_UNKNOWN = 4\n JSON, JSONARRAY = register_default_json()\n JSONB, JSONBARRAY = register_default_jsonb()\ndef register_adapter(typ, callable):\n def __init__(self, seq):\n def prepare(self, conn):\n def getquoted(self):\n def __str__(self):\n def __init__(self, obj):\n def getquoted(self, _null=b\"NULL\"):\ndef make_dsn(dsn=None, **kwargs):\ndef _param_escape(s,\n re_escape=_re.compile(r\"([\\\\'])\"),\n re_space=_re.compile(r'\\s')):\nclass SQL_IN(object):\nclass NoneAdapter(object):"
}
] | import logging as _logging
import os as _os
import re as _re
import time as _time
import psycopg2
import uuid
import warnings
import select
from collections import namedtuple, OrderedDict
from psycopg2 import extensions as _ext
from psycopg2._ipaddress import register_ipaddress # noqa
from psycopg2._json import ( # noqa
json, Json, register_json, register_default_json, register_default_jsonb)
from psycopg2._psycopg import ( # noqa
REPLICATION_PHYSICAL, REPLICATION_LOGICAL,
ReplicationConnection as _replicationConnection,
ReplicationCursor as _replicationCursor,
ReplicationMessage)
from psycopg2._range import ( # noqa
Range, NumericRange, DateRange, DateTimeRange, DateTimeTZRange,
register_range, RangeAdapter, RangeCaster)
from .compat import PY2, PY3, lru_cache
from .extensions import adapt as _A, quote_ident
from .extensions import connection as _connection
from .extensions import cursor as _cursor
from psycopg2.extensions import POLL_OK, POLL_READ, POLL_WRITE
from psycopg2.sql import Composable | 2,715 | if self._prefetch:
res = super(DictCursorBase, self).fetchmany(size)
if self._query_executed:
self._build_index()
if not self._prefetch:
res = super(DictCursorBase, self).fetchmany(size)
return res
def fetchall(self):
if self._prefetch:
res = super(DictCursorBase, self).fetchall()
if self._query_executed:
self._build_index()
if not self._prefetch:
res = super(DictCursorBase, self).fetchall()
return res
def __iter__(self):
try:
if self._prefetch:
res = super(DictCursorBase, self).__iter__()
first = next(res)
if self._query_executed:
self._build_index()
if not self._prefetch:
res = super(DictCursorBase, self).__iter__()
first = next(res)
yield first
while True:
yield next(res)
except StopIteration:
return
class DictConnection(_connection):
"""A connection that uses `DictCursor` automatically."""
def cursor(self, *args, **kwargs):
kwargs.setdefault('cursor_factory', self.cursor_factory or DictCursor)
return super(DictConnection, self).cursor(*args, **kwargs)
class DictCursor(DictCursorBase):
"""A cursor that keeps a list of column name -> index mappings."""
def __init__(self, *args, **kwargs):
kwargs['row_factory'] = DictRow
super(DictCursor, self).__init__(*args, **kwargs)
self._prefetch = True
def execute(self, query, vars=None):
self.index = OrderedDict()
self._query_executed = True
return super(DictCursor, self).execute(query, vars)
def callproc(self, procname, vars=None):
self.index = OrderedDict()
self._query_executed = True
return super(DictCursor, self).callproc(procname, vars)
def _build_index(self):
if self._query_executed and self.description:
for i in range(len(self.description)):
self.index[self.description[i][0]] = i
self._query_executed = False
class DictRow(list):
"""A row object that allow by-column-name access to data."""
__slots__ = ('_index',)
def __init__(self, cursor):
self._index = cursor.index
self[:] = [None] * len(cursor.description)
def __getitem__(self, x):
if not isinstance(x, (int, slice)):
x = self._index[x]
return super(DictRow, self).__getitem__(x)
def __setitem__(self, x, v):
if not isinstance(x, (int, slice)):
x = self._index[x]
super(DictRow, self).__setitem__(x, v)
def items(self):
g = super(DictRow, self).__getitem__
return ((n, g(self._index[n])) for n in self._index)
def keys(self):
return iter(self._index)
def values(self):
g = super(DictRow, self).__getitem__
return (g(self._index[n]) for n in self._index)
def get(self, x, default=None):
try:
return self[x]
except Exception:
return default
def copy(self):
return OrderedDict(self.items())
def __contains__(self, x):
return x in self._index
def __reduce__(self):
# this is apparently useless, but it fixes #1073
return super(DictRow, self).__reduce__()
def __getstate__(self):
return self[:], self._index.copy()
def __setstate__(self, data):
self[:] = data[0]
self._index = data[1]
| """Miscellaneous goodies for psycopg2
This module is a generic place used to hold little helper functions
and classes until a better place in the distribution is found.
"""
# psycopg/extras.py - miscellaneous extra goodies for psycopg
#
# Copyright (C) 2003-2019 Federico Di Gregorio <[email protected]>
# Copyright (C) 2020 The Psycopg Team
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# In addition, as a special exception, the copyright holders give
# permission to link this program with the OpenSSL library (or with
# modified versions of OpenSSL that use the same license as OpenSSL),
# and distribute linked combinations including the two.
#
# You must obey the GNU Lesser General Public License in all respects for
# all of the code used other than OpenSSL.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
# Expose ipaddress-related objects
# expose the json adaptation stuff into the module
# Expose range-related objects
class DictCursorBase(_cursor):
"""Base class for all dict-like cursors."""
def __init__(self, *args, **kwargs):
if 'row_factory' in kwargs:
row_factory = kwargs['row_factory']
del kwargs['row_factory']
else:
raise NotImplementedError(
"DictCursorBase can't be instantiated without a row factory.")
super(DictCursorBase, self).__init__(*args, **kwargs)
self._query_executed = False
self._prefetch = False
self.row_factory = row_factory
def fetchone(self):
if self._prefetch:
res = super(DictCursorBase, self).fetchone()
if self._query_executed:
self._build_index()
if not self._prefetch:
res = super(DictCursorBase, self).fetchone()
return res
def fetchmany(self, size=None):
if self._prefetch:
res = super(DictCursorBase, self).fetchmany(size)
if self._query_executed:
self._build_index()
if not self._prefetch:
res = super(DictCursorBase, self).fetchmany(size)
return res
def fetchall(self):
if self._prefetch:
res = super(DictCursorBase, self).fetchall()
if self._query_executed:
self._build_index()
if not self._prefetch:
res = super(DictCursorBase, self).fetchall()
return res
def __iter__(self):
try:
if self._prefetch:
res = super(DictCursorBase, self).__iter__()
first = next(res)
if self._query_executed:
self._build_index()
if not self._prefetch:
res = super(DictCursorBase, self).__iter__()
first = next(res)
yield first
while True:
yield next(res)
except StopIteration:
return
class DictConnection(_connection):
"""A connection that uses `DictCursor` automatically."""
def cursor(self, *args, **kwargs):
kwargs.setdefault('cursor_factory', self.cursor_factory or DictCursor)
return super(DictConnection, self).cursor(*args, **kwargs)
class DictCursor(DictCursorBase):
"""A cursor that keeps a list of column name -> index mappings."""
def __init__(self, *args, **kwargs):
kwargs['row_factory'] = DictRow
super(DictCursor, self).__init__(*args, **kwargs)
self._prefetch = True
def execute(self, query, vars=None):
self.index = OrderedDict()
self._query_executed = True
return super(DictCursor, self).execute(query, vars)
def callproc(self, procname, vars=None):
self.index = OrderedDict()
self._query_executed = True
return super(DictCursor, self).callproc(procname, vars)
def _build_index(self):
if self._query_executed and self.description:
for i in range(len(self.description)):
self.index[self.description[i][0]] = i
self._query_executed = False
class DictRow(list):
"""A row object that allow by-column-name access to data."""
__slots__ = ('_index',)
def __init__(self, cursor):
self._index = cursor.index
self[:] = [None] * len(cursor.description)
def __getitem__(self, x):
if not isinstance(x, (int, slice)):
x = self._index[x]
return super(DictRow, self).__getitem__(x)
def __setitem__(self, x, v):
if not isinstance(x, (int, slice)):
x = self._index[x]
super(DictRow, self).__setitem__(x, v)
def items(self):
g = super(DictRow, self).__getitem__
return ((n, g(self._index[n])) for n in self._index)
def keys(self):
return iter(self._index)
def values(self):
g = super(DictRow, self).__getitem__
return (g(self._index[n]) for n in self._index)
def get(self, x, default=None):
try:
return self[x]
except Exception:
return default
def copy(self):
return OrderedDict(self.items())
def __contains__(self, x):
return x in self._index
def __reduce__(self):
# this is apparently useless, but it fixes #1073
return super(DictRow, self).__reduce__()
def __getstate__(self):
return self[:], self._index.copy()
def __setstate__(self, data):
self[:] = data[0]
self._index = data[1]
| if PY2: | 0 | 2023-10-22 20:09:51+00:00 | 4k |
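For context, the `DictCursor` defined in this vendored `psycopg2.extras` is consumed through the `cursor_factory` hook. A hedged usage sketch — it assumes the real psycopg2 package plus a reachable PostgreSQL server, and the DSN below is a placeholder:

import psycopg2
import psycopg2.extras

conn = psycopg2.connect("dbname=test user=postgres")  # placeholder DSN
cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
cur.execute("SELECT 1 AS answer")
row = cur.fetchone()
print(row["answer"], row[0])  # DictRow allows both by-name and by-index access
conn.close()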
kyegomez/gradient-ascent | visualization.py | [
{
"identifier": "GradientAscent",
"path": "gradient_ascent/main.py",
"snippet": "class GradientAscent:\n \"\"\"\n Gradient Ascent Optimizer\n\n Optimizer that performs gradient ascent on the parameters of the model.\n\n Args:\n parameters (iterable): iterable of parameters to optimize or dicts defining parameter groups\n lr (float, optional): learning rate (default: 0.01)\n momentum (float, optional): momentum factor (default: 0.9)\n beta (float, optional): beta factor (default: 0.999)\n eps (float, optional): epsilon (default: 1e-8)\n nesterov (bool, optional): enables Nesterov accelerated gradient (default: False)\n clip_value (float, optional): gradient clipping value (default: None)\n lr_decay (float, optional): learning rate decay (default: None)\n warmup_steps (int, optional): warmup steps (default: 0)\n logging_interval (int, optional): logging interval (default: 10)\n\n\n Attributes:\n defaults (dict): default optimization options\n parameters (iterable): iterable of parameters to optimize or dicts defining parameter groups\n lr (float): learning rate\n momentum (float): momentum factor\n beta (float): beta factor\n eps (float): epsilon\n v (dict): momentum\n m (dict): adaptive learning rate\n\n Example:\n >>> optimizer = GradientAscent(model.parameters(), lr=0.01)\n >>> optimizer.zero_grad()\n >>> loss_fn(model(input), target).backward()\n >>> optimizer.step()\n\n\n \"\"\"\n\n def __init__(\n self,\n parameters,\n lr=0.01,\n momentum=0.9,\n beta=0.999,\n eps=1e-8,\n nesterov=False,\n clip_value=None,\n lr_decay=None,\n warmup_steps=0,\n logging_interval=10,\n ):\n self.parameters = list(parameters)\n self.lr = lr\n self.momentum = momentum\n self.beta = beta\n self.eps = eps\n # Nesterov accelerated gradient NAG => Provides a lookahead in the direction of the parameter updates => optimizer converge faster\n self.nesterov = nesterov\n # Gradient Clipping => Prevents exploding gradients\n self.clip_value = clip_value\n # Learning Rate Decay => Prevents oscillations\n self.lr_decay = lr_decay\n self.warmup_steps = warmup_steps\n self.logging_interval = logging_interval\n\n self.step_count = 0\n\n # Initalize momentum and adaptive learning rate\n self.v = {p: torch.zeros_like(p.data) for p in self.parameters}\n self.m = {p: torch.zeros_like(p.data) for p in self.parameters}\n\n def step(self):\n self.step_count += 1\n \"\"\"Step function for gradient ascent optimizer\"\"\"\n for param in self.parameters:\n try:\n if param.grad is not None:\n if self.clip_value:\n torch.nn.utils.clip_grad_value_(param.grad, self.clip_value)\n\n # Nesterov Accelerated Gradient\n if self.nesterov:\n grad = param.grad + self.momentum * self.v[param]\n else:\n grad = param.grad\n\n # Momentum\n self.v[param] = self.momentum * self.v[param] + grad\n\n # Adaptive learning rate\n self.m[param] = (\n self.beta * self.m[param] + (1 - self.beta) * grad**2\n )\n adapted_lr = self.lr / (torch.sqrt(self.m[param]) + self.eps)\n\n # Warmup Learning Rate\n if self.step_count <= self.warmup_steps:\n warmup_factor = self.step_count / float(self.warmup_steps)\n adapted_lr *= warmup_factor\n\n # Gradient Ascent\n param.data.add_(adapted_lr * self.v[param])\n\n # Learning Rate Decay\n if self.lr_decay:\n self.lr *= self.lr_decay\n\n if self.step_count % self.logging_interval == 0:\n print(\n f\"Step: {self.step_count}, Learning Rate: {self.lr}, Gradient Norm: {torch.norm(param.grad)}\"\n )\n\n except Exception as error:\n print(f\"Exception during optimization: {error}\")\n\n def zero_grad(self):\n \"\"\"Zero the gradient of the parameters\"\"\"\n for param in self.parameters:\n if 
param.grad is not None:\n param.grad.detach_()\n param.grad.zero_()"
},
{
"identifier": "GradientAscent",
"path": "gradient_ascent/main.py",
"snippet": "class GradientAscent:\n \"\"\"\n Gradient Ascent Optimizer\n\n Optimizer that performs gradient ascent on the parameters of the model.\n\n Args:\n parameters (iterable): iterable of parameters to optimize or dicts defining parameter groups\n lr (float, optional): learning rate (default: 0.01)\n momentum (float, optional): momentum factor (default: 0.9)\n beta (float, optional): beta factor (default: 0.999)\n eps (float, optional): epsilon (default: 1e-8)\n nesterov (bool, optional): enables Nesterov accelerated gradient (default: False)\n clip_value (float, optional): gradient clipping value (default: None)\n lr_decay (float, optional): learning rate decay (default: None)\n warmup_steps (int, optional): warmup steps (default: 0)\n logging_interval (int, optional): logging interval (default: 10)\n\n\n Attributes:\n defaults (dict): default optimization options\n parameters (iterable): iterable of parameters to optimize or dicts defining parameter groups\n lr (float): learning rate\n momentum (float): momentum factor\n beta (float): beta factor\n eps (float): epsilon\n v (dict): momentum\n m (dict): adaptive learning rate\n\n Example:\n >>> optimizer = GradientAscent(model.parameters(), lr=0.01)\n >>> optimizer.zero_grad()\n >>> loss_fn(model(input), target).backward()\n >>> optimizer.step()\n\n\n \"\"\"\n\n def __init__(\n self,\n parameters,\n lr=0.01,\n momentum=0.9,\n beta=0.999,\n eps=1e-8,\n nesterov=False,\n clip_value=None,\n lr_decay=None,\n warmup_steps=0,\n logging_interval=10,\n ):\n self.parameters = list(parameters)\n self.lr = lr\n self.momentum = momentum\n self.beta = beta\n self.eps = eps\n # Nesterov accelerated gradient NAG => Provides a lookahead in the direction of the parameter updates => optimizer converge faster\n self.nesterov = nesterov\n # Gradient Clipping => Prevents exploding gradients\n self.clip_value = clip_value\n # Learning Rate Decay => Prevents oscillations\n self.lr_decay = lr_decay\n self.warmup_steps = warmup_steps\n self.logging_interval = logging_interval\n\n self.step_count = 0\n\n # Initalize momentum and adaptive learning rate\n self.v = {p: torch.zeros_like(p.data) for p in self.parameters}\n self.m = {p: torch.zeros_like(p.data) for p in self.parameters}\n\n def step(self):\n self.step_count += 1\n \"\"\"Step function for gradient ascent optimizer\"\"\"\n for param in self.parameters:\n try:\n if param.grad is not None:\n if self.clip_value:\n torch.nn.utils.clip_grad_value_(param.grad, self.clip_value)\n\n # Nesterov Accelerated Gradient\n if self.nesterov:\n grad = param.grad + self.momentum * self.v[param]\n else:\n grad = param.grad\n\n # Momentum\n self.v[param] = self.momentum * self.v[param] + grad\n\n # Adaptive learning rate\n self.m[param] = (\n self.beta * self.m[param] + (1 - self.beta) * grad**2\n )\n adapted_lr = self.lr / (torch.sqrt(self.m[param]) + self.eps)\n\n # Warmup Learning Rate\n if self.step_count <= self.warmup_steps:\n warmup_factor = self.step_count / float(self.warmup_steps)\n adapted_lr *= warmup_factor\n\n # Gradient Ascent\n param.data.add_(adapted_lr * self.v[param])\n\n # Learning Rate Decay\n if self.lr_decay:\n self.lr *= self.lr_decay\n\n if self.step_count % self.logging_interval == 0:\n print(\n f\"Step: {self.step_count}, Learning Rate: {self.lr}, Gradient Norm: {torch.norm(param.grad)}\"\n )\n\n except Exception as error:\n print(f\"Exception during optimization: {error}\")\n\n def zero_grad(self):\n \"\"\"Zero the gradient of the parameters\"\"\"\n for param in self.parameters:\n if 
param.grad is not None:\n param.grad.detach_()\n param.grad.zero_()"
}
] | import matplotlib.pyplot as plt
import numpy as np
import torch
from matplotlib.animation import FuncAnimation
from gradient_ascent import GradientAscent
from gradient_ascent.main import GradientAscent | 2,636 |
class SimpleModel(torch.nn.Module):
def __init__(self):
super(SimpleModel, self).__init__()
self.fc = torch.nn.Linear(1, 1)
def forward(self, x):
return self.fc(x)
# Set up real-time plotting
plt.ion() # Turn on interactive mode
fig, ax = plt.subplots(figsize=(10, 6))
ax.set_xlim(0, 1000) # Assuming 1000 epochs
ax.set_ylim(0, 10) # Arbitrary y-axis limits for visualization purposes
ax.set_title("Model Output vs. Target during Training")
ax.set_xlabel("Epoch")
ax.set_ylabel("Value")
(line_output,) = ax.plot([], [], "r-", label="Model Output")
(line_target,) = ax.plot([], [], "g-", label="Target Value")
ax.legend()
# Initialization function for the animation
def init():
line_output.set_data([], [])
line_target.set_data([], [])
return line_output, line_target
def update(epoch, model_output_value):
x_data_output, y_data_output = line_output.get_data()
x_data_target, y_data_target = line_target.get_data()
# Convert numpy arrays to lists only if they aren't already lists
if not isinstance(x_data_output, list):
x_data_output = x_data_output.tolist()
if not isinstance(y_data_output, list):
y_data_output = y_data_output.tolist()
if not isinstance(x_data_target, list):
x_data_target = x_data_target.tolist()
if not isinstance(y_data_target, list):
y_data_target = y_data_target.tolist()
# Append new data
x_data_output.append(epoch)
y_data_output.append(model_output_value)
x_data_target.append(epoch)
y_data_target.append(target.item())
line_output.set_data(x_data_output, y_data_output)
line_target.set_data(x_data_target, y_data_target)
fig.canvas.flush_events()
return line_output, line_target
# Test the optimizer
model = SimpleModel()
# Define the optimizer
|
class SimpleModel(torch.nn.Module):
def __init__(self):
super(SimpleModel, self).__init__()
self.fc = torch.nn.Linear(1, 1)
def forward(self, x):
return self.fc(x)
# Set up real-time plotting
plt.ion() # Turn on interactive mode
fig, ax = plt.subplots(figsize=(10, 6))
ax.set_xlim(0, 1000) # Assuming 1000 epochs
ax.set_ylim(0, 10) # Arbitrary y-axis limits for visualization purposes
ax.set_title("Model Output vs. Target during Training")
ax.set_xlabel("Epoch")
ax.set_ylabel("Value")
(line_output,) = ax.plot([], [], "r-", label="Model Output")
(line_target,) = ax.plot([], [], "g-", label="Target Value")
ax.legend()
# Initialization function for the animation
def init():
line_output.set_data([], [])
line_target.set_data([], [])
return line_output, line_target
def update(epoch, model_output_value):
x_data_output, y_data_output = line_output.get_data()
x_data_target, y_data_target = line_target.get_data()
# Convert numpy arrays to lists only if they aren't already lists
if not isinstance(x_data_output, list):
x_data_output = x_data_output.tolist()
if not isinstance(y_data_output, list):
y_data_output = y_data_output.tolist()
if not isinstance(x_data_target, list):
x_data_target = x_data_target.tolist()
if not isinstance(y_data_target, list):
y_data_target = y_data_target.tolist()
# Append new data
x_data_output.append(epoch)
y_data_output.append(model_output_value)
x_data_target.append(epoch)
y_data_target.append(target.item())
line_output.set_data(x_data_output, y_data_output)
line_target.set_data(x_data_target, y_data_target)
fig.canvas.flush_events()
return line_output, line_target
# Test the optimizer
model = SimpleModel()
# Define the optimizer | optimizer = GradientAscent( | 1 | 2023-10-21 01:14:22+00:00 | 4k |
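The record above ends as the optimizer is constructed; note that the row's update() helper reads a global target that the visible code never defines. A minimal sketch of how the training loop might continue, assuming a hypothetical scalar target and fixed model input (names not present in the original):

target = torch.tensor([[5.0]])       # assumed target value; update() reads this global via target.item()
fixed_input = torch.tensor([[1.0]])  # assumed constant input to the one-feature model

optimizer = GradientAscent(model.parameters(), lr=0.01, momentum=0.9, clip_value=1.0)

for epoch in range(1000):
    optimizer.zero_grad()
    output = model(fixed_input)
    # Gradient *ascent* adds the gradient, so maximize the negative squared
    # error to climb toward the target value.
    objective = -(output - target).pow(2).mean()
    objective.backward()
    optimizer.step()
    update(epoch, output.item())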
cfs-energy/cfspopcon | cfspopcon/formulas/scrape_off_layer_model/lambda_q.py | [
{
"identifier": "LambdaQScaling",
"path": "cfspopcon/named_options.py",
"snippet": "class LambdaQScaling(Enum):\n \"\"\"Options for heat flux decay length scaling.\"\"\"\n\n Brunner = auto()\n EichRegression14 = auto()\n EichRegression15 = auto()"
},
{
"identifier": "wraps_ufunc",
"path": "cfspopcon/unit_handling/decorator.py",
"snippet": "def wraps_ufunc( # noqa: PLR0915\n input_units: dict[str, Union[str, Unit, None]],\n return_units: dict[str, Union[str, Unit, None]],\n pass_as_kwargs: tuple = (),\n # kwargs for apply_ufunc\n input_core_dims: Optional[Sequence[Sequence]] = None,\n output_core_dims: Optional[Sequence[Sequence]] = ((),),\n exclude_dims: Set = frozenset(),\n vectorize: bool = True,\n join: str = \"exact\",\n dataset_join: str = \"exact\",\n keep_attrs: str = \"drop_conflicts\",\n dask: str = \"forbidden\",\n output_dtypes: Optional[Sequence] = None,\n output_sizes: Optional[Mapping[Any, int]] = None,\n dask_gufunc_kwargs: Optional[dict[str, Any]] = None,\n) -> FunctionType:\n \"\"\"Decorator for functions to add in unit and dimension handling.\n\n input_units and return_units must be provided, as dictionaries giving\n a mapping between the function arguments/returns and their units.\n\n pass_as_kwargs can be used to optionally declare that specific arguments\n should be pass directly into the function, rather than vectorized.\n\n The remaining arguments for the wrapper correspond to arguments for\n xr.apply_ufunc.\n https://docs.xarray.dev/en/stable/examples/apply_ufunc_vectorize_1d.html\n \"\"\"\n input_units = _check_units(input_units)\n return_units = _check_units(return_units)\n\n ufunc_kwargs: dict[str, Any] = dict(\n input_core_dims=input_core_dims,\n output_core_dims=output_core_dims,\n exclude_dims=exclude_dims,\n vectorize=vectorize,\n join=join,\n dataset_join=dataset_join,\n keep_attrs=keep_attrs,\n dask=dask,\n output_dtypes=output_dtypes,\n output_sizes=output_sizes,\n dask_gufunc_kwargs=dask_gufunc_kwargs,\n )\n input_keys = list(input_units.keys())\n\n if not isinstance(pass_as_kwargs, tuple):\n raise ValueError(f\"pass_as_kwargs must be passed as a tuple of keys, not {str(type(pass_as_kwargs))[1:-1]}\")\n\n pass_as_positional_args = [key for key in input_keys if key not in pass_as_kwargs]\n for arg in pass_as_kwargs:\n kwarg_position = input_keys.index(arg)\n if kwarg_position < len(pass_as_positional_args):\n raise ValueError(f\"Argument {arg} in pass_as_kwargs appears before the positional args {pass_as_positional_args}\")\n\n if input_core_dims is not None:\n if not len(input_core_dims) == len(pass_as_positional_args):\n raise ValueError(\n f\"input_core_dims (len {len(input_core_dims)}) must the same length as positional_args ({pass_as_positional_args}, len {len(pass_as_positional_args)})\"\n )\n else:\n input_core_dims = len(pass_as_positional_args) * [()]\n\n def _wraps_ufunc(func: FunctionType) -> FunctionType:\n\n func_signature = signature(func)\n func_parameters = func_signature.parameters\n\n if not list(input_units.keys()) == list(func_parameters.keys()):\n raise ValueError(\n f\"Keys for input_units {input_units.keys()} did not match func_parameters {func_parameters.keys()} (n.b. order matters!)\"\n )\n\n default_values = {key: val.default for key, val in func_parameters.items() if val.default is not Parameter.empty}\n\n @functools.wraps(func)\n def popcon_ufunc_wrapped_call(*args: Any, **kwargs: Any) -> Any: # noqa: PLR0912\n \"\"\"Transform args and kwargs, then call the inner function.\"\"\"\n # if anything goes wrong we can do some extra work to provide a better error below\n try:\n args_dict = dict(zip(input_keys, args))\n\n if not set(args_dict.keys()).isdisjoint(kwargs.keys()):\n raise RuntimeError(\n f\"{func.__name__} was called with repeat arguments. 
Input was interpreted as args={args_dict}, kwargs={kwargs}\"\n )\n\n args_dict = {**args_dict, **kwargs}\n args_dict = {**args_dict, **{key: val for key, val in default_values.items() if key not in args_dict.keys()}}\n\n args_dict = _return_magnitude_in_specified_units(args_dict, input_units)\n\n positional_args = []\n for i, key in enumerate(pass_as_positional_args):\n arg = args_dict[key]\n if not isinstance(arg, xr.DataArray):\n positional_args.append(xr.DataArray(arg).expand_dims(input_core_dims[i]))\n else:\n positional_args.append(arg)\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"error\", category=UnitStrippedWarning)\n function_return = xr.apply_ufunc(\n func,\n *positional_args,\n kwargs={key: args_dict[key] for key in pass_as_kwargs},\n **ufunc_kwargs,\n )\n\n if len(return_units) == 0:\n # Assume that the function return None\n return function_return.item()\n\n function_return = _convert_return_to_quantities(function_return, return_units)\n\n function_return = list(function_return.values())\n\n if len(function_return) > 1:\n return tuple(function_return)\n else:\n return function_return[0]\n\n except Exception as e:\n # the below checks if we are inside FunctionWrapper being called from another FunctionWrapper\n # if that is the case we try and give a more helpful error\n # if anything goes wrong in our frame inspection or we find that we aren't in a chained\n # call we raise the previous exception\n err = \"\"\n try:\n import inspect\n\n frames = inspect.getouterframes(inspect.currentframe())\n # the first entry is the current call so check if any of the earlier callees are a __call__ from a FunctionWrapper\n for frame in frames[1:]:\n if frame.function == \"popcon_ufunc_wrapped_call\":\n f = frames[1]\n err = \"Calling `wraps_ufunc` decorated function from within `wraps_ufunc` decorated function is not allowed!\\n\"\n err += f\"Error at {f.filename}:{f.lineno}\\n\"\n err += \"\\n\".join(f.code_context) if f.code_context else \"\"\n err += f\"Try using `{frames[0].frame.f_locals['func'].__name__}.unitless_func(...)` instead.\"\n break\n except Exception:\n # error while determining if we are withing a chained FunctionWrapper so re-raise original error\n raise e from None\n\n # if err is not empty we have determined we are within a chained call so we raise a better error\n if err:\n raise RuntimeError(err) from None\n else:\n raise e\n\n # more meaningfull alias to the scalar non-unit version of the function\n popcon_ufunc_wrapped_call.unitless_func = popcon_ufunc_wrapped_call.__wrapped__ # type:ignore[attr-defined]\n popcon_ufunc_wrapped_call.__signature__ = _make_new_sig(func_signature, input_units, return_units) # type:ignore[attr-defined]\n return popcon_ufunc_wrapped_call\n\n return _wraps_ufunc"
},
{
"identifier": "ureg",
"path": "cfspopcon/unit_handling/setup_unit_handling.py",
"snippet": "def suppress_downcast_warning(func: Callable[Params, Ret]) -> Callable[Params, Ret]:\n def wrapper(*args: Params.args, **kwargs: Params.kwargs) -> Ret:\ndef convert_units(array: xr.DataArray, units: Union[str, pint.Unit]) -> xr.DataArray:\ndef convert_units(array: pint.Quantity, units: Union[str, pint.Unit]) -> pint.Quantity:\ndef convert_units(array: Union[xr.DataArray, pint.Quantity], units: Any) -> Union[xr.DataArray, pint.Quantity]:\ndef magnitude(array: Union[xr.DataArray, pint.Quantity]) -> Union[npt.NDArray[np.float32], float]:\ndef dimensionless_magnitude(array: Union[xr.DataArray, pint.Quantity]) -> Union[npt.NDArray[np.float32], float]:"
}
] | from ...named_options import LambdaQScaling
from ...unit_handling import ureg, wraps_ufunc | 2,160 | """Routines to calculate the heat flux decay length (lambda_q), for several different scalings."""
@wraps_ufunc(
return_units=dict(lambda_q=ureg.millimeter),
input_units=dict(
lambda_q_scaling=None,
average_total_pressure=ureg.atm,
power_crossing_separatrix=ureg.megawatt,
major_radius=ureg.meter,
B_pol_omp=ureg.tesla,
inverse_aspect_ratio=ureg.dimensionless,
),
)
def calc_lambda_q(
| """Routines to calculate the heat flux decay length (lambda_q), for several different scalings."""
@wraps_ufunc(
return_units=dict(lambda_q=ureg.millimeter),
input_units=dict(
lambda_q_scaling=None,
average_total_pressure=ureg.atm,
power_crossing_separatrix=ureg.megawatt,
major_radius=ureg.meter,
B_pol_omp=ureg.tesla,
inverse_aspect_ratio=ureg.dimensionless,
),
)
def calc_lambda_q( | lambda_q_scaling: LambdaQScaling, | 0 | 2023-10-19 16:58:23+00:00 | 4k |
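Since wraps_ufunc converts the arguments to the declared units and strips them before the wrapped body runs, the body of calc_lambda_q works with plain floats. A sketch of how such a body might dispatch on the scaling option; the coefficients are the commonly quoted Brunner 2018 and Eich 2013 regression #14 fits, included as assumptions rather than cfspopcon's verified values:

def _calc_lambda_q_sketch(
    lambda_q_scaling: LambdaQScaling,
    average_total_pressure: float,     # [atm], matching input_units above
    power_crossing_separatrix: float,  # [MW]
    major_radius: float,               # [m]
    B_pol_omp: float,                  # [T]
    inverse_aspect_ratio: float,
) -> float:
    if lambda_q_scaling == LambdaQScaling.Brunner:
        # Quoted Brunner et al. 2018 fit: lambda_q [mm] ~ 0.91 * p_avg[atm]**-0.48
        return 0.91 * average_total_pressure**-0.48
    elif lambda_q_scaling == LambdaQScaling.EichRegression14:
        # Quoted Eich et al. 2013 regression #14: lambda_q [mm] ~ 0.63 * B_pol_omp[T]**-1.19
        return 0.63 * B_pol_omp**-1.19
    raise NotImplementedError(f"Unhandled scaling: {lambda_q_scaling}")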
GXimingLu/IPA | policy_gp3.py | [
{
"identifier": "ConstrainedHypothesis",
"path": "lexical_constraints.py",
"snippet": "class ConstrainedHypothesis:\n\n def __init__(self,\n constraint_list: List[List[List[int]]],\n eos_tokens: List[int]) -> None:\n self.clauses = []\n for idx, clause in enumerate(constraint_list):\n self.clauses.append(Clause(idx=idx, phrases=clause))\n self.eos_tokens = eos_tokens\n\n def __len__(self) -> int:\n \"\"\"\n :return: The number of constraints.\n \"\"\"\n return len(self.clauses)\n\n def __str__(self) -> str:\n return '\\n'.join([str(c) for c in self.clauses])\n\n def num_met(self) -> int:\n \"\"\"\n :return: the number of constraints that have been met.\n \"\"\"\n return sum([int(c.satisfy) for c in self.clauses])\n\n def advance(self, word_id: int) -> 'ConstrainedHypothesis':\n obj = pickle.loads(pickle.dumps(self))\n\n for clause in obj.clauses:\n if clause.satisfy:\n continue\n clause.advance(word_id)\n\n return obj\n\n def avoid(self) -> Set[int]:\n \"\"\"\n :return: the tokens to avoid for next generation\n \"\"\"\n allowed_token, avoid_token = set(), set()\n unsatisfied_clauses = [c for c in self.clauses if not c.satisfy]\n sorted_clauses = sorted(unsatisfied_clauses, key=lambda x: x.idx)\n\n for j, clause in enumerate(sorted_clauses):\n assert not clause.satisfy\n for literal in clause.literals:\n assert literal.pointer < len(literal.tokens) - 1 and not literal.satisfy\n tokens = {literal.tokens[literal.pointer + 1], literal.tokens[0]}\n if j == 0:\n allowed_token.update(tokens)\n else:\n avoid_token.update(tokens)\n\n negative_token = {t for t in avoid_token if t not in allowed_token}\n\n if self.eos_tokens is not None and not all(c.satisfy for c in self.clauses):\n negative_token.update(self.eos_tokens)\n return negative_token"
},
{
"identifier": "init_batch",
"path": "lexical_constraints.py",
"snippet": "def init_batch(raw_constraints: List[List[List[List[int]]]],\n eos_tokens: List[int]) -> List[Optional[ConstrainedHypothesis]]:\n \"\"\"\n :param raw_constraints: The list of clause constraints.\n :param beam_size: The beam size.\n :param eos_id: The target-language vocabulary ID of the EOS symbol.\n :param ordered: Whether enforce constraints to be satisfied in given order\n :return: A list of ConstrainedHypothesis objects (shape: (batch_size * beam_size,)).\n \"\"\"\n constraints_list = [None] * len(raw_constraints) # type: List[Optional[ConstrainedHypothesis]]\n for i, raw_list in enumerate(raw_constraints):\n constraints_list[i] = ConstrainedHypothesis(raw_list, eos_tokens)\n return constraints_list"
},
{
"identifier": "NEGATIVE_INF",
"path": "utils/constants.py",
"snippet": "NEGATIVE_INF = -100000.0"
},
{
"identifier": "OPENAI_API_KEY",
"path": "utils/constants.py",
"snippet": "OPENAI_API_KEY = 'YOUR_OPENAI_KEY'"
},
{
"identifier": "process_generation",
"path": "utils/utils.py",
"snippet": "def process_generation(text):\n for eos in ['.', '!']:\n parts = text.split(eos)\n if len(parts) >= 2:\n text = text.split(eos)[0] + eos\n return text"
},
{
"identifier": "add_control_code",
"path": "utils/generation_utils.py",
"snippet": "def add_control_code(input_ids, attention_mask, control_code):\n input_ids = torch.cat([input_ids.new([control_code] * len(input_ids))[:, None], input_ids], dim=1)\n attention_mask = torch.cat([attention_mask.new([1] * len(attention_mask))[:, None], attention_mask], dim=1)\n return input_ids, attention_mask"
},
{
"identifier": "get_model_output",
"path": "utils/generation_utils.py",
"snippet": "def get_model_output(model, step, input_ids, attention_mask, model_kwargs):\n # prepare model inputs\n batch_size, _ = input_ids.shape\n model_inputs = model.prepare_inputs_for_generation(input_ids, **model_kwargs)\n\n # forward pass to get next token\n outputs = model(\n **model_inputs,\n return_dict=True,\n output_attentions=False,\n output_hidden_states=False,\n )\n\n # in the first decoding step, we want to use the 'real' last position for each sentence\n if step == 0:\n last_non_masked_idx = torch.sum(attention_mask, dim=1) - 1\n next_token_logits = outputs.logits[range(batch_size), last_non_masked_idx, :]\n else:\n next_token_logits = outputs.logits[:, -1, :]\n\n return outputs, next_token_logits"
}
] | import torch
import torch.nn.functional as F
import json
import numpy as np
import openai
from typing import Union, List, Dict
from transformers import GPT2LMHeadModel, GPT2Tokenizer
from lexical_constraints import ConstrainedHypothesis, init_batch
from utils.constants import NEGATIVE_INF, OPENAI_API_KEY
from utils.utils import process_generation
from utils.generation_utils import add_control_code, get_model_output | 2,234 |
openai.api_key = OPENAI_API_KEY
class Policy:
def __init__(self, value_model_name, value_model_checkpoint, device, tree_tokens, alpha, force_eos):
self.device = device
self.value_model = GPT2LMHeadModel.from_pretrained(value_model_name)
self.tokenizer = GPT2Tokenizer.from_pretrained(value_model_name, pad_token="<|endoftext|>")
self.value_model.config.pad_token_id = self.tokenizer.pad_token_id
self.tokenizer.add_tokens(tree_tokens, special_tokens=True)
self.value_model.resize_token_embeddings(len(self.tokenizer))
self.value_model.load_state_dict(value_model_checkpoint)
self.value_model = self.value_model.to(self.device)
self.value_model.parallelize()
self.best_cat = tree_tokens[0]
self.best_cat_id = self.tokenizer.convert_tokens_to_ids(self.best_cat)
self.alpha = alpha
self.eos_tokens = None
if force_eos:
self.eos_tokens = self.tokenizer.convert_tokens_to_ids(['.', 'Ġ.', '!', 'Ġ!'])
def request(self, queries: List[str], model='text-davinci-003'):
# Retry request (handles connection errors, timeouts, and overloaded API)
while True:
try:
return openai.Completion.create(
engine=model,
prompt=queries,
max_tokens=1, # get logits for next token
logprobs=100, # max tokens allowable
n=1,
echo=True,
)
except Exception as e:
print(str(e))
print("Retrying...")
def get_gpt3_logits(self, input_ids):
queries = self.tokenizer.batch_decode(input_ids, skip_special_tokens=True)
response = self.request(queries)
response_logits = [choice['logprobs']['top_logprobs'] for choice in response['choices']]
gpt3_logits = -50000.0 * torch.ones([len(queries), len(self.tokenizer)], dtype=torch.float32).to(self.device)
for i in range(len(queries)):
response_dict = response_logits[i][-1] # get 0 index predictions
for token, logit in response_dict.items():
token_idx = self.tokenizer.convert_tokens_to_ids(token.replace(' ', 'Ġ').replace('\n', 'Ċ'))
if token != '<|endoftext|>' and token_idx == 50256:
continue
gpt3_logits[i, token_idx] = logit
return gpt3_logits
def sample(self,
prompts: Union[str, List[str]] = None,
input_ids: torch.Tensor = None,
attention_mask: torch.Tensor = None,
constraints: List[ConstrainedHypothesis] = None,
max_len: int = 64,
min_len: int = 16,
sample: bool = True,
top_k: int = None,
top_p: float = None,
temperature: float = None,
use_control_code: bool = False) -> Dict[str, Union[torch.Tensor, List[str]]]:
use_constraints = constraints is not None
if use_constraints:
constraints = init_batch([json.loads(x) for x in constraints], self.eos_tokens)
if prompts is not None:
assert input_ids is None and attention_mask is None, 'repeated input'
if isinstance(prompts, str):
prompts = [prompts]
encodings_dict = self.tokenizer(prompts, return_tensors="pt", padding=True)
input_ids = encodings_dict['input_ids'].to(self.device)
attention_mask = encodings_dict['attention_mask'].to(self.device)
else:
input_ids = input_ids.to(self.device)
attention_mask = attention_mask.to(self.device)
batch_size, input_seq_len = input_ids.shape
value_input_ids, value_attention_mask = add_control_code(input_ids, attention_mask, self.best_cat_id)
value_model_kwargs = {'attention_mask': value_attention_mask}
logits_warper = self.value_model._get_logits_warper(
top_k=top_k, top_p=top_p, temperature=temperature, num_beams=1
)
unfinished_sequences = torch.ones(batch_size, dtype=torch.long, device=self.device)
output_logprob = torch.zeros([batch_size, 0], dtype=torch.float, device=self.device)
output_mask = torch.ones([batch_size, 0], dtype=torch.long, device=self.device)
self.value_model.eval()
with torch.no_grad():
for step in range(max_len):
next_token_logits = self.get_gpt3_logits(input_ids)
# get logit from value model
if use_control_code:
|
openai.api_key = OPENAI_API_KEY
class Policy:
def __init__(self, value_model_name, value_model_checkpoint, device, tree_tokens, alpha, force_eos):
self.device = device
self.value_model = GPT2LMHeadModel.from_pretrained(value_model_name)
self.tokenizer = GPT2Tokenizer.from_pretrained(value_model_name, pad_token="<|endoftext|>")
self.value_model.config.pad_token_id = self.tokenizer.pad_token_id
self.tokenizer.add_tokens(tree_tokens, special_tokens=True)
self.value_model.resize_token_embeddings(len(self.tokenizer))
self.value_model.load_state_dict(value_model_checkpoint)
self.value_model = self.value_model.to(self.device)
self.value_model.parallelize()
self.best_cat = tree_tokens[0]
self.best_cat_id = self.tokenizer.convert_tokens_to_ids(self.best_cat)
self.alpha = alpha
self.eos_tokens = None
if force_eos:
self.eos_tokens = self.tokenizer.convert_tokens_to_ids(['.', 'Ġ.', '!', 'Ġ!'])
def request(self, queries: List[str], model='text-davinci-003'):
# Retry request (handles connection errors, timeouts, and overloaded API)
while True:
try:
return openai.Completion.create(
engine=model,
prompt=queries,
max_tokens=1, # get logits for next token
logprobs=100, # max tokens allowable
n=1,
echo=True,
)
except Exception as e:
print(str(e))
print("Retrying...")
def get_gpt3_logits(self, input_ids):
queries = self.tokenizer.batch_decode(input_ids, skip_special_tokens=True)
response = self.request(queries)
response_logits = [choice['logprobs']['top_logprobs'] for choice in response['choices']]
gpt3_logits = -50000.0 * torch.ones([len(queries), len(self.tokenizer)], dtype=torch.float32).to(self.device)
for i in range(len(queries)):
response_dict = response_logits[i][-1] # get 0 index predictions
for token, logit in response_dict.items():
token_idx = self.tokenizer.convert_tokens_to_ids(token.replace(' ', 'Ġ').replace('\n', 'Ċ'))
if token != '<|endoftext|>' and token_idx == 50256:
continue
gpt3_logits[i, token_idx] = logit
return gpt3_logits
def sample(self,
prompts: Union[str, List[str]] = None,
input_ids: torch.Tensor = None,
attention_mask: torch.Tensor = None,
constraints: List[ConstrainedHypothesis] = None,
max_len: int = 64,
min_len: int = 16,
sample: bool = True,
top_k: int = None,
top_p: float = None,
temperature: float = None,
use_control_code: bool = False) -> Dict[str, Union[torch.Tensor, List[str]]]:
use_constraints = constraints is not None
if use_constraints:
constraints = init_batch([json.loads(x) for x in constraints], self.eos_tokens)
if prompts is not None:
assert input_ids is None and attention_mask is None, 'repeated input'
if isinstance(prompts, str):
prompts = [prompts]
encodings_dict = self.tokenizer(prompts, return_tensors="pt", padding=True)
input_ids = encodings_dict['input_ids'].to(self.device)
attention_mask = encodings_dict['attention_mask'].to(self.device)
else:
input_ids = input_ids.to(self.device)
attention_mask = attention_mask.to(self.device)
batch_size, input_seq_len = input_ids.shape
value_input_ids, value_attention_mask = add_control_code(input_ids, attention_mask, self.best_cat_id)
value_model_kwargs = {'attention_mask': value_attention_mask}
logits_warper = self.value_model._get_logits_warper(
top_k=top_k, top_p=top_p, temperature=temperature, num_beams=1
)
unfinished_sequences = torch.ones(batch_size, dtype=torch.long, device=self.device)
output_logprob = torch.zeros([batch_size, 0], dtype=torch.float, device=self.device)
output_mask = torch.ones([batch_size, 0], dtype=torch.long, device=self.device)
self.value_model.eval()
with torch.no_grad():
for step in range(max_len):
next_token_logits = self.get_gpt3_logits(input_ids)
# get logit from value model
if use_control_code: | value_outputs, value_next_token_logits = get_model_output(self.value_model, step, value_input_ids, | 6 | 2023-10-20 08:30:18+00:00 | 4k |
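The gold completion begins by querying the value model; one plausible continuation of the decoding step, assuming the alpha-weighted additive combination that self.alpha suggests (the mixing rule and the constraint masking below are assumptions, not the repository's confirmed code; EOS bookkeeping via unfinished_sequences is omitted):

value_outputs, value_next_token_logits = get_model_output(
    self.value_model, step, value_input_ids, value_attention_mask, value_model_kwargs)
# Assumed mixing rule: steer the frozen GPT-3 logits with the value model.
next_token_logits = next_token_logits + self.alpha * value_next_token_logits

if use_constraints:
    # Mask tokens each hypothesis must avoid (including EOS until clauses are satisfied).
    for i, constraint in enumerate(constraints):
        for token_idx in constraint.avoid():
            next_token_logits[i, token_idx] = NEGATIVE_INF

next_token_scores = logits_warper(input_ids, next_token_logits)
if sample:
    probs = F.softmax(next_token_scores, dim=-1)
    next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1)
else:
    next_tokens = torch.argmax(next_token_scores, dim=-1)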
yifei-he/GOAT | experiments.py | [
{
"identifier": "ot_ablation",
"path": "ot_util.py",
"snippet": "def ot_ablation(size, mode):\n ns, nt = size, size\n plan = np.zeros((ns, nt))\n ran = np.arange(ns*nt)\n np.random.shuffle(ran)\n idx = ran[:size]\n\n for i in idx:\n row = i // nt\n col = i-i//nt * nt\n if mode == \"random\":\n plan[row, col] = np.random.uniform()\n elif mode == \"uniform\":\n plan[row, col] = 1\n \n plan /= np.sum(plan, 1, keepdims=True)\n plan[~ np.isfinite(plan)] = 0\n\n return plan"
},
{
"identifier": "generate_domains",
"path": "ot_util.py",
"snippet": "def generate_domains(n_inter, dataset_s, dataset_t, plan=None, entry_cutoff=0, conf=0):\n print(\"------------Generate Intermediate domains----------\")\n all_domains = []\n \n xs, xt = dataset_s.data, dataset_t.data\n ys = dataset_s.targets\n\n if plan is None:\n if len(xs.shape) > 2:\n xs_flat, xt_flat = nn.Flatten()(xs), nn.Flatten()(xt)\n plan = get_OT_plan(xs_flat, xt_flat, solver='emd', entry_cutoff=entry_cutoff)\n else:\n plan = get_OT_plan(xs, xt, solver='emd', entry_cutoff=entry_cutoff)\n\n logits_t = get_transported_labels(plan, ys, logit=True)\n yt_hat, conf_idx = get_conf_idx(logits_t, confidence_q=conf)\n xt = xt[conf_idx]\n plan = plan[:, conf_idx]\n yt_hat = yt_hat[conf_idx]\n\n print(f\"Remaining data after confidence filter: {len(conf_idx)}\")\n\n for i in range(1, n_inter+1):\n x, weights = pushforward(xs, xt, plan, i / (n_inter+1))\n if isinstance(x, np.ndarray):\n all_domains.append(DomainDataset(torch.from_numpy(x).float(), weights))\n else:\n all_domains.append(DomainDataset(x, weights))\n all_domains.append(dataset_t)\n\n print(f\"Total data for each intermediate domain: {len(x)}\")\n\n return all_domains"
}
] | import torch
import torch.optim as optim
import copy
import argparse
import random
import torch.backends.cudnn as cudnn
import time
from model import *
from train_model import *
from util import *
from ot_util import ot_ablation
from da_algo import *
from ot_util import generate_domains
from dataset import * | 1,769 |
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def get_source_model(args, trainset, testset, n_class, mode, encoder=None, epochs=50, verbose=True):
print("Start training source model")
model = Classifier(encoder, MLP(mode=mode, n_class=n_class, hidden=1024)).to(device)
optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-4)
trainloader = DataLoader(trainset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers)
testloader = DataLoader(testset, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers)
for epoch in range(1, epochs+1):
train(epoch, trainloader, model, optimizer, verbose=verbose)
if epoch % 5 == 0:
test(testloader, model, verbose=verbose)
return model
def run_goat(model_copy, source_model, src_trainset, tgt_trainset, all_sets, generated_domains, epochs=10):
# performance of direct source-to-target adaptation; st_acc additionally self-trains on the target
direct_acc, st_acc = self_train(args, model_copy, [tgt_trainset], epochs=epochs)
# performance of GST across all given domains; st_acc_all self-trains through each of them
direct_acc_all, st_acc_all = self_train(args, source_model, all_sets, epochs=epochs)
# encode the source and target domains
e_src_trainset, e_tgt_trainset = get_encoded_dataset(source_model.encoder, src_trainset), get_encoded_dataset(source_model.encoder, tgt_trainset)
# encode the intermediate ground-truth domains
intersets = all_sets[:-1]
encoded_intersets = [e_src_trainset]
for i in intersets:
encoded_intersets.append(get_encoded_dataset(source_model.encoder, i))
encoded_intersets.append(e_tgt_trainset)
# generate intermediate domains
generated_acc = 0
if generated_domains > 0:
all_domains = []
for i in range(len(encoded_intersets)-1):
all_domains += generate_domains(generated_domains, encoded_intersets[i], encoded_intersets[i+1])
_, generated_acc = self_train(args, source_model.mlp, all_domains, epochs=epochs)
return direct_acc, st_acc, direct_acc_all, st_acc_all, generated_acc
def run_mnist_experiment(target, gt_domains, generated_domains):
t = time.time()
src_trainset, tgt_trainset = get_single_rotate(False, 0), get_single_rotate(False, target)
encoder = ENCODER().to(device)
source_model = get_source_model(args, src_trainset, src_trainset, 10, "mnist", encoder=encoder, epochs=5)
model_copy = copy.deepcopy(source_model)
all_sets = []
for i in range(1, gt_domains+1):
all_sets.append(get_single_rotate(False, i*target//(gt_domains+1)))
print(i*target//(gt_domains+1))
all_sets.append(tgt_trainset)
direct_acc, st_acc, direct_acc_all, st_acc_all, generated_acc = run_goat(model_copy, source_model, src_trainset, tgt_trainset, all_sets, generated_domains, epochs=5)
elapsed = round(time.time() - t, 2)
print(elapsed)
with open(f"logs/mnist_{target}_{gt_domains}_layer.txt", "a") as f:
f.write(f"seed{args.seed}with{gt_domains}gt{generated_domains}generated,{round(direct_acc, 2)},{round(st_acc, 2)},{round(direct_acc_all, 2)},{round(st_acc_all, 2)},{round(generated_acc, 2)}\n")
def run_mnist_ablation(target, gt_domains, generated_domains):
encoder = ENCODER().to(device)
src_trainset, tgt_trainset = get_single_rotate(False, 0), get_single_rotate(False, target)
source_model = get_source_model(args, src_trainset, src_trainset, 10, "mnist", encoder=encoder, epochs=20)
model_copy = copy.deepcopy(source_model)
all_sets = []
for i in range(1, gt_domains+1):
all_sets.append(get_single_rotate(False, i*target//(gt_domains+1)))
print(i*target//(gt_domains+1))
all_sets.append(tgt_trainset)
direct_acc, st_acc = self_train(args, model_copy, [tgt_trainset], epochs=10)
direct_acc_all, st_acc_all = self_train(args, source_model, all_sets, epochs=10)
model_copy1 = copy.deepcopy(source_model)
model_copy2 = copy.deepcopy(source_model)
model_copy3 = copy.deepcopy(source_model)
model_copy4 = copy.deepcopy(source_model)
e_src_trainset, e_tgt_trainset = get_encoded_dataset(source_model.encoder, src_trainset), get_encoded_dataset(source_model.encoder, tgt_trainset)
intersets = all_sets[:-1]
encoded_intersets = [e_src_trainset]
for i in intersets:
encoded_intersets.append(get_encoded_dataset(source_model.encoder, i))
encoded_intersets.append(e_tgt_trainset)
# random plan
all_domains1 = []
for i in range(len(encoded_intersets)-1):
|
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def get_source_model(args, trainset, testset, n_class, mode, encoder=None, epochs=50, verbose=True):
print("Start training source model")
model = Classifier(encoder, MLP(mode=mode, n_class=n_class, hidden=1024)).to(device)
optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-4)
trainloader = DataLoader(trainset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers)
testloader = DataLoader(testset, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers)
for epoch in range(1, epochs+1):
train(epoch, trainloader, model, optimizer, verbose=verbose)
if epoch % 5 == 0:
test(testloader, model, verbose=verbose)
return model
def run_goat(model_copy, source_model, src_trainset, tgt_trainset, all_sets, generated_domains, epochs=10):
# performance of direct source-to-target adaptation; st_acc additionally self-trains on the target
direct_acc, st_acc = self_train(args, model_copy, [tgt_trainset], epochs=epochs)
# performance of GST across all given domains; st_acc_all self-trains through each of them
direct_acc_all, st_acc_all = self_train(args, source_model, all_sets, epochs=epochs)
# encode the source and target domains
e_src_trainset, e_tgt_trainset = get_encoded_dataset(source_model.encoder, src_trainset), get_encoded_dataset(source_model.encoder, tgt_trainset)
# encode the intermediate ground-truth domains
intersets = all_sets[:-1]
encoded_intersets = [e_src_trainset]
for i in intersets:
encoded_intersets.append(get_encoded_dataset(source_model.encoder, i))
encoded_intersets.append(e_tgt_trainset)
# generate intermediate domains
generated_acc = 0
if generated_domains > 0:
all_domains = []
for i in range(len(encoded_intersets)-1):
all_domains += generate_domains(generated_domains, encoded_intersets[i], encoded_intersets[i+1])
_, generated_acc = self_train(args, source_model.mlp, all_domains, epochs=epochs)
return direct_acc, st_acc, direct_acc_all, st_acc_all, generated_acc
def run_mnist_experiment(target, gt_domains, generated_domains):
t = time.time()
src_trainset, tgt_trainset = get_single_rotate(False, 0), get_single_rotate(False, target)
encoder = ENCODER().to(device)
source_model = get_source_model(args, src_trainset, src_trainset, 10, "mnist", encoder=encoder, epochs=5)
model_copy = copy.deepcopy(source_model)
all_sets = []
for i in range(1, gt_domains+1):
all_sets.append(get_single_rotate(False, i*target//(gt_domains+1)))
print(i*target//(gt_domains+1))
all_sets.append(tgt_trainset)
direct_acc, st_acc, direct_acc_all, st_acc_all, generated_acc = run_goat(model_copy, source_model, src_trainset, tgt_trainset, all_sets, generated_domains, epochs=5)
elapsed = round(time.time() - t, 2)
print(elapsed)
with open(f"logs/mnist_{target}_{gt_domains}_layer.txt", "a") as f:
f.write(f"seed{args.seed}with{gt_domains}gt{generated_domains}generated,{round(direct_acc, 2)},{round(st_acc, 2)},{round(direct_acc_all, 2)},{round(st_acc_all, 2)},{round(generated_acc, 2)}\n")
def run_mnist_ablation(target, gt_domains, generated_domains):
encoder = ENCODER().to(device)
src_trainset, tgt_trainset = get_single_rotate(False, 0), get_single_rotate(False, target)
source_model = get_source_model(args, src_trainset, src_trainset, 10, "mnist", encoder=encoder, epochs=20)
model_copy = copy.deepcopy(source_model)
all_sets = []
for i in range(1, gt_domains+1):
all_sets.append(get_single_rotate(False, i*target//(gt_domains+1)))
print(i*target//(gt_domains+1))
all_sets.append(tgt_trainset)
direct_acc, st_acc = self_train(args, model_copy, [tgt_trainset], epochs=10)
direct_acc_all, st_acc_all = self_train(args, source_model, all_sets, epochs=10)
model_copy1 = copy.deepcopy(source_model)
model_copy2 = copy.deepcopy(source_model)
model_copy3 = copy.deepcopy(source_model)
model_copy4 = copy.deepcopy(source_model)
e_src_trainset, e_tgt_trainset = get_encoded_dataset(source_model.encoder, src_trainset), get_encoded_dataset(source_model.encoder, tgt_trainset)
intersets = all_sets[:-1]
encoded_intersets = [e_src_trainset]
for i in intersets:
encoded_intersets.append(get_encoded_dataset(source_model.encoder, i))
encoded_intersets.append(e_tgt_trainset)
# random plan
all_domains1 = []
for i in range(len(encoded_intersets)-1): | plan = ot_ablation(len(src_trainset), "random") | 0 | 2023-10-20 16:41:00+00:00 | 4k |
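The gold line builds a random transport plan for the ablation; generate_domains accepts a precomputed plan=... argument (see its signature in the row's context), so the loop body presumably continues along these lines. A sketch under that assumption; random_acc is a hypothetical name:

plan = ot_ablation(len(src_trainset), "random")
all_domains1 += generate_domains(
    generated_domains, encoded_intersets[i], encoded_intersets[i + 1], plan=plan)

# After the loop, self-train one of the model copies on the generated domains.
_, random_acc = self_train(args, model_copy1.mlp, all_domains1, epochs=10)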
ansible/django-ansible-base | ansible_base/tests/unit/utils/test_validation.py | [
{
"identifier": "to_python_boolean",
"path": "ansible_base/utils/validation.py",
"snippet": "def to_python_boolean(value, allow_none=False):\n value = str(value)\n if value.lower() in ('true', '1', 't'):\n return True\n elif value.lower() in ('false', '0', 'f'):\n return False\n elif allow_none and (value is None or value.lower() in ('none', 'null')):\n return None\n else:\n raise ValueError(_(u'Unable to convert \"%s\" to boolean') % value)"
},
{
"identifier": "validate_cert_with_key",
"path": "ansible_base/utils/validation.py",
"snippet": "def validate_cert_with_key(public_cert_string, private_key_string):\n # Returns:\n # None if one of the parameters wasn't set\n # False if we failed to load an item (should be pre-tried by your serializer)\n # A ValidationError exception if the key/value don't match\n # True if everything checks out\n\n if not private_key_string or not public_cert_string:\n return None\n\n private_key = None\n public_cert = None\n try:\n private_key = serialization.load_pem_private_key(bytes(private_key_string, \"UTF-8\"), password=None)\n public_cert = load_pem_x509_certificate(bytes(public_cert_string, \"UTF-8\"))\n except Exception:\n return False\n\n try:\n # We have both pieces of the puzzle, lets make sure they interlock\n private_key.public_key().verify(\n public_cert.signature,\n public_cert.tbs_certificate_bytes,\n # Depends on the algorithm used to create the certificate\n padding.PKCS1v15(),\n public_cert.signature_hash_algorithm,\n )\n except InvalidSignature:\n raise ValidationError(_(\"The certificate and private key do not match\"))\n except Exception as e:\n error = _(\"Unable to validate SP cert and key\")\n if hasattr(e, 'message'):\n error = f\"{error}: {e.message}\"\n else:\n error = f\"{error}: {e.__class__.__name__}\"\n raise ValidationError(error)\n\n return True"
},
{
"identifier": "validate_image_data",
"path": "ansible_base/utils/validation.py",
"snippet": "def validate_image_data(data: str) -> None:\n # in case we are passed an empty string, we can skip validation\n if not data:\n return None\n\n CUSTOM_LOGO_RE = re.compile(r'^data:image/(?:png|jpeg|gif);base64,([A-Za-z0-9+/=]+?)$')\n\n match = CUSTOM_LOGO_RE.match(data)\n if not match:\n raise ValidationError(_(\"Invalid format for custom logo. Must be a data URL with a base64-encoded GIF, PNG or JPEG image.\"))\n b64data = match.group(1)\n try:\n base64.b64decode(b64data)\n except (TypeError, binascii.Error):\n raise ValidationError(_(\"Invalid base64-encoded data in data URL.\"))"
},
{
"identifier": "validate_url",
"path": "ansible_base/utils/validation.py",
"snippet": "def validate_url(url: str, schemes: list = ['https'], allow_plain_hostname: bool = False) -> None:\n if type(url) is not str:\n raise ValidationError(VALID_STRING)\n if allow_plain_hostname:\n # The default validator will not allow names like https://junk so, if we are ok with simple hostnames we are going to munge up the URL for the validator\n try:\n url_parts = urlparse(url)\n except ValueError as e:\n raise ValidationError(str(e)) from e\n\n # Determine the user_info part of the URL\n user_info = ''\n if url_parts.username:\n user_info = url_parts.username\n if url_parts.password:\n user_info = f'{user_info}:{url_parts.password}'\n if user_info:\n user_info = f\"{user_info}@\"\n\n if url_parts.hostname and '.' not in url_parts.hostname:\n hostname = f'{url_parts.hostname}.localhost'\n port = f':{url_parts.port}' if url_parts.port else ''\n netloc = f\"{user_info}{hostname}{port}\"\n # Reconstruct and override the URL with a valid hostname\n url = urlunsplit([url_parts.scheme, netloc, url_parts.path, url_parts.query, url_parts.fragment])\n\n validator = URLValidator(schemes=schemes)\n try:\n validator(url)\n except LowLevelValidationError as e:\n raise ValidationError(e.message)"
}
] | import pytest
from rest_framework.exceptions import ValidationError
from ansible_base.utils.validation import to_python_boolean, validate_cert_with_key, validate_image_data, validate_url | 1,721 |
@pytest.mark.parametrize(
"valid,url,schemes,allow_plain_hostname",
[
(False, 4, [], True),
(False, "https://example", ['https'], False),
(True, "https://example", ['https'], True),
(True, "https://somedomain.example.com/sso/complete/saml/", ['https'], True),
(False, "https://somedomain.example.com/sso/complete/saml/", ['ldaps'], True),
(True, "ldaps://somedomain.example.com/sso/complete/saml/", ['ldaps'], True),
(False, "https://somedomain.[obfuscated.domain]/sso/complete/saml/", ['https'], True),
],
)
def test_validate_bad_urls(valid, url, schemes, allow_plain_hostname):
exception = None
try:
validate_url(url, schemes=schemes, allow_plain_hostname=allow_plain_hostname)
except ValidationError as e:
exception = e
if valid and exception:
assert False, f"Configuration should have been valid but got exception: {exception}"
elif not valid and not exception:
assert False, "Expected an exception but test passed"
@pytest.mark.parametrize(
"cert, key",
[
(False, False),
(None, None),
(None, False),
(False, None),
("", ""),
("", None),
(None, ""),
("", "asdf"),
("asdf", ""),
("asdf", None),
(None, "asdf"),
],
)
def test_validate_cert_with_key_falsy_param(cert, key):
"""
Ensure that validate_cert_with_key returns None when passed falsy values.
"""
assert validate_cert_with_key(cert, key) is None
@pytest.mark.parametrize(
"cert, key",
[
("asdf", "asdf"),
# In the below, None means "use the value from the fixture"
(None, "asdf"),
("asdf", None),
],
)
def test_validate_cert_with_key_invalid_params(rsa_keypair_with_cert, cert, key):
"""
Ensure that validate_cert_with_key is False when it fails to load a cert or key.
"""
if cert is None:
cert = rsa_keypair_with_cert.certificate
if key is None:
key = rsa_keypair_with_cert.private
assert validate_cert_with_key(cert, key) is False
def test_validate_cert_with_key_mismatch(rsa_keypair_with_cert_1, rsa_keypair_with_cert_2):
"""
Ensure that validate_cert_with_key raises a ValidationError when the cert and key don't match.
"""
with pytest.raises(ValidationError) as e:
validate_cert_with_key(rsa_keypair_with_cert_1.certificate, rsa_keypair_with_cert_2.private)
assert "The certificate and private key do not match" in str(e.value)
def test_validate_image_data_with_valid_data():
"""
Ensure that validate_image_data accepts valid data.
"""
image_data = "data:image/gif;base64,R0lGODlhAQABAIABAP///wAAACwAAAAAAQABAAACAkQBADs="
|
@pytest.mark.parametrize(
"valid,url,schemes,allow_plain_hostname",
[
(False, 4, [], True),
(False, "https://example", ['https'], False),
(True, "https://example", ['https'], True),
(True, "https://somedomain.example.com/sso/complete/saml/", ['https'], True),
(False, "https://somedomain.example.com/sso/complete/saml/", ['ldaps'], True),
(True, "ldaps://somedomain.example.com/sso/complete/saml/", ['ldaps'], True),
(False, "https://somedomain.[obfuscated.domain]/sso/complete/saml/", ['https'], True),
],
)
def test_validate_bad_urls(valid, url, schemes, allow_plain_hostname):
exception = None
try:
validate_url(url, schemes=schemes, allow_plain_hostname=allow_plain_hostname)
except ValidationError as e:
exception = e
if valid and exception:
assert False, f"Configuration should have been valid but got exception: {exception}"
elif not valid and not exception:
assert False, "Expected an exception but test passed"
@pytest.mark.parametrize(
"cert, key",
[
(False, False),
(None, None),
(None, False),
(False, None),
("", ""),
("", None),
(None, ""),
("", "asdf"),
("asdf", ""),
("asdf", None),
(None, "asdf"),
],
)
def test_validate_cert_with_key_falsy_param(cert, key):
"""
Ensure that validate_cert_with_key returns None when passed falsy values.
"""
assert validate_cert_with_key(cert, key) is None
@pytest.mark.parametrize(
"cert, key",
[
("asdf", "asdf"),
# In the below, None means "use the value from the fixture"
(None, "asdf"),
("asdf", None),
],
)
def test_validate_cert_with_key_invalid_params(rsa_keypair_with_cert, cert, key):
"""
Ensure that validate_cert_with_key is False when it fails to load a cert or key.
"""
if cert is None:
cert = rsa_keypair_with_cert.certificate
if key is None:
key = rsa_keypair_with_cert.private
assert validate_cert_with_key(cert, key) is False
def test_validate_cert_with_key_mismatch(rsa_keypair_with_cert_1, rsa_keypair_with_cert_2):
"""
Ensure that validate_cert_with_key raises a ValidationError when the cert and key don't match.
"""
with pytest.raises(ValidationError) as e:
validate_cert_with_key(rsa_keypair_with_cert_1.certificate, rsa_keypair_with_cert_2.private)
assert "The certificate and private key do not match" in str(e.value)
def test_validate_image_data_with_valid_data():
"""
Ensure that validate_image_data accepts valid data.
"""
image_data = "data:image/gif;base64,R0lGODlhAQABAIABAP///wAAACwAAAAAAQABAAACAkQBADs=" | res = validate_image_data(image_data) | 2 | 2023-10-20 13:20:12+00:00 | 4k |
violet-sto/HN-GFN | dataset.py | [
{
"identifier": "MolMDPExtended",
"path": "mol_mdp_ext.py",
"snippet": "class MolMDPExtended(MolMDP):\n\n def build_translation_table(self):\n \"\"\"build a symmetry mapping for blocks. Necessary to compute parent transitions\"\"\"\n self.translation_table = {}\n for blockidx in range(len(self.block_mols)):\n # Blocks have multiple ways of being attached. By default,\n # a new block is attached to the target stem by attaching\n # it's kth atom, where k = block_rs[new_block_idx][0].\n # When computing a reverse action (from a parent), we may\n # wish to attach the new block to a different atom. In\n # the blocks library, there are duplicates of the same\n # block but with block_rs[block][0] set to a different\n # atom. Thus, for the reverse action we have to find out\n # which duplicate this corresponds to.\n\n # Here, we compute, for block blockidx, what is the index\n # of the duplicate block, if someone wants to attach to\n # atom x of the block.\n # So atom_map[x] == bidx, such that block_rs[bidx][0] == x\n atom_map = {}\n for j in range(len(self.block_mols)):\n if self.block_smi[blockidx] == self.block_smi[j]:\n atom_map[self.block_rs[j][0]] = j\n self.translation_table[blockidx] = atom_map\n\n # We're still missing some \"duplicates\", as some might be\n # symmetric versions of each other. For example, block CC with\n # block_rs == [0,1] has no duplicate, because the duplicate\n # with block_rs [1,0] would be a symmetric version (both C\n # atoms are the \"same\").\n\n # To test this, let's create nonsense molecules by attaching\n # duplicate blocks to a Gold atom, and testing whether they\n # are the same.\n gold = Chem.MolFromSmiles('[Au]')\n # If we find that two molecules are the same when attaching\n # them with two different atoms, then that means the atom\n # numbers are symmetries. We can add those to the table.\n for blockidx in range(len(self.block_mols)):\n for j in self.block_rs[blockidx]:\n if j not in self.translation_table[blockidx]:\n symmetric_duplicate = None\n for atom, block_duplicate in self.translation_table[blockidx].items():\n molA, _ = chem.mol_from_frag(\n jun_bonds=[[0,1,0,j]],\n frags=[gold, self.block_mols[blockidx]])\n molB, _ = chem.mol_from_frag(\n jun_bonds=[[0,1,0,atom]],\n frags=[gold, self.block_mols[blockidx]])\n if (Chem.MolToSmiles(molA) == Chem.MolToSmiles(molB) or\n molA.HasSubstructMatch(molB)):\n symmetric_duplicate = block_duplicate\n break\n if symmetric_duplicate is None:\n raise ValueError('block', blockidx, self.block_smi[blockidx],\n 'has no duplicate for atom', j,\n 'in position 0, and no symmetrical correspondance')\n self.translation_table[blockidx][j] = symmetric_duplicate\n #print('block', blockidx, '+ atom', j,\n # 'in position 0 is a symmetric duplicate of',\n # symmetric_duplicate)\n\n def parents(self, mol=None):\n \"\"\"returns all the possible parents of molecule mol (or the current\n molecule if mol is None.\n\n Returns a list of (BlockMoleculeDataExtended, (block_idx, stem_idx)) pairs such that\n for a pair (m, (b, s)), MolMDPExtended.add_block_to(m, b, s) == mol.\n \"\"\"\n if len(mol.blockidxs) == 1:\n # If there's just a single block, then the only parent is\n # the empty block with the action that recreates that block\n return [(BlockMoleculeDataExtended(), (mol.blockidxs[0], 0))]\n\n # Compute the how many blocks each block is connected to\n blocks_degree = defaultdict(int)\n for a,b,_,_ in mol.jbonds:\n blocks_degree[a] += 1\n blocks_degree[b] += 1\n # Keep only blocks of degree 1 (those are the ones that could\n # have just been added)\n blocks_degree_1 = [i for i, d in 
blocks_degree.items() if d == 1]\n # Form new molecules without these blocks\n parent_mols = []\n\n for rblockidx in blocks_degree_1:\n new_mol = mol.copy()\n # find which bond we're removing\n removed_bonds = [(jbidx, bond) for jbidx, bond in enumerate(new_mol.jbonds)\n if rblockidx in bond[:2]]\n assert len(removed_bonds) == 1\n rjbidx, rbond = removed_bonds[0]\n # Pop the bond\n new_mol.jbonds.pop(rjbidx)\n # Remove the block\n mask = np.ones(len(new_mol.blockidxs), dtype=np.bool)\n mask[rblockidx] = 0\n reindex = new_mol.delete_blocks(mask)\n # reindex maps old blockidx to new blockidx, since the\n # block the removed block was attached to might have its\n # index shifted by 1.\n\n # Compute which stem the bond was using\n stem = ([reindex[rbond[0]], rbond[2]] if rblockidx == rbond[1] else\n [reindex[rbond[1]], rbond[3]])\n # and add it back\n new_mol.stems = [list(i) for i in new_mol.stems] + [stem]\n #new_mol.stems.append(stem)\n # and we have a parent. The stem idx to recreate mol is\n # the last stem, since we appended `stem` in the back of\n # the stem list.\n # We also have to translate the block id to match the bond\n # we broke, see build_translation_table().\n removed_stem_atom = (\n rbond[3] if rblockidx == rbond[1] else rbond[2])\n blockid = mol.blockidxs[rblockidx]\n if removed_stem_atom not in self.translation_table[blockid]:\n raise ValueError('Could not translate removed stem to duplicate or symmetric block.')\n parent_mols.append([new_mol,\n # action = (block_idx, stem_idx)\n (self.translation_table[blockid][removed_stem_atom],\n len(new_mol.stems) - 1)])\n if not len(parent_mols):\n raise ValueError('Could not find any parents')\n return parent_mols\n\n\n def add_block_to(self, mol, block_idx, stem_idx=None, atmidx=None):\n '''out-of-place version of add_block'''\n #assert (block_idx >= 0) and (block_idx <= len(self.block_mols)), \"unknown block\"\n if mol.numblocks == 0:\n stem_idx = None\n new_mol = mol.copy()\n new_mol.add_block(block_idx,\n block=self.block_mols[block_idx],\n block_r=self.block_rs[block_idx],\n stem_idx=stem_idx, atmidx=atmidx)\n return new_mol\n\n def remove_jbond_from(self, mol, jbond_idx=None, atmidx=None):\n new_mol = mol.copy()\n new_mol.remove_jbond(jbond_idx, atmidx)\n return new_mol\n\n def a2mol(self, acts):\n mol = BlockMoleculeDataExtended()\n for i in acts:\n if i[0] >= 0:\n mol = self.add_block_to(mol, *i)\n return mol\n\n def reset(self):\n self.molecule = BlockMoleculeDataExtended()\n return None\n\n\n def post_init(self, device, repr_type, include_bonds=False, include_nblocks=False):\n self.device = device\n self.repr_type = repr_type\n #self.max_bond_atmidx = max([max(i) for i in self.block_rs])\n self.max_num_atm = max(self.block_natm)\n # see model_block.mol2graph\n self.true_block_set = sorted(set(self.block_smi))\n self.stem_type_offset = np.int32([0] + list(np.cumsum([\n max(self.block_rs[self.block_smi.index(i)])+1 for i in self.true_block_set])))\n self.num_stem_types = self.stem_type_offset[-1]\n self.true_blockidx = [self.true_block_set.index(i) for i in self.block_smi]\n self.num_true_blocks = len(self.true_block_set)\n self.include_nblocks = include_nblocks\n self.include_bonds = include_bonds\n #print(self.max_num_atm, self.num_stem_types)\n self.molcache = {}\n\n def mols2batch(self, mols):\n if self.repr_type == 'block_graph':\n return model_block.mols2batch(mols, self)\n elif self.repr_type == 'atom_graph':\n return model_atom.mols2batch(mols, self)\n elif self.repr_type == 'morgan_fingerprint':\n return 
model_fingerprint.mols2batch(mols, self)\n\n def mol2repr(self, mol=None):\n if mol is None:\n mol = self.molecule\n #molhash = str(mol.blockidxs)+':'+str(mol.stems)+':'+str(mol.jbonds)\n #if molhash in self.molcache:\n # return self.molcache[molhash]\n if self.repr_type == 'block_graph':\n r = model_block.mol2graph(mol, self, self.floatX)\n elif self.repr_type == 'atom_graph':\n r = model_atom.mol2graph(mol, self, self.floatX,\n bonds=self.include_bonds,\n nblocks=self.include_nblocks)\n elif self.repr_type == 'morgan_fingerprint':\n r = model_fingerprint.mol2fp(mol, self, self.floatX)\n #self.molcache[molhash] = r\n return r\n\n def get_nx_graph(self, mol: BlockMoleculeData, true_block=False):\n true_blockidx = self.true_blockidx\n\n G = nx.DiGraph()\n blockidxs = [true_blockidx[xx] for xx in mol.blockidxs] if true_block else mol.blockidxs\n\n G.add_nodes_from([(ix, {\"block\": blockidxs[ix]}) for ix in range(len(blockidxs))])\n\n if len(mol.jbonds) > 0:\n edges = []\n for jbond in mol.jbonds:\n edges.append((jbond[0], jbond[1],\n {\"bond\": [jbond[2], jbond[3]]}))\n edges.append((jbond[1], jbond[0],\n {\"bond\": [jbond[3], jbond[2]]}))\n G.add_edges_from(edges)\n return G\n\n def graphs_are_isomorphic(self, g1, g2):\n return nx.algorithms.is_isomorphic(g1, g2, node_match=node_match, edge_match=edge_match)"
},
{
"identifier": "BlockMoleculeDataExtended",
"path": "mol_mdp_ext.py",
"snippet": "class BlockMoleculeDataExtended(BlockMoleculeData):\n\n @property\n def mol(self):\n return chem.mol_from_frag(jun_bonds=self.jbonds, frags=self.blocks)[0]\n\n @property\n def smiles(self):\n return Chem.MolToSmiles(self.mol)\n\n def copy(self): # shallow copy\n o = BlockMoleculeDataExtended()\n o.blockidxs = list(self.blockidxs)\n o.blocks = list(self.blocks)\n o.slices = list(self.slices)\n o.numblocks = self.numblocks\n o.jbonds = list(self.jbonds)\n o.stems = list(self.stems)\n return o\n\n def as_dict(self):\n return {'blockidxs': self.blockidxs,\n 'slices': self.slices,\n 'numblocks': self.numblocks,\n 'jbonds': self.jbonds,\n 'stems': self.stems}"
}
] | import pandas as pd
import numpy as np
import torch
import time
import threading
import json
from sklearn.utils import shuffle
from mol_mdp_ext import MolMDPExtended, BlockMoleculeDataExtended
from tqdm import tqdm
from botorch.utils.multi_objective.hypervolume import Hypervolume | 3,098 |
class Dataset:
def __init__(self, args, bpath, oracle, device):
self.test_split_rng = np.random.RandomState(142857)
self.train_rng = np.random.RandomState(int(time.time()))
self.train_mols = []
self.test_mols = []
self.all_mols = []
self.train_mols_map = {}
|
class Dataset:
def __init__(self, args, bpath, oracle, device):
self.test_split_rng = np.random.RandomState(142857)
self.train_rng = np.random.RandomState(int(time.time()))
self.train_mols = []
self.test_mols = []
self.all_mols = []
self.train_mols_map = {}
| self.mdp = MolMDPExtended(bpath) | 0 | 2023-10-24 14:10:35+00:00 | 4k |
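The gold line constructs the block-molecule MDP. Based on the MolMDPExtended API shown in the row's context (post_init, build_translation_table, reset), the constructor plausibly continues as below; the args.repr_type field and attribute names are assumptions:

self.mdp = MolMDPExtended(bpath)             # block-based molecule MDP
self.mdp.post_init(device, args.repr_type)   # choose atom/block/fingerprint representation
self.mdp.build_translation_table()           # required before parents() can be computed
self.oracle = oracle
self.device = device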
line/Skeleton-Temporal-Action-Localization | evaluation/eval.py | [
{
"identifier": "getClassificationMAP",
"path": "evaluation/classificationMAP.py",
"snippet": "def getClassificationMAP(confidence, labels):\n \"\"\" confidence and labels are of dimension n_samples x n_label \"\"\"\n\n AP = []\n for i in range(np.shape(labels)[1]):\n AP.append(getAP(confidence[:, i], labels[:, i]))\n return 100 * sum(AP) / len(AP)"
},
{
"identifier": "getSingleStreamDetectionMAP",
"path": "evaluation/detectionMAP.py",
"snippet": "def getSingleStreamDetectionMAP(\n vid_preds, frm_preds, vid_lens, annotation_path, args, multi=False, factor=1.0\n):\n iou_list = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7]\n dmap_list = []\n seg = getActLoc(\n vid_preds,\n frm_preds,\n vid_lens,\n np.arange(args.start_threshold, args.end_threshold, args.threshold_interval),\n annotation_path,\n args,\n multi=multi,\n )\n # print (len(seg))\n for iou in iou_list:\n print(\"Testing for IoU %f\" % iou)\n dmap_list.append(\n getLocMAP(seg, iou, annotation_path, args, multi=multi, factor=factor)\n )\n return dmap_list, iou_list"
},
{
"identifier": "getTwoStreamDetectionMAP",
"path": "evaluation/detectionMAP.py",
"snippet": "def getTwoStreamDetectionMAP(\n rgb_vid_preds,\n flow_vid_preds,\n rgb_frm_preds,\n flow_frm_preds,\n vid_lens,\n annotation_path,\n args,\n):\n iou_list = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7]\n dmap_list = []\n rgb_seg = getActLoc(\n rgb_vid_preds,\n rgb_frm_preds * 0.1,\n vid_lens,\n np.arange(args.start_threshold, args.end_threshold, args.threshold_interval)\n * 0.1,\n annotation_path,\n args,\n )\n flow_seg = getActLoc(\n flow_vid_preds,\n flow_frm_preds,\n vid_lens,\n np.arange(args.start_threshold, args.end_threshold, args.threshold_interval),\n annotation_path,\n args,\n )\n seg = IntergrateSegs(rgb_seg, flow_seg, 0.9, args)\n for iou in iou_list:\n print(\"Testing for IoU %f\" % iou)\n dmap_list.append(getLocMAP(seg, iou, annotation_path, args))\n\n return dmap_list, iou_list"
},
{
"identifier": "write_results_to_eval_file",
"path": "evaluation/utils.py",
"snippet": "def write_results_to_eval_file(args, dmap, itr1, itr2):\n file_folder = \"./ckpt/\" + args.dataset_name + \"/eval/\"\n file_name = args.dataset_name + \"-results.log\"\n fid = open(file_folder + file_name, \"a+\")\n string_to_write = str(itr1)\n string_to_write += \" \" + str(itr2)\n for item in dmap:\n string_to_write += \" \" + \"%.2f\" % item\n fid.write(string_to_write + \"\\n\")\n fid.close()"
},
{
"identifier": "write_results_to_file",
"path": "evaluation/utils.py",
"snippet": "def write_results_to_file(args, dmap, cmap, itr):\n file_folder = \"./ckpt/\" + args.dataset_name + \"/\" + str(args.model_id) + \"/\"\n file_name = args.dataset_name + \"-results.log\"\n fid = open(file_folder + file_name, \"a+\")\n string_to_write = str(itr)\n for item in dmap:\n string_to_write += \" \" + \"%.2f\" % item\n string_to_write += \" \" + \"%.2f\" % cmap\n fid.write(string_to_write + \"\\n\")\n fid.close()"
}
] | import numpy as np
import torch
import torch.nn.functional as F
from torch.autograd import Variable
from .classificationMAP import getClassificationMAP as cmAP
from .detectionMAP import getSingleStreamDetectionMAP as dsmAP
from .detectionMAP import getTwoStreamDetectionMAP as dtmAP
from .utils import write_results_to_eval_file, write_results_to_file | 1,845 |
def ss_eval(epoch, dataloader, args, logger, model, device):
vid_preds = []
frm_preds = []
vid_lens = []
labels = []
for num, sample in enumerate(dataloader):
if (num + 1) % 100 == 0:
print("Testing test data point %d of %d" % (num + 1, len(dataloader)))
features = sample["data"].numpy()
label = sample["labels"].numpy()
vid_len = sample["vid_len"].numpy()
features = torch.from_numpy(features).float().to(device)
with torch.no_grad():
_, vid_pred, _, frm_scr = model(Variable(features))
frm_pred = F.softmax(frm_scr, -1)
vid_pred = np.squeeze(vid_pred.cpu().data.numpy(), axis=0)
frm_pred = np.squeeze(frm_pred.cpu().data.numpy(), axis=0)
label = np.squeeze(label, axis=0)
vid_preds.append(vid_pred)
frm_preds.append(frm_pred)
vid_lens.append(vid_len)
labels.append(label)
vid_preds = np.array(vid_preds)
frm_preds = np.array(frm_preds)
vid_lens = np.array(vid_lens)
labels = np.array(labels)
cmap = cmAP(vid_preds, labels)
dmap, iou = dsmAP(
vid_preds, frm_preds, vid_lens, dataloader.dataset.path_to_annotations, args
)
print("Classification map %f" % cmap)
for item in list(zip(iou, dmap)):
print("Detection map @ %f = %f" % (item[0], item[1]))
logger.log_value("Test Classification mAP", cmap, epoch)
for item in list(zip(dmap, iou)):
logger.log_value("Test Detection1 mAP @ IoU = " + str(item[1]), item[0], epoch)
write_results_to_file(args, dmap, cmap, epoch)
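# Two-stream counterpart of ss_eval above: the RGB and optical-flow models are run
# separately on their own features, and both video-level and frame-level predictions
# are collected so the streams can later be fused via getTwoStreamDetectionMAP.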
def ts_eval(dataloader, args, logger, rgb_model, flow_model, device):
rgb_vid_preds = []
rgb_frame_preds = []
flow_vid_preds = []
flow_frame_preds = []
vid_lens = []
labels = []
for num, sample in enumerate(dataloader):
if (num + 1) % 100 == 0:
print("Testing test data point %d of %d" % (num + 1, len(dataloader)))
rgb_features = sample["rgb_data"].numpy()
flow_features = sample["flow_data"].numpy()
label = sample["labels"].numpy()
vid_len = sample["vid_len"].numpy()
rgb_features_inp = torch.from_numpy(rgb_features).float().to(device)
flow_features_inp = torch.from_numpy(flow_features).float().to(device)
with torch.no_grad():
_, rgb_video_pred, _, rgb_frame_scr = rgb_model(Variable(rgb_features_inp))
_, flow_video_pred, _, flow_frame_scr = flow_model(
Variable(flow_features_inp)
)
rgb_frame_pred = F.softmax(rgb_frame_scr, -1)
flow_frame_pred = F.softmax(flow_frame_scr, -1)
rgb_frame_pred = np.squeeze(rgb_frame_pred.cpu().data.numpy(), axis=0)
flow_frame_pred = np.squeeze(flow_frame_pred.cpu().data.numpy(), axis=0)
rgb_video_pred = np.squeeze(rgb_video_pred.cpu().data.numpy(), axis=0)
flow_video_pred = np.squeeze(flow_video_pred.cpu().data.numpy(), axis=0)
label = np.squeeze(label, axis=0)
rgb_vid_preds.append(rgb_video_pred)
rgb_frame_preds.append(rgb_frame_pred)
flow_vid_preds.append(flow_video_pred)
flow_frame_preds.append(flow_frame_pred)
vid_lens.append(vid_len)
labels.append(label)
rgb_vid_preds = np.array(rgb_vid_preds)
rgb_frame_preds = np.array(rgb_frame_preds)
flow_vid_preds = np.array(flow_vid_preds)
flow_frame_preds = np.array(flow_frame_preds)
vid_lens = np.array(vid_lens)
labels = np.array(labels)
|
def ss_eval(epoch, dataloader, args, logger, model, device):
vid_preds = []
frm_preds = []
vid_lens = []
labels = []
for num, sample in enumerate(dataloader):
if (num + 1) % 100 == 0:
print("Testing test data point %d of %d" % (num + 1, len(dataloader)))
features = sample["data"].numpy()
label = sample["labels"].numpy()
vid_len = sample["vid_len"].numpy()
features = torch.from_numpy(features).float().to(device)
with torch.no_grad():
_, vid_pred, _, frm_scr = model(Variable(features))
frm_pred = F.softmax(frm_scr, -1)
vid_pred = np.squeeze(vid_pred.cpu().data.numpy(), axis=0)
frm_pred = np.squeeze(frm_pred.cpu().data.numpy(), axis=0)
label = np.squeeze(label, axis=0)
vid_preds.append(vid_pred)
frm_preds.append(frm_pred)
vid_lens.append(vid_len)
labels.append(label)
vid_preds = np.array(vid_preds)
frm_preds = np.array(frm_preds)
vid_lens = np.array(vid_lens)
labels = np.array(labels)
cmap = cmAP(vid_preds, labels)
dmap, iou = dsmAP(
vid_preds, frm_preds, vid_lens, dataloader.dataset.path_to_annotations, args
)
print("Classification map %f" % cmap)
for item in list(zip(iou, dmap)):
print("Detection map @ %f = %f" % (item[0], item[1]))
logger.log_value("Test Classification mAP", cmap, epoch)
for item in list(zip(dmap, iou)):
logger.log_value("Test Detection1 mAP @ IoU = " + str(item[1]), item[0], epoch)
write_results_to_file(args, dmap, cmap, epoch)
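# Two-stream counterpart of ss_eval above: the RGB and optical-flow models are run
# separately on their own features, and both video-level and frame-level predictions
# are collected so the streams can later be fused via getTwoStreamDetectionMAP.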
def ts_eval(dataloader, args, logger, rgb_model, flow_model, device):
rgb_vid_preds = []
rgb_frame_preds = []
flow_vid_preds = []
flow_frame_preds = []
vid_lens = []
labels = []
for num, sample in enumerate(dataloader):
if (num + 1) % 100 == 0:
print("Testing test data point %d of %d" % (num + 1, len(dataloader)))
rgb_features = sample["rgb_data"].numpy()
flow_features = sample["flow_data"].numpy()
label = sample["labels"].numpy()
vid_len = sample["vid_len"].numpy()
rgb_features_inp = torch.from_numpy(rgb_features).float().to(device)
flow_features_inp = torch.from_numpy(flow_features).float().to(device)
with torch.no_grad():
_, rgb_video_pred, _, rgb_frame_scr = rgb_model(Variable(rgb_features_inp))
_, flow_video_pred, _, flow_frame_scr = flow_model(
Variable(flow_features_inp)
)
rgb_frame_pred = F.softmax(rgb_frame_scr, -1)
flow_frame_pred = F.softmax(flow_frame_scr, -1)
rgb_frame_pred = np.squeeze(rgb_frame_pred.cpu().data.numpy(), axis=0)
flow_frame_pred = np.squeeze(flow_frame_pred.cpu().data.numpy(), axis=0)
rgb_video_pred = np.squeeze(rgb_video_pred.cpu().data.numpy(), axis=0)
flow_video_pred = np.squeeze(flow_video_pred.cpu().data.numpy(), axis=0)
label = np.squeeze(label, axis=0)
rgb_vid_preds.append(rgb_video_pred)
rgb_frame_preds.append(rgb_frame_pred)
flow_vid_preds.append(flow_video_pred)
flow_frame_preds.append(flow_frame_pred)
vid_lens.append(vid_len)
labels.append(label)
rgb_vid_preds = np.array(rgb_vid_preds)
rgb_frame_preds = np.array(rgb_frame_preds)
flow_vid_preds = np.array(flow_vid_preds)
flow_frame_preds = np.array(flow_frame_preds)
vid_lens = np.array(vid_lens)
labels = np.array(labels)
| dmap, iou = dtmAP( | 0 | 2023-10-20 05:38:16+00:00 | 4k |
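The next_line field above shows where ts_eval resumes: fusing the two streams for detection mAP. A minimal sketch of that continuation — only the dtmAP call follows the signature given in the getTwoStreamDetectionMAP snippet; the cmAP line and the print format are assumptions modeled on ss_eval:

dmap, iou = dtmAP(
    rgb_vid_preds, flow_vid_preds, rgb_frame_preds, flow_frame_preds,
    vid_lens, dataloader.dataset.path_to_annotations, args,
)
cmap = cmAP(rgb_vid_preds, labels)  # assumption: classification mAP from the RGB stream
print("Classification map %f" % cmap)
for iou_val, map_val in zip(iou, dmap):
    print("Detection map @ %f = %f" % (iou_val, map_val))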
SALT-NLP/Efficient_Unlearning | src/models/transformers/parameter-efficient-finetuning/modeling.py | [
{
"identifier": "AdapterConfig",
"path": "src/models/transformers/parameter-efficient-finetuning/configuration.py",
"snippet": "class AdapterConfig(AdapterConfigBase):\n \"\"\"\n Base class that models the architecture of an adapter.\n\n Args:\n mh_adapter (:obj:`bool`): If True, add adapter modules after the multi-head attention block of each layer.\n output_adapter (:obj:`bool`): If True, add adapter modules after the output FFN of each layer.\n reduction_factor (:obj:`float` or :obj:`Mapping`):\n Either a scalar float (> 0) specifying the reduction factor for all layers or a mapping specifying the\n reduction_factor for individual layers. If not all layers are represented in the mapping a default value\n should be given e.g. {'1': 8, '6': 32, 'default': 16}. Specifying a reduction factor < 1 will result in an\n up-projection layer.\n non_linearity (:obj:`str`): The activation function to use in the adapter bottleneck.\n original_ln_before (:obj:`bool`, optional):\n If True, apply layer pre-trained normalization and residual connection before the adapter modules. Defaults\n to False. Only applicable if :obj:`is_parallel` is False.\n original_ln_after (:obj:`bool`, optional):\n If True, apply pre-trained layer normalization and residual connection after the adapter modules. Defaults\n to True.\n ln_before (:obj:`bool`, optional): If True, add a new layer normalization before the adapter bottleneck.\n Defaults to False.\n ln_after (:obj:`bool`, optional): If True, add a new layer normalization after the adapter bottleneck.\n Defaults to False.\n init_weights (:obj:`str`, optional): Initialization method for the weights of the adapter modules.\n Currently, this can be either \"bert\" (default) or \"mam_adapter\".\n is_parallel (:obj:`bool`, optional): If True, apply adapter transformations in parallel.\n By default (False), sequential application is used.\n scaling (:obj:`float` or :obj:`str`, optional):\n Scaling factor to use for scaled addition of adapter outputs as done by He et al. (2021). Can bei either a\n constant factor (float) or the string \"learned\", in which case the scaling factor is learned. Defaults to\n 1.0.\n use_gating (:obj:`bool`, optional):\n Place a trainable gating module besides the added parameter module to control module activation. This is\n e.g. used for PEFT. Defaults to False.\n residual_before_ln (:obj:`bool`, optional):\n If True, take the residual connection around the adapter bottleneck before the layer normalization. Only\n applicable if :obj:`original_ln_before` is True.\n adapter_residual_before_ln (:obj:`bool`, optional):\n If True, apply the residual connection around the adapter modules before the new layer normalization within\n the adapter. Only applicable if :obj:`ln_after` is True and :obj:`is_parallel` is False.\n inv_adapter (:obj:`str`, optional):\n If not None (default), add invertible adapter modules after the model embedding layer. Currently, this can\n be either \"nice\" or \"glow\".\n inv_adapter_reduction_factor (:obj:`float`, optional):\n The reduction to use within the invertible adapter modules. Only applicable if :obj:`inv_adapter` is not\n None.\n cross_adapter (:obj:`bool`, optional):\n If True, add adapter modules after the cross attention block of each decoder layer in an encoder-decoder\n model. 
Defaults to False.\n leave_out (:obj:`List[int]`, optional):\n The IDs of the layers (starting at 0) where NO adapter modules should be added.\n phm_layer (:obj:`bool`, optional): If True the down and up projection layers are a PHMLayer.\n Defaults to False\n phm_dim (:obj:`int`, optional): The dimension of the phm matrix.\n Defaults to None.\n shared_phm_rule (:obj:`bool`, optional): Whether the phm matrix is shared across all layers.\n Defaults to True\n factorized_phm_rule (:obj:`bool`, optional):\n Whether the phm matrix is factorized into a left and right matrix. Defaults to False.\n learn_phm (:obj:`bool`, optional): Whether the phm matrix should be learned during training.\n Defaults to True\n factorized_phm_W (:\n obj:`bool`, optional): Whether the weights matrix is factorized into a left and right matrix. Defaults to\n True\n shared_W_phm (:obj:`bool`, optional): Whether the weights matrix is shared across all layers.\n Defaults to False.\n phm_c_init (:obj:`str`, optional): The initialization function for the weights of the phm matrix.\n The possible values are `[\"normal\", \"uniform\"]`. Defaults to `normal`.\n phm_init_range (:obj:`float`, optional): std for initializing phm weights if `phm_c_init=\"normal\"`.\n Defaults to 0.0001.\n hypercomplex_nonlinearity (:obj:`str`, optional):\n This specifies the distribution to draw the weights in the phm layer from. Defaults to `glorot-uniform`.\n phm_rank (:obj:`int`, optional):\n If the weight matrix is factorized this specifies the rank of the matrix. E.g. the left matrix of the down\n projection has the shape (phm_dim, _in_feats_per_axis, phm_rank) and the right matrix (phm_dim, phm_rank,\n _out_feats_per_axis). Defaults to 1\n phm_bias (:obj:`bool`, optional):\n If True the down and up projection PHMLayer has a bias term. If `phm_layer` is False this is ignored.\n Defaults to True\n \"\"\"\n\n # Required options\n mh_adapter: bool\n output_adapter: bool\n\n reduction_factor: Union[float, Mapping]\n non_linearity: str\n\n # Options with defaults\n original_ln_before: bool = False\n original_ln_after: bool = True\n ln_before: bool = False\n ln_after: bool = False\n init_weights: str = \"bert\"\n is_parallel: bool = False\n scaling: Union[float, str] = 1.0\n use_gating: bool = False\n residual_before_ln: bool = True\n adapter_residual_before_ln: bool = False\n inv_adapter: Optional[str] = None\n inv_adapter_reduction_factor: Optional[float] = None\n cross_adapter: bool = False\n leave_out: List[int] = field(default_factory=list)\n phm_layer: bool = False\n phm_dim: int = 4\n factorized_phm_W: Optional[bool] = True\n shared_W_phm: Optional[bool] = False\n shared_phm_rule: Optional[bool] = True\n factorized_phm_rule: Optional[bool] = False\n phm_c_init: Optional[str] = \"normal\"\n phm_init_range: Optional[float] = 0.0001\n learn_phm: Optional[bool] = True\n hypercomplex_nonlinearity: Optional[str] = \"glorot-uniform\"\n phm_rank: Optional[int] = 1\n phm_bias: Optional[bool] = True\n\n # We want to emulate a simple form of immutability while keeping the ability to add custom attributes.\n # Therefore, we don't allow changing attribute values if set once.\n def __setattr__(self, name, value):\n if name in self.__dict__:\n raise FrozenInstanceError()\n elif name == \"invertible_adapter\":\n # This is for backwards compatibility. 
In v1, invertible adapters were specified in a nested config dict.\n # Now, we have two config keys directly in the adapter config.\n if value:\n object.__setattr__(self, \"inv_adapter\", value[\"block_type\"])\n object.__setattr__(self, \"inv_adapter_reduction_factor\", value[\"reduction_factor\"])\n else:\n object.__setattr__(self, name, value)"
},
{
"identifier": "AdapterFusionConfig",
"path": "src/models/transformers/parameter-efficient-finetuning/configuration.py",
"snippet": "class AdapterFusionConfig(AdapterConfigBase):\n \"\"\"Base class that models the architecture of an adapter fusion layer.\"\"\"\n\n key: bool\n query: bool\n value: bool\n query_before_ln: bool\n regularization: bool\n residual_before: bool\n temperature: bool\n value_before_softmax: bool\n value_initialized: str\n\n @classmethod\n def load(cls, config: Union[dict, str], **kwargs):\n \"\"\"\n Loads a given adapter fusion configuration specifier into a full AdapterFusionConfig instance.\n\n Args:\n config (Union[dict, str]): The configuration to load. Can be either:\n\n - a dictionary representing the full config\n - an identifier string available in ADAPTERFUSION_CONFIG_MAP\n - the path to a file containing a full adapter fusion configuration\n\n Returns:\n dict: The resolved adapter fusion configuration dictionary.\n \"\"\"\n # currently storing AdapterFusion weights on AdapterHub is not supported.\n config_dict = resolve_adapter_config(config, local_map=ADAPTERFUSION_CONFIG_MAP, try_loading_from_hub=False)\n # convert back to dict to allow attr overrides\n if isinstance(config_dict, AdapterFusionConfig):\n config_dict = config_dict.to_dict()\n config_dict.update(kwargs)\n return AdapterFusionConfig.from_dict(config_dict)"
},
{
"identifier": "ForwardContext",
"path": "src/models/transformers/parameter-efficient-finetuning/context.py",
"snippet": "class ForwardContext:\n \"\"\"\n Holds context information during a forward pass through a model. This class should be used via the\n ``ForwardContext.wrap()`` method.\n\n Note that the context is thread-local.\n \"\"\"\n\n # thread-local storage that holds a stack of active contexts\n storage = threading.local()\n\n context_attributes = [\"adapter_gating_scores\", \"adapter_fusion_attentions\", \"adapter_input_parallelized\"]\n\n def __init__(self, model, *args, **kwargs):\n # If the model has a method ``forward_context()``, use it to create the context.\n if hasattr(model, \"forward_context\"):\n model.forward_context(self, *args, **kwargs)\n\n def __enter__(self):\n ForwardContext.get_contexts().append(self)\n return self\n\n def __exit__(self, type, value, traceback):\n ForwardContext.get_contexts().pop()\n\n @classmethod\n def wrap(cls, f):\n \"\"\"\n Decorator method that wraps a ``forward()`` function of a model class.\n \"\"\"\n\n @functools.wraps(f)\n def wrapper_func(self, *args, **kwargs):\n if self.config.adapters is not None:\n with cls(self, *args, **kwargs) as ctx:\n kwargs = {\n k: v for k, v in kwargs.items() if k.replace(\"output_\", \"\") not in cls.context_attributes\n }\n results = f(self, *args, **kwargs)\n\n # append output attributes\n if isinstance(results, tuple):\n for attr in cls.context_attributes:\n if getattr(ctx, \"output_\" + attr, False):\n results = results + (dict(getattr(ctx, attr)),)\n else:\n for attr in cls.context_attributes:\n if getattr(ctx, \"output_\" + attr, False):\n results[attr] = dict(getattr(ctx, attr))\n return results\n else:\n return f(self, *args, **kwargs)\n\n return wrapper_func\n\n @classmethod\n def get_contexts(cls):\n if not hasattr(cls.storage, \"contexts\"):\n cls.storage.contexts = []\n return cls.storage.contexts\n\n @classmethod\n def get_context(cls):\n try:\n return cls.get_contexts()[-1]\n except IndexError:\n return None"
}
] | import math
import torch
from torch import nn
from transformers.activations import get_activation
from .configuration import AdapterConfig, AdapterFusionConfig
from .context import ForwardContext | 2,948 |
class Activation_Function_Class(nn.Module):
"""
Implementation of various activation function.
"""
def __init__(self, hidden_act):
super().__init__()
if hidden_act.lower() == "leakyrelu":
self.f = nn.functional.leaky_relu
else:
self.f = get_activation(hidden_act.lower())
def forward(self, x):
return self.f(x)
# Single Adapter
class Adapter(nn.Module):
"""
Implementation of a sequential bottleneck adapter block.
"""
def __init__(
self,
adapter_name,
input_size,
down_sample,
|
class Activation_Function_Class(nn.Module):
"""
Implementation of various activation function.
"""
def __init__(self, hidden_act):
super().__init__()
if hidden_act.lower() == "leakyrelu":
self.f = nn.functional.leaky_relu
else:
self.f = get_activation(hidden_act.lower())
def forward(self, x):
return self.f(x)
# Single Adapter
class Adapter(nn.Module):
"""
Implementation of a sequential bottleneck adapter block.
"""
def __init__(
self,
adapter_name,
input_size,
down_sample, | config: AdapterConfig, | 0 | 2023-10-18 18:05:54+00:00 | 4k |
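The AdapterConfig snippet above documents the bottleneck-adapter options; only the first four fields lack defaults. A minimal sketch of building a configuration (the concrete values are illustrative assumptions; the import is the same relative import modeling.py itself uses):

# from .configuration import AdapterConfig
config = AdapterConfig(
    mh_adapter=True,       # bottleneck after the multi-head attention block
    output_adapter=True,   # bottleneck after the output FFN
    reduction_factor=16,   # down-projection divides the hidden size by 16
    non_linearity="relu",  # activation inside the bottleneck
)
assert config.scaling == 1.0   # optional fields keep their defaults
# config.scaling = 2.0         # would raise FrozenInstanceError: fields are write-once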
yntha/cstruct | cstruct/_classwrap.py | [
{
"identifier": "collect_metadata",
"path": "cstruct/_metadata.py",
"snippet": "def collect_metadata(class_obj: dataclass) -> StructMetadata:\n metadata = StructMetadata()\n\n for field in dataclasses.fields(class_obj):\n # the parameters passed to the dataclass constructor individually\n # contain supplementary information about the field such as its\n # format and size.\n field_object = getattr(class_obj, field.name)\n\n if field_object is None:\n continue\n\n field_value = field_object[0]\n field_format = field_object[1]\n field_size = field_object[2]\n\n if repr(field.type).startswith(\"<class 'cstruct.classwrapper.\"):\n lexer = getattr(field.type, \"_lexer\")\n\n # noinspection PyProtectedMember\n orig_name = field.type._source_class.__name__\n field_size += lexer.pad_bytes\n field_format = f\"T[{orig_name}({field.type.primitive_format})]\"\n\n if isinstance(field_value, list):\n value_list = []\n\n for item in field_value:\n if issubclass(field.type, enum.Enum):\n value_list.append(field.type(item[0]))\n\n continue\n\n value_list.append(item[0])\n\n setattr(class_obj, field.name, value_list)\n elif issubclass(field.type, enum.Enum):\n setattr(class_obj, field.name, field.type(field_value))\n else:\n setattr(class_obj, field.name, field_value)\n\n metadata.add_item(\n field.name,\n MetadataItem(field_format, field_size, getattr(class_obj, field.name)),\n )\n\n return metadata"
},
{
"identifier": "CStructLexer",
"path": "cstruct/_lexer.py",
"snippet": "class CStructLexer:\n class _Token:\n def __init__(self, repeat_count: int, format_ch: str, is_vararr: bool = False):\n self.repeat_count = repeat_count\n self.format_ch = format_ch\n self.vararr = is_vararr\n\n self.struct_format = f\"{self.repeat_count}{self.format_ch}\"\n\n def is_leb128(self) -> bool:\n if not has_leb128:\n raise InvalidFormat(\n \"The LEB128 format is not supported. \"\n \"Please install the pyleb128 package to add support for it.\"\n )\n\n return self.format_ch in (\n ULEB128_FORMAT_CH,\n ULEB128P1_FORMAT_CH,\n SLEB128_FORMAT_CH,\n )\n\n def is_typedef(self) -> bool:\n return self.format_ch == TYPEDEF_FORMAT_CH\n\n def __init__(\n self,\n struct_class: type,\n data_format: str,\n data_byte_order: str,\n stream,\n offset: int = -1,\n ):\n if offset > -1:\n stream.seek(offset, 0) # SEEK_SET\n\n self.data_format = data_format\n self.byte_order = \"<\" if data_byte_order == \"little\" else \">\"\n self.stream = stream\n self.struct_class = struct_class\n\n self.pos = 0\n self.values = []\n self.pad_bytes = 0\n\n self.parse()\n\n # noinspection PyDataclass\n dataclass_fields = [f.name for f in dataclasses.fields(struct_class)]\n\n self.parsed_data = dict(zip(dataclass_fields, self.values))\n\n def parse(self):\n while self._has_tokens():\n token = self._next_token()\n\n if token.vararr:\n if token.repeat_count == 0:\n self.values.append([None, token.format_ch, 0])\n\n continue\n\n vararr_values = []\n sum_size = 0\n\n for _ in range(token.repeat_count):\n format_ch = token.format_ch\n\n if token.is_leb128():\n leb_data = self._read_leb128(token)\n\n value = leb_data[0]\n item_size = leb_data[2]\n format_ch = leb_data[1]\n elif token.is_typedef() and self.check_typedef():\n typedef = self.get_typdef_type()\n typedef_initialized = typedef(self.stream)\n\n value = typedef_initialized\n\n # noinspection PyUnresolvedReferences\n format_ch = typedef.primitive_format\n item_size = typedef_initialized.length\n else:\n item_size = struct.calcsize(format_ch)\n value = struct.unpack(\n self.byte_order + format_ch,\n self.stream.read(item_size),\n )[0]\n\n sum_size += item_size\n\n vararr_values.append(\n [\n value,\n format_ch,\n item_size,\n ]\n )\n\n if token.format_ch == \"x\":\n self.pad_bytes += sum_size\n\n continue\n\n self.values.append([vararr_values, token.struct_format, sum_size])\n\n continue\n\n if token.is_typedef():\n if not self.check_typedef():\n raise InvalidFormat(\"Invalid type specified for the typedef.\")\n\n typedef = self.get_typdef_type()\n typedef_initialized = typedef(self.stream)\n\n # noinspection PyUnresolvedReferences\n self.values.append(\n [\n typedef_initialized,\n typedef.primitive_format,\n typedef_initialized.length,\n ]\n )\n\n continue\n\n if token.is_leb128():\n self.values.append(self._read_leb128(token))\n\n continue\n\n if token.repeat_count > 1:\n if token.format_ch == \"s\":\n self.values.append(\n [\n self.stream.read(token.repeat_count),\n token.format_ch,\n token.repeat_count,\n ]\n )\n\n continue\n for _ in range(token.repeat_count):\n if token.format_ch == \"x\":\n self.pad_bytes += 1\n self.stream.read(1)\n\n continue\n\n self.values.append(\n [\n struct.unpack(\n self.byte_order + token.format_ch,\n self.stream.read(struct.calcsize(token.format_ch)),\n )[0],\n token.format_ch,\n struct.calcsize(token.format_ch),\n ]\n )\n\n continue\n\n if token.format_ch == \"x\":\n self.pad_bytes += 1\n self.stream.read(1)\n\n continue\n\n self.values.append(\n [\n struct.unpack(\n self.byte_order + token.struct_format,\n 
self.stream.read(struct.calcsize(token.struct_format)),\n )[0],\n token.format_ch,\n struct.calcsize(token.struct_format),\n ]\n )\n\n def check_typedef(self):\n typedef_type = self.get_typdef_type()\n\n return repr(typedef_type).startswith(\"<class 'cstruct.classwrapper.\")\n\n # noinspection PyDataclass\n def get_typdef_type(self) -> type:\n return dataclasses.fields(self.struct_class)[self.pos - 1].type\n\n def _next_literal(self) -> str:\n literal = self.data_format[self.pos]\n self.pos += 1\n\n return literal\n\n def _has_tokens(self) -> bool:\n return self.pos < len(self.data_format)\n\n def _next_token(self) -> _Token:\n token = self._next_literal()\n\n if token == \"(\":\n if self.pos == 1:\n raise InvalidFormat(\n \"The data format must start with a struct format character. \"\n \"See https://docs.python.org/3/library/struct.html?highlight=struct#format-characters\"\n \" for more information.\"\n )\n\n digit_buffer = \"\"\n\n while (token := self._next_literal()) != \")\":\n digit_buffer += token\n\n vararr_format = self._next_literal()\n\n data_index = int(digit_buffer)\n data_value = self.values[data_index][0]\n\n if vararr_format == \"s\":\n # return as a non-variable array\n return self._Token(data_value, vararr_format)\n\n return self._Token(data_value, vararr_format, True)\n\n if token.isdigit():\n digit_buffer = token\n\n while (token := self._next_literal()).isdigit():\n digit_buffer += token\n\n # the last read literal is the format character\n return self._Token(int(digit_buffer), token)\n\n return self._Token(1, token)\n\n def _read_leb128(self, token: _Token) -> list[leb128, str, int]:\n if token.format_ch == ULEB128_FORMAT_CH:\n leb_size = uleb128.peek_size(self.stream)\n\n if leb_size == 0:\n return [uleb128(0), ULEB128_FORMAT_CH, 0]\n\n return [uleb128.decode_stream(self.stream), token.format_ch, leb_size]\n elif token.format_ch == ULEB128P1_FORMAT_CH:\n leb_size = uleb128.peek_size(self.stream)\n\n if leb_size == 0:\n return [uleb128(0, p1=True), ULEB128P1_FORMAT_CH, 0]\n\n return [\n uleb128.decode_stream(self.stream, p1=True),\n token.format_ch,\n leb_size,\n ]\n elif token.format_ch == SLEB128_FORMAT_CH:\n leb_size = sleb128.peek_size(self.stream)\n\n if leb_size == 0:\n return [sleb128(0), SLEB128_FORMAT_CH, 0]\n\n return [sleb128.decode_stream(self.stream), token.format_ch, leb_size]"
}
] | import dataclasses
import typing
from dataclasses import dataclass
from ._metadata import collect_metadata
from ._lexer import CStructLexer | 3,155 | # --------------------------------------------------------------------------------------
# Copyright(C) 2023 yntha -
# -
# This program is free software: you can redistribute it and/or modify it under -
# the terms of the GNU General Public License as published by the Free Software -
# Foundation, either version 3 of the License, or (at your option) any later -
# version. -
# -
# This program is distributed in the hope that it will be useful, but WITHOUT ANY -
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A -
# PARTICULAR PURPOSE. See the GNU General Public License for more details. -
# -
# You should have received a copy of the GNU General Public License along with -
# this program. If not, see <http://www.gnu.org/licenses/>. -
# --------------------------------------------------------------------------------------
class ClassWrapper:
@classmethod
def wrap(cls, struct_class: type, struct_format: str, byte_order: str) -> type:
return _make_newclass(struct_class, struct_format, byte_order)
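# Rebuilds the source class as a fresh dataclass: already-wrapped bases are unwrapped
# back to their original classes, annotations are merged across all bases, and
# InitVar/ClassVar entries are dropped from subclass annotations before @dataclass
# is re-applied.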
# noinspection PyProtectedMember
def _gen_superclass(cls: type) -> type:
superclass_bases = []
annotations = {}
for base in cls.__bases__:
if hasattr(base, "_source_class"):
superclass_bases.append(base._source_class)
annotations.update(base._source_class.__annotations__)
else:
superclass_bases.append(base)
if hasattr(base, "__annotations__"):
annotations.update(base.__annotations__)
annotations.update(cls.__annotations__)
# remove all initvars and classvars from the annotations, if this
# class is a subclass.
if cls.__base__ is not object:
for annotation in annotations.copy():
if isinstance(annotations[annotation], dataclasses.InitVar):
annotations.pop(annotation)
continue
if annotations[annotation] is typing.ClassVar:
annotations.pop(annotation)
# we must remove the old dict because it is improperly copied to
# the new class with `type`. See
# https://jira.mongodb.org/browse/MOTOR-460 for more information.
cls_dict = dict(cls.__dict__)
cls_dict.pop("__dict__", None)
superclass = type(cls.__name__, tuple(superclass_bases), cls_dict)
# copy over the old class annotations
setattr(superclass, "__annotations__", annotations)
# noinspection PyTypeChecker
superclass = dataclass(superclass)
return superclass
class ClassWrapperMeta(type):
# noinspection PyUnresolvedReferences
def __repr__(cls):
return f"<class 'cstruct.classwrapper.{cls._source_class.__name__}'>"
def _make_newclass(src_cls: type, struct_format: str, byte_order: str) -> type:
@dataclass
class newclass(_gen_superclass(src_cls), metaclass=ClassWrapperMeta):
_source_class = src_cls
_lexer = None
primitive_format = struct_format
data_byte_order = byte_order
# noinspection PyArgumentList
def __new__(cls, stream, offset: int = -1):
self = super().__new__(cls)
self.__class__._lexer = CStructLexer(
cls, self.primitive_format, self.data_byte_order, stream, offset
)
cls.__init__(self, None, **self._lexer.parsed_data)
return self
def __getitem__(self, item):
dataclass_values = [i for i in dataclasses.asdict(self).values()]
return dataclass_values[item]
def __repr__(self):
return repr(self.meta)
def __str__(self):
return str(self.meta)
def __post_init__(self, *args, **kwargs):
| # --------------------------------------------------------------------------------------
# Copyright(C) 2023 yntha -
# -
# This program is free software: you can redistribute it and/or modify it under -
# the terms of the GNU General Public License as published by the Free Software -
# Foundation, either version 3 of the License, or (at your option) any later -
# version. -
# -
# This program is distributed in the hope that it will be useful, but WITHOUT ANY -
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A -
# PARTICULAR PURPOSE. See the GNU General Public License for more details. -
# -
# You should have received a copy of the GNU General Public License along with -
# this program. If not, see <http://www.gnu.org/licenses/>. -
# --------------------------------------------------------------------------------------
class ClassWrapper:
@classmethod
def wrap(cls, struct_class: type, struct_format: str, byte_order: str) -> type:
return _make_newclass(struct_class, struct_format, byte_order)
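# Rebuilds the source class as a fresh dataclass: already-wrapped bases are unwrapped
# back to their original classes, annotations are merged across all bases, and
# InitVar/ClassVar entries are dropped from subclass annotations before @dataclass
# is re-applied.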
# noinspection PyProtectedMember
def _gen_superclass(cls: type) -> type:
superclass_bases = []
annotations = {}
for base in cls.__bases__:
if hasattr(base, "_source_class"):
superclass_bases.append(base._source_class)
annotations.update(base._source_class.__annotations__)
else:
superclass_bases.append(base)
if hasattr(base, "__annotations__"):
annotations.update(base.__annotations__)
annotations.update(cls.__annotations__)
# remove all initvars and classvars from the annotations, if this
# class is a subclass.
if cls.__base__ is not object:
for annotation in annotations.copy():
if isinstance(annotations[annotation], dataclasses.InitVar):
annotations.pop(annotation)
continue
if annotations[annotation] is typing.ClassVar:
annotations.pop(annotation)
# we must remove the old dict because it is improperly copied to
# the new class with `type`. See
# https://jira.mongodb.org/browse/MOTOR-460 for more information.
cls_dict = dict(cls.__dict__)
cls_dict.pop("__dict__", None)
superclass = type(cls.__name__, tuple(superclass_bases), cls_dict)
# copy over the old class annotations
setattr(superclass, "__annotations__", annotations)
# noinspection PyTypeChecker
superclass = dataclass(superclass)
return superclass
class ClassWrapperMeta(type):
# noinspection PyUnresolvedReferences
def __repr__(cls):
return f"<class 'cstruct.classwrapper.{cls._source_class.__name__}'>"
def _make_newclass(src_cls: type, struct_format: str, byte_order: str) -> type:
@dataclass
class newclass(_gen_superclass(src_cls), metaclass=ClassWrapperMeta):
_source_class = src_cls
_lexer = None
primitive_format = struct_format
data_byte_order = byte_order
# noinspection PyArgumentList
def __new__(cls, stream, offset: int = -1):
self = super().__new__(cls)
self.__class__._lexer = CStructLexer(
cls, self.primitive_format, self.data_byte_order, stream, offset
)
cls.__init__(self, None, **self._lexer.parsed_data)
return self
def __getitem__(self, item):
dataclass_values = [i for i in dataclasses.asdict(self).values()]
return dataclass_values[item]
def __repr__(self):
return repr(self.meta)
def __str__(self):
return str(self.meta)
def __post_init__(self, *args, **kwargs): | self.meta = collect_metadata(self) | 0 | 2023-10-22 18:33:32+00:00 | 4k |
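A minimal usage sketch of the wrapper above (the Header fields, the "I4s" format string, and the byte order are illustrative assumptions, not part of the repo; it also assumes the optional pyleb128 dependency is installed, since the lexer's LEB128 check runs for every token):

import io
from dataclasses import dataclass, InitVar

@dataclass
class Header:
    raw: InitVar[object]   # the wrapper's __new__ fills this slot with None
    magic: int = 0         # parsed from the 'I' field
    tag: bytes = b""       # parsed from the '4s' field

WrappedHeader = ClassWrapper.wrap(Header, "I4s", "little")
hdr = WrappedHeader(io.BytesIO(b"\x01\x00\x00\x00ABCD"))
# hdr.magic == 1, hdr.tag == b'ABCD'; hdr.meta holds the per-field format/size metadata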
sehyun03/MulActSeg | trainer/active_joint_multi_lossdecomp.py | [
{
"identifier": "active_joint_multi",
"path": "trainer/active_joint_multi.py",
"snippet": "class ActiveTrainer(active.ActiveTrainer):\n def __init__(self, args, logger, selection_iter):\n def get_criterion(self):\n def zero_if_nan(self, loss):\n def check_loss_sanity(self, loss):\n def update(self, loss):\n def update_average_meter(self, dict):\n def train_impl(self, total_itrs, val_period):"
},
{
"identifier": "GroupMultiLabelCE_onlymulti",
"path": "trainer/active_joint_multi_predignore_mclossablation2.py",
"snippet": "class GroupMultiLabelCE_onlymulti(GroupMultiLabelCE_):\n def __init__(self, args, num_class, num_superpixel, temperature=1.0, reduction='mean'):\n super().__init__(args, num_class, num_superpixel, temperature, reduction)\n\n\n def forward(self, inputs, targets, superpixels, spmasks):\n ''' inputs: NxCxHxW\n targets: N x self.num_superpixel x C+1\n superpixels: NxHxW\n spmasks: NxHxW\n \n Apply max operation over predicted probabilities for each multi-hot label within the superpixel, and apply CE loss.\n '''\n N, C, H, W = inputs.shape\n outputs = F.softmax(inputs / self.temp, dim=1) ### N x C x H x W\n outputs = outputs.permute(0,2,3,1).reshape(N, -1, C) ### N x HW x C\n superpixels = superpixels.reshape(N, -1, 1) ### N x HW x 1\n spmasks = spmasks.reshape(N, -1) ### N x HW\n empty_trg_mask = torch.any(targets, dim=2).bool() ### N x self.num_superpixel\n is_trg_multi = (1 < targets.sum(dim=2)) ### N x self.num_superpixel\n loss = 0\n num_valid = 1\n\n for i in range(N):\n '''\n outputs[i] ### HW x C\n superpixels[i] ### HW x 1\n spmasks[i] ### HW x 1\n '''\n\n ### filtered outputs\n valid_mask = spmasks[i]\n if not torch.any(valid_mask):\n continue\n multi_mask = is_trg_multi[i][superpixels[i].squeeze(dim=1)[spmasks[i]]].detach()\n valid_mask = spmasks[i].clone()\n valid_mask[spmasks[i]] = multi_mask\n if not torch.any(valid_mask):\n continue\n\n valid_output = outputs[i][valid_mask] ### HW' x C : class-wise prediction 중 valid 한 영역\n valid_superpixel = superpixels[i][valid_mask] ### HW' x 1 : superpixel id 중 valid 한 ID\n\n out_sup_mxpool = scatter(valid_output, valid_superpixel, dim=0, reduce='max', dim_size=self.num_superpixel)\n ### self.num_superpixel x C : sp 영역 내 class 별 max predicted prob, invalid superpixel 은 모두 0 으로 채워짐.\n trg_sup_mxpool = targets[i] ### self.num_superpixel x C: multi-hot annotation\n \n out_sup_mxpool = out_sup_mxpool[empty_trg_mask[i]]\n trg_sup_mxpool = trg_sup_mxpool[empty_trg_mask[i]]\n\n top_one_preds = out_sup_mxpool * trg_sup_mxpool ### self.num_superpixel x C: 존재하는 multi-hot 으로 filtering\n\n top_one_preds_nonzero = top_one_preds[top_one_preds.nonzero(as_tuple=True)] ### 해당 value indexing\n num_valid += top_one_preds_nonzero.shape[0] ### valid pixel 개수 측정\n\n loss += -torch.log(top_one_preds_nonzero + self.eps).sum()\n\n if self.reduction == 'mean':\n return loss / num_valid\n elif self.reduction == 'none':\n return loss, num_valid\n else:\n raise NotImplementedError"
},
{
"identifier": "MultiChoiceCE",
"path": "utils/loss.py",
"snippet": "class MultiChoiceCE(nn.Module):\n def __init__(self, num_class, temperature=1.0, reduction='mean'):\n super().__init__()\n self.num_class = num_class\n self.reduction = reduction\n self.eps = 1e-8\n self.temp = temperature\n\n def forward(self, inputs, targets, superpixels, spmasks):\n ''' inputs: N x C x H x W\n targets: N x self.num_superpiexl x C+1\n superpixels: N x H x W\n spmasks: N x H x W\n '''\n\n N, C, H, W = inputs.shape\n inputs = inputs.permute(0,2,3,1).reshape(N, -1, C) ### N x HW x C\n outputs = F.softmax(inputs / self.temp, dim=2) ### N x HW x C\n superpixels = superpixels.reshape(N, -1, 1) ### N x HW x 1\n spmasks = spmasks.reshape(N, -1) ### N x HW\n loss = 0\n num_valid = 1\n\n for i in range(N):\n '''\n outputs[i] ### HW x C\n superpixels[i] ### HW x 1\n spmasks[i] ### HW x 1\n '''\n ### filtered outputs\n valid_mask = spmasks[i] ### HW\n if not torch.any(valid_mask):\n continue\n valid_output = outputs[i][valid_mask] ### HW' x C : class-wise prediction 중 valid 한 영역\n valid_superpixel = superpixels[i][valid_mask] ### HW' x 1 : superpixel id 중 valid 한 ID\n\n trg_sup = targets[i][..., :-1] ### self.num_superpixel x C: multi-hot annotation\n trg_pixel = trg_sup[valid_superpixel.squeeze(dim=1)].detach() ### HW' x C : pixel-wise multi-hot annotation\n \n ### filter out empty target\n empty_trg_mask = torch.any(trg_pixel, dim=1).bool() ### HW'\n valid_output = valid_output[empty_trg_mask]\n trg_pixel = trg_pixel[empty_trg_mask]\n \n pos_pred = (valid_output * trg_pixel).sum(dim=1)\n num_valid += pos_pred.shape[0]\n loss += -torch.log(pos_pred + self.eps).sum()\n\n if self.reduction == 'mean':\n return loss / num_valid\n elif self.reduction == 'none':\n return loss, num_valid\n else:\n NotImplementedError"
}
] | import torch
import numpy as np
import torch.nn.functional as F
from torch import nn
from tqdm import tqdm
from torch_scatter import scatter
from trainer import active_joint_multi
from trainer.active_joint_multi_predignore_mclossablation2 import GroupMultiLabelCE_onlymulti
from utils.loss import MultiChoiceCE | 2,380 |
r"""
Decomposition of previous multi-positive loss & group-multi loss
- One-hot spxs: CE loss
- Multi-hot spxs: Multi-positive, Group Multi
- without predignore
"""
class OnehotCEMultihotChoice(MultiChoiceCE):
def __init__(self, num_class, temperature=1.0, reduction='mean'):
super().__init__(num_class, temperature, reduction)
assert(self.reduction == 'mean')
def forward(self, inputs, targets, superpixels, spmasks):
''' inputs: N x C x H x W
            targets: N x self.num_superpixel x C+1
superpixels: N x H x W
spmasks: N x H x W
'''
N, C, H, W = inputs.shape
inputs = inputs.permute(0,2,3,1).reshape(N, -1, C) ### N x HW x C
outputs = F.softmax(inputs / self.temp, dim=2) ### N x HW x C
superpixels = superpixels.reshape(N, -1, 1) ### N x HW x 1
        spmasks = spmasks.reshape(N, -1) ### N x HW: binary mask indicating currently selected spxs
oh_loss = 0
oh_num_valid = 1
mh_loss = 0
mh_num_valid = 1
for i in range(N):
'''
outputs[i] ### HW x C
superpixels[i] ### HW x 1
spmasks[i] ### HW x 1
'''
r''' skip this image if valid superpixel is not included '''
valid_mask = spmasks[i] ### HW
if not torch.any(valid_mask): continue ### empty image
r''' calculate pixel-wise (CE, MC) loss jointly'''
            valid_output = outputs[i][valid_mask] ### HW' x C : valid region of the class-wise predictions
            valid_superpixel = superpixels[i][valid_mask] ### HW' x 1 : valid IDs among the superpixel ids
trg_sup = targets[i] ### self.num_superpixel x C: multi-hot annotation
trg_pixel = trg_sup[valid_superpixel.squeeze(dim=1)] ### HW' x C : pixel-wise multi-hot annotation
pos_pred = (valid_output * trg_pixel).sum(dim=1)
r''' ce loss on one-hot spx '''
onehot_trg = (1 == trg_pixel.sum(dim=1))
if torch.any(onehot_trg):
oh_pos_pred = pos_pred[onehot_trg]
oh_loss += -torch.log(oh_pos_pred + self.eps).sum()
oh_num_valid += oh_pos_pred.shape[0]
r''' mc loss on multi-hot spx '''
# multihot_trg = torch.logical_not(onehot_trg)
multihot_trg = (1 < trg_pixel.sum(dim=1))
if torch.any(multihot_trg):
# assert(torch.all(multihot_trg == (1 < trg_pixel.sum(dim=1))))
mh_pos_pred = pos_pred[multihot_trg]
mh_loss += -torch.log(mh_pos_pred + self.eps).sum()
mh_num_valid += mh_pos_pred.shape[0]
return oh_loss / oh_num_valid, mh_loss / mh_num_valid
|
r"""
Decomposition of previous multi-positive loss & group-multi loss
- One-hot spxs: CE loss
- Multi-hot spxs: Multi-positive, Group Multi
- without predignore
"""
class OnehotCEMultihotChoice(MultiChoiceCE):
def __init__(self, num_class, temperature=1.0, reduction='mean'):
super().__init__(num_class, temperature, reduction)
assert(self.reduction == 'mean')
def forward(self, inputs, targets, superpixels, spmasks):
''' inputs: N x C x H x W
            targets: N x self.num_superpixel x C+1
superpixels: N x H x W
spmasks: N x H x W
'''
N, C, H, W = inputs.shape
inputs = inputs.permute(0,2,3,1).reshape(N, -1, C) ### N x HW x C
outputs = F.softmax(inputs / self.temp, dim=2) ### N x HW x C
superpixels = superpixels.reshape(N, -1, 1) ### N x HW x 1
        spmasks = spmasks.reshape(N, -1) ### N x HW: binary mask indicating currently selected spxs
oh_loss = 0
oh_num_valid = 1
mh_loss = 0
mh_num_valid = 1
for i in range(N):
'''
outputs[i] ### HW x C
superpixels[i] ### HW x 1
spmasks[i] ### HW x 1
'''
r''' skip this image if valid superpixel is not included '''
valid_mask = spmasks[i] ### HW
if not torch.any(valid_mask): continue ### empty image
r''' calculate pixel-wise (CE, MC) loss jointly'''
            valid_output = outputs[i][valid_mask] ### HW' x C : valid region of the class-wise predictions
            valid_superpixel = superpixels[i][valid_mask] ### HW' x 1 : valid IDs among the superpixel ids
trg_sup = targets[i] ### self.num_superpixel x C: multi-hot annotation
trg_pixel = trg_sup[valid_superpixel.squeeze(dim=1)] ### HW' x C : pixel-wise multi-hot annotation
pos_pred = (valid_output * trg_pixel).sum(dim=1)
r''' ce loss on one-hot spx '''
onehot_trg = (1 == trg_pixel.sum(dim=1))
if torch.any(onehot_trg):
oh_pos_pred = pos_pred[onehot_trg]
oh_loss += -torch.log(oh_pos_pred + self.eps).sum()
oh_num_valid += oh_pos_pred.shape[0]
r''' mc loss on multi-hot spx '''
# multihot_trg = torch.logical_not(onehot_trg)
multihot_trg = (1 < trg_pixel.sum(dim=1))
if torch.any(multihot_trg):
# assert(torch.all(multihot_trg == (1 < trg_pixel.sum(dim=1))))
mh_pos_pred = pos_pred[multihot_trg]
mh_loss += -torch.log(mh_pos_pred + self.eps).sum()
mh_num_valid += mh_pos_pred.shape[0]
return oh_loss / oh_num_valid, mh_loss / mh_num_valid
| class ActiveTrainer(active_joint_multi.ActiveTrainer): | 0 | 2023-10-24 09:19:58+00:00 | 4k |
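A toy invocation of the decomposed loss above, illustrating the expected tensor shapes (all sizes are arbitrary assumptions; note that unlike the parent MultiChoiceCE, this forward consumes every column of targets, so the toy targets carry C columns):

import torch
criterion = OnehotCEMultihotChoice(num_class=4)
N, C, H, W, S = 2, 4, 8, 8, 16                 # S = number of superpixels
inputs = torch.randn(N, C, H, W)               # raw logits
targets = torch.zeros(N, S, C)
targets[:, :, 0] = 1                           # every spx labeled with class 0
targets[:, :8, 1] = 1                          # first 8 spxs become multi-hot
superpixels = torch.randint(0, S, (N, H, W))   # spx id per pixel
spmasks = torch.ones(N, H, W, dtype=torch.bool)
oh_loss, mh_loss = criterion(inputs, targets, superpixels, spmasks)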
hms-dbmi/CHIEF | train.py | [
{
"identifier": "read_yaml",
"path": "utils/utils.py",
"snippet": "def read_yaml(fpath=\"./configs/sample.yaml\"):\n with open(fpath, mode=\"r\") as file:\n yml = yaml.load(file, Loader=yaml.Loader)\n return Dict(yml)"
},
{
"identifier": "seed_torch",
"path": "utils/utils.py",
"snippet": "def seed_torch(device, seed=7):\n random.seed(seed)\n os.environ['PYTHONHASHSEED'] = str(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n if device.type == 'cuda':\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed) # if you are using multi-GPU.\n torch.backends.cudnn.benchmark = False\n torch.backends.cudnn.deterministic = True"
},
{
"identifier": "Trainer",
"path": "utils/trainer.py",
"snippet": "class Trainer:\n\n def __init__(self, cfg, result_dir):\n self.cfg = cfg\n self.result_dir = result_dir\n\n def get_dataloader(self, index):\n cfg = self.cfg\n\n train_set_df = []\n val_set_df = []\n if cfg.Train.mode == 'cross_validation':\n for i in range(cfg.General.fold_num):\n if i == index:\n val_set_df = pd.read_csv(os.path.join(cfg.Data.split_dir, f'split_{i}.csv'))\n else:\n train_set_df.append(pd.read_csv(os.path.join(cfg.Data.split_dir, f'split_{i}.csv')))\n train_set_df = pd.concat(train_set_df, axis=0)\n elif cfg.Train.mode == 'repeat':\n train_set_df = pd.read_csv(os.path.join(cfg.Data.split_dir, 'train_set.csv'))\n val_set_df = pd.read_csv(os.path.join(cfg.Data.split_dir, 'val_set.csv'))\n\n if cfg.Train.dataset == 'BagDataset':\n from datasets.BagDataset import BagDataset\n train_set = BagDataset(train_set_df, cfg.Data.data_dir)\n val_set = BagDataset(val_set_df, cfg.Data.data_dir)\n elif cfg.Train.dataset == 'TwoStreamBagDataset':\n from datasets.TwoStreamBagDataset import TwoStreamDataset\n train_set = TwoStreamDataset(train_set_df, cfg.Data.data_dir_1, cfg.Data.data_dir_2)\n val_set = TwoStreamDataset(val_set_df, cfg.Data.data_dir_1, cfg.Data.data_dir_2)\n else:\n raise NotImplementedError\n train_loader = DataLoader(train_set, batch_size=None, shuffle=True, num_workers=0)\n val_loader = DataLoader(val_set, batch_size=None, shuffle=False, num_workers=0)\n\n return train_loader, val_loader\n\n def get_testloader(self, test_set_name='test_set', **kwargs):\n cfg = self.cfg\n\n\n if test_set_name == 'test_set':\n if cfg.Train.mode == 'cross_validation':\n raise NotImplementedError\n elif cfg.Train.mode == 'repeat':\n test_set_df = pd.read_csv(os.path.join(cfg.Data.split_dir, 'test_set.csv'))\n else:\n raise NotImplementedError\n else:\n test_set_df = pd.read_csv(cfg.Test[test_set_name].csv_path)\n\n if cfg.Train.dataset == 'BagDataset':\n from datasets.BagDataset import BagDataset\n if test_set_name == 'test_set':\n test_set = BagDataset(test_set_df, cfg.Data.data_dir)\n else:\n test_set = BagDataset(test_set_df, cfg.Test[test_set_name].data_dir)\n elif cfg.Train.dataset == 'TwoStreamBagDataset':\n from datasets.TwoStreamBagDataset import TwoStreamDataset\n if test_set_name == 'test_set':\n test_set = TwoStreamDataset(test_set_df, cfg.Data.data_dir_1, cfg.Data.data_dir_2)\n else:\n test_set = TwoStreamDataset(test_set_df, cfg.Test[test_set_name].data_dir_1, cfg.Test[test_set_name].data_dir_2)\n else:\n raise NotImplementedError\n test_loader = DataLoader(test_set, batch_size=None, shuffle=False, num_workers=0)\n\n return test_loader\n\n def get_model(self):\n\n if self.cfg.Model.network == 'CHIEF':\n from models.CHIEF import CHIEF\n model = CHIEF(features_size=self.cfg.Data.features_size, n_classes=self.cfg.Data.n_classes)\n else:\n raise NotImplementedError\n\n return model\n\n\n def get_train_loop(self):\n cfg = self.cfg\n if cfg.Train.train_method == 'CHIEF':\n from training_methods.CHIEF import train_loop\n else:\n raise NotImplementedError\n\n return train_loop\n\n def get_validation(self):\n cfg = self.cfg\n if cfg.Train.val_method == 'CHIEF':\n from training_methods.CHIEF import validation\n else:\n raise NotImplementedError\n\n return validation\n\n def get_summary(self):\n cfg = self.cfg\n if cfg.Train.val_method == 'CHIEF':\n from training_methods.CHIEF import summary\n else:\n raise NotImplementedError\n\n return summary\n\n def train(self, index):\n cfg = self.cfg\n train_loader, val_loader = self.get_dataloader(index)\n\n print(f'''\n train set num: 
{len(train_loader)}\n val set num: {len(val_loader)}\n ''')\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n print(f'training fold {index}')\n\n # tensorboardX writer\n writer_dir = os.path.join(self.result_dir, 'log', str(index))\n if not os.path.isdir(writer_dir):\n os.makedirs(writer_dir)\n writer = SummaryWriter(writer_dir, flush_secs=15)\n\n model = self.get_model()\n model.to(device)\n\n optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=cfg.Train.lr,\n weight_decay=cfg.Train.reg)\n\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(\n optimizer,\n T_max=cfg.Train.CosineAnnealingLR.T_max,\n eta_min=cfg.Train.CosineAnnealingLR.eta_min,\n last_epoch=-1\n )\n\n early_stopping = EarlyStopping(\n patience=cfg.Train.Early_stopping.patient,\n stop_epoch=cfg.Train.Early_stopping.stop_epoch,\n type=cfg.Train.Early_stopping.type\n )\n\n train_loop = self.get_train_loop()\n\n validation = self.get_validation()\n\n for epoch in range(cfg.Train.max_epochs):\n lr = scheduler.get_last_lr()[0]\n print('learning rate:{:.8f}'.format(lr))\n writer.add_scalar('train/lr', lr, epoch)\n\n train_loop(\n epoch=epoch,\n model=model,\n loader=train_loader,\n optimizer=optimizer,\n writer=writer,\n )\n\n stop = validation(\n cur=index,\n epoch=epoch,\n model=model,\n loader=val_loader,\n n_classes=cfg.Data.n_classes,\n results_dir=self.result_dir,\n early_stopping=early_stopping,\n early_stopping_type='max',\n writer=writer\n )\n\n if stop:\n break\n scheduler.step()\n\n def eval(self, test_set_name = 'test_set'):\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n cfg = self.cfg\n all_result = {\n 'acc': [],\n 'auc': [],\n 'f1_score': [],\n 'precision': [],\n 'specificity': [],\n 'recall': [],\n 'prob': []\n }\n\n for i in range(cfg.General.fold_num):\n weight_path = os.path.join(self.result_dir, f's_{i}_checkpoint.pt')\n if not os.path.exists(weight_path):\n break\n model = self.get_model()\n model.load_state_dict(torch.load(weight_path))\n model.to(device)\n\n test_loader = self.get_testloader(test_set_name)\n summary = self.get_summary()\n\n result = summary(model, test_loader, cfg.Data.n_classes)\n for key, value in result.items():\n all_result[key].append(value)\n\n probs = all_result['prob']\n del all_result['prob']\n\n result_path = os.path.join(self.result_dir, 'results', test_set_name)\n os.makedirs(result_path, exist_ok=True)\n metrics_df = pd.DataFrame(all_result)\n mean = metrics_df.values.mean(axis=0)\n std = metrics_df.values.std(axis=0)\n metrics_df = pd.DataFrame(data=np.concatenate([metrics_df.values, mean[np.newaxis, :], std[np.newaxis, :]], axis=0), columns=metrics_df.columns)\n metrics_df.to_csv(os.path.join(result_path, 'metrics.csv'), index=False)\n\n test_set_df = test_loader.dataset.get_data_df()\n test_set_df = test_set_df[['slide_id', 'label']]\n for i, prob in enumerate(probs):\n test_set_df[f'prob_{i}'] = prob[:, 1]\n test_set_df.to_csv(os.path.join(result_path, 'probs.csv'), index=False)\n\n print(metrics_df)"
}
] | import argparse
import os
import shutil
import torch
from utils.utils import read_yaml, seed_torch
from utils.trainer import Trainer | 2,304 |
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--config_path', type=str)
parser.add_argument('--begin', type=int, default=0)
parser.add_argument('--end', type=int, default=10)
return parser.parse_args()
if __name__ == '__main__':
args = get_args()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--config_path', type=str)
parser.add_argument('--begin', type=int, default=0)
parser.add_argument('--end', type=int, default=10)
return parser.parse_args()
if __name__ == '__main__':
args = get_args()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu") | cfg = read_yaml(args.config_path) | 0 | 2023-10-17 21:19:25+00:00 | 4k |
justincui03/tesla | buffer.py | [
{
"identifier": "get_dataset",
"path": "utils.py",
"snippet": "def get_dataset(dataset, data_path, batch_size=1, args=None):\n\n class_map = None\n loader_train_dict = None\n class_map_inv = None\n\n if dataset == 'CIFAR10':\n channel = 3\n im_size = (32, 32)\n num_classes = 10\n mean = [0.4914, 0.4822, 0.4465]\n std = [0.2023, 0.1994, 0.2010]\n if args.zca:\n transform = transforms.Compose([transforms.ToTensor()])\n else:\n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)])\n dst_train = datasets.CIFAR10(data_path, train=True, download=True, transform=transform) # no augmentation\n dst_test = datasets.CIFAR10(data_path, train=False, download=True, transform=transform)\n class_names = dst_train.classes\n class_map = {x:x for x in range(num_classes)}\n\n\n elif dataset == 'Tiny':\n channel = 3\n im_size = (64, 64)\n num_classes = 200\n mean = [0.485, 0.456, 0.406]\n std = [0.229, 0.224, 0.225]\n if args.zca:\n transform = transforms.Compose([transforms.ToTensor()])\n else:\n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)])\n dst_train = datasets.ImageFolder(os.path.join(data_path, \"train\"), transform=transform) # no augmentation\n dst_test = datasets.ImageFolder(os.path.join(data_path, \"val\", \"images\"), transform=transform)\n class_names = dst_train.classes\n class_map = {x:x for x in range(num_classes)}\n\n\n elif dataset == 'ImageNet':\n channel = 3\n im_size = (64, 64)\n # im_size = (128, 128)\n # data_path = '/home/justincui/data/' + str(im_size[0])\n num_classes = 1000\n data_path = '/nfs/data/justincui/data/imagenet2012/' + str(im_size[0])\n\n mean = [0.485, 0.456, 0.406]\n std = [0.229, 0.224, 0.225]\n\n data_transforms = {\n 'train': transforms.Compose([\n # transforms.Resize(im_size),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ]),\n 'val': transforms.Compose([\n # transforms.Resize(im_size),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ]),\n }\n\n dst_train = datasets.ImageFolder(os.path.join(data_path, \"train\"), transform=data_transforms['train']) # no augmentation\n dst_test = datasets.ImageFolder(os.path.join(data_path, \"val\"), transform=data_transforms['val'])\n class_names = dst_train.classes\n class_map = {x:x for x in range(num_classes)}\n\n elif dataset.startswith('CIFAR100'):\n channel = 3\n im_size = (32, 32)\n num_classes = 100\n mean = [0.4914, 0.4822, 0.4465]\n std = [0.2023, 0.1994, 0.2010]\n\n if args.zca:\n transform = transforms.Compose([transforms.ToTensor()])\n else:\n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=mean, std=std), transforms.Resize(im_size)])\n dst_train = datasets.CIFAR100(data_path, train=True, download=True, transform=transform) # no augmentation\n dst_test = datasets.CIFAR100(data_path, train=False, download=True, transform=transform)\n class_names = dst_train.classes\n class_map = {x: x for x in range(num_classes)}\n\n else:\n exit('unknown dataset: %s'%dataset)\n\n if args.zca:\n images = []\n labels = []\n print(\"Train ZCA\")\n for i in tqdm.tqdm(range(len(dst_train))):\n im, lab = dst_train[i]\n images.append(im)\n labels.append(lab)\n images = torch.stack(images, dim=0).to(args.device)\n labels = torch.tensor(labels, dtype=torch.long, device=\"cpu\")\n zca = K.enhance.ZCAWhitening(eps=0.1, compute_inv=True)\n zca.fit(images)\n zca_images = zca(images).to(\"cpu\")\n dst_train = TensorDataset(zca_images, labels)\n\n images = []\n labels = []\n 
print(\"Test ZCA\")\n for i in tqdm.tqdm(range(len(dst_test))):\n im, lab = dst_test[i]\n images.append(im)\n labels.append(lab)\n images = torch.stack(images, dim=0).to(args.device)\n labels = torch.tensor(labels, dtype=torch.long, device=\"cpu\")\n\n zca_images = zca(images).to(\"cpu\")\n dst_test = TensorDataset(zca_images, labels)\n\n args.zca_trans = zca\n\n\n testloader = torch.utils.data.DataLoader(dst_test, batch_size=128, shuffle=False, num_workers=2)\n\n\n return channel, im_size, num_classes, class_names, mean, std, dst_train, dst_test, testloader, loader_train_dict, class_map, class_map_inv"
},
{
"identifier": "get_network",
"path": "utils.py",
"snippet": "def get_network(model, channel, num_classes, im_size=(32, 32), dist=True):\n torch.random.manual_seed(int(time.time() * 1000) % 100000)\n net_width, net_depth, net_act, net_norm, net_pooling = get_default_convnet_setting()\n\n if model == 'ConvNet':\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=net_depth, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)\n elif model == 'ConvNetD1':\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=1, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)\n elif model == 'ConvNetD2':\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=2, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)\n elif model == 'ConvNetD3':\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=3, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)\n elif model == 'ConvNetD4':\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=4, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)\n elif model == 'ConvNetD5':\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=5, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)\n elif model == 'ConvNetD6':\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=6, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)\n elif model == 'ConvNetD7':\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=7, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)\n elif model == 'ConvNetD8':\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=8, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)\n\n else:\n net = None\n exit('DC error: unknown model')\n\n if dist:\n gpu_num = torch.cuda.device_count()\n if gpu_num>0:\n device = 'cuda'\n if gpu_num>1:\n net = nn.DataParallel(net)\n else:\n device = 'cpu'\n net = net.to(device)\n\n return net"
},
{
"identifier": "get_daparam",
"path": "utils.py",
"snippet": "def get_daparam(dataset, model, model_eval, ipc):\n # We find that augmentation doesn't always benefit the performance.\n # So we do augmentation for some of the settings.\n\n dc_aug_param = dict()\n dc_aug_param['crop'] = 4\n dc_aug_param['scale'] = 0.2\n dc_aug_param['rotate'] = 45\n dc_aug_param['noise'] = 0.001\n dc_aug_param['strategy'] = 'none'\n\n if dataset == 'MNIST':\n dc_aug_param['strategy'] = 'crop_scale_rotate'\n\n if model_eval in ['ConvNetBN']: # Data augmentation makes model training with Batch Norm layer easier.\n dc_aug_param['strategy'] = 'crop_noise'\n\n return dc_aug_param"
},
{
"identifier": "TensorDataset",
"path": "utils.py",
"snippet": "class TensorDataset(Dataset):\n def __init__(self, images, labels): # images: n x c x h x w tensor\n self.images = images.detach().float()\n self.labels = labels.detach()\n\n def __getitem__(self, index):\n return self.images[index], self.labels[index]\n\n def __len__(self):\n return self.images.shape[0]"
},
{
"identifier": "epoch",
"path": "utils.py",
"snippet": "def epoch(mode, dataloader, net, optimizer, criterion, args, aug, texture=False):\n loss_avg, acc_avg, num_exp = 0, 0, 0\n net = net.to(args.device)\n\n if mode == 'train':\n net.train()\n else:\n net.eval()\n\n for i_batch, datum in enumerate(dataloader):\n img = datum[0].float().to(args.device)\n lab = datum[1].to(args.device)\n\n if mode == \"train\" and texture:\n img = torch.cat([torch.stack([torch.roll(im, (torch.randint(args.im_size[0]*args.canvas_size, (1,)), torch.randint(args.im_size[0]*args.canvas_size, (1,))), (1,2))[:,:args.im_size[0],:args.im_size[1]] for im in img]) for _ in range(args.canvas_samples)])\n lab = torch.cat([lab for _ in range(args.canvas_samples)])\n\n if aug:\n if args.dsa:\n img = DiffAugment(img, args.dsa_strategy, param=args.dsa_param)\n else:\n img = augment(img, args.dc_aug_param, device=args.device)\n\n n_b = lab.shape[0]\n\n output = net(img)\n loss = criterion(output, lab)\n\n if mode == 'train' and args.teacher_label:\n acc = np.sum(np.equal(np.argmax(output.cpu().data.numpy(), axis=-1), np.argmax(datum[1].cpu().data.numpy(), axis=-1)))\n else:\n acc = np.sum(np.equal(np.argmax(output.cpu().data.numpy(), axis=-1), lab.cpu().data.numpy()))\n\n loss_avg += loss.item()*n_b\n acc_avg += acc\n num_exp += n_b\n\n if mode == 'train':\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n loss_avg /= num_exp\n acc_avg /= num_exp\n\n return loss_avg, acc_avg"
},
{
"identifier": "ParamDiffAug",
"path": "utils.py",
"snippet": "class ParamDiffAug():\n def __init__(self):\n self.aug_mode = 'S' #'multiple or single'\n self.prob_flip = 0.5\n self.ratio_scale = 1.2\n self.ratio_rotate = 15.0\n self.ratio_crop_pad = 0.125\n self.ratio_cutout = 0.5 # the size would be 0.5x0.5\n self.ratio_noise = 0.05\n self.brightness = 1.0\n self.saturation = 2.0\n self.contrast = 0.5"
}
] | import os
import argparse
import torch
import torch.nn as nn
import copy
import warnings
from tqdm import tqdm
from utils import get_dataset, get_network, get_daparam,\
TensorDataset, epoch, ParamDiffAug
from PIL import PngImagePlugin | 3,186 |
LARGE_ENOUGH_NUMBER = 100
PngImagePlugin.MAX_TEXT_CHUNK = LARGE_ENOUGH_NUMBER * (1024**2)
warnings.filterwarnings("ignore", category=DeprecationWarning)
def main(args):
args.dsa = (args.dsa == 'True')
args.device = 'cuda' if torch.cuda.is_available() else 'cpu'
args.dsa_param = ParamDiffAug()
|
LARGE_ENOUGH_NUMBER = 100
PngImagePlugin.MAX_TEXT_CHUNK = LARGE_ENOUGH_NUMBER * (1024**2)
warnings.filterwarnings("ignore", category=DeprecationWarning)
def main(args):
args.dsa = (args.dsa == 'True')
args.device = 'cuda' if torch.cuda.is_available() else 'cpu'
args.dsa_param = ParamDiffAug()
| channel, im_size, num_classes, class_names, mean, std, dst_train, dst_test, testloader, loader_train_dict, class_map, class_map_inv = get_dataset(args.dataset, args.data_path, args.batch_real, args=args) | 0 | 2023-10-17 23:11:36+00:00 | 4k |
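The record above revolves around the TensorDataset/epoch utilities from a dataset-distillation codebase. A minimal, self-contained sketch of that train-loop pattern follows; the model, tensor shapes, and hyperparameters are illustrative assumptions, not values taken from the record.

# Minimal sketch of the TensorDataset + epoch training pattern above.
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader

class TensorDataset(Dataset):
    def __init__(self, images, labels):  # images: n x c x h x w tensor
        self.images = images.detach().float()
        self.labels = labels.detach()

    def __getitem__(self, index):
        return self.images[index], self.labels[index]

    def __len__(self):
        return self.images.shape[0]

# Illustrative synthetic data: 64 CIFAR-sized images, 10 classes.
images = torch.randn(64, 3, 32, 32)
labels = torch.randint(0, 10, (64,))
loader = DataLoader(TensorDataset(images, labels), batch_size=16, shuffle=True)

net = nn.Sequential(nn.Flatten(), nn.Linear(3 * 32 * 32, 10))
optimizer = torch.optim.SGD(net.parameters(), lr=0.01)
criterion = nn.CrossEntropyLoss()

net.train()
for img, lab in loader:  # one pass, mirroring the 'train' branch of epoch()
    loss = criterion(net(img), lab)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

Detaching the image and label tensors in the dataset mirrors the snippet's intent: the (possibly synthetic) data is treated as fixed during model training.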
upiterbarg/hihack | models/hierarchical_transformer_lstm.py | [
{
"identifier": "generate_square_subsequent_mask",
"path": "models/transformer_lstm.py",
"snippet": "def generate_square_subsequent_mask(sz: int, device: str = \"cpu\") -> torch.Tensor:\n mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)\n mask = (\n mask.float()\n .masked_fill(mask == 0, float(\"-inf\"))\n .masked_fill(mask == 1, float(0.0))\n ).to(device=device)\n return mask"
},
{
"identifier": "PositionalEncoding",
"path": "models/transformer_lstm.py",
"snippet": "class PositionalEncoding(nn.Module):\n def __init__(self, d_model: int, dropout: float = 0.1, max_len: int = 5000):\n super().__init__()\n self.dropout = nn.Dropout(p=dropout)\n\n position = torch.arange(max_len).unsqueeze(1)\n div_term = torch.exp(torch.arange(0, d_model, 2) * (-math.log(10000.0) / d_model))\n pe = torch.zeros(max_len, 1, d_model)\n pe[:, 0, 0::2] = torch.sin(position * div_term)\n pe[:, 0, 1::2] = torch.cos(position * div_term)\n self.register_buffer('pe', pe)\n\n def forward(self, x: Tensor) -> Tensor:\n \"\"\"\n Args:\n x: Tensor, shape [seq_len, batch_size, embedding_dim]\n \"\"\"\n x = x + self.pe[:x.size(0)]\n return self.dropout(x)"
}
] | import json
import numpy as np
import os
import pathlib
import pdb
import torch
import sys
from nle import nethack
from nle.nethack.actions import ACTIONS as A
from torch import nn
from torch.nn import functional as F
from .transformer_lstm import (
generate_square_subsequent_mask,
PositionalEncoding
)
from chaotic_dwarf import (
TopLineEncoder,
BottomLinesEncoder,
ScreenEncoder,
conv_outdim
) | 2,541 | self.inference_unroll_length = flags.unroll_length if not 'inference_unroll_length' in flags else flags.inference_unroll_length
self.wrapped = False
def initial_state(self, batch_size=1):
return (
torch.zeros(1, batch_size, self.inference_unroll_length, self.inference_unroll_length), # transformer portion 0
torch.rand(self.inference_unroll_length, batch_size, self.hidden_dim), # transformer portion 1
torch.zeros(self.core.num_layers, batch_size, self.core.hidden_size), # lstm portion 0
torch.zeros(self.core.num_layers, batch_size, self.core.hidden_size) # lstm portion 1
)
def get_encodings(self, inputs, for_lstm=False):
T, B, C, H, W = inputs["screen_image"].shape
topline = inputs["tty_chars"][..., 0, :]
bottom_line = inputs["tty_chars"][..., -2:, :]
if for_lstm or not hasattr(self, 'topline_encoder2'):
st = [
self.topline_encoder(
topline.float(memory_format=torch.contiguous_format).view(T * B, -1)
),
self.bottomline_encoder(
bottom_line.float(memory_format=torch.contiguous_format).view(T * B, -1)
),
self.screen_encoder(
inputs["screen_image"]
.float(memory_format=torch.contiguous_format)
.view(T * B, C, H, W)
),
]
else:
st = [
self.topline_encoder2(
topline.float(memory_format=torch.contiguous_format).view(T * B, -1)
),
self.bottomline_encoder2(
bottom_line.float(memory_format=torch.contiguous_format).view(T * B, -1)
),
self.screen_encoder2(
inputs["screen_image"]
.float(memory_format=torch.contiguous_format)
.view(T * B, C, H, W)
),
]
if self.use_prev_action:
st.append(torch.nn.functional.one_hot(inputs["prev_action"], self.prev_actions_dim).view(T * B, -1))
st = torch.cat(st, dim=1)
return st
def forward(self, inputs, core_state=None, last_ttyrec_data=None, return_strategywise_logits=False):
T, B, C, H, W = inputs["screen_image"].shape
st_lstm = self.get_encodings(inputs, for_lstm=True)
st_trnsfrmr = self.get_encodings(inputs, for_lstm=False)
T_eff = T
if not last_ttyrec_data is None and self.training:
last_st_lstm = self.get_encodings(last_ttyrec_data, for_lstm=True)
last_st_trnsfrmr = self.get_encodings(last_ttyrec_data, for_lstm=False)
T_eff = T * 2
st_lstm = torch.cat([last_st_lstm.reshape(T, B, -1), st_lstm.reshape(T, B, -1)], axis=0).reshape(T_eff * B, -1)
st_trnsfrmr = torch.cat([last_st_trnsfrmr.reshape(T, B, -1), st_trnsfrmr.reshape(T, B, -1)], axis=0).reshape(T_eff * B, -1)
self.wrapped = True
c0, c1, c2, c3 = core_state
trnsfrmr_core_state = c0, c1
lstm_core_state = c2, c3
lstm_core_input = st_lstm.view(T_eff, B, -1)
lstm_core_output_list = []
if self.wrapped:
notdone = torch.cat([(~last_ttyrec_data["done"]).float(), (~inputs["done"]).float()], axis=0)
else:
notdone = (~inputs["done"]).float()
for input, nd in zip(lstm_core_input.unbind(), notdone.unbind()):
# Reset core state to zero whenever an episode ended.
# Make `done` broadcastable with (num_layers, B, hidden_size)
nd = nd.view(1, -1, 1)
lstm_core_state = tuple(nd * t for t in lstm_core_state)
output, lstm_core_state = self.core(input.unsqueeze(0), lstm_core_state)
lstm_core_output_list.append(output)
lstm_core_output = torch.flatten(torch.cat(lstm_core_output_list), 0, 1)
st = torch.cat([st_trnsfrmr, lstm_core_output], dim=1)
trnsfrmr_core_input = st.reshape(T_eff, B, -1)
if not self.training:
prev_mask, prev_encodings = trnsfrmr_core_state
prev_mask = prev_mask.squeeze(0)
trnsfrmr_core_input = torch.cat([prev_encodings[1:], trnsfrmr_core_input], axis=0)
trnsfrmr_core_mask = torch.stack(
[torch.cat([torch.cat([prev_mask[i, 1:, 1:], prev_mask[i, -1, 1:].unsqueeze(0)], axis=0) * notdone[-1, i], torch.zeros((self.inference_unroll_length, 1)).to(trnsfrmr_core_input.device)], axis=1) for i in range(B)]
)
trnsfrmr_core_mask[:, -1, -1] = 1
trnsfrmr_core_state = (trnsfrmr_core_mask.detach().clone().unsqueeze(0),
trnsfrmr_core_input.detach().clone()
)
for i in range(B):
trnsfrmr_core_mask[i].fill_diagonal_(1)
trnsfrmr_core_mask = (trnsfrmr_core_mask.float().masked_fill(trnsfrmr_core_mask == 0, float("-inf")).masked_fill(trnsfrmr_core_mask == 1, float(0.0))).to(device=trnsfrmr_core_input.device)
trnsfrmr_core_mask = torch.repeat_interleave(trnsfrmr_core_mask, self.num_attention_heads, dim=1).reshape(B * self.num_attention_heads, self.inference_unroll_length, self.inference_unroll_length)
T = trnsfrmr_core_input.shape[0]
elif self.wrapped:
mask1 = (torch.triu(torch.ones(T_eff, T_eff)) == 1).transpose(0, 1)
mask2 = F.pad((torch.triu(torch.ones(T, T)) == 1).transpose(0, 1), (0, T, T, 0))
trnsfrmr_core_mask = mask1.long() + mask2.long()
trnsfrmr_core_mask[trnsfrmr_core_mask != 1] = 0
trnsfrmr_core_mask = (trnsfrmr_core_mask.float().masked_fill(trnsfrmr_core_mask == 0, float("-inf")).masked_fill(trnsfrmr_core_mask == 1, float(0.0))).to(device=trnsfrmr_core_input.device)
else:
|
base_path = pathlib.Path().resolve()
sys.path.insert(0, os.path.join(base_path, '..', 'dungeonsdata-neurips2022/experiment_code/hackrl/models'))
class HierarchicalTransformerLSTM(nn.Module):
def __init__(self, shape, action_space, flags, device, num_strategies=20):
super(HierarchicalTransformerLSTM, self).__init__()
self.flags = flags
self.num_actions = len(action_space)
self.use_prev_action = flags.use_prev_action
self.topline_encoder = TopLineEncoder()
self.bottomline_encoder = torch.jit.script(BottomLinesEncoder())
pixel_size = flags.pixel_size
if flags.crop_dim == 0:
screen_shape = (24 * pixel_size, 80 * pixel_size)
else:
screen_shape = (flags.crop_dim * pixel_size, flags.crop_dim * pixel_size)
self.screen_encoder = torch.jit.script(ScreenEncoder(screen_shape))
## second copy of encoders
self.topline_encoder2 = TopLineEncoder()
self.bottomline_encoder2 = torch.jit.script(BottomLinesEncoder())
self.screen_encoder2 = torch.jit.script(ScreenEncoder(screen_shape))
###
self.prev_actions_dim = 128 if self.use_prev_action else 0
self.h_dim = sum(
[
self.topline_encoder.hidden_dim,
self.bottomline_encoder.hidden_dim,
self.screen_encoder.hidden_dim,
self.prev_actions_dim
]
)
self.hidden_dim = 512
self.policy_hidden_dim = 256
self.strategy_dim = num_strategies
self.core = nn.LSTM(self.h_dim, self.hidden_dim, num_layers=1)
self.num_attention_heads = flags.num_attention_heads
self.num_transformer_encoder_layers = flags.num_transformer_layers
self.hidden_dim = self.h_dim + self.hidden_dim
core_trnsfrmr_layer = nn.TransformerEncoderLayer(d_model=self.hidden_dim, nhead=self.num_attention_heads, norm_first=True, activation='gelu')
self.core_trnsfrmr = nn.TransformerEncoder(core_trnsfrmr_layer, num_layers=self.num_transformer_encoder_layers)
self.positional_encoder = PositionalEncoding(self.hidden_dim)
self.strategy_encoder = nn.Linear(self.hidden_dim, self.strategy_dim)
self.policies = nn.ModuleDict(
[[f'{i}', nn.Sequential(nn.Linear(self.hidden_dim, self.policy_hidden_dim),
nn.ELU(),
nn.Linear(self.policy_hidden_dim, self.num_actions))] for i in range(self.strategy_dim)]
)
self.baseline = nn.Linear(self.hidden_dim, 1)
self.version = 0
self.inference_unroll_length = flags.unroll_length if not 'inference_unroll_length' in flags else flags.inference_unroll_length
self.wrapped = False
def initial_state(self, batch_size=1):
return (
torch.zeros(1, batch_size, self.inference_unroll_length, self.inference_unroll_length), # transformer portion 0
torch.rand(self.inference_unroll_length, batch_size, self.hidden_dim), # transformer portion 1
torch.zeros(self.core.num_layers, batch_size, self.core.hidden_size), # lstm portion 0
torch.zeros(self.core.num_layers, batch_size, self.core.hidden_size) # lstm portion 1
)
def get_encodings(self, inputs, for_lstm=False):
T, B, C, H, W = inputs["screen_image"].shape
topline = inputs["tty_chars"][..., 0, :]
bottom_line = inputs["tty_chars"][..., -2:, :]
if for_lstm or not hasattr(self, 'topline_encoder2'):
st = [
self.topline_encoder(
topline.float(memory_format=torch.contiguous_format).view(T * B, -1)
),
self.bottomline_encoder(
bottom_line.float(memory_format=torch.contiguous_format).view(T * B, -1)
),
self.screen_encoder(
inputs["screen_image"]
.float(memory_format=torch.contiguous_format)
.view(T * B, C, H, W)
),
]
else:
st = [
self.topline_encoder2(
topline.float(memory_format=torch.contiguous_format).view(T * B, -1)
),
self.bottomline_encoder2(
bottom_line.float(memory_format=torch.contiguous_format).view(T * B, -1)
),
self.screen_encoder2(
inputs["screen_image"]
.float(memory_format=torch.contiguous_format)
.view(T * B, C, H, W)
),
]
if self.use_prev_action:
st.append(torch.nn.functional.one_hot(inputs["prev_action"], self.prev_actions_dim).view(T * B, -1))
st = torch.cat(st, dim=1)
return st
def forward(self, inputs, core_state=None, last_ttyrec_data=None, return_strategywise_logits=False):
T, B, C, H, W = inputs["screen_image"].shape
st_lstm = self.get_encodings(inputs, for_lstm=True)
st_trnsfrmr = self.get_encodings(inputs, for_lstm=False)
T_eff = T
if not last_ttyrec_data is None and self.training:
last_st_lstm = self.get_encodings(last_ttyrec_data, for_lstm=True)
last_st_trnsfrmr = self.get_encodings(last_ttyrec_data, for_lstm=False)
T_eff = T * 2
st_lstm = torch.cat([last_st_lstm.reshape(T, B, -1), st_lstm.reshape(T, B, -1)], axis=0).reshape(T_eff * B, -1)
st_trnsfrmr = torch.cat([last_st_trnsfrmr.reshape(T, B, -1), st_trnsfrmr.reshape(T, B, -1)], axis=0).reshape(T_eff * B, -1)
self.wrapped = True
c0, c1, c2, c3 = core_state
trnsfrmr_core_state = c0, c1
lstm_core_state = c2, c3
lstm_core_input = st_lstm.view(T_eff, B, -1)
lstm_core_output_list = []
if self.wrapped:
notdone = torch.cat([(~last_ttyrec_data["done"]).float(), (~inputs["done"]).float()], axis=0)
else:
notdone = (~inputs["done"]).float()
for input, nd in zip(lstm_core_input.unbind(), notdone.unbind()):
# Reset core state to zero whenever an episode ended.
# Make `done` broadcastable with (num_layers, B, hidden_size)
nd = nd.view(1, -1, 1)
lstm_core_state = tuple(nd * t for t in lstm_core_state)
output, lstm_core_state = self.core(input.unsqueeze(0), lstm_core_state)
lstm_core_output_list.append(output)
lstm_core_output = torch.flatten(torch.cat(lstm_core_output_list), 0, 1)
st = torch.cat([st_trnsfrmr, lstm_core_output], dim=1)
trnsfrmr_core_input = st.reshape(T_eff, B, -1)
if not self.training:
prev_mask, prev_encodings = trnsfrmr_core_state
prev_mask = prev_mask.squeeze(0)
trnsfrmr_core_input = torch.cat([prev_encodings[1:], trnsfrmr_core_input], axis=0)
trnsfrmr_core_mask = torch.stack(
[torch.cat([torch.cat([prev_mask[i, 1:, 1:], prev_mask[i, -1, 1:].unsqueeze(0)], axis=0) * notdone[-1, i], torch.zeros((self.inference_unroll_length, 1)).to(trnsfrmr_core_input.device)], axis=1) for i in range(B)]
)
trnsfrmr_core_mask[:, -1, -1] = 1
trnsfrmr_core_state = (trnsfrmr_core_mask.detach().clone().unsqueeze(0),
trnsfrmr_core_input.detach().clone()
)
for i in range(B):
trnsfrmr_core_mask[i].fill_diagonal_(1)
trnsfrmr_core_mask = (trnsfrmr_core_mask.float().masked_fill(trnsfrmr_core_mask == 0, float("-inf")).masked_fill(trnsfrmr_core_mask == 1, float(0.0))).to(device=trnsfrmr_core_input.device)
trnsfrmr_core_mask = torch.repeat_interleave(trnsfrmr_core_mask, self.num_attention_heads, dim=1).reshape(B * self.num_attention_heads, self.inference_unroll_length, self.inference_unroll_length)
T = trnsfrmr_core_input.shape[0]
elif self.wrapped:
mask1 = (torch.triu(torch.ones(T_eff, T_eff)) == 1).transpose(0, 1)
mask2 = F.pad((torch.triu(torch.ones(T, T)) == 1).transpose(0, 1), (0, T, T, 0))
trnsfrmr_core_mask = mask1.long() + mask2.long()
trnsfrmr_core_mask[trnsfrmr_core_mask != 1] = 0
trnsfrmr_core_mask = (trnsfrmr_core_mask.float().masked_fill(trnsfrmr_core_mask == 0, float("-inf")).masked_fill(trnsfrmr_core_mask == 1, float(0.0))).to(device=trnsfrmr_core_input.device)
else: | trnsfrmr_core_mask = generate_square_subsequent_mask(T, trnsfrmr_core_input.device) | 0 | 2023-10-23 15:44:32+00:00 | 4k |
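The forward pass above hinges on additive attention masks: 0.0 where attention is allowed, -inf where it is blocked. A small sketch of that causal-mask path (dimensions are illustrative assumptions) is:

# Build a float mask with -inf above the diagonal and feed it to a
# TransformerEncoder, as generate_square_subsequent_mask does above.
import torch
import torch.nn as nn

def square_subsequent_mask(sz, device="cpu"):
    mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
    return (mask.float()
                .masked_fill(mask == 0, float("-inf"))
                .masked_fill(mask == 1, 0.0)).to(device)

T, B, D = 8, 2, 16
layer = nn.TransformerEncoderLayer(d_model=D, nhead=4,
                                   norm_first=True, activation="gelu")
encoder = nn.TransformerEncoder(layer, num_layers=2)
x = torch.randn(T, B, D)  # (seq, batch, features), the default layout
out = encoder(x, mask=square_subsequent_mask(T))
print(out.shape)  # torch.Size([8, 2, 16])

norm_first=True with GELU activation matches the encoder configuration the record's model constructs.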
nmathey/finasync | finasync/realt.py | [
{
"identifier": "GNOSIS_API_TOKENLIST_URI",
"path": "finasync/constants.py",
"snippet": "GNOSIS_API_TOKENLIST_URI = (\n \"https://blockscout.com/xdai/mainnet/api?module=account&action=tokenlist&address=\"\n)"
},
{
"identifier": "REALT_API_TOKENLIST_URI",
"path": "finasync/constants.py",
"snippet": "REALT_API_TOKENLIST_URI = \"https://api.realt.community/v1/token\""
},
{
"identifier": "REALT_OFFLINE_TOKENS_LIST",
"path": "finasync/constants.py",
"snippet": "REALT_OFFLINE_TOKENS_LIST = \"RealT_OfflineTokensList.json\""
},
{
"identifier": "convert_currency",
"path": "finasync/utils.py",
"snippet": "def convert_currency(amount, from_currency, to_currency):\n Now_Time = datetime.today()\n Exchange_OfflineRates_Path = Path(\n EXCHANGE_OFFLINE_RATES_PATH\n + \"Exchange_OfflineRates_To_\"\n + to_currency\n + \".json\"\n )\n Exchange_OfflineRates_Path.touch(exist_ok=True)\n converted_amount = 0\n with open(Exchange_OfflineRates_Path) as json_file:\n try:\n Exchange_OfflineRates = json.load(json_file)\n except JSONDecodeError:\n Exchange_OfflineRates = {\n \"info\": {\n \"last_sync\": str(datetime.timestamp(Now_Time - timedelta(weeks=2)))\n },\n \"data\": {},\n }\n\n # Fetch latest exchange rates only if local cache > 1 week\n if float(Exchange_OfflineRates[\"info\"][\"last_sync\"]) < datetime.timestamp(\n Now_Time - timedelta(weeks=1)\n ):\n response = requests.get(EXCHANGE_RATES_API_URI + to_currency)\n Exchange_OfflineRates[\"info\"][\"last_sync\"] = str(datetime.timestamp(Now_Time))\n Exchange_OfflineRates[\"data\"] = response.json()\n\n data = Exchange_OfflineRates[\"data\"]\n if \"rates\" in data:\n rates = data[\"rates\"]\n if from_currency in rates and to_currency in rates:\n converted_amount = amount / rates[from_currency]\n else:\n raise ValueError(\"Invalid currency!\")\n else:\n raise ValueError(\"Unable to fetch exchange rates!\")\n\n with open(Exchange_OfflineRates_Path, \"w\") as outfile:\n json.dump(Exchange_OfflineRates, outfile, indent=4)\n\n return round(converted_amount, 2)"
}
] | import requests
import re
import json
import time
import os
import logging
from pathlib import Path
from datetime import datetime, timedelta
from json.decoder import JSONDecodeError
from finary_uapi.user_real_estates import (
get_user_real_estates,
delete_user_real_estates,
update_user_real_estates,
add_user_real_estates,
add_user_real_estates_with_currency,
)
from finary_uapi.user_me import get_display_currency_code
from .constants import (
GNOSIS_API_TOKENLIST_URI,
REALT_API_TOKENLIST_URI,
REALT_OFFLINE_TOKENS_LIST,
)
from .utils import convert_currency | 2,393 | logging.debug("My RealT Finary portfolio")
logging.debug(myFinary_real_estates)
myFinary_realT = {}
for item in myFinary_real_estates:
contractAddress = re.findall(r"0x.+", str(item.get("description")))
name = re.findall(r"- (.*) -", str(item.get("description")))
myFinary_realT.update(
{
contractAddress[0].lower(): {
"name": name[0],
"contractAddress": contractAddress[0].lower(),
"finary_id": item.get("id"),
"category": item.get("category"),
"description": item.get("description"),
"buying_price": item.get("buying_price"),
"ownership_percentage": item.get("ownership_percentage"),
}
}
)
return json.dumps(myFinary_realT)
def get_realt_rentals_blockchain(wallet_address):
myWallet = json.loads(requests.get(GNOSIS_API_TOKENLIST_URI + wallet_address).text)
myRealT_rentals = {}
logging.debug("My wallet details")
logging.debug(myWallet)
for item in myWallet["result"]:
if re.match(r"^REALTOKEN", str(item.get("symbol")), re.IGNORECASE):
logging.debug("Updating RealT Token to Finary: " + item["symbol"])
myRealT_rentals.update(
{
item["contractAddress"].lower(): {
"name": item["symbol"],
"balance": float(item["balance"])
/ pow(10, int(item["decimals"])),
"contractAddress": item["contractAddress"].lower(),
}
}
)
elif re.match(r"^armmREALT", str(item.get("symbol"))):
time.sleep(0.2)
original_contract_address = requests.get(
GNOSIS_API_TOKENLIST_URI + item["contractAddress"]
).json()
original_contract_address = list(
filter(
lambda x: re.match("^REALTOKEN", x["symbol"]),
original_contract_address["result"],
)
)
original_contract_address = str(
original_contract_address[0]["contractAddress"]
)
logging.debug("Updating armm RealT Token to Finary: " + item["symbol"])
myRealT_rentals.update(
{
original_contract_address.lower(): {
"name": item["symbol"],
"balance": float(item["balance"])
/ pow(10, int(item["decimals"])),
"contractAddress": original_contract_address.lower(),
}
}
)
logging.debug("My RealT portfolio from the blockchain")
logging.debug(myRealT_rentals)
return json.dumps(myRealT_rentals)
def get_building_type(realT_propertyType):
# building type: house, building, apartment, land, commercial, parking_box, or other
# propertyType from RealT -> 1 = Single Family | 2 = Multi Family | 3 = Duplex | 4 = Condominium | 6 = Mixed-Used | 8 = Quadplex | 9 = Commercial |10 = SFR Portfolio
building_type = "other"
if realT_propertyType == 1:
building_type = "house"
elif realT_propertyType == 2 or realT_propertyType == 3 or realT_propertyType == 8:
building_type = "building"
elif realT_propertyType == 4 or realT_propertyType == 9:
building_type = "commercial"
return building_type
def sync_realt_rent(session: requests.Session, wallet_address):
# Get current Finary RealT rent portfolio
myFinary_realT = json.loads(get_realt_rentals_finary(session))
# Get current RealT rent from wallet
myRealT_rentals = json.loads(get_realt_rentals_blockchain(wallet_address))
# If a Finary RealT rental is no longer in the RealT wallet, delete it; otherwise update it
for key in myFinary_realT:
if key not in myRealT_rentals:
delete_user_real_estates(session, myFinary_realT[key]["finary_id"])
logging.info("Deleting " + myFinary_realT[key]["description"])
else:
token_details = get_realt_token_details(key)
# Handling currency
if token_details["currency"] == get_display_currency_code(session):
user_estimated_value = (
token_details["totalTokens"] * token_details["tokenPrice"]
)
monthly_rent = token_details["netRentMonth"]
elif token_details["currency"] in ("EUR", "USD", "SGD", "CHF", "GBP", "CAD"):
user_estimated_value = (
token_details["totalTokens"] * token_details["tokenPrice"]
)
monthly_rent = token_details["netRentMonth"]
else:
|
def get_realt_token_details(realt_token_contractAdress):
Now_Time = datetime.today()
RealT_OfflineTokensList_Path = Path(REALT_OFFLINE_TOKENS_LIST)
RealT_OfflineTokensList_Path.touch(exist_ok=True)
with open(RealT_OfflineTokensList_Path) as json_file:
try:
RealT_OfflineTokensList = json.load(json_file)
except JSONDecodeError:
RealT_OfflineTokensList = {
"info": {
"last_sync": str(datetime.timestamp(Now_Time - timedelta(weeks=2)))
},
"data": {},
}
# Update offlineTokensList from RealT API only if more than 1 week old
if float(RealT_OfflineTokensList["info"]["last_sync"]) < datetime.timestamp(
Now_Time - timedelta(weeks=1)
):
MyRealT_API_Header = {
"Accept": "*/*",
"X-AUTH-REALT-TOKEN": os.environ["MYREALT_API_KEY"],
}
TokensListReq = requests.get(
REALT_API_TOKENLIST_URI, headers=MyRealT_API_Header
)
TokensList = TokensListReq.json()
logging.debug("Tokens list details from API RealT")
logging.debug(TokensList)
for item in TokensList:
RealT_OfflineTokensList["data"].update(
{
item.get("uuid").lower(): {
"fullName": item.get("fullName"),
"shortName": item.get("shortName"),
"tokenPrice": item.get("tokenPrice"),
"currency": item.get("currency"),
"rentStartDate": item.get("rentStartDate"),
"squareFeet": item.get("squareFeet"),
"totalTokens": item.get("totalTokens"),
"totalInvestment": item.get("totalInvestment"),
"grossRentMonth": item.get("grossRentMont"),
"propertyManagement": item.get("propertyManagement"),
"realtPlatform": item.get("realtPlaform"),
"insurance": item.get("insurance"),
"propertyTaxes": item.get("propertyTaxes"),
"propertyMaintenanceMonthly": item.get(
"propertyMaintenanceMonthly"
),
"utilities": item.get("utilities"),
"netRentMonth": item.get("netRentMonth"),
"netRentMonthPerToken": item.get("netRentMonthPerToken"),
"coordinate": item.get("coordinate"),
"propertyType": item.get("propertyType"),
"rentalType": item.get("rentalType"),
"productType": item.get("productType"),
}
}
)
RealT_OfflineTokensList["info"]["last_sync"] = str(datetime.timestamp(Now_Time))
with open(RealT_OfflineTokensList_Path, "w") as outfile:
json.dump(RealT_OfflineTokensList, outfile, indent=4)
return RealT_OfflineTokensList["data"][realt_token_contractAdress]
def get_realt_rentals_finary(session: requests.Session):
myFinary_real_estates = get_user_real_estates(session)
myFinary_real_estates = list(
filter(
lambda x: re.match("^RealT -", x["description"]),
myFinary_real_estates["result"],
)
)
logging.debug("My RealT Finary portfolio")
logging.debug(myFinary_real_estates)
myFinary_realT = {}
for item in myFinary_real_estates:
contractAddress = re.findall(r"0x.+", str(item.get("description")))
name = re.findall(r"- (.*) -", str(item.get("description")))
myFinary_realT.update(
{
contractAddress[0].lower(): {
"name": name[0],
"contractAddress": contractAddress[0].lower(),
"finary_id": item.get("id"),
"category": item.get("category"),
"description": item.get("description"),
"buying_price": item.get("buying_price"),
"ownership_percentage": item.get("ownership_percentage"),
}
}
)
return json.dumps(myFinary_realT)
def get_realt_rentals_blockchain(wallet_address):
myWallet = json.loads(requests.get(GNOSIS_API_TOKENLIST_URI + wallet_address).text)
myRealT_rentals = {}
logging.debug("My wallet details")
logging.debug(myWallet)
for item in myWallet["result"]:
if re.match(r"^REALTOKEN", str(item.get("symbol")), re.IGNORECASE):
logging.debug("Updating RealT Token to Finary: " + item["symbol"])
myRealT_rentals.update(
{
item["contractAddress"].lower(): {
"name": item["symbol"],
"balance": float(item["balance"])
/ pow(10, int(item["decimals"])),
"contractAddress": item["contractAddress"].lower(),
}
}
)
elif re.match(r"^armmREALT", str(item.get("symbol"))):
time.sleep(0.2)
original_contract_address = requests.get(
GNOSIS_API_TOKENLIST_URI + item["contractAddress"]
).json()
original_contract_address = list(
filter(
lambda x: re.match("^REALTOKEN", x["symbol"]),
original_contract_address["result"],
)
)
original_contract_address = str(
original_contract_address[0]["contractAddress"]
)
logging.debug("Updating armm RealT Token to Finary: " + item["symbol"])
myRealT_rentals.update(
{
original_contract_address.lower(): {
"name": item["symbol"],
"balance": float(item["balance"])
/ pow(10, int(item["decimals"])),
"contractAddress": original_contract_address.lower(),
}
}
)
logging.debug("My RealT portfolio from the blockchain")
logging.debug(myRealT_rentals)
return json.dumps(myRealT_rentals)
def get_building_type(realT_propertyType):
# building type: house, building, apartment, land, commercial, parking_box, or other
# propertyType from RealT -> 1 = Single Family | 2 = Multi Family | 3 = Duplex | 4 = Condominium | 6 = Mixed-Used | 8 = Quadplex | 9 = Commercial |10 = SFR Portfolio
building_type = "other"
if realT_propertyType == 1:
building_type = "house"
elif realT_propertyType == 2 or realT_propertyType == 3 or realT_propertyType == 8:
building_type = "building"
elif realT_propertyType == 4 or realT_propertyType == 9:
building_type = "commercial"
return building_type
def sync_realt_rent(session: requests.Session, wallet_address):
# Get current Finary RealT rent portfolio
myFinary_realT = json.loads(get_realt_rentals_finary(session))
# Get current RealT rent from wallet
myRealT_rentals = json.loads(get_realt_rentals_blockchain(wallet_address))
# If a Finary RealT rental is no longer in the RealT wallet, delete it; otherwise update it
for key in myFinary_realT:
if key not in myRealT_rentals:
delete_user_real_estates(session, myFinary_realT[key]["finary_id"])
logging.info("Deleting " + myFinary_realT[key]["description"])
else:
token_details = get_realt_token_details(key)
# Handling currency
if token_details["currency"] == get_display_currency_code(session):
user_estimated_value = (
token_details["totalTokens"] * token_details["tokenPrice"]
)
monthly_rent = token_details["netRentMonth"]
elif token_details["currency"] in ("EUR", "USD", "SGD", "CHF", "GBP", "CAD"):
user_estimated_value = (
token_details["totalTokens"] * token_details["tokenPrice"]
)
monthly_rent = token_details["netRentMonth"]
else: | user_estimated_value = token_details["totalTokens"] * convert_currency( | 3 | 2023-10-24 00:32:05+00:00 | 4k |
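Both get_realt_token_details and the imported convert_currency share a timestamped JSON cache pattern: refetch only when the local copy is older than one week. A stripped-down sketch follows, with a hypothetical cache path and an injected fetch callable standing in for the requests call:

# Sketch of the week-old JSON cache pattern used above.
import json
from datetime import datetime, timedelta
from pathlib import Path

CACHE = Path("tokens_cache.json")  # illustrative path

def load_tokens(fetch):  # fetch: () -> dict, e.g. a requests call
    now = datetime.today()
    try:
        cache = json.loads(CACHE.read_text())
    except (FileNotFoundError, json.JSONDecodeError):
        cache = {"info": {"last_sync": "0"}, "data": {}}
    # Refetch only if the cache is more than one week old.
    if float(cache["info"]["last_sync"]) < datetime.timestamp(now - timedelta(weeks=1)):
        cache["data"] = fetch()
        cache["info"]["last_sync"] = str(datetime.timestamp(now))
        CACHE.write_text(json.dumps(cache, indent=4))
    return cache["data"]

print(load_tokens(lambda: {"0xabc": {"tokenPrice": 52.0}}))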
vitaliisili/petoshield-rest | petoshield_api/apps/policy/filters.py | [
{
"identifier": "ServiceProvider",
"path": "petoshield_api/apps/policy/models.py",
"snippet": "class ServiceProvider(ExportModelOperationsMixin('service_provider'), BaseModel):\n \"\"\"Model representing a service provider.\n Attributes:\n company_name (CharField): The name of the company. Max length is 255 characters.\n phone (CharField): The phone number of the company. Max length is 15 characters.\n registration_number (CharField): The registration number of the company. Max length is 255 characters.\n address (CharField): The address of the company. Max length is 255 characters.\n iban (CharField): The IBAN of the company. Max length is 34 characters.\n user (ForeignKey): The user associated with the service provider.\n Methods:\n __str__: Returns the company name.\n \"\"\"\n\n company_name = models.CharField(max_length=255)\n phone = models.CharField(max_length=15)\n registration_number = models.CharField(max_length=255)\n address = models.CharField(max_length=255)\n iban = models.CharField(max_length=34)\n user = models.ForeignKey(get_user_model(), on_delete=models.CASCADE, related_name='provider')\n\n def __str__(self):\n \"\"\"Returns the company name.\n Returns:\n str: The company name.\n \"\"\"\n return self.company_name"
},
{
"identifier": "Policy",
"path": "petoshield_api/apps/policy/models.py",
"snippet": "class Policy(ExportModelOperationsMixin('policy'), BaseModel):\n \"\"\"Model representing a policy.\n Attributes:\n POLICY_STATUS (tuple): Choices for the status of the policy.\n policy_number (CharField): The policy number. Max length is 255 characters. Must be unique.\n start_date (DateField): The start date of the policy.\n end_date (DateField): The end date of the policy.\n status (CharField): The status of the policy. Max length is 20 characters. Choices are 'valid',\n 'invalid', and 'expired'.\n price (DecimalField): The price of the policy. Max digits is 8. Decimal places is 2.\n Default is POLICY_BASE_PRICE from settings.\n initial_limit (DecimalField): The initial limit of the policy. Max digits is 8. Decimal places is 2.\n current_limit (DecimalField): The current limit of the policy. Max digits is 8. Decimal places is 2.\n deductible (DecimalField): The deductible of the policy. Max digits is 6. Decimal places is 2.\n pet (ForeignKey): The pet associated with the policy.\n Methods:\n __str__: Returns the policy number.\n Meta:\n verbose_name_plural (str): The plural name for the model.\n \"\"\"\n\n POLICY_STATUS = (\n ('valid', _('Valid')),\n ('invalid', _('Invalid')),\n ('expired', _('Expired')),\n )\n policy_number = models.CharField(max_length=255, unique=True)\n start_date = models.DateField()\n end_date = models.DateField()\n status = models.CharField(max_length=20, choices=POLICY_STATUS)\n price = models.DecimalField(max_digits=8, decimal_places=2, default=settings.POLICY_BASE_PRICE)\n initial_limit = models.DecimalField(max_digits=8, decimal_places=2)\n current_limit = models.DecimalField(max_digits=8, decimal_places=2)\n deductible = models.DecimalField(max_digits=6, decimal_places=2)\n pet = models.ForeignKey(Pet, on_delete=models.SET_NULL, related_name='policy', null=True)\n\n def __str__(self):\n \"\"\"Returns the policy number.\n Returns:\n str: The policy number.\n \"\"\"\n return self.policy_number\n\n class Meta:\n \"\"\"Meta-options for the Policy model.\n Attributes:\n verbose_name_plural (str): The plural name for the model.\n \"\"\"\n verbose_name_plural = 'policies'"
},
{
"identifier": "InsuranceCase",
"path": "petoshield_api/apps/policy/models.py",
"snippet": "class InsuranceCase(ExportModelOperationsMixin('insurance_case'), BaseModel):\n \"\"\"Model representing an insurance case.\n Attributes:\n INSURANCE_STATUS (tuple): Choices for the status of the insurance case.\n claim_date (DateField): The date of the insurance claim.\n description (TextField): The description of the insurance case.\n status (CharField): The status of the insurance case. Max length is 20 characters.\n Choices are 'accept', 'process', and 'reject'.\n policy (ForeignKey): The policy associated with the insurance case.\n service_provider (ForeignKey): The service provider associated with the insurance case.\n Methods:\n __str__: Returns a formatted string representation of the insurance case.\n \"\"\"\n\n INSURANCE_STATUS = (\n ('accept', _('Accept')),\n ('process', _('Process')),\n ('reject', _('Reject')),\n )\n claim_date = models.DateField()\n description = models.TextField()\n status = models.CharField(max_length=20, choices=INSURANCE_STATUS, default='process')\n policy = models.ForeignKey(Policy, on_delete=models.CASCADE, related_name='insurance_cases')\n service_provider = models.ForeignKey(ServiceProvider,\n on_delete=models.SET_NULL,\n related_name='insurance_cases',\n null=True)\n\n def __str__(self):\n \"\"\"Returns a formatted string representation of the insurance case.\n Returns:\n str: A formatted string representation of the insurance case.\n \"\"\"\n return f'{self.claim_date}-{self.status}'"
},
{
"identifier": "IncomingInvoice",
"path": "petoshield_api/apps/policy/models.py",
"snippet": "class IncomingInvoice(ExportModelOperationsMixin('incoming_invoice'), BaseModel):\n \"\"\"Model representing an incoming invoice.\n Attributes:\n invoice_date (DateField): The date of the invoice.\n amount (DecimalField): The amount of the invoice. Max digits is 8 and decimal places is 2.\n insurance_case (ForeignKey): The insurance case associated with the invoice.\n Methods:\n __str__: Returns a formatted string representation of the invoice.\n \"\"\"\n\n invoice_date = models.DateField()\n amount = models.DecimalField(max_digits=8, decimal_places=2)\n insurance_case = models.ForeignKey(InsuranceCase, on_delete=models.CASCADE, related_name='incoming_invoice')\n\n def __str__(self):\n \"\"\"Returns a formatted string representation of the invoice.\n Returns:\n str: A formatted string representation of the invoice.\n \"\"\"\n return f'{self.amount}'"
}
] | from django_filters import rest_framework as filters
from .models import ServiceProvider, Policy, InsuranceCase, IncomingInvoice | 1,723 |
class ServiceProviderFilter(filters.FilterSet):
"""A filter class for the ServiceProvider model.
Attributes:
user (CharFilter): Filter for the 'user__name' field using the 'icontains' lookup.
created_at__year__exact (NumberFilter): Filter for the 'created_at__year' field with exact matching.
created_at__year__gt (NumberFilter): Filter for the 'created_at__year' field with greater than matching.
created_at__year__lt (NumberFilter): Filter for the 'created_at__year' field with less than matching.
Meta:
model (ServiceProvider): The model to be filtered.
fields (dict): The fields and lookup types to be used for filtering.
"""
user = filters.CharFilter(field_name='user__name', lookup_expr='icontains')
created_at__year__exact = filters.NumberFilter(field_name='created_at__year', lookup_expr='exact')
created_at__year__gt = filters.NumberFilter(field_name='created_at__year', lookup_expr='gt')
created_at__year__lt = filters.NumberFilter(field_name='created_at__year', lookup_expr='lt')
class Meta:
|
class ServiceProviderFilter(filters.FilterSet):
"""A filter class for the ServiceProvider model.
Attributes:
user (CharFilter): Filter for the 'user__name' field using the 'icontains' lookup.
created_at__year__exact (NumberFilter): Filter for the 'created_at__year' field with exact matching.
created_at__year__gt (NumberFilter): Filter for the 'created_at__year' field with greater than matching.
created_at__year__lt (NumberFilter): Filter for the 'created_at__year' field with less than matching.
Meta:
model (ServiceProvider): The model to be filtered.
fields (dict): The fields and lookup types to be used for filtering.
"""
user = filters.CharFilter(field_name='user__name', lookup_expr='icontains')
created_at__year__exact = filters.NumberFilter(field_name='created_at__year', lookup_expr='exact')
created_at__year__gt = filters.NumberFilter(field_name='created_at__year', lookup_expr='gt')
created_at__year__lt = filters.NumberFilter(field_name='created_at__year', lookup_expr='lt')
class Meta: | model = ServiceProvider | 0 | 2023-10-19 08:09:10+00:00 | 4k |
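The FilterSet above pairs field_name with lookup_expr to expose query parameters over related fields. As a sketch only — it assumes a configured Django project and that the related Pet model has a name field — the same pattern applied to the Policy model would look like:

# Hypothetical django-filter FilterSet for the Policy model above.
from django_filters import rest_framework as filters
from .models import Policy

class PolicyFilter(filters.FilterSet):
    pet = filters.CharFilter(field_name="pet__name", lookup_expr="icontains")
    price__gt = filters.NumberFilter(field_name="price", lookup_expr="gt")
    price__lt = filters.NumberFilter(field_name="price", lookup_expr="lt")

    class Meta:
        model = Policy
        fields = ["status", "policy_number"]

Wired into a DRF viewset via filterset_class, this would accept queries such as ?status=valid&price__gt=10.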
biggzlar/plausible-uncertainties | train.py | [
{
"identifier": "get_device",
"path": "utils.py",
"snippet": "def get_device():\n return torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")"
},
{
"identifier": "UnivariateDummyData",
"path": "utils.py",
"snippet": "class UnivariateDummyData:\n\tdef __init__(self, N, X_range=(0, 10.0)):\n\t\tepsilon = 0.3 * np.random.normal(loc=0.0, scale=1.0, size=N)\n\t\tself.X = np.linspace(*X_range, num=N)\n\t\tself.Y = self.X * np.sin(self.X) + self.X * epsilon + epsilon\n\n\tdef __len__(self):\n\t\treturn len(self.X)\n\n\tdef __getitem__(self, idx):\n\t\tx = torch.Tensor(np.expand_dims(self.X[idx], axis=0))\n\t\ty = torch.Tensor(np.expand_dims(self.Y[idx], axis=0))\n\t\treturn x, y"
},
{
"identifier": "get_predicted_cdf",
"path": "utils.py",
"snippet": "def get_predicted_cdf(residuals: np.ndarray, sigma: np.ndarray):\n \"\"\" Using residuals, generates confidence scores by comparing\n to the standard Gaussian, scaled by predicted standard deviations.\n \"\"\"\n alpha = np.linspace(start=1.0, stop=0, num=10)\n observed_confidence_p = np.zeros((len(residuals), len(alpha)))\n\n # generate quantiles for the standard Gaussian\n std_quantiles = norm.ppf(alpha)\n\n # weight residuals with predicted standard deviations\n weighted_residuals = residuals / sigma\n\n # for each quantile, check whether the weighted residual lies within\n observed_confidence_p = np.less_equal(np.expand_dims(weighted_residuals, axis=-1), std_quantiles)\n\n # get sample cdf by summing the number of quantiles the sample error lies inside of\n pcdf = observed_confidence_p.mean(axis=-1)\n return pcdf"
},
{
"identifier": "UnivariateDerNet",
"path": "evidential_regression/networks.py",
"snippet": "class UnivariateDerNet(nn.Module):\n\tdef __init__(self):\n\t\tsuper(UnivariateDerNet, self).__init__()\n\n\t\tself.hidden = nn.Sequential(\n\t\t\tnn.Linear(in_features=1, out_features=128),\n\t\t\t# nn.ReLU6(),\n\t\t\t# nn.Tanh(),\n\t\t\tnn.Mish(),\n\t\t\tnn.Linear(in_features=128, out_features=128),\n\t\t\t# nn.ReLU6(),\n\t\t\t# nn.Tanh(),\n\t\t\tnn.Mish(),\n\t\t\tnn.Linear(in_features=128, out_features=128),\n\t\t\t# nn.ReLU6(),\n\t\t\t# nn.Tanh(),\n\t\t\tnn.Mish(),\n\t\t\tnn.Linear(in_features=128, out_features=128),\n\t\t\tDenseInverseGamma(in_features=128, units=1)\n\t\t)\n\t\tself.apply(self.init_weights)\n\n\tdef forward(self, x):\n\t\tgamma, nu, alpha, beta = self.hidden(x)\n\n\t\treturn gamma, nu, alpha, beta\n\n\tdef init_weights(self, m):\n\t\tif isinstance(m, nn.Linear):\n\t\t\ttorch.nn.init.xavier_uniform_(m.weight)\n\n\tdef get_prediction(self, x):\n\t\tself.eval()\n\n\t\tgamma, nu, alpha, beta = self.hidden(x)\n\n\t\tgamma = gamma.detach().cpu().numpy().squeeze()\n\t\tnu = nu.detach().cpu().numpy().squeeze()\n\t\talpha = alpha.detach().cpu().numpy().squeeze()\n\t\tbeta = beta.detach().cpu().numpy().squeeze()\n\n\t\taleatoric = np.sqrt(beta * np.reciprocal(alpha - 1 + 1e-8))\n\t\tepistemic = np.sqrt(beta * np.reciprocal((nu * (alpha - 1)) + 1e-8))\n\t\tmeta_aleatoric = np.sqrt(beta**2 / ((alpha - 1)**2 * (alpha - 2 + 1e-6)))\n\n\t\treturn gamma, aleatoric, epistemic, meta_aleatoric, {\"nu\": nu, \"alpha\": alpha, \"beta\": beta}"
},
{
"identifier": "UnivariateEvidentialRegressionLoss",
"path": "evidential_regression/losses.py",
"snippet": "class UnivariateEvidentialRegressionLoss(torch.nn.Module):\n def __init__(self):\n super(UnivariateEvidentialRegressionLoss, self).__init__()\n\n def forward(self, y_true, gamma, nu, alpha, beta, mask=None, coeff=1e-2): \n if mask is not None:\n y_true = y_true[mask]\n gamma = gamma[mask]\n nu = nu[mask]\n alpha = alpha[mask]\n beta = beta[mask]\n\n loss_nll = NIG_NLL(y_true, gamma, nu, alpha, beta)\n loss_reg = NIG_REG(y_true, gamma, nu, alpha, beta)\n loss = torch.mean(loss_nll + coeff * loss_reg)\n return loss"
},
{
"identifier": "UnivariateKenNet",
"path": "mle_mc_dropout/networks.py",
"snippet": "class UnivariateKenNet(nn.Module):\n\t\"\"\" Combining MLE and Monte-Carlo Dropout for simultaneous al. and ep. UQ as proposed by\n\t\tKendall et al.: https://proceedings.neurips.cc/paper_files/paper/2017/hash/2650d6089a6d640c5e85b2b88265dc2b-Abstract.html\n\t\"\"\"\n\tdef __init__(self):\n\t\tsuper(UnivariateKenNet, self).__init__()\n\n\t\tself.n_mc_samples = 128\n\n\t\tself.hidden = nn.Sequential(\n\t\t\tnn.Linear(in_features=1, out_features=128),\n\t\t\tnn.Mish(),\n\t\t\tnn.Linear(in_features=128, out_features=128),\n\t\t\tnn.Mish(),\t\n\t\t)\n\n\t\tself.mc_block = nn.Sequential(\n\t\t\tnn.Linear(in_features=128, out_features=128),\n\t\t\tmc_dropout(p=0.2),\n\t\t\tnn.Mish(),\n\t\t\tnn.Linear(in_features=128, out_features=128),\n\t\t\tmc_dropout(p=0.2),\n\t\t\tnn.Mish(),\n\t\t\tnn.Linear(in_features=128, out_features=2)\n\t\t)\n\t\tself.apply(self.init_weights)\n\n\tdef forward(self, x):\n\t\tbatch_size, _ = x.shape\n\t\tx = self.hidden(x)\n\t\tmc_x = x.repeat(self.n_mc_samples, 1)\n\t\tmc_x = self.mc_block(mc_x)\n\t\tmc_x = mc_x.view(self.n_mc_samples, batch_size, -1)\n\n\t\tmc_mu = torch.mean(mc_x, axis=0)\n\t\tmu, log_aleatoric = torch.split(mc_mu, split_size_or_sections=1, dim=-1)\n\n\t\treturn mu, log_aleatoric\n\n\tdef init_weights(self, m):\n\t\tif isinstance(m, nn.Linear):\n\t\t\ttorch.nn.init.xavier_uniform_(m.weight)\n\n\tdef get_prediction(self, x):\n\t\tself.eval()\n\n\t\tbatch_size, _ = x.shape\n\t\tx = self.hidden(x)\n\t\tmc_x = x.repeat(self.n_mc_samples, 1)\n\t\tmc_x = self.mc_block(mc_x)\n\t\tmc_x = mc_x.view(self.n_mc_samples, batch_size, -1)\n\n\t\tmc_mu, mc_var = torch.mean(mc_x, axis=0), torch.var(mc_x, axis=0)\n\t\tmu, log_aleatoric = torch.split(mc_mu, split_size_or_sections=1, dim=-1)\n\t\tepistemic, meta_log_aleatoric = torch.split(mc_var, split_size_or_sections=1, dim=-1)\n\n\t\tmu = mu.detach().cpu().numpy().squeeze()\n\t\taleatoric = np.sqrt(np.exp(log_aleatoric.detach().cpu().numpy().squeeze()))\n\t\tepistemic = epistemic.detach().cpu().numpy().squeeze()\n\n\t\treturn mu, aleatoric, epistemic, meta_log_aleatoric, None"
},
{
"identifier": "UnivariateL1Loss",
"path": "mle_mc_dropout/losses.py",
"snippet": "class UnivariateL1Loss(nn.Module):\n def __init__(self):\n super(UnivariateL1Loss, self).__init__()\n \n def forward(self, y_true, y_pred, sigma):\n return torch.mean((0.5 * torch.exp(-sigma)) * torch.abs(y_pred - y_true) + 0.5 * sigma)"
},
{
"identifier": "UnivariateL2Loss",
"path": "mle_mc_dropout/losses.py",
"snippet": "class UnivariateL2Loss(nn.Module):\n def __init__(self):\n super(UnivariateL2Loss, self).__init__()\n \n def forward(self, y_pred, y_true, sigma):\n return torch.mean((0.5 * torch.exp(-sigma)) * torch.norm(y_pred - y_true, p=2, dim=-1) + 0.5 * sigma)"
},
{
"identifier": "BetaNLLLoss",
"path": "mle_mc_dropout/losses.py",
"snippet": "class BetaNLLLoss(nn.Module):\n \"\"\" Based on https://arxiv.org/abs/2203.09168 by Seitzer et al.\n \"\"\"\n def __init__(self):\n super(BetaNLLLoss, self).__init__()\n \n def forward(self, y_pred, y_true, sigma, beta=0.5):\n return torch.mean(torch.exp(sigma.detach())**(2 * beta) * ((0.5 * torch.exp(-sigma)) * torch.norm(y_pred - y_true, p=2, dim=-1) + 0.5 * sigma))"
}
] | import tqdm
import torch
import numpy as np
import matplotlib.pyplot as plt
from utils import get_device, UnivariateDummyData, get_predicted_cdf
from evidential_regression.networks import UnivariateDerNet
from evidential_regression.losses import UnivariateEvidentialRegressionLoss
from mle_mc_dropout.networks import UnivariateKenNet
from mle_mc_dropout.losses import UnivariateL1Loss, UnivariateL2Loss, BetaNLLLoss | 2,487 |
# plot settings
plt.rcParams.update(
{
"font.size": 12,
"text.usetex": False,
"font.family": "stixgeneral",
"mathtext.fontset": "stix",
}
)
if __name__ == "__main__":
device = get_device()
print(f"Working on {device}!")
EPOCHS = 120
in_lower = -2.0
in_upper = 10.0
train_data = UnivariateDummyData(N=2000, X_range=(in_lower, in_upper))
test_data = UnivariateDummyData(N=100, X_range=(-10.0, 20.0))
train_loader = torch.utils.data.DataLoader(train_data, batch_size=128, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=32)
optimizer_params = {
"lr": 1e-03,
"betas": (0.9, 0.999),
"eps": 1e-8,
"weight_decay": 1e-2,
"amsgrad": False}
net = UnivariateDerNet()
net.to(device)
|
# plot settings
plt.rcParams.update(
{
"font.size": 12,
"text.usetex": False,
"font.family": "stixgeneral",
"mathtext.fontset": "stix",
}
)
if __name__ == "__main__":
device = get_device()
print(f"Working on {device}!")
EPOCHS = 120
in_lower = -2.0
in_upper = 10.0
train_data = UnivariateDummyData(N=2000, X_range=(in_lower, in_upper))
test_data = UnivariateDummyData(N=100, X_range=(-10.0, 20.0))
train_loader = torch.utils.data.DataLoader(train_data, batch_size=128, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=32)
optimizer_params = {
"lr": 1e-03,
"betas": (0.9, 0.999),
"eps": 1e-8,
"weight_decay": 1e-2,
"amsgrad": False}
net = UnivariateDerNet()
net.to(device) | criterion = UnivariateEvidentialRegressionLoss() | 4 | 2023-10-19 08:44:08+00:00 | 4k |
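The training script above pairs UnivariateDerNet with the evidential regression loss. A compact, self-contained sketch of a Normal-Inverse-Gamma head plus one optimisation step is shown below; the NLL is the standard NIG negative log-likelihood written out directly rather than imported, and the layer sizes are assumptions:

# Minimal evidential (Normal-Inverse-Gamma) regression sketch.
import torch
import torch.nn as nn
import torch.nn.functional as F

class NIGHead(nn.Module):
    def __init__(self, in_features):
        super().__init__()
        self.dense = nn.Linear(in_features, 4)

    def forward(self, x):
        gamma, nu, alpha, beta = torch.chunk(self.dense(x), 4, dim=-1)
        # Softplus keeps nu, beta positive and alpha > 1.
        return gamma, F.softplus(nu), F.softplus(alpha) + 1.0, F.softplus(beta)

def nig_nll(y, gamma, nu, alpha, beta):
    omega = 2.0 * beta * (1.0 + nu)
    return (0.5 * torch.log(torch.pi / nu)
            - alpha * torch.log(omega)
            + (alpha + 0.5) * torch.log(nu * (y - gamma) ** 2 + omega)
            + torch.lgamma(alpha) - torch.lgamma(alpha + 0.5)).mean()

net = nn.Sequential(nn.Linear(1, 32), nn.Mish(), NIGHead(32))
opt = torch.optim.AdamW(net.parameters(), lr=1e-3)
x = torch.linspace(-2, 10, 128).unsqueeze(-1)
y = x * torch.sin(x)  # same toy target family as UnivariateDummyData
loss = nig_nll(y, *net(x))
opt.zero_grad(); loss.backward(); opt.step()
print(float(loss))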
avilliai/Bert_Vits2_Sever | modules.py | [
{
"identifier": "init_weights",
"path": "commons.py",
"snippet": "def init_weights(m, mean=0.0, std=0.01):\n classname = m.__class__.__name__\n if classname.find(\"Conv\") != -1:\n m.weight.data.normal_(mean, std)"
},
{
"identifier": "get_padding",
"path": "commons.py",
"snippet": "def get_padding(kernel_size, dilation=1):\n return int((kernel_size*dilation - dilation)/2)"
},
{
"identifier": "piecewise_rational_quadratic_transform",
"path": "transforms.py",
"snippet": "def piecewise_rational_quadratic_transform(inputs, \n unnormalized_widths,\n unnormalized_heights,\n unnormalized_derivatives,\n inverse=False,\n tails=None, \n tail_bound=1.,\n min_bin_width=DEFAULT_MIN_BIN_WIDTH,\n min_bin_height=DEFAULT_MIN_BIN_HEIGHT,\n min_derivative=DEFAULT_MIN_DERIVATIVE):\n\n if tails is None:\n spline_fn = rational_quadratic_spline\n spline_kwargs = {}\n else:\n spline_fn = unconstrained_rational_quadratic_spline\n spline_kwargs = {\n 'tails': tails,\n 'tail_bound': tail_bound\n }\n\n outputs, logabsdet = spline_fn(\n inputs=inputs,\n unnormalized_widths=unnormalized_widths,\n unnormalized_heights=unnormalized_heights,\n unnormalized_derivatives=unnormalized_derivatives,\n inverse=inverse,\n min_bin_width=min_bin_width,\n min_bin_height=min_bin_height,\n min_derivative=min_derivative,\n **spline_kwargs\n )\n return outputs, logabsdet"
},
{
"identifier": "Encoder",
"path": "attentions.py",
"snippet": "class Encoder(nn.Module):\n def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, isflow = True, **kwargs):\n super().__init__()\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.window_size = window_size\n #if isflow:\n # cond_layer = torch.nn.Conv1d(256, 2*hidden_channels*n_layers, 1)\n # self.cond_pre = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, 1)\n # self.cond_layer = weight_norm(cond_layer, name='weight')\n # self.gin_channels = 256\n self.cond_layer_idx = self.n_layers\n if 'gin_channels' in kwargs:\n self.gin_channels = kwargs['gin_channels']\n if self.gin_channels != 0:\n self.spk_emb_linear = nn.Linear(self.gin_channels, self.hidden_channels)\n # vits2 says 3rd block, so idx is 2 by default\n self.cond_layer_idx = kwargs['cond_layer_idx'] if 'cond_layer_idx' in kwargs else 2\n logging.debug(self.gin_channels, self.cond_layer_idx)\n assert self.cond_layer_idx < self.n_layers, 'cond_layer_idx should be less than n_layers'\n self.drop = nn.Dropout(p_dropout)\n self.attn_layers = nn.ModuleList()\n self.norm_layers_1 = nn.ModuleList()\n self.ffn_layers = nn.ModuleList()\n self.norm_layers_2 = nn.ModuleList()\n for i in range(self.n_layers):\n self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size))\n self.norm_layers_1.append(LayerNorm(hidden_channels))\n self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout))\n self.norm_layers_2.append(LayerNorm(hidden_channels))\n def forward(self, x, x_mask, g=None):\n attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)\n x = x * x_mask\n for i in range(self.n_layers):\n if i == self.cond_layer_idx and g is not None:\n g = self.spk_emb_linear(g.transpose(1, 2))\n g = g.transpose(1, 2)\n x = x + g\n x = x * x_mask\n y = self.attn_layers[i](x, x, attn_mask)\n y = self.drop(y)\n x = self.norm_layers_1[i](x + y)\n\n y = self.ffn_layers[i](x, x_mask)\n y = self.drop(y)\n x = self.norm_layers_2[i](x + y)\n x = x * x_mask\n return x"
}
] | import copy
import math
import numpy as np
import scipy
import torch
import commons
from torch import nn
from torch.nn import functional as F
from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
from torch.nn.utils import weight_norm, remove_weight_norm
from commons import init_weights, get_padding
from transforms import piecewise_rational_quadratic_transform
from attentions import Encoder | 2,812 | self.n_layers = n_layers
self.p_dropout = p_dropout
self.drop = nn.Dropout(p_dropout)
self.convs_sep = nn.ModuleList()
self.convs_1x1 = nn.ModuleList()
self.norms_1 = nn.ModuleList()
self.norms_2 = nn.ModuleList()
for i in range(n_layers):
dilation = kernel_size ** i
padding = (kernel_size * dilation - dilation) // 2
self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size,
groups=channels, dilation=dilation, padding=padding
))
self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
self.norms_1.append(LayerNorm(channels))
self.norms_2.append(LayerNorm(channels))
def forward(self, x, x_mask, g=None):
if g is not None:
x = x + g
for i in range(self.n_layers):
y = self.convs_sep[i](x * x_mask)
y = self.norms_1[i](y)
y = F.gelu(y)
y = self.convs_1x1[i](y)
y = self.norms_2[i](y)
y = F.gelu(y)
y = self.drop(y)
x = x + y
return x * x_mask
class WN(torch.nn.Module):
def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0):
super(WN, self).__init__()
assert(kernel_size % 2 == 1)
self.hidden_channels =hidden_channels
self.kernel_size = kernel_size
self.dilation_rate = dilation_rate
self.n_layers = n_layers
self.gin_channels = gin_channels
self.p_dropout = p_dropout
self.in_layers = torch.nn.ModuleList()
self.res_skip_layers = torch.nn.ModuleList()
self.drop = nn.Dropout(p_dropout)
if gin_channels != 0:
cond_layer = torch.nn.Conv1d(gin_channels, 2*hidden_channels*n_layers, 1)
self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
for i in range(n_layers):
dilation = dilation_rate ** i
padding = int((kernel_size * dilation - dilation) / 2)
in_layer = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, kernel_size,
dilation=dilation, padding=padding)
in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
self.in_layers.append(in_layer)
# last one is not necessary
if i < n_layers - 1:
res_skip_channels = 2 * hidden_channels
else:
res_skip_channels = hidden_channels
res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')
self.res_skip_layers.append(res_skip_layer)
def forward(self, x, x_mask, g=None, **kwargs):
output = torch.zeros_like(x)
n_channels_tensor = torch.IntTensor([self.hidden_channels])
if g is not None:
g = self.cond_layer(g)
for i in range(self.n_layers):
x_in = self.in_layers[i](x)
if g is not None:
cond_offset = i * 2 * self.hidden_channels
g_l = g[:,cond_offset:cond_offset+2*self.hidden_channels,:]
else:
g_l = torch.zeros_like(x_in)
acts = commons.fused_add_tanh_sigmoid_multiply(
x_in,
g_l,
n_channels_tensor)
acts = self.drop(acts)
res_skip_acts = self.res_skip_layers[i](acts)
if i < self.n_layers - 1:
res_acts = res_skip_acts[:,:self.hidden_channels,:]
x = (x + res_acts) * x_mask
output = output + res_skip_acts[:,self.hidden_channels:,:]
else:
output = output + res_skip_acts
return output * x_mask
def remove_weight_norm(self):
if self.gin_channels != 0:
torch.nn.utils.remove_weight_norm(self.cond_layer)
for l in self.in_layers:
torch.nn.utils.remove_weight_norm(l)
for l in self.res_skip_layers:
torch.nn.utils.remove_weight_norm(l)
class ResBlock1(torch.nn.Module):
def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
super(ResBlock1, self).__init__()
self.convs1 = nn.ModuleList([
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
padding=get_padding(kernel_size, dilation[0]))),
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
padding=get_padding(kernel_size, dilation[1]))),
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
padding=get_padding(kernel_size, dilation[2])))
])
|
LRELU_SLOPE = 0.1
class LayerNorm(nn.Module):
def __init__(self, channels, eps=1e-5):
super().__init__()
self.channels = channels
self.eps = eps
self.gamma = nn.Parameter(torch.ones(channels))
self.beta = nn.Parameter(torch.zeros(channels))
def forward(self, x):
x = x.transpose(1, -1)
x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
return x.transpose(1, -1)
class ConvReluNorm(nn.Module):
def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
super().__init__()
self.in_channels = in_channels
self.hidden_channels = hidden_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.n_layers = n_layers
self.p_dropout = p_dropout
assert n_layers > 1, "Number of layers should be larger than 0."
self.conv_layers = nn.ModuleList()
self.norm_layers = nn.ModuleList()
self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size//2))
self.norm_layers.append(LayerNorm(hidden_channels))
self.relu_drop = nn.Sequential(
nn.ReLU(),
nn.Dropout(p_dropout))
for _ in range(n_layers-1):
self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size//2))
self.norm_layers.append(LayerNorm(hidden_channels))
self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
self.proj.weight.data.zero_()
self.proj.bias.data.zero_()
def forward(self, x, x_mask):
x_org = x
for i in range(self.n_layers):
x = self.conv_layers[i](x * x_mask)
x = self.norm_layers[i](x)
x = self.relu_drop(x)
x = x_org + self.proj(x)
return x * x_mask
class DDSConv(nn.Module):
"""
Dilated and Depth-Separable Convolution
"""
def __init__(self, channels, kernel_size, n_layers, p_dropout=0.):
super().__init__()
self.channels = channels
self.kernel_size = kernel_size
self.n_layers = n_layers
self.p_dropout = p_dropout
self.drop = nn.Dropout(p_dropout)
self.convs_sep = nn.ModuleList()
self.convs_1x1 = nn.ModuleList()
self.norms_1 = nn.ModuleList()
self.norms_2 = nn.ModuleList()
for i in range(n_layers):
dilation = kernel_size ** i
padding = (kernel_size * dilation - dilation) // 2
self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size,
groups=channels, dilation=dilation, padding=padding
))
self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
self.norms_1.append(LayerNorm(channels))
self.norms_2.append(LayerNorm(channels))
def forward(self, x, x_mask, g=None):
if g is not None:
x = x + g
for i in range(self.n_layers):
y = self.convs_sep[i](x * x_mask)
y = self.norms_1[i](y)
y = F.gelu(y)
y = self.convs_1x1[i](y)
y = self.norms_2[i](y)
y = F.gelu(y)
y = self.drop(y)
x = x + y
return x * x_mask
class WN(torch.nn.Module):
def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0):
super(WN, self).__init__()
assert(kernel_size % 2 == 1)
self.hidden_channels =hidden_channels
self.kernel_size = kernel_size
self.dilation_rate = dilation_rate
self.n_layers = n_layers
self.gin_channels = gin_channels
self.p_dropout = p_dropout
self.in_layers = torch.nn.ModuleList()
self.res_skip_layers = torch.nn.ModuleList()
self.drop = nn.Dropout(p_dropout)
if gin_channels != 0:
cond_layer = torch.nn.Conv1d(gin_channels, 2*hidden_channels*n_layers, 1)
self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
for i in range(n_layers):
dilation = dilation_rate ** i
padding = int((kernel_size * dilation - dilation) / 2)
in_layer = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, kernel_size,
dilation=dilation, padding=padding)
in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
self.in_layers.append(in_layer)
# last one is not necessary
if i < n_layers - 1:
res_skip_channels = 2 * hidden_channels
else:
res_skip_channels = hidden_channels
res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')
self.res_skip_layers.append(res_skip_layer)
def forward(self, x, x_mask, g=None, **kwargs):
output = torch.zeros_like(x)
n_channels_tensor = torch.IntTensor([self.hidden_channels])
if g is not None:
g = self.cond_layer(g)
for i in range(self.n_layers):
x_in = self.in_layers[i](x)
if g is not None:
cond_offset = i * 2 * self.hidden_channels
g_l = g[:,cond_offset:cond_offset+2*self.hidden_channels,:]
else:
g_l = torch.zeros_like(x_in)
acts = commons.fused_add_tanh_sigmoid_multiply(
x_in,
g_l,
n_channels_tensor)
acts = self.drop(acts)
res_skip_acts = self.res_skip_layers[i](acts)
if i < self.n_layers - 1:
res_acts = res_skip_acts[:,:self.hidden_channels,:]
x = (x + res_acts) * x_mask
output = output + res_skip_acts[:,self.hidden_channels:,:]
else:
output = output + res_skip_acts
return output * x_mask
def remove_weight_norm(self):
if self.gin_channels != 0:
torch.nn.utils.remove_weight_norm(self.cond_layer)
for l in self.in_layers:
torch.nn.utils.remove_weight_norm(l)
for l in self.res_skip_layers:
torch.nn.utils.remove_weight_norm(l)
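# For reference, commons.fused_add_tanh_sigmoid_multiply used in WN.forward is,
# in VITS-style codebases, typically the WaveNet gated activation sketched
# below: the conditioned pre-activations are split in half, passed through
# tanh and sigmoid, and multiplied. This is an illustrative re-implementation,
# not the canonical one from commons.py.
def _fused_add_tanh_sigmoid_multiply_sketch(input_a, input_b, n_channels):
  n_channels_int = n_channels[0]
  in_act = input_a + input_b
  t_act = torch.tanh(in_act[:, :n_channels_int, :])
  s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
  return t_act * s_act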
class ResBlock1(torch.nn.Module):
def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
super(ResBlock1, self).__init__()
self.convs1 = nn.ModuleList([
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
padding=get_padding(kernel_size, dilation[0]))),
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
padding=get_padding(kernel_size, dilation[1]))),
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
padding=get_padding(kernel_size, dilation[2])))
]) | self.convs1.apply(init_weights) | 0 | 2023-10-23 08:24:12+00:00 | 4k |
t-ega/whatsapp-cloud-sdk | whatsapp_cloud_sdk/_files/contact.py | [
{
"identifier": "File",
"path": "whatsapp_cloud_sdk/_files/file_object.py",
"snippet": "class File:\n \"\"\"Base Class for all file objects.\"\"\"\n\n __slots__ = ()\n _id_attrs = ()\n\n def __str__(self):\n \"\"\"Return a string representation of the object.\"\"\"\n attributes = {}\n for slot in self.__slots__:\n attr = getattr(self, slot)\n if hasattr(attr, \"to_dict\"):\n attr = attr.to_dict()\n attributes[slot] = attr\n return str(attributes)\n\n def __eq__(self, other):\n \"\"\"Check for equivalence with another object of the same class.\"\"\"\n if isinstance(other, self.__class__):\n if not self._id_attrs:\n warn(\n f\"Objects of type {self.__class__.__name__} can not be meaningfully tested for\"\n \" equivalence.\",\n stacklevel=2,\n )\n if not other._id_attrs:\n warn(\n f\"Objects of type {other.__class__.__name__} can not be meaningfully tested\"\n \" for equivalence.\",\n stacklevel=2,\n )\n return self._id_attrs == other._id_attrs\n return super().__eq__(other)\n\n def to_dict(self) -> JSONDict:\n \"\"\"Convert the object to a dictionary.\"\"\"\n attributes = {}\n\n for slot in self.__slots__:\n attr = getattr(self, slot)\n\n if hasattr(attr, \"to_dict\"):\n attr = attr.to_dict()\n\n attributes[slot] = attr\n return attributes\n\n @staticmethod\n def parse_data(data: Optional[JSONDict]) -> Optional[JSONDict]:\n \"\"\"Parse data and return as a dictionary.\"\"\"\n return None if not data else data.copy()"
},
{
"identifier": "JSONDict",
"path": "whatsapp_cloud_sdk/_utils/types.py",
"snippet": "class MessageTypes(Enum):\n IMAGE = \"image\"\n AUDIO = \"audio\"\n TEXT = \"text\"\n REACTION = \"reaction\"\n STICKER = \"sticker\"\n LOCATION = \"location\"\n UNKNOWN = \"unknown\""
},
{
"identifier": "AddressValidator",
"path": "whatsapp_cloud_sdk/_validators/messages.py",
"snippet": "class AddressValidator(BaseModel):\n \"\"\"\n Validates address information.\n\n Args:\n street (str, optional): The street address.\n city (str, optional): The city.\n state (str, optional): The state or region.\n zip (str, optional): The postal code or ZIP code.\n country (str, optional): The country.\n country_code (str, optional): The country code.\n type (str, optional): The type of address.\n\n Attributes:\n None\n \"\"\"\n\n street: Optional[str] = None\n city: Optional[str] = None\n state: Optional[str] = None\n zip: Optional[str] = None\n country: Optional[str] = None\n country_code: Optional[str] = None\n type: Optional[str] = None"
},
{
"identifier": "NameValidator",
"path": "whatsapp_cloud_sdk/_validators/messages.py",
"snippet": "class NameValidator(BaseModel):\n \"\"\"\n Validates name information.\n\n Args:\n formatted_name (str): The formatted full name.\n first_name (str): The first name.\n last_name (str, optional): The last name.\n middle_name (str, optional): The middle name.\n suffix (str, optional): The name suffix.\n prefix (str, optional): The name prefix.\n\n Attributes:\n None\n \"\"\"\n\n formatted_name: str\n first_name: str\n last_name: Optional[str] = None\n middle_name: Optional[str] = None\n suffix: Optional[str] = None\n prefix: Optional[str] = None"
},
{
"identifier": "PhoneValidator",
"path": "whatsapp_cloud_sdk/_validators/messages.py",
"snippet": "class PhoneValidator(BaseModel):\n \"\"\"\n Validates phone information.\n\n Args:\n phone (str): The phone number.\n wa_id (str, optional): The WhatsApp ID.\n type (str, optional): The type of phone number.\n\n Attributes:\n None\n \"\"\"\n\n phone: str\n wa_id: Optional[str] = None\n type: Optional[str] = None"
},
{
"identifier": "OrgValidator",
"path": "whatsapp_cloud_sdk/_validators/messages.py",
"snippet": "class OrgValidator(BaseModel):\n \"\"\"\n Validates organization information.\n\n Args:\n company (str, optional): The company name.\n department (str, optional): The department.\n title (str, optional): The job title.\n\n Attributes:\n None\n \"\"\"\n\n company: Optional[str] = None\n department: Optional[str] = None\n title: Optional[str] = None"
},
{
"identifier": "URLValidator",
"path": "whatsapp_cloud_sdk/_validators/messages.py",
"snippet": "class URLValidator(BaseModel):\n \"\"\"\n Validates URL information.\n\n Args:\n url (str): The URL.\n type (str, optional): The type of URL.\n\n Attributes:\n None\n \"\"\"\n\n url: str\n type: Optional[str] = None"
},
{
"identifier": "EmailValidator",
"path": "whatsapp_cloud_sdk/_validators/messages.py",
"snippet": "class EmailValidator(BaseModel):\n \"\"\"\n Validates email information.\n\n Args:\n email (str, optional): The email address.\n type (str, optional): The type of email.\n\n Attributes:\n None\n \"\"\"\n\n email: Optional[str] = None\n type: Optional[str] = None"
}
] | from typing import List, Optional, Union
from whatsapp_cloud_sdk._files.file_object import File
from whatsapp_cloud_sdk._utils.types import JSONDict
from whatsapp_cloud_sdk._validators.messages import (
AddressValidator,
NameValidator,
PhoneValidator,
OrgValidator,
URLValidator,
EmailValidator,
) | 1,630 | """This module contains an object that represents a Whatsapp Contact and its related details."""
# pylint: disable=redefined-builtin
# pylint: disable=too-few-public-methods
class Address(File):
"""
Represents a contact address.
Args:
street (str): The street address.
city (str): The city.
state (str): The state.
zip (str): The ZIP code.
country (str): The country.
country_code (str): The country code.
type (str): The type of address.
Attributes:
street (str): The street address.
city (str): The city.
state (str): The state.
zip (str): The ZIP code.
country (str): The country.
country_code (str): The country code.
type (str): The type of address.
"""
__slots__ = (
"street",
"city",
"state",
"zip",
"country",
"country_code",
"type",
)
# pylint: disable=too-many-arguments
# pylint: disable=redefined-builtin
def __init__(
self,
street: str,
city: str,
state: str,
zip: str,
country: str,
country_code: str,
type: str,
):
| """This module contains an object that represents a Whatsapp Contact and it related details."""
# pylint: disable=redefined-builtin
# pylint: disable=too-few-public-methods
class Address(File):
"""
Represents a contact address.
Args:
street (str): The street address.
city (str): The city.
state (str): The state.
zip (str): The ZIP code.
country (str): The country.
country_code (str): The country code.
type (str): The type of address.
Attributes:
street (str): The street address.
city (str): The city.
state (str): The state.
zip (str): The ZIP code.
country (str): The country.
country_code (str): The country code.
type (str): The type of address.
"""
__slots__ = (
"street",
"city",
"state",
"zip",
"country",
"country_code",
"type",
)
# pylint: disable=too-many-arguments
# pylint: disable=redefined-builtin
def __init__(
self,
street: str,
city: str,
state: str,
zip: str,
country: str,
country_code: str,
type: str,
): | validator = AddressValidator( | 2 | 2023-10-15 21:12:45+00:00 | 4k |
caglarkucuk/earthformer-satellite-to-radar | ef-sat2rad/earthformer/cuboid_transformer/cuboid_transformer.py | [
{
"identifier": "CuboidSelfAttentionPatterns",
"path": "ef-sat2rad/earthformer/cuboid_transformer/cuboid_transformer_patterns.py",
"snippet": "def full_attention(input_shape):\ndef self_axial(input_shape):\ndef self_video_swin(input_shape, P=2, M=4):\ndef self_divided_space_time(input_shape):\ndef self_spatial_lg_v1(input_shape, M=4):\ndef self_axial_space_dilate_K(input_shape, K=2):\ndef cross_KxK(mem_shape, K):\ndef cross_KxK_lg(mem_shape, K):\ndef cross_KxK_heter(mem_shape, K):\n T, H, W, _ = input_shape\n T, H, W, _ = input_shape\n T, H, W, _ = input_shape\n P = min(P, T)\n M = min(M, H, W)\n T, H, W, _ = input_shape\n T, H, W, _ = input_shape\n T, H, W, _ = input_shape\n K = min(K, H, W)\n K = min(K, H, W)\n K = min(K, H, W)\n K = min(K, H, W)"
},
{
"identifier": "get_activation",
"path": "ef-sat2rad/earthformer/cuboid_transformer/utils.py",
"snippet": "def get_activation(act, inplace=False, **kwargs):\n \"\"\"\n\n Parameters\n ----------\n act\n Name of the activation\n inplace\n Whether to perform inplace activation\n\n Returns\n -------\n activation_layer\n The activation\n \"\"\"\n if act is None:\n return lambda x: x\n if isinstance(act, str):\n if act == 'leaky':\n negative_slope = kwargs.get(\"negative_slope\", 0.1)\n return nn.LeakyReLU(negative_slope, inplace=inplace)\n elif act == 'identity':\n return nn.Identity()\n elif act == 'elu':\n return nn.ELU(inplace=inplace)\n elif act == 'gelu':\n return nn.GELU()\n elif act == 'relu':\n return nn.ReLU()\n elif act == 'sigmoid':\n return nn.Sigmoid()\n elif act == 'tanh':\n return nn.Tanh()\n elif act == 'softrelu' or act == 'softplus':\n return nn.Softplus()\n elif act == 'softsign':\n return nn.Softsign()\n else:\n raise NotImplementedError('act=\"{}\" is not supported. '\n 'Try to include it if you can find that in '\n 'https://pytorch.org/docs/stable/nn.html'.format(act))\n else:\n return act"
},
{
"identifier": "get_norm_layer",
"path": "ef-sat2rad/earthformer/cuboid_transformer/utils.py",
"snippet": "def get_norm_layer(normalization: str = 'layer_norm',\n axis: int = -1,\n epsilon: float = 1e-5,\n in_channels: int = 0, **kwargs):\n \"\"\"Get the normalization layer based on the provided type\n\n Parameters\n ----------\n normalization\n The type of the layer normalization from ['layer_norm']\n axis\n The axis to normalize the\n epsilon\n The epsilon of the normalization layer\n in_channels\n Input channel\n\n Returns\n -------\n norm_layer\n The layer normalization layer\n \"\"\"\n if isinstance(normalization, str):\n if normalization == 'layer_norm':\n assert in_channels > 0\n assert axis == -1\n norm_layer = nn.LayerNorm(normalized_shape=in_channels, eps=epsilon, **kwargs)\n elif normalization == 'rms_norm':\n assert axis == -1\n norm_layer = RMSNorm(d=in_channels, eps=epsilon, **kwargs)\n else:\n raise NotImplementedError('normalization={} is not supported'.format(normalization))\n return norm_layer\n elif normalization is None:\n return nn.Identity()\n else:\n raise NotImplementedError('The type of normalization must be str')"
},
{
"identifier": "_generalize_padding",
"path": "ef-sat2rad/earthformer/cuboid_transformer/utils.py",
"snippet": "def _generalize_padding(x, pad_t, pad_h, pad_w, padding_type, t_pad_left=False):\n \"\"\"\n\n Parameters\n ----------\n x\n Shape (B, T, H, W, C)\n pad_t\n pad_h\n pad_w\n padding_type\n t_pad_left\n\n Returns\n -------\n out\n The result after padding the x. Shape will be (B, T + pad_t, H + pad_h, W + pad_w, C)\n \"\"\"\n if pad_t == 0 and pad_h == 0 and pad_w == 0:\n return x\n\n assert padding_type in ['zeros', 'ignore', 'nearest']\n B, T, H, W, C = x.shape\n\n if padding_type == 'nearest':\n return F.interpolate(x.permute(0, 4, 1, 2, 3), size=(T + pad_t, H + pad_h, W + pad_w)).permute(0, 2, 3, 4, 1)\n else:\n if t_pad_left:\n return F.pad(x, (0, 0, 0, pad_w, 0, pad_h, pad_t, 0))\n else:\n return F.pad(x, (0, 0, 0, pad_w, 0, pad_h, 0, pad_t))"
},
{
"identifier": "_generalize_unpadding",
"path": "ef-sat2rad/earthformer/cuboid_transformer/utils.py",
"snippet": "def _generalize_unpadding(x, pad_t, pad_h, pad_w, padding_type):\n assert padding_type in['zeros', 'ignore', 'nearest']\n B, T, H, W, C = x.shape\n if pad_t == 0 and pad_h == 0 and pad_w == 0:\n return x\n\n if padding_type == 'nearest':\n return F.interpolate(x.permute(0, 4, 1, 2, 3), size=(T - pad_t, H - pad_h, W - pad_w)).permute(0, 2, 3, 4, 1)\n else:\n return x[:, :(T - pad_t), :(H - pad_h), :(W - pad_w), :].contiguous()"
},
{
"identifier": "apply_initialization",
"path": "ef-sat2rad/earthformer/cuboid_transformer/utils.py",
"snippet": "def apply_initialization(m,\n linear_mode=\"0\",\n conv_mode=\"0\",\n norm_mode=\"0\",\n embed_mode=\"0\"):\n if isinstance(m, nn.Linear):\n\n if linear_mode in (\"0\", ):\n nn.init.kaiming_normal_(m.weight,\n mode='fan_in', nonlinearity=\"linear\")\n elif linear_mode in (\"1\", ):\n nn.init.kaiming_normal_(m.weight,\n a=0.1,\n mode='fan_out',\n nonlinearity=\"leaky_relu\")\n else:\n raise NotImplementedError\n if hasattr(m, 'bias') and m.bias is not None:\n nn.init.zeros_(m.bias)\n elif isinstance(m, (nn.Conv2d, nn.Conv3d, nn.ConvTranspose2d, nn.ConvTranspose3d)):\n if conv_mode in (\"0\", ):\n nn.init.kaiming_normal_(m.weight,\n a=0.1,\n mode='fan_out',\n nonlinearity=\"leaky_relu\")\n else:\n raise NotImplementedError\n if hasattr(m, 'bias') and m.bias is not None:\n nn.init.zeros_(m.bias)\n elif isinstance(m, nn.LayerNorm):\n if norm_mode in (\"0\", ):\n if m.elementwise_affine:\n nn.init.ones_(m.weight)\n nn.init.zeros_(m.bias)\n else:\n raise NotImplementedError\n elif isinstance(m, nn.GroupNorm):\n if norm_mode in (\"0\", ):\n if m.affine:\n nn.init.ones_(m.weight)\n nn.init.zeros_(m.bias)\n else:\n raise NotImplementedError\n # # pos_embed already initialized when created\n elif isinstance(m, nn.Embedding):\n if embed_mode in (\"0\", ):\n nn.init.trunc_normal_(m.weight.data, std=0.02)\n else:\n raise NotImplementedError\n else:\n pass"
},
{
"identifier": "round_to",
"path": "ef-sat2rad/earthformer/cuboid_transformer/utils.py",
"snippet": "def round_to(dat, c):\n return dat + (dat - dat % c) % c"
}
] | from typing import Sequence, Union
from functools import lru_cache
from collections import OrderedDict
from torch import nn
from einops import rearrange
from .cuboid_transformer_patterns import CuboidSelfAttentionPatterns, CuboidCrossAttentionPatterns
from .utils import (
get_activation, get_norm_layer,
_generalize_padding, _generalize_unpadding,
apply_initialization, round_to)
import warnings
import torch
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint | 3,433 | # spatiotemporal learned positional embedding
if self.typ == 't+h+w':
self.T_embed = nn.Embedding(num_embeddings=maxT, embedding_dim=embed_dim)
self.H_embed = nn.Embedding(num_embeddings=maxH, embedding_dim=embed_dim)
self.W_embed = nn.Embedding(num_embeddings=maxW, embedding_dim=embed_dim)
# nn.init.trunc_normal_(self.T_embed.weight, std=0.02)
# nn.init.trunc_normal_(self.H_embed.weight, std=0.02)
# nn.init.trunc_normal_(self.W_embed.weight, std=0.02)
elif self.typ == 't+hw':
self.T_embed = nn.Embedding(num_embeddings=maxT, embedding_dim=embed_dim)
self.HW_embed = nn.Embedding(num_embeddings=maxH * maxW, embedding_dim=embed_dim)
# nn.init.trunc_normal_(self.T_embed.weight, std=0.02)
# nn.init.trunc_normal_(self.HW_embed.weight, std=0.02)
else:
raise NotImplementedError
self.reset_parameters()
def reset_parameters(self):
for m in self.children():
apply_initialization(m, embed_mode="0")
def forward(self, x):
"""
Parameters
----------
x
Shape (B, T, H, W, C)
Returns
-------
out
Return the x + positional embeddings
"""
_, T, H, W, _ = x.shape
        t_idx = torch.arange(T, device=x.device) # (T,)
        h_idx = torch.arange(H, device=x.device) # (H,)
        w_idx = torch.arange(W, device=x.device) # (W,)
if self.typ == 't+h+w':
return x + self.T_embed(t_idx).reshape(T, 1, 1, self.embed_dim)\
+ self.H_embed(h_idx).reshape(1, H, 1, self.embed_dim)\
+ self.W_embed(w_idx).reshape(1, 1, W, self.embed_dim)
elif self.typ == 't+hw':
spatial_idx = h_idx.unsqueeze(-1) * self.maxW + w_idx
return x + self.T_embed(t_idx).reshape(T, 1, 1, self.embed_dim) + self.HW_embed(spatial_idx)
else:
raise NotImplementedError
class PositionwiseFFN(nn.Module):
"""The Position-wise FFN layer used in Transformer-like architectures
If pre_norm is True:
norm(data) -> fc1 -> act -> act_dropout -> fc2 -> dropout -> res(+data)
Else:
data -> fc1 -> act -> act_dropout -> fc2 -> dropout -> norm(res(+data))
    Also, if we use a gated projection, we will use
fc1_1 * act(fc1_2(data)) to map the data
"""
def __init__(self,
units: int = 512,
hidden_size: int = 2048,
activation_dropout: float = 0.0,
dropout: float = 0.1,
gated_proj: bool = False,
activation='relu',
normalization: str = 'layer_norm',
layer_norm_eps: float = 1E-5,
pre_norm: bool = False,
linear_init_mode="0",
norm_init_mode="0",
):
"""
Parameters
----------
units
hidden_size
activation_dropout
dropout
activation
normalization
layer_norm or no_norm
layer_norm_eps
pre_norm
Pre-layer normalization as proposed in the paper:
"[ACL2018] The Best of Both Worlds: Combining Recent Advances in
Neural Machine Translation"
This will stabilize the training of Transformers.
You may also refer to
"[Arxiv2020] Understanding the Difficulty of Training Transformers"
"""
super().__init__()
# initialization
self.linear_init_mode = linear_init_mode
self.norm_init_mode = norm_init_mode
self._pre_norm = pre_norm
self._gated_proj = gated_proj
self._kwargs = OrderedDict([
('units', units),
('hidden_size', hidden_size),
('activation_dropout', activation_dropout),
('activation', activation),
('dropout', dropout),
('normalization', normalization),
('layer_norm_eps', layer_norm_eps),
('gated_proj', gated_proj),
('pre_norm', pre_norm)
])
self.dropout_layer = nn.Dropout(dropout)
self.activation_dropout_layer = nn.Dropout(activation_dropout)
self.ffn_1 = nn.Linear(in_features=units, out_features=hidden_size,
bias=True)
if self._gated_proj:
self.ffn_1_gate = nn.Linear(in_features=units,
out_features=hidden_size,
bias=True)
self.activation = get_activation(activation)
self.ffn_2 = nn.Linear(in_features=hidden_size, out_features=units,
bias=True)
| """Only change done in this file is the added upsampling layer to the CuboidTransformerModel,
which increaes `h` and `w` dimensions of the input tensor by 2x to match the dimensions of the output tensor!
The rest is same with the original file from EarthFormer repo!
"""
"""A space-time Transformer with Cuboid Attention"""
class PosEmbed(nn.Module):
def __init__(self, embed_dim, maxT, maxH, maxW, typ='t+h+w'):
r"""
Parameters
----------
embed_dim
maxT
maxH
maxW
typ
The type of the positional embedding.
            - t+h+w:
                Use separate learned embeddings for the T, H and W axes.
            - t+hw:
                Use a learned embedding for T plus a single embedding over the flattened H*W spatial positions.
"""
super(PosEmbed, self).__init__()
self.typ = typ
assert self.typ in ['t+h+w', 't+hw']
self.maxT = maxT
self.maxH = maxH
self.maxW = maxW
self.embed_dim = embed_dim
# spatiotemporal learned positional embedding
if self.typ == 't+h+w':
self.T_embed = nn.Embedding(num_embeddings=maxT, embedding_dim=embed_dim)
self.H_embed = nn.Embedding(num_embeddings=maxH, embedding_dim=embed_dim)
self.W_embed = nn.Embedding(num_embeddings=maxW, embedding_dim=embed_dim)
# nn.init.trunc_normal_(self.T_embed.weight, std=0.02)
# nn.init.trunc_normal_(self.H_embed.weight, std=0.02)
# nn.init.trunc_normal_(self.W_embed.weight, std=0.02)
elif self.typ == 't+hw':
self.T_embed = nn.Embedding(num_embeddings=maxT, embedding_dim=embed_dim)
self.HW_embed = nn.Embedding(num_embeddings=maxH * maxW, embedding_dim=embed_dim)
# nn.init.trunc_normal_(self.T_embed.weight, std=0.02)
# nn.init.trunc_normal_(self.HW_embed.weight, std=0.02)
else:
raise NotImplementedError
self.reset_parameters()
def reset_parameters(self):
for m in self.children():
apply_initialization(m, embed_mode="0")
def forward(self, x):
"""
Parameters
----------
x
Shape (B, T, H, W, C)
Returns
-------
out
Return the x + positional embeddings
"""
_, T, H, W, _ = x.shape
        t_idx = torch.arange(T, device=x.device) # (T,)
        h_idx = torch.arange(H, device=x.device) # (H,)
        w_idx = torch.arange(W, device=x.device) # (W,)
if self.typ == 't+h+w':
return x + self.T_embed(t_idx).reshape(T, 1, 1, self.embed_dim)\
+ self.H_embed(h_idx).reshape(1, H, 1, self.embed_dim)\
+ self.W_embed(w_idx).reshape(1, 1, W, self.embed_dim)
elif self.typ == 't+hw':
spatial_idx = h_idx.unsqueeze(-1) * self.maxW + w_idx
return x + self.T_embed(t_idx).reshape(T, 1, 1, self.embed_dim) + self.HW_embed(spatial_idx)
else:
raise NotImplementedError
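# A minimal usage sketch for PosEmbed (not part of the original file). The
# sizes are arbitrary examples; T, H and W of the input must not exceed
# maxT/maxH/maxW because they index into the embedding tables.
def _pos_embed_example():
    pos_embed = PosEmbed(embed_dim=32, maxT=8, maxH=16, maxW=16, typ='t+h+w')
    x = torch.zeros(2, 4, 8, 8, 32)  # (B, T, H, W, C)
    out = pos_embed(x)
    assert out.shape == x.shape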
class PositionwiseFFN(nn.Module):
"""The Position-wise FFN layer used in Transformer-like architectures
If pre_norm is True:
norm(data) -> fc1 -> act -> act_dropout -> fc2 -> dropout -> res(+data)
Else:
data -> fc1 -> act -> act_dropout -> fc2 -> dropout -> norm(res(+data))
    Also, if we use a gated projection, we will use
fc1_1 * act(fc1_2(data)) to map the data
"""
def __init__(self,
units: int = 512,
hidden_size: int = 2048,
activation_dropout: float = 0.0,
dropout: float = 0.1,
gated_proj: bool = False,
activation='relu',
normalization: str = 'layer_norm',
layer_norm_eps: float = 1E-5,
pre_norm: bool = False,
linear_init_mode="0",
norm_init_mode="0",
):
"""
Parameters
----------
units
hidden_size
activation_dropout
dropout
activation
normalization
layer_norm or no_norm
layer_norm_eps
pre_norm
Pre-layer normalization as proposed in the paper:
"[ACL2018] The Best of Both Worlds: Combining Recent Advances in
Neural Machine Translation"
This will stabilize the training of Transformers.
You may also refer to
"[Arxiv2020] Understanding the Difficulty of Training Transformers"
"""
super().__init__()
# initialization
self.linear_init_mode = linear_init_mode
self.norm_init_mode = norm_init_mode
self._pre_norm = pre_norm
self._gated_proj = gated_proj
self._kwargs = OrderedDict([
('units', units),
('hidden_size', hidden_size),
('activation_dropout', activation_dropout),
('activation', activation),
('dropout', dropout),
('normalization', normalization),
('layer_norm_eps', layer_norm_eps),
('gated_proj', gated_proj),
('pre_norm', pre_norm)
])
self.dropout_layer = nn.Dropout(dropout)
self.activation_dropout_layer = nn.Dropout(activation_dropout)
self.ffn_1 = nn.Linear(in_features=units, out_features=hidden_size,
bias=True)
if self._gated_proj:
self.ffn_1_gate = nn.Linear(in_features=units,
out_features=hidden_size,
bias=True)
self.activation = get_activation(activation)
self.ffn_2 = nn.Linear(in_features=hidden_size, out_features=units,
bias=True) | self.layer_norm = get_norm_layer(normalization=normalization, | 2 | 2023-10-23 11:45:50+00:00 | 4k |
DTennant/GPC | data/fgvc_aircraft.py | [
{
"identifier": "subsample_instances",
"path": "data/data_utils.py",
"snippet": "def subsample_instances(dataset, prop_indices_to_subsample=0.8):\n\n np.random.seed(0)\n subsample_indices = np.random.choice(range(len(dataset)), replace=False,\n size=(int(prop_indices_to_subsample * len(dataset)),))\n\n return subsample_indices"
},
{
"identifier": "aircraft_root",
"path": "config.py",
"snippet": "_C = CN()\n_C.MODEL = CN()\n_C.MODEL.DEVICE = \"cuda\"\n_C.MODEL.NAME = 'resnet50'\n_C.MODEL.LAST_STRIDE = 1\n_C.MODEL.LABEL_SMOOTH = False\n_C.MODEL.PRETRAIN_PATH = ''\n_C.INPUT = CN()\n_C.INPUT.SIZE_TRAIN = [384, 128]\n_C.INPUT.SIZE_TEST = [384, 128]\n_C.INPUT.PROB = 0.0\n_C.INPUT.RE_PROB = 0.0\n_C.INPUT.PIXEL_MEAN = [0.485, 0.456, 0.406]\n_C.INPUT.PIXEL_STD = [0.229, 0.224, 0.225]\n_C.INPUT.PADDING = 10\n_C.DATASETS = CN()\n_C.DATASETS.NAMES = ('market1501')\n_C.DATASETS.DATA_PATH = '/home/zbc/data/market1501/'\n_C.DATASETS.TRAIN_PATH = 'bounding_box_train'\n_C.DATASETS.QUERY_PATH = 'query'\n_C.DATASETS.GALLERY_PATH = 'bounding_box_test'\n_C.DATALOADER = CN()\n_C.DATALOADER.NUM_WORKERS = 8\n_C.DATALOADER.SAMPLER = 'softmax'\n_C.DATALOADER.NUM_INSTANCE = 16\n_C.SOLVER = CN()\n_C.SOLVER.OPTIMIZER_NAME = \"Adam\"\n_C.SOLVER.FP16 = False\n_C.SOLVER.MAX_EPOCHS = 50\n_C.SOLVER.BASE_LR = 3e-4\n_C.SOLVER.BIAS_LR_FACTOR = 2\n_C.SOLVER.MOMENTUM = 0.9\n_C.SOLVER.MARGIN = 0.3\n_C.SOLVER.WEIGHT_DECAY = 0.0005\n_C.SOLVER.WEIGHT_DECAY_BIAS = 0.\n_C.SOLVER.GAMMA = 0.1\n_C.SOLVER.STEPS = (30, 55)\n_C.SOLVER.WARMUP_FACTOR = 1.0 / 3\n_C.SOLVER.WARMUP_ITERS = 500\n_C.SOLVER.WARMUP_METHOD = \"linear\"\n_C.SOLVER.CHECKPOINT_PERIOD = 50\n_C.SOLVER.LOG_PERIOD = 100\n_C.SOLVER.EVAL_PERIOD = 50\n_C.SOLVER.IMS_PER_BATCH = 64\n_C.SOLVER.CYTHON = True\n_C.TEST = CN()\n_C.TEST.IMS_PER_BATCH = 128\n_C.TEST.WEIGHT = \"\"\n_C.TEST.DEBUG = False\n_C.TEST.MULTI_GPU = False\n_C.TEST.RERANK = True\n_C.OUTPUT_DIR = \"\""
}
] | import os
import pandas as pd
import numpy as np
import tarfile
from copy import deepcopy
from torchvision.datasets.folder import default_loader
from torch.utils.data import Dataset
from data.data_utils import subsample_instances
from config import aircraft_root
from six.moves import urllib | 2,580 | index (int): Index
Returns:
tuple: (sample, target) where target is class_index of the target class.
"""
path, target = self.samples[index]
sample = self.loader(path)
if self.transform is not None:
sample = self.transform(sample)
if self.target_transform is not None:
target = self.target_transform(target)
return sample, target, self.uq_idxs[index]
def __len__(self):
return len(self.samples)
def __repr__(self):
fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
fmt_str += ' Number of datapoints: {}\n'.format(self.__len__())
fmt_str += ' Root Location: {}\n'.format(self.root)
tmp = ' Transforms (if any): '
fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
tmp = ' Target Transforms (if any): '
fmt_str += '{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
return fmt_str
def _check_exists(self):
return os.path.exists(os.path.join(self.root, 'data', 'images')) and \
os.path.exists(self.classes_file)
def download(self):
"""Download the FGVC-Aircraft data if it doesn't exist already."""
if self._check_exists():
return
# prepare to download data to PARENT_DIR/fgvc-aircraft-2013.tar.gz
print('Downloading %s ... (may take a few minutes)' % self.url)
parent_dir = os.path.abspath(os.path.join(self.root, os.pardir))
tar_name = self.url.rpartition('/')[-1]
tar_path = os.path.join(parent_dir, tar_name)
data = urllib.request.urlopen(self.url)
# download .tar.gz file
with open(tar_path, 'wb') as f:
f.write(data.read())
# extract .tar.gz to PARENT_DIR/fgvc-aircraft-2013b
        data_folder = tar_path[: -len('.tar.gz')]  # remove the '.tar.gz' suffix (str.strip removes a character set, not a suffix)
print('Extracting %s to %s ... (may take a few minutes)' % (tar_path, data_folder))
tar = tarfile.open(tar_path)
tar.extractall(parent_dir)
# if necessary, rename data folder to self.root
if not os.path.samefile(data_folder, self.root):
print('Renaming %s to %s ...' % (data_folder, self.root))
os.rename(data_folder, self.root)
# delete .tar.gz file
print('Deleting %s ...' % tar_path)
os.remove(tar_path)
print('Done!')
def subsample_dataset(dataset, idxs):
mask = np.zeros(len(dataset)).astype('bool')
mask[idxs] = True
dataset.samples = [(p, t) for i, (p, t) in enumerate(dataset.samples) if i in idxs]
dataset.uq_idxs = dataset.uq_idxs[mask]
return dataset
def subsample_classes(dataset, include_classes=range(60)):
cls_idxs = [i for i, (p, t) in enumerate(dataset.samples) if t in include_classes]
# TODO: Don't transform targets for now
target_xform_dict = {}
for i, k in enumerate(include_classes):
target_xform_dict[k] = i
dataset = subsample_dataset(dataset, cls_idxs)
dataset.target_transform = lambda x: target_xform_dict[x]
return dataset
def get_train_val_indices(train_dataset, val_split=0.2):
    all_targets = np.array([t for i, (p, t) in enumerate(train_dataset.samples)])  # array, so the == comparison below is elementwise
train_classes = np.unique(all_targets)
# Get train/test indices
train_idxs = []
val_idxs = []
for cls in train_classes:
cls_idxs = np.where(all_targets == cls)[0]
v_ = np.random.choice(cls_idxs, replace=False, size=((int(val_split * len(cls_idxs))),))
t_ = [x for x in cls_idxs if x not in v_]
train_idxs.extend(t_)
val_idxs.extend(v_)
return train_idxs, val_idxs
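# A usage sketch (not part of the original file) combining the helpers above:
# split a labelled dataset into class-balanced train/val subsets.
# `labelled_dataset` is a hypothetical FGVCAircraft instance.
def _train_val_split_example(labelled_dataset):
    train_idxs, val_idxs = get_train_val_indices(labelled_dataset, val_split=0.2)
    train_split = subsample_dataset(deepcopy(labelled_dataset), train_idxs)
    val_split = subsample_dataset(deepcopy(labelled_dataset), val_idxs)
    return train_split, val_split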
def get_aircraft_datasets(train_transform, test_transform, train_classes=range(50), prop_train_labels=0.8,
split_train_val=False, seed=0):
np.random.seed(seed)
# Init entire training set
|
def make_dataset(dir, image_ids, targets):
assert(len(image_ids) == len(targets))
images = []
dir = os.path.expanduser(dir)
for i in range(len(image_ids)):
item = (os.path.join(dir, 'data', 'images',
'%s.jpg' % image_ids[i]), targets[i])
images.append(item)
return images
def find_classes(classes_file):
# read classes file, separating out image IDs and class names
image_ids = []
targets = []
f = open(classes_file, 'r')
for line in f:
split_line = line.split(' ')
image_ids.append(split_line[0])
targets.append(' '.join(split_line[1:]))
f.close()
# index class names
classes = np.unique(targets)
class_to_idx = {classes[i]: i for i in range(len(classes))}
targets = [class_to_idx[c] for c in targets]
return (image_ids, targets, classes, class_to_idx)
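# A small runnable sketch (not part of the original file) of the classes-file
# format parsed by find_classes: each line is "<image_id> <class name...>".
# The ids and variant names below are made-up examples, and `tmp_dir` is any
# writable directory.
def _find_classes_example(tmp_dir):
    classes_file = os.path.join(tmp_dir, 'images_variant_train.txt')
    with open(classes_file, 'w') as f:
        f.write('1025794 Boeing 707-320\n'
                '1340192 Boeing 707-320\n'
                '0056978 A340-300\n')
    image_ids, targets, classes, class_to_idx = find_classes(classes_file)
    assert image_ids == ['1025794', '1340192', '0056978']
    assert len(classes) == 2                  # two unique variants
    assert targets[0] == targets[1] != targets[2]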
class FGVCAircraft(Dataset):
"""`FGVC-Aircraft <http://www.robots.ox.ac.uk/~vgg/data/fgvc-aircraft>`_ Dataset.
Args:
root (string): Root directory path to dataset.
class_type (string, optional): The level of FGVC-Aircraft fine-grain classification
to label data with (i.e., ``variant``, ``family``, or ``manufacturer``).
transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version. E.g. ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
loader (callable, optional): A function to load an image given its path.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in the root directory. If dataset is already downloaded, it is not
downloaded again.
"""
url = 'http://www.robots.ox.ac.uk/~vgg/data/fgvc-aircraft/archives/fgvc-aircraft-2013b.tar.gz'
class_types = ('variant', 'family', 'manufacturer')
splits = ('train', 'val', 'trainval', 'test')
def __init__(self, root, class_type='variant', split='train', transform=None,
target_transform=None, loader=default_loader, download=False):
if split not in self.splits:
raise ValueError('Split "{}" not found. Valid splits are: {}'.format(
split, ', '.join(self.splits),
))
if class_type not in self.class_types:
raise ValueError('Class type "{}" not found. Valid class types are: {}'.format(
class_type, ', '.join(self.class_types),
))
self.root = os.path.expanduser(root)
self.class_type = class_type
self.split = split
self.classes_file = os.path.join(self.root, 'data',
'images_%s_%s.txt' % (self.class_type, self.split))
if download:
self.download()
(image_ids, targets, classes, class_to_idx) = find_classes(self.classes_file)
samples = make_dataset(self.root, image_ids, targets)
self.transform = transform
self.target_transform = target_transform
self.loader = loader
self.samples = samples
self.classes = classes
self.class_to_idx = class_to_idx
        self.train = split == 'train'
self.uq_idxs = np.array(range(len(self)))
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (sample, target) where target is class_index of the target class.
"""
path, target = self.samples[index]
sample = self.loader(path)
if self.transform is not None:
sample = self.transform(sample)
if self.target_transform is not None:
target = self.target_transform(target)
return sample, target, self.uq_idxs[index]
def __len__(self):
return len(self.samples)
def __repr__(self):
fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
fmt_str += ' Number of datapoints: {}\n'.format(self.__len__())
fmt_str += ' Root Location: {}\n'.format(self.root)
tmp = ' Transforms (if any): '
fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
tmp = ' Target Transforms (if any): '
fmt_str += '{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
return fmt_str
def _check_exists(self):
return os.path.exists(os.path.join(self.root, 'data', 'images')) and \
os.path.exists(self.classes_file)
def download(self):
"""Download the FGVC-Aircraft data if it doesn't exist already."""
if self._check_exists():
return
# prepare to download data to PARENT_DIR/fgvc-aircraft-2013.tar.gz
print('Downloading %s ... (may take a few minutes)' % self.url)
parent_dir = os.path.abspath(os.path.join(self.root, os.pardir))
tar_name = self.url.rpartition('/')[-1]
tar_path = os.path.join(parent_dir, tar_name)
data = urllib.request.urlopen(self.url)
# download .tar.gz file
with open(tar_path, 'wb') as f:
f.write(data.read())
# extract .tar.gz to PARENT_DIR/fgvc-aircraft-2013b
        data_folder = tar_path[: -len('.tar.gz')]  # remove the '.tar.gz' suffix (str.strip removes a character set, not a suffix)
print('Extracting %s to %s ... (may take a few minutes)' % (tar_path, data_folder))
tar = tarfile.open(tar_path)
tar.extractall(parent_dir)
# if necessary, rename data folder to self.root
if not os.path.samefile(data_folder, self.root):
print('Renaming %s to %s ...' % (data_folder, self.root))
os.rename(data_folder, self.root)
# delete .tar.gz file
print('Deleting %s ...' % tar_path)
os.remove(tar_path)
print('Done!')
def subsample_dataset(dataset, idxs):
mask = np.zeros(len(dataset)).astype('bool')
mask[idxs] = True
dataset.samples = [(p, t) for i, (p, t) in enumerate(dataset.samples) if i in idxs]
dataset.uq_idxs = dataset.uq_idxs[mask]
return dataset
def subsample_classes(dataset, include_classes=range(60)):
cls_idxs = [i for i, (p, t) in enumerate(dataset.samples) if t in include_classes]
# TODO: Don't transform targets for now
target_xform_dict = {}
for i, k in enumerate(include_classes):
target_xform_dict[k] = i
dataset = subsample_dataset(dataset, cls_idxs)
dataset.target_transform = lambda x: target_xform_dict[x]
return dataset
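# A usage sketch (not part of the original file): keep only the first 50
# variants. Raw sample targets are left untouched; the 0..49 remapping is
# applied lazily through target_transform. `dataset` is a hypothetical
# FGVCAircraft instance.
def _subsample_classes_example(dataset):
    subset = subsample_classes(deepcopy(dataset), include_classes=range(50))
    _, raw_target = subset.samples[0]
    assert 0 <= subset.target_transform(raw_target) < 50
    return subset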
def get_train_val_indices(train_dataset, val_split=0.2):
    all_targets = np.array([t for i, (p, t) in enumerate(train_dataset.samples)])  # array, so the == comparison below is elementwise
train_classes = np.unique(all_targets)
# Get train/test indices
train_idxs = []
val_idxs = []
for cls in train_classes:
cls_idxs = np.where(all_targets == cls)[0]
v_ = np.random.choice(cls_idxs, replace=False, size=((int(val_split * len(cls_idxs))),))
t_ = [x for x in cls_idxs if x not in v_]
train_idxs.extend(t_)
val_idxs.extend(v_)
return train_idxs, val_idxs
def get_aircraft_datasets(train_transform, test_transform, train_classes=range(50), prop_train_labels=0.8,
split_train_val=False, seed=0):
np.random.seed(seed)
# Init entire training set | whole_training_set = FGVCAircraft(root=aircraft_root, transform=train_transform, split='trainval') | 1 | 2023-10-23 18:23:22+00:00 | 4k |
camenduru/MiniGPT-v2-hf | minigpt4/models/base_model.py | [
{
"identifier": "download_cached_file",
"path": "minigpt4/common/dist_utils.py",
"snippet": "def download_cached_file(url, check_hash=True, progress=False):\n \"\"\"\n Download a file from a URL and cache it locally. If the file already exists, it is not downloaded again.\n If distributed, only the main process downloads the file, and the other processes wait for the file to be downloaded.\n \"\"\"\n\n def get_cached_file_path():\n # a hack to sync the file path across processes\n parts = torch.hub.urlparse(url)\n filename = os.path.basename(parts.path)\n cached_file = os.path.join(timm_hub.get_cache_dir(), filename)\n\n return cached_file\n\n if is_main_process():\n timm_hub.download_cached_file(url, check_hash, progress)\n\n if is_dist_avail_and_initialized():\n dist.barrier()\n\n return get_cached_file_path()"
},
{
"identifier": "is_dist_avail_and_initialized",
"path": "minigpt4/common/dist_utils.py",
"snippet": "def is_dist_avail_and_initialized():\n if not dist.is_available():\n return False\n if not dist.is_initialized():\n return False\n return True"
},
{
"identifier": "get_abs_path",
"path": "minigpt4/common/utils.py",
"snippet": "def get_abs_path(rel_path):\n return os.path.join(registry.get_path(\"library_root\"), rel_path)"
},
{
"identifier": "is_url",
"path": "minigpt4/common/utils.py",
"snippet": "def is_url(url_or_filename):\n parsed = urlparse(url_or_filename)\n return parsed.scheme in (\"http\", \"https\")"
},
{
"identifier": "create_eva_vit_g",
"path": "minigpt4/models/eva_vit.py",
"snippet": "def create_eva_vit_g(img_size=224,drop_path_rate=0.4,use_checkpoint=False,precision=\"fp16\"):\n model = VisionTransformer(\n img_size=img_size,\n patch_size=14,\n use_mean_pooling=False,\n embed_dim=1408,\n depth=39,\n num_heads=1408//88,\n mlp_ratio=4.3637,\n qkv_bias=True,\n drop_path_rate=drop_path_rate,\n norm_layer=partial(nn.LayerNorm, eps=1e-6),\n use_checkpoint=use_checkpoint,\n ) \n url = \"https://storage.googleapis.com/sfr-vision-language-research/LAVIS/models/BLIP2/eva_vit_g.pth\"\n cached_file = download_cached_file(\n url, check_hash=False, progress=True\n )\n state_dict = torch.load(cached_file, map_location=\"cpu\") \n interpolate_pos_embed(model,state_dict)\n \n incompatible_keys = model.load_state_dict(state_dict, strict=False)\n# print(incompatible_keys)\n \n if precision == \"fp16\":\n# model.to(\"cuda\") \n convert_weights_to_fp16(model)\n return model"
}
] | import os
import logging
import contextlib
import numpy as np
import torch
import torch.nn as nn
from omegaconf import OmegaConf
from transformers import BertTokenizer, LlamaTokenizer
from transformers.models.llama.modeling_llama import LlamaForCausalLM
from peft import (
LoraConfig,
get_peft_model,
prepare_model_for_int8_training,
)
from minigpt4.common.dist_utils import download_cached_file, is_dist_avail_and_initialized
from minigpt4.common.utils import get_abs_path, is_url
from minigpt4.models.eva_vit import create_eva_vit_g | 1,732 | """Base class for models."""
def __init__(self):
super().__init__()
@property
def device(self):
return list(self.parameters())[-1].device
def load_checkpoint(self, url_or_filename):
"""
Load from a finetuned checkpoint.
This should expect no mismatch in the model keys and the checkpoint keys.
"""
if is_url(url_or_filename):
cached_file = download_cached_file(
url_or_filename, check_hash=False, progress=True
)
checkpoint = torch.load(cached_file, map_location="cpu")
elif os.path.isfile(url_or_filename):
checkpoint = torch.load(url_or_filename, map_location="cpu")
else:
raise RuntimeError("checkpoint url or path is invalid")
if "model" in checkpoint.keys():
state_dict = checkpoint["model"]
else:
state_dict = checkpoint
msg = self.load_state_dict(state_dict, strict=False)
logging.info("Missing keys {}".format(msg.missing_keys))
logging.info("load checkpoint from %s" % url_or_filename)
return msg
@classmethod
def from_pretrained(cls, model_type):
"""
Build a pretrained model from default configuration file, specified by model_type.
Args:
- model_type (str): model type, specifying architecture and checkpoints.
Returns:
- model (nn.Module): pretrained or finetuned model, depending on the configuration.
"""
model_cfg = OmegaConf.load(cls.default_config_path(model_type)).model
model = cls.from_config(model_cfg)
return model
@classmethod
def default_config_path(cls, model_type):
assert (
model_type in cls.PRETRAINED_MODEL_CONFIG_DICT
), "Unknown model type {}".format(model_type)
return get_abs_path(cls.PRETRAINED_MODEL_CONFIG_DICT[model_type])
def load_checkpoint_from_config(self, cfg, **kwargs):
"""
Load checkpoint as specified in the config file.
If load_finetuned is True, load the finetuned model; otherwise, load the pretrained model.
When loading the pretrained model, each task-specific architecture may define their
own load_from_pretrained() method.
"""
load_finetuned = cfg.get("load_finetuned", True)
if load_finetuned:
finetune_path = cfg.get("finetuned", None)
assert (
finetune_path is not None
), "Found load_finetuned is True, but finetune_path is None."
self.load_checkpoint(url_or_filename=finetune_path)
else:
# load pre-trained weights
pretrain_path = cfg.get("pretrained", None)
assert "Found load_finetuned is False, but pretrain_path is None."
self.load_from_pretrained(url_or_filename=pretrain_path, **kwargs)
def before_evaluation(self, **kwargs):
pass
def show_n_params(self, return_str=True):
tot = 0
for p in self.parameters():
w = 1
for x in p.shape:
w *= x
tot += w
if return_str:
if tot >= 1e6:
return "{:.1f}M".format(tot / 1e6)
else:
return "{:.1f}K".format(tot / 1e3)
else:
return tot
def maybe_autocast(self, dtype=torch.float16):
# if on cpu, don't use autocast
# if on gpu, use autocast with dtype if provided, otherwise use torch.float16
enable_autocast = self.device != torch.device("cpu")
if enable_autocast:
return torch.cuda.amp.autocast(dtype=dtype)
else:
return contextlib.nullcontext()
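    # A usage sketch for maybe_autocast (hypothetical subclass code, where
    # `image` is an input tensor already on the model's device):
    #
    #     with self.maybe_autocast():
    #         image_embeds = self.visual_encoder(image)
    #
    # On GPU the block runs under torch.cuda.amp.autocast in float16; on CPU
    # the null context makes it a no-op, so one code path serves both devices.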
@classmethod
def init_vision_encoder(
cls, model_name, img_size, drop_path_rate, use_grad_checkpoint, precision, freeze
):
logging.info('Loading VIT')
assert model_name == "eva_clip_g", "vit model must be eva_clip_g for current version of MiniGPT-4"
if not freeze:
precision = "fp32" # fp16 is not for training
| """
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
class BaseModel(nn.Module):
"""Base class for models."""
def __init__(self):
super().__init__()
@property
def device(self):
return list(self.parameters())[-1].device
def load_checkpoint(self, url_or_filename):
"""
Load from a finetuned checkpoint.
This should expect no mismatch in the model keys and the checkpoint keys.
"""
if is_url(url_or_filename):
cached_file = download_cached_file(
url_or_filename, check_hash=False, progress=True
)
checkpoint = torch.load(cached_file, map_location="cpu")
elif os.path.isfile(url_or_filename):
checkpoint = torch.load(url_or_filename, map_location="cpu")
else:
raise RuntimeError("checkpoint url or path is invalid")
if "model" in checkpoint.keys():
state_dict = checkpoint["model"]
else:
state_dict = checkpoint
msg = self.load_state_dict(state_dict, strict=False)
logging.info("Missing keys {}".format(msg.missing_keys))
logging.info("load checkpoint from %s" % url_or_filename)
return msg
@classmethod
def from_pretrained(cls, model_type):
"""
Build a pretrained model from default configuration file, specified by model_type.
Args:
- model_type (str): model type, specifying architecture and checkpoints.
Returns:
- model (nn.Module): pretrained or finetuned model, depending on the configuration.
"""
model_cfg = OmegaConf.load(cls.default_config_path(model_type)).model
model = cls.from_config(model_cfg)
return model
@classmethod
def default_config_path(cls, model_type):
assert (
model_type in cls.PRETRAINED_MODEL_CONFIG_DICT
), "Unknown model type {}".format(model_type)
return get_abs_path(cls.PRETRAINED_MODEL_CONFIG_DICT[model_type])
def load_checkpoint_from_config(self, cfg, **kwargs):
"""
Load checkpoint as specified in the config file.
If load_finetuned is True, load the finetuned model; otherwise, load the pretrained model.
When loading the pretrained model, each task-specific architecture may define their
own load_from_pretrained() method.
"""
load_finetuned = cfg.get("load_finetuned", True)
if load_finetuned:
finetune_path = cfg.get("finetuned", None)
assert (
finetune_path is not None
), "Found load_finetuned is True, but finetune_path is None."
self.load_checkpoint(url_or_filename=finetune_path)
else:
# load pre-trained weights
pretrain_path = cfg.get("pretrained", None)
assert "Found load_finetuned is False, but pretrain_path is None."
self.load_from_pretrained(url_or_filename=pretrain_path, **kwargs)
def before_evaluation(self, **kwargs):
pass
def show_n_params(self, return_str=True):
tot = 0
for p in self.parameters():
w = 1
for x in p.shape:
w *= x
tot += w
if return_str:
if tot >= 1e6:
return "{:.1f}M".format(tot / 1e6)
else:
return "{:.1f}K".format(tot / 1e3)
else:
return tot
def maybe_autocast(self, dtype=torch.float16):
# if on cpu, don't use autocast
# if on gpu, use autocast with dtype if provided, otherwise use torch.float16
enable_autocast = self.device != torch.device("cpu")
if enable_autocast:
return torch.cuda.amp.autocast(dtype=dtype)
else:
return contextlib.nullcontext()
@classmethod
def init_vision_encoder(
cls, model_name, img_size, drop_path_rate, use_grad_checkpoint, precision, freeze
):
logging.info('Loading VIT')
assert model_name == "eva_clip_g", "vit model must be eva_clip_g for current version of MiniGPT-4"
if not freeze:
precision = "fp32" # fp16 is not for training
| visual_encoder = create_eva_vit_g( | 4 | 2023-10-15 19:54:22+00:00 | 4k |
deepghs/sdeval | sdeval/corrupt/aicorrupt.py | [
{
"identifier": "load_images",
"path": "sdeval/utils/images.py",
"snippet": "def _yield_images(images: ImagesTyping) -> Iterator[Image.Image]:\ndef load_images(images: ImagesTyping) -> List[Image.Image]:"
},
{
"identifier": "tqdm",
"path": "sdeval/utils/tqdm_.py",
"snippet": "def tqdm(*args, silent: bool = False, **kwargs):\n \"\"\"\n An enhanced version of tqdm (progress bar) with an option to silence the output.\n\n This function modifies the behavior of tqdm to allow silencing the progress bar.\n\n :param args: Positional arguments to be passed to tqdm.\n :param silent: If True, the progress bar content will not be displayed.\n :type silent: bool\n :param kwargs: Additional keyword arguments to be passed to tqdm.\n :return: tqdm progress bar.\n :rtype: tqdm.std.tqdm\n \"\"\"\n with io.StringIO() as sio:\n if silent:\n kwargs['file'] = sio\n\n return _origin_tqdm(*args, **kwargs)"
}
] | import json
import numpy as np
from functools import lru_cache
from typing import Tuple, Optional, Mapping
from PIL import Image
from huggingface_hub import hf_hub_download
from imgutils.data import rgb_encode, ImageTyping, load_image
from imgutils.utils import open_onnx_model
from ..utils import ImagesTyping, load_images, tqdm | 1,616 | This function downloads and opens the meta information of the AI image corrupted detection model specified by the given model name using Hugging Face Hub.
:param model_name: The name of the AI image corrupted detection model.
:type model_name: str
:return: The opened meta information of the AI image corrupted detection model.
:rtype: dict
"""
with open(hf_hub_download(
        'deepghs/ai_image_corrupted',
f'{model_name}/meta.json',
), 'r', encoding='utf-8') as f:
return json.load(f)
@lru_cache()
def _open_anime_aicop_labels(model_name: str):
"""
Open the labels of the AI image corrupted detection model.
This function opens the labels of the AI image corrupted detection model specified by the given model name.
:param model_name: The name of the AI image corrupted detection model.
:type model_name: str
:return: The labels of the AI image corrupted detection model.
:rtype: List[str]
"""
return _open_anime_aicop_meta(model_name)['labels']
def _img_encode(image: Image.Image, size: Tuple[int, int] = (384, 384),
normalize: Optional[Tuple[float, float]] = (0.5, 0.5)):
"""
Encode the image for AI image corrupted detection.
This function resizes and encodes the image for AI image corrupted detection.
:param image: The input image.
:type image: Image.Image
:param size: The target size for encoding. Default is (384, 384).
:type size: Tuple[int, int]
:param normalize: The normalization parameters. Default is (0.5, 0.5).
:type normalize: Optional[Tuple[float, float]]
:return: The encoded image data.
:rtype: np.ndarray
"""
image = image.resize(size, Image.BILINEAR)
data = rgb_encode(image, order_='CHW')
if normalize is not None:
mean_, std_ = normalize
mean = np.asarray([mean_]).reshape((-1, 1, 1))
std = np.asarray([std_]).reshape((-1, 1, 1))
data = (data - mean) / std
return data.astype(np.float32)
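# A quick shape-check sketch for _img_encode (not part of the original file):
# any RGB PIL image becomes a normalized float32 CHW array at 384x384.
def _img_encode_example():
    dummy = Image.new('RGB', (640, 480), color='white')
    data = _img_encode(dummy)
    assert data.shape == (3, 384, 384)
    assert data.dtype == np.float32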
def get_ai_corrupted(image: ImageTyping, model_name: str = _DEFAULT_MODEL_NAME) -> Mapping[str, float]:
"""
Get AI image corrupted detection scores for an image.
This function calculates AI image corrupted detection scores for a given image using the specified model.
:param image: The input image.
:type image: ImageTyping
:param model_name: The name of the AI image corrupted detection model. Default is 'caformer_s36_v0_focal'.
:type model_name: str
:return: A dictionary containing the corrupted score.
:rtype: Mapping[str, float]
"""
image = load_image(image, force_background='white', mode='RGB')
input_ = _img_encode(image)[None, ...]
output, = _open_anime_aicop_model(model_name).run(['output'], {'input': input_})
return dict(zip(_open_anime_aicop_labels(model_name), output[0].tolist()))
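# A usage sketch (not part of the original file). 'sample.png' is a
# hypothetical local file; the first call downloads the ONNX model from
# Hugging Face, so it needs network access.
def _get_ai_corrupted_example():
    scores = get_ai_corrupted('sample.png')
    return scores['corrupted']  # higher values indicate a more corrupted image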
class AICorruptMetrics:
"""
Class for calculating an AI image corruptness score.
The `AICorruptMetrics` class allows you to calculate an AI image corruptness score using the AI image corrupted detection model.
:param model_name: The name of the AI image corrupted detection model. Default is 'caformer_s36_v0_focal'.
:type model_name: str
:param silent: If True, suppresses progress bars and additional output during calculation.
:type silent: bool
:param tqdm_desc: Description for the tqdm progress bar during calculation.
:type tqdm_desc: str
"""
def __init__(self, model_name: str = _DEFAULT_MODEL_NAME,
silent: bool = False, tqdm_desc: str = None):
self._model_name = model_name
self.silent = silent
self.tqdm_desc = tqdm_desc or self.__class__.__name__
def score(self, images: ImagesTyping, silent: bool = None):
"""
Calculate the AI image corruptness score for a set of images.
This method calculates the AI image corruptness score for a set of input images using the AI image corrupted detection model.
:param images: The set of input images for calculating the AI image corruptness score.
:type images: ImagesTyping
:param silent: If True, suppresses progress bars and additional output during calculation.
:type silent: bool
:return: The AI image corruptness score.
:rtype: float
"""
image_list = load_images(images)
if not image_list:
raise FileNotFoundError(f'Images for calculating AI corrupt score not provided - {images}.')
scores = np.array([
get_ai_corrupted(image, model_name=self._model_name)['corrupted']
| """
Overview:
AI image corrupt evaluation metrics.
"""
_DEFAULT_MODEL_NAME = 'caformer_s36_v0_focal'
@lru_cache()
def _open_anime_aicop_model(model_name: str):
"""
Open the AI image corrupted detection model.
This function downloads and opens the AI image corrupted detection model specified by the given model name using Hugging Face Hub.
:param model_name: The name of the AI image corrupted detection model.
:type model_name: str
:return: The opened AI image corrupted detection model.
    :rtype: onnxruntime.InferenceSession
"""
return open_onnx_model(hf_hub_download(
        'deepghs/ai_image_corrupted',
f'{model_name}/model.onnx',
))
@lru_cache()
def _open_anime_aicop_meta(model_name: str):
"""
Open the meta information of the AI image corrupted detection model.
This function downloads and opens the meta information of the AI image corrupted detection model specified by the given model name using Hugging Face Hub.
:param model_name: The name of the AI image corrupted detection model.
:type model_name: str
:return: The opened meta information of the AI image corrupted detection model.
:rtype: dict
"""
with open(hf_hub_download(
        'deepghs/ai_image_corrupted',
f'{model_name}/meta.json',
), 'r', encoding='utf-8') as f:
return json.load(f)
@lru_cache()
def _open_anime_aicop_labels(model_name: str):
"""
Open the labels of the AI image corrupted detection model.
This function opens the labels of the AI image corrupted detection model specified by the given model name.
:param model_name: The name of the AI image corrupted detection model.
:type model_name: str
:return: The labels of the AI image corrupted detection model.
:rtype: List[str]
"""
return _open_anime_aicop_meta(model_name)['labels']
def _img_encode(image: Image.Image, size: Tuple[int, int] = (384, 384),
normalize: Optional[Tuple[float, float]] = (0.5, 0.5)):
"""
Encode the image for AI image corrupted detection.
This function resizes and encodes the image for AI image corrupted detection.
:param image: The input image.
:type image: Image.Image
:param size: The target size for encoding. Default is (384, 384).
:type size: Tuple[int, int]
:param normalize: The normalization parameters. Default is (0.5, 0.5).
:type normalize: Optional[Tuple[float, float]]
:return: The encoded image data.
:rtype: np.ndarray
"""
image = image.resize(size, Image.BILINEAR)
data = rgb_encode(image, order_='CHW')
if normalize is not None:
mean_, std_ = normalize
mean = np.asarray([mean_]).reshape((-1, 1, 1))
std = np.asarray([std_]).reshape((-1, 1, 1))
data = (data - mean) / std
return data.astype(np.float32)
def get_ai_corrupted(image: ImageTyping, model_name: str = _DEFAULT_MODEL_NAME) -> Mapping[str, float]:
"""
Get AI image corrupted detection scores for an image.
This function calculates AI image corrupted detection scores for a given image using the specified model.
:param image: The input image.
:type image: ImageTyping
:param model_name: The name of the AI image corrupted detection model. Default is 'caformer_s36_v0_focal'.
:type model_name: str
:return: A dictionary containing the corrupted score.
:rtype: Mapping[str, float]
"""
image = load_image(image, force_background='white', mode='RGB')
input_ = _img_encode(image)[None, ...]
output, = _open_anime_aicop_model(model_name).run(['output'], {'input': input_})
return dict(zip(_open_anime_aicop_labels(model_name), output[0].tolist()))
class AICorruptMetrics:
"""
Class for calculating an AI image corruptness score.
The `AICorruptMetrics` class allows you to calculate an AI image corruptness score using the AI image corrupted detection model.
:param model_name: The name of the AI image corrupted detection model. Default is 'caformer_s36_v0_focal'.
:type model_name: str
:param silent: If True, suppresses progress bars and additional output during calculation.
:type silent: bool
:param tqdm_desc: Description for the tqdm progress bar during calculation.
:type tqdm_desc: str
"""
def __init__(self, model_name: str = _DEFAULT_MODEL_NAME,
silent: bool = False, tqdm_desc: str = None):
self._model_name = model_name
self.silent = silent
self.tqdm_desc = tqdm_desc or self.__class__.__name__
def score(self, images: ImagesTyping, silent: bool = None):
"""
Calculate the AI image corruptness score for a set of images.
This method calculates the AI image corruptness score for a set of input images using the AI image corrupted detection model.
:param images: The set of input images for calculating the AI image corruptness score.
:type images: ImagesTyping
:param silent: If True, suppresses progress bars and additional output during calculation.
:type silent: bool
:return: The AI image corruptness score.
:rtype: float
"""
image_list = load_images(images)
if not image_list:
raise FileNotFoundError(f'Images for calculating AI corrupt score not provided - {images}.')
scores = np.array([
get_ai_corrupted(image, model_name=self._model_name)['corrupted'] | for image in tqdm(image_list, silent=self.silent if silent is None else silent, desc=self.tqdm_desc) | 1 | 2023-10-18 03:35:52+00:00 | 4k |
nju-websoft/SCR | framework/dataloader.py | [
{
"identifier": "trigger_combine_event",
"path": "framework/utils.py",
"snippet": "def trigger_combine_event(old_data, new_data):\n if len(new_data) == 0:\n return old_data\n init = False\n res = []\n if len(old_data) == 0:\n init = True\n old_data = copy.deepcopy(new_data)\n for old_sample_index in range(len(old_data)-1, -1, -1):\n old_sample = old_data[old_sample_index]\n combine_flag = False\n for new_sample_index in range(len(new_data)-1, -1, -1):\n new_sample = new_data[new_sample_index]\n if old_sample['input_ids'] == new_sample['input_ids']:\n old_offset = torch.nonzero(torch.tensor(np.array(old_sample['labels'])))\n new_offset = torch.nonzero(torch.tensor(np.array(new_sample['labels'])))\n eqoffset = [int(val) for val in old_offset if val in new_offset]\n combine_flag = True\n if len(eqoffset) > 0:\n eqflag = False\n for i in eqoffset: \n if old_sample['labels'][i] != new_sample['labels'][i]:\n # one ins has two event type on same trigger...\n eqflag = True \n if eqflag == False:\n new_data.remove(new_sample)\n continue\n \n old_sample['labels'] = copy.deepcopy(list(np.array(old_sample['labels']) + np.array(new_sample['labels'])))\n new_data.remove(new_sample)\n if (combine_flag and init) or (init == False):\n temp = copy.deepcopy(old_sample)\n res.append(temp)\n res += new_data\n return res"
},
{
"identifier": "args_combine_event",
"path": "framework/utils.py",
"snippet": "def args_combine_event(old_data, new_data):\n if len(new_data) == 0:\n return old_data\n init = False\n res = []\n if len(old_data) == 0:\n init = True\n old_data = copy.deepcopy(new_data)\n for old_sample_index in range(len(old_data)-1, -1, -1):\n old_sample = old_data[old_sample_index]\n combine_flag = False\n for new_sample_index in range(len(new_data)-1, -1, -1):\n new_sample = new_data[new_sample_index]\n if old_sample['input_ids'] == new_sample['input_ids'] and old_sample['trigger'] == new_sample['trigger']:\n \n combine_flag = True\n if old_sample == new_sample:\n new_data.remove(new_sample)\n continue\n for i in range(len(old_sample['args'])):\n if (old_sample['args'][i] == 0 and new_sample['args'][i] !=0) or (old_sample['args'][i] != 0 and new_sample['args'][i] ==0):\n old_sample['args'][i] = old_sample['args'][i] + new_sample['args'][i]\n elif old_sample['args'][i] != 0 and new_sample['args'][i] != 0 and new_sample['args'][i] != old_sample['args'][i]:\n continue\n elif old_sample['args'][i] != 0 and new_sample['args'][i] != 0 and new_sample['args'][i] == old_sample['args'][i]:\n continue\n old_sample['gold_args'] = old_sample['gold_args'] + new_sample['gold_args']\n new_ner = [(start, end) for (start, end, _) in old_sample['gold_args']]\n\n \n old_sample['ner'] = old_sample['ner'] + new_ner\n old_sample['args_offset'] = list(set(old_sample['args_offset']+new_sample['args_offset']))\n \n new_data.remove(new_sample)\n if (combine_flag and init) or (init == False):\n temp = copy.deepcopy(old_sample)\n res.append(temp)\n res += new_data\n return res"
}
] | import torch
import os
import copy
import numpy as np
import random
import json
from torch.utils.data import Dataset, DataLoader
from framework.utils import trigger_combine_event, args_combine_event
from transformers import BertTokenizer
from transformers import logging | 3,446 | # ner2id
self.ner2id = json.load(open(self.data_root+'ner2id.json', 'r'))
self.ner2id['None'] = 0
self.id2ner = {}
for key, value in self.ner2id.items():
self.id2ner[value] = key
# iter
self.stream_turn = config.stream_turn
self.batch = 0
# data stream
self.train_stream = json.load(open(self.data_root+'train_streams.json', 'r'))
self.id2stream = json.load(open(self.data_root+'id2stream.json', 'r'))
# set seed
self.seed = config.seed
random.seed(self.seed)
np.random.seed(self.seed)
self.shuffle_index = list(range(self.stream_turn))
random.shuffle(self.shuffle_index)
self.shuffle_index = np.argsort(self.shuffle_index)
#self.shuffle_index = PERM[i]
print(self.shuffle_index)
# seen data
self.seen_test_data = []
self.seen_dev_data = []
self.seen_event = []
self.seen_test_args_data = []
self.seen_dev_args_data = []
self.seen_args = []
self.ltlabel = []
# prepare data:
self.train_dataset, self.dev_dataset, self.test_dataset = self.read_data(self.data_root)
# tokenizer:
self.tokenizer = BertTokenizer.from_pretrained(config.bert_path)
if config.argument:
# role2id
self.role2id = json.load(open(self.data_root+'role2id.json', 'r'))
self.role2id['None'] = 0
self.id2role = {}
for key, value in self.role2id.items():
self.id2role[value] = key
# metadata
self.args_num = config.args_num
self.metadata = json.load(open(self.data_root+'metadata.json', 'r'))
self.unseen_metadata = {}
for key, value in self.metadata.items():
new_value = [self.role2id[val] for val in value]
self.metadata[key] = new_value
unseen_args = [i for i in range(self.args_num)]
unseen_args = list(set(unseen_args) - set(new_value) - set([0]))
self.unseen_metadata[key] = unseen_args
self.args_train_dataset, self.args_dev_dataset, self.args_test_dataset = self.read_args_data(self.data_root)
if config.lttest:
self.ltlabel = json.load(open(self.data_root+'lt_label.json', 'r'))
def __iter__(self):
return self
def __len__(self):
return self.stream_turn
def __next__(self):
cur_train_data, cur_test_data, cur_dev_data, current_event = [], [], [], []
if self.batch == self.stream_turn:
self.batch = 0
raise StopIteration()
index = self.shuffle_index[self.batch]
        # now is trigger data
cur_train_data = self.train_dataset[index]
cur_dev_data = self.dev_dataset[index]
cur_test_data = self.test_dataset[index]
current_event = self.train_stream[index]
self.seen_event += current_event
self.batch += 1
final_data = [[] , [] , []]
for i, data in enumerate([cur_train_data, cur_dev_data, cur_test_data]):
for x in data:
final_data[i] += data[x]
tr_data, de_data, cur_te_data = final_data
tr_data = trigger_combine_event([], tr_data)
de_data = trigger_combine_event([], de_data)
cur_te_data = trigger_combine_event([], cur_te_data)
temp_cur = copy.deepcopy(cur_te_data)
temp_dev = copy.deepcopy(de_data)
self.seen_test_data = trigger_combine_event(self.seen_test_data, temp_cur)
self.seen_dev_data = trigger_combine_event(self.seen_dev_data, temp_dev)
if self.config.argument:
# now is args data
cur_args_train_data = self.args_train_dataset[index]
cur_args_dev_data = self.args_dev_dataset[index]
cur_args_test_data = self.args_test_dataset[index]
#current_args = self.args_stream[index]
#self.seen_args = list(set(self.seen_args + current_args))
#unseen_args = [i for i in range(self.args_num)]
#unseen_args = list(set(unseen_args) - set(self.seen_args)- set([0]))
args_final_data = [[] , [] , []]
for i, data in enumerate([cur_args_train_data, cur_args_dev_data, cur_args_test_data]):
for x in data:
args_final_data[i] += data[x]
tr_args_data, de_args_data, cur_args_te_data = args_final_data
| logging.set_verbosity_warning()
logging.set_verbosity_error()
class ACETriDataset(Dataset):
def __init__(self, data):
self.data = data
def __len__(self):
return len(self.data)
def __getitem__(self, index):
return self.data[index]
def collate_fn(self, data):
sentence_ids = [torch.tensor(item['sentence_ids']) for item in data]
input_ids = [torch.tensor(item['input_ids']) for item in data]
input_masks = [torch.tensor(item['input_masks']) for item in data]
in_sent = [torch.tensor(item['in_sent']) for item in data]
segment_ids = [torch.tensor(item['segment_ids']) for item in data]
labels = [torch.tensor(item['labels']) for item in data]
ners = [torch.tensor(item['ners']) for item in data]
sentence = [item['sentence'] for item in data]
return (sentence_ids, input_ids, input_masks, in_sent, segment_ids, labels, ners, sentence)
def get_ACETriData_loader(data, config, shuffle = False, batch_size = None):
dataset = ACETriDataset(data)
if batch_size == None:
batchSize = min(config.batch_size, len(data))
else:
batchSize = min(batch_size, len(data))
ACETriData_loader = DataLoader(
dataset = dataset,
batch_size= batchSize,
shuffle= shuffle,
collate_fn= dataset.collate_fn,
drop_last= False
)
return ACETriData_loader
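# Hedged usage sketch for the factory above. `_DemoCfg` is a hypothetical
# stand-in for the real config object (only `batch_size` is read here), and
# the sample dict carries dummy values under the keys ACETriDataset.collate_fn
# expects. Not part of the original file.
def _demo_tri_loader():
    class _DemoCfg:
        batch_size = 2
    sample = {
        'sentence_ids': [0], 'input_ids': [101, 102], 'input_masks': [1, 1],
        'in_sent': [1, 1], 'segment_ids': [0, 0], 'labels': [0, 0],
        'ners': [0, 0], 'sentence': 'a demo sentence',
    }
    return get_ACETriData_loader([sample, sample], _DemoCfg(), shuffle=False)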
class ACEPredDataset(Dataset):
def __init__(self, data):
self.data = data
def __len__(self):
return len(self.data)
def __getitem__(self, index):
return self.data[index]
def collate_fn(self, data):
input_ids = [torch.tensor(item['input_ids']) for item in data]
input_masks = [torch.tensor(item['input_masks']) for item in data]
in_sent = [torch.tensor(item['in_sent']) for item in data]
segment_ids = [torch.tensor(item['segment_ids']) for item in data]
sentence = [item['sentence'] for item in data]
event = [item['event'] for item in data]
return (input_ids, input_masks, in_sent, segment_ids, sentence, event)
def get_ACEPredData_loader(data, config, shuffle = False, batch_size = None):
dataset = ACEPredDataset(data)
if batch_size == None:
batchSize = min(config.batch_size, len(data))
else:
batchSize = min(batch_size, len(data))
ACEPredData_loader = DataLoader(
dataset = dataset,
batch_size= batchSize,
shuffle= shuffle,
collate_fn= dataset.collate_fn,
drop_last= False
)
return ACEPredData_loader
def get_ACEArgData_loader(data, config, shuffle = False, batch_size = None):
dataset = ACEArgDataloader(data)
if batch_size == None:
batchSize = min(config.batch_size, len(data))
else:
batchSize = min(batch_size, len(data))
    ACEArgData_loader = DataLoader(
        dataset = dataset,
        batch_size= batchSize,
        shuffle= shuffle,
        collate_fn= dataset.collate_fn,
        drop_last= False
    )
    return ACEArgData_loader
class ACEArgDataloader(Dataset):
def __init__(self, data):
self.data = data
def __len__(self):
return len(self.data)
def __getitem__(self, index):
return self.data[index]
def collate_fn(self, data):
sentence = [item['sentence'] for item in data]
input_ids = [torch.tensor(item['input_ids']) for item in data]
input_masks = [torch.tensor(item['input_masks']) for item in data]
in_sent = [torch.tensor(item['in_sent']) for item in data]
segment_ids = [torch.tensor(item['segment_ids']) for item in data]
args = [torch.tensor(item['args']) for item in data]
args_offset = [item['args_offset'] for item in data]
gold_args = [item['gold_args'] for item in data]
ner = [item['ner'] for item in data]
trigger = [item['trigger'] for item in data]
return (sentence, input_ids, input_masks, in_sent, segment_ids, args, args_offset, gold_args, ner, trigger)
class ACEPredNerDataset(Dataset):
def __init__(self, data):
self.data = data
def __len__(self):
return len(self.data)
def __getitem__(self, index):
return self.data[index]
def collate_fn(self, data):
input_ids = [torch.tensor(item['input_ids']) for item in data]
input_masks = [torch.tensor(item['input_masks']) for item in data]
in_sent = [torch.tensor(item['in_sent']) for item in data]
segment_ids = [torch.tensor(item['segment_ids']) for item in data]
sentence = [item['sentence'] for item in data]
event = [item['event'] for item in data]
ner = [item['ner'] for item in data]
return (input_ids, input_masks, in_sent, segment_ids, sentence, event, ner)
def get_ACEPredNerData_loader(data, config, shuffle = False, batch_size = None):
dataset = ACEPredNerDataset(data)
if batch_size == None:
batchSize = min(config.batch_size, len(data))
else:
batchSize = min(batch_size, len(data))
ACEPredNerData_loader = DataLoader(
dataset = dataset,
batch_size= batchSize,
shuffle= shuffle,
collate_fn= dataset.collate_fn,
drop_last= False
)
return ACEPredNerData_loader
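# Illustrative micro-example, not from the original file: the batch-size clamp
# shared by the four factory functions above, isolated for clarity. Tiny
# datasets still yield one full batch instead of an oversized request.
def _clamped_batch_size(requested: int, n_samples: int) -> int:
    return min(requested, n_samples)  # e.g. _clamped_batch_size(512, 7) == 7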
class ACETriDataloder(Dataset):
def __init__(self, config, i):
self.config = config
self.data_root = config.data_root
#print(config.bert_path, type(config.bert_path))
self.tokenizer = BertTokenizer.from_pretrained(config.bert_path)
self.max_sentence_length = 512
# trigger category vocabulary
self.vocab2index = {}
self.index2vocab = {}
self.vocab2index = json.load(open(self.data_root+'label2id.json', 'r'))
self.vocab2index['None'] = 0
for key, value in self.vocab2index.items():
#value = value - 169
self.index2vocab[value] = key
#self.vocab2index[key] = value
# ner2id
self.ner2id = json.load(open(self.data_root+'ner2id.json', 'r'))
self.ner2id['None'] = 0
self.id2ner = {}
for key, value in self.ner2id.items():
self.id2ner[value] = key
# iter
self.stream_turn = config.stream_turn
self.batch = 0
# data stream
self.train_stream = json.load(open(self.data_root+'train_streams.json', 'r'))
self.id2stream = json.load(open(self.data_root+'id2stream.json', 'r'))
# set seed
self.seed = config.seed
random.seed(self.seed)
np.random.seed(self.seed)
self.shuffle_index = list(range(self.stream_turn))
random.shuffle(self.shuffle_index)
self.shuffle_index = np.argsort(self.shuffle_index)
#self.shuffle_index = PERM[i]
print(self.shuffle_index)
# seen data
self.seen_test_data = []
self.seen_dev_data = []
self.seen_event = []
self.seen_test_args_data = []
self.seen_dev_args_data = []
self.seen_args = []
self.ltlabel = []
# prepare data:
self.train_dataset, self.dev_dataset, self.test_dataset = self.read_data(self.data_root)
# tokenizer:
self.tokenizer = BertTokenizer.from_pretrained(config.bert_path)
if config.argument:
# role2id
self.role2id = json.load(open(self.data_root+'role2id.json', 'r'))
self.role2id['None'] = 0
self.id2role = {}
for key, value in self.role2id.items():
self.id2role[value] = key
# metadata
self.args_num = config.args_num
self.metadata = json.load(open(self.data_root+'metadata.json', 'r'))
self.unseen_metadata = {}
for key, value in self.metadata.items():
new_value = [self.role2id[val] for val in value]
self.metadata[key] = new_value
unseen_args = [i for i in range(self.args_num)]
unseen_args = list(set(unseen_args) - set(new_value) - set([0]))
self.unseen_metadata[key] = unseen_args
self.args_train_dataset, self.args_dev_dataset, self.args_test_dataset = self.read_args_data(self.data_root)
if config.lttest:
self.ltlabel = json.load(open(self.data_root+'lt_label.json', 'r'))
def __iter__(self):
return self
def __len__(self):
return self.stream_turn
def __next__(self):
cur_train_data, cur_test_data, cur_dev_data, current_event = [], [], [], []
if self.batch == self.stream_turn:
self.batch = 0
raise StopIteration()
index = self.shuffle_index[self.batch]
        # now is trigger data
cur_train_data = self.train_dataset[index]
cur_dev_data = self.dev_dataset[index]
cur_test_data = self.test_dataset[index]
current_event = self.train_stream[index]
self.seen_event += current_event
self.batch += 1
final_data = [[] , [] , []]
for i, data in enumerate([cur_train_data, cur_dev_data, cur_test_data]):
for x in data:
final_data[i] += data[x]
tr_data, de_data, cur_te_data = final_data
tr_data = trigger_combine_event([], tr_data)
de_data = trigger_combine_event([], de_data)
cur_te_data = trigger_combine_event([], cur_te_data)
temp_cur = copy.deepcopy(cur_te_data)
temp_dev = copy.deepcopy(de_data)
self.seen_test_data = trigger_combine_event(self.seen_test_data, temp_cur)
self.seen_dev_data = trigger_combine_event(self.seen_dev_data, temp_dev)
if self.config.argument:
# now is args data
cur_args_train_data = self.args_train_dataset[index]
cur_args_dev_data = self.args_dev_dataset[index]
cur_args_test_data = self.args_test_dataset[index]
#current_args = self.args_stream[index]
#self.seen_args = list(set(self.seen_args + current_args))
#unseen_args = [i for i in range(self.args_num)]
#unseen_args = list(set(unseen_args) - set(self.seen_args)- set([0]))
args_final_data = [[] , [] , []]
for i, data in enumerate([cur_args_train_data, cur_args_dev_data, cur_args_test_data]):
for x in data:
args_final_data[i] += data[x]
tr_args_data, de_args_data, cur_args_te_data = args_final_data | tr_args_data = args_combine_event([], tr_args_data) | 1 | 2023-10-17 02:40:04+00:00 | 4k |
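# --- Illustrative worked example, not part of the dataset row above:
# trigger_combine_event, quoted in this row's context block, merges two
# samples that share the same input_ids by summing their per-token label
# vectors; the arithmetic below reproduces that core step in isolation.
import numpy as np

old_labels = [0, 3, 0, 0]  # trigger of event type 3 on token 1
new_labels = [0, 0, 7, 0]  # trigger of event type 7 on token 2
merged = list(np.array(old_labels) + np.array(new_labels))
assert merged == [0, 3, 7, 0]  # the kept sample now carries both triggers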
IBM/VillanDiffusion | caption_dataset.py | [
{
"identifier": "Log",
"path": "util.py",
"snippet": "class Log:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKCYAN = '\\033[96m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n \n @staticmethod\n def error_msg(msg: str):\n return Log.FAIL + Log.BOLD + msg + Log.ENDC\n \n @staticmethod\n def warning_msg(msg: str):\n return Log.WARNING + Log.BOLD + msg + Log.ENDC\n\n @staticmethod\n def critical_msg(msg: str):\n return Log.OKCYAN + Log.BOLD + msg + Log.ENDC\n\n @staticmethod\n def info_msg(msg: str):\n return Log.OKGREEN + Log.BOLD + msg + Log.ENDC\n \n @staticmethod\n def error(msg: str):\n msg: str = Log.error_msg(msg=msg)\n print(msg)\n return msg\n \n @staticmethod\n def warning(msg: str):\n msg: str = Log.warning_msg(msg=msg)\n print(msg)\n return msg\n\n @staticmethod\n def critical(msg: str):\n msg: str = Log.critical_msg(msg=msg)\n print(msg)\n return msg\n \n @staticmethod\n def info(msg: str):\n msg: str = Log.info_msg(msg=msg)\n print(msg)\n return msg"
},
{
"identifier": "normalize",
"path": "util.py",
"snippet": "def normalize(x: Union[np.ndarray, torch.Tensor], vmin_in: float=None, vmax_in: float=None, vmin_out: float=0, vmax_out: float=1, eps: float=1e-5) -> Union[np.ndarray, torch.Tensor]:\n if vmax_out == None and vmin_out == None:\n return x\n\n if isinstance(x, np.ndarray):\n if vmin_in == None:\n min_x = np.min(x)\n else:\n min_x = vmin_in\n if vmax_in == None:\n max_x = np.max(x)\n else:\n max_x = vmax_in\n elif isinstance(x, torch.Tensor):\n if vmin_in == None:\n min_x = torch.min(x)\n else:\n min_x = vmin_in\n if vmax_in == None:\n max_x = torch.max(x)\n else:\n max_x = vmax_in\n else:\n raise TypeError(\"x must be a torch.Tensor or a np.ndarray\")\n if vmax_out == None:\n vmax_out = max_x\n if vmin_out == None:\n vmin_out = min_x\n return ((x - min_x) / (max_x - min_x + eps)) * (vmax_out - vmin_out) + vmin_out"
}
] | import io
import json
import os
import pathlib
import random
import shutil
import tempfile
import traceback
import warnings
import jsonlines
import datasets
import numpy as np
import requests
import torch
from random import sample
from typing import Callable, List, Tuple, Union
from functools import lru_cache, partial
from tqdm import tqdm
from datasets import load_dataset, concatenate_datasets, get_dataset_split_names, IterableDataset, load_from_disk
from datasets.dataset_dict import DatasetDict
from matplotlib import pyplot as plt
from torchvision import transforms
from torchvision.transforms import Compose, ToTensor, Lambda, ToPILImage, CenterCrop, Resize
from torchvision.utils import make_grid, save_image
from torch.utils.data import DataLoader, ConcatDataset, Subset, Dataset, IterableDataset
from torchvision.datasets import MNIST, CIFAR10, SVHN, FashionMNIST
from PIL import Image
from joblib import Parallel, delayed
from util import Log, normalize
from diffusers import DDPMScheduler | 2,900 | IMAGE = "image"
IS_CLEAN = "is_clean"
RAW = "raw"
LABEL = "label"
CAPTION = "caption"
RAW_CAPTION = "raw_caption"
CAPTION_AUGMENT_KEY: str = "caption_aug"
# CAPTION_TOKEN = "caption_token"
def __init__(self, name: str, label: int=None, root: str=None,
channel: int=None, image_size: int=None, split: str='[:100%]',
vmin: Union[int, float]=DEFAULT_VMIN, vmax: Union[int, float]=DEFAULT_VMAX,
batch_size: int=512, shuffle: bool=True, num_workers: int=8, force_R_to_0: bool=False, seed: int=0):
self.__root = root
self.__name = name
        if label != None and not isinstance(label, list) and not isinstance(label, tuple):
self.__label = [label]
else:
self.__label = label
self.__channel = channel
self.__vmin = vmin
self.__vmax = vmax
self.__batch_size = batch_size
self.__shuffle = shuffle
self.__split = split
self.__dataset = self.__load_dataset(name=name)
self.__set_img_shape(image_size=image_size)
self.__trigger = self.__target = self.__caption_trigger = self.__poison_rate = None
self.__clean_rate = 1
self.__seed = seed
self.__num_workers = num_workers
self.__force_R_to_0 = force_R_to_0
self.__caption_backdoor = CaptionBackdoor()
if root != None:
self.__backdoor = Backdoor(root=root)
# self.__prep_dataset()
def set_poison(self, trigger_type: str, target_type: str, caption_trigger_type: str=None, rand_caption_trig_pos: int=0, target_dx: int=-5, target_dy: int=-3, clean_rate: float=1.0, poison_rate: float=0.2) -> 'DatasetLoader':
if self.__root == None:
raise ValueError("Attribute 'root' is None")
self.__clean_rate = clean_rate
self.__poison_rate = poison_rate
self.__trigger = self.__backdoor.get_trigger(type=trigger_type, channel=self.__channel, image_size=self.__image_size, vmin=self.__vmin, vmax=self.__vmax)
self.__caption_trigger = self.__caption_backdoor.get_trigger(_type=caption_trigger_type)
self.__rand_caption_trig_pos: int = rand_caption_trig_pos
self.__target = self.__backdoor.get_target(type=target_type, trigger=self.__trigger, dx=target_dx, dy=target_dy)
return self
def __load_dataset(self, name: str):
datasets.config.IN_MEMORY_MAX_SIZE = 50 * 2 ** 30
split_method = f'train{self.__split}+test{self.__split}'
if name == DatasetLoader.MNIST:
return load_dataset("mnist", split=split_method)
elif name == DatasetLoader.CIFAR10:
return load_dataset("cifar10", split=split_method)
elif name == DatasetLoader.CELEBA:
return load_dataset("student/celebA", split=f"train{self.__split}")
elif name == DatasetLoader.CELEBA_HQ:
return load_dataset("datasets/celeba_hq_256", split=f"train{self.__split}")
        elif name == DatasetLoader.CELEBA_HQ_DIALOG:
return CelebA_HQ_Dialog(path="datasets/CelebA-Dialog (HQ)").prepare(split=f"train{self.__split}")
elif name == DatasetLoader.LAION_COCO or name == DatasetLoader.LAION_COCO_20K:
return LaionCoco.load("/work/u2941379/workspace/laion_coco_hg200K.hf")
elif name == DatasetLoader.LAION_COCO_1:
return LaionCoco.load("/work/u2941379/workspace/laion_coco_hg1.hf")
elif name == DatasetLoader.LAION_COCO_200:
return LaionCoco.load("/work/u2941379/workspace/laion_coco_hg200.hf")
elif name == DatasetLoader.LAION_COCO_50K:
return LaionCoco.load("/work/u2941379/workspace/laion_coco_hg50K.hf")
elif name == DatasetLoader.POKEMON_CAPTION:
return load_dataset("lambdalabs/pokemon-blip-captions", split=f"train{self.__split}")
else:
raise NotImplementedError(f"Undefined dataset: {name}")
def __set_img_shape(self, image_size: int) -> None:
# Set channel
if self.__name == self.MNIST:
self.__channel = 1 if self.__channel == None else self.__channel
# self.__vmin = -1
# self.__vmax = 1
self.__cmap = "gray"
        elif self.__name in (self.CIFAR10, self.CELEBA, self.CELEBA_HQ, self.LSUN_CHURCH, self.LAION_COCO, self.LAION_COCO_1, self.LAION_COCO_200, self.LAION_COCO_20K, self.LAION_COCO_50K, self.POKEMON_CAPTION, self.CELEBA_HQ_DIALOG):
self.__channel = 3 if self.__channel == None else self.__channel
# self.__vmin = -1
# self.__vmax = 1
self.__cmap = None
else:
raise NotImplementedError(f"No dataset named as {self.__name}")
# Set image size
if image_size == None:
if self.__name == self.MNIST:
self.__image_size = 32
elif self.__name == self.CIFAR10:
self.__image_size = 32
elif self.__name == self.CELEBA:
self.__image_size = 64
elif self.__name == self.CELEBA_HQ or self.__name == self.LSUN_CHURCH:
self.__image_size = 256
            elif self.__name in (self.LAION_COCO, self.LAION_COCO_1, self.LAION_COCO_200, self.LAION_COCO_20K, self.LAION_COCO_50K, self.POKEMON_CAPTION, self.CELEBA_HQ_DIALOG):
self.__image_size = 512
else:
raise NotImplementedError(f"No dataset named as {self.__name}")
else:
self.__image_size = image_size
def __get_transform(self, prev_trans: List=[], next_trans: List=[]):
if self.__channel == 1:
channel_trans = transforms.Grayscale(num_output_channels=1)
elif self.__channel == 3:
channel_trans = transforms.Lambda(lambda x: x.convert("RGB"))
aug_trans = []
        if self.__name != DatasetLoader.LSUN_CHURCH:
aug_trans = [transforms.RandomHorizontalFlip()]
trans = [channel_trans,
transforms.Resize([self.__image_size, self.__image_size]),
transforms.ToTensor(),
| # %%
"""
Backdoor Poisoned Dataset
"""
# from tmp_parse_dataset import LaionCoco
DEFAULT_VMIN = float(-1.0)
DEFAULT_VMAX = float(1.0)
class DatasetLoader(object):
# Dataset generation mode
MODE_FIXED = "FIXED"
MODE_FLEX = "FLEX"
# Dataset names
MNIST = "MNIST"
CIFAR10 = "CIFAR10"
CELEBA = "CELEBA"
LSUN_CHURCH = "LSUN-CHURCH"
LSUN_BEDROOM = "LSUN-BEDROOM"
CELEBA_HQ = "CELEBA-HQ"
CELEBA_HQ_DIALOG = "CELEBA-HQ-DIALOG"
LAION_COCO = "LAION-COCO"
LAION_COCO_1 = "LAION-COCO-1"
LAION_COCO_20K = "LAION-COCO-20K"
LAION_COCO_200 = "LAION-COCO-200"
LAION_COCO_50K = "LAION-COCO-50K"
POKEMON_CAPTION = "POKEMON-CAPTION"
# Inpaint Type
INPAINT_BOX: str = "INPAINT_BOX"
INPAINT_LINE: str = "INPAINT_LINE"
TRAIN = "train"
TEST = "test"
POISON_IMAGE = "poison_image"
IMAGE = "image"
IS_CLEAN = "is_clean"
RAW = "raw"
LABEL = "label"
CAPTION = "caption"
RAW_CAPTION = "raw_caption"
CAPTION_AUGMENT_KEY: str = "caption_aug"
# CAPTION_TOKEN = "caption_token"
def __init__(self, name: str, label: int=None, root: str=None,
channel: int=None, image_size: int=None, split: str='[:100%]',
vmin: Union[int, float]=DEFAULT_VMIN, vmax: Union[int, float]=DEFAULT_VMAX,
batch_size: int=512, shuffle: bool=True, num_workers: int=8, force_R_to_0: bool=False, seed: int=0):
self.__root = root
self.__name = name
        if label != None and not isinstance(label, list) and not isinstance(label, tuple):
self.__label = [label]
else:
self.__label = label
self.__channel = channel
self.__vmin = vmin
self.__vmax = vmax
self.__batch_size = batch_size
self.__shuffle = shuffle
self.__split = split
self.__dataset = self.__load_dataset(name=name)
self.__set_img_shape(image_size=image_size)
self.__trigger = self.__target = self.__caption_trigger = self.__poison_rate = None
self.__clean_rate = 1
self.__seed = seed
self.__num_workers = num_workers
self.__force_R_to_0 = force_R_to_0
self.__caption_backdoor = CaptionBackdoor()
if root != None:
self.__backdoor = Backdoor(root=root)
# self.__prep_dataset()
def set_poison(self, trigger_type: str, target_type: str, caption_trigger_type: str=None, rand_caption_trig_pos: int=0, target_dx: int=-5, target_dy: int=-3, clean_rate: float=1.0, poison_rate: float=0.2) -> 'DatasetLoader':
if self.__root == None:
raise ValueError("Attribute 'root' is None")
self.__clean_rate = clean_rate
self.__poison_rate = poison_rate
self.__trigger = self.__backdoor.get_trigger(type=trigger_type, channel=self.__channel, image_size=self.__image_size, vmin=self.__vmin, vmax=self.__vmax)
self.__caption_trigger = self.__caption_backdoor.get_trigger(_type=caption_trigger_type)
self.__rand_caption_trig_pos: int = rand_caption_trig_pos
self.__target = self.__backdoor.get_target(type=target_type, trigger=self.__trigger, dx=target_dx, dy=target_dy)
return self
def __load_dataset(self, name: str):
datasets.config.IN_MEMORY_MAX_SIZE = 50 * 2 ** 30
split_method = f'train{self.__split}+test{self.__split}'
if name == DatasetLoader.MNIST:
return load_dataset("mnist", split=split_method)
elif name == DatasetLoader.CIFAR10:
return load_dataset("cifar10", split=split_method)
elif name == DatasetLoader.CELEBA:
return load_dataset("student/celebA", split=f"train{self.__split}")
elif name == DatasetLoader.CELEBA_HQ:
return load_dataset("datasets/celeba_hq_256", split=f"train{self.__split}")
        elif name == DatasetLoader.CELEBA_HQ_DIALOG:
return CelebA_HQ_Dialog(path="datasets/CelebA-Dialog (HQ)").prepare(split=f"train{self.__split}")
elif name == DatasetLoader.LAION_COCO or name == DatasetLoader.LAION_COCO_20K:
return LaionCoco.load("/work/u2941379/workspace/laion_coco_hg200K.hf")
elif name == DatasetLoader.LAION_COCO_1:
return LaionCoco.load("/work/u2941379/workspace/laion_coco_hg1.hf")
elif name == DatasetLoader.LAION_COCO_200:
return LaionCoco.load("/work/u2941379/workspace/laion_coco_hg200.hf")
elif name == DatasetLoader.LAION_COCO_50K:
return LaionCoco.load("/work/u2941379/workspace/laion_coco_hg50K.hf")
elif name == DatasetLoader.POKEMON_CAPTION:
return load_dataset("lambdalabs/pokemon-blip-captions", split=f"train{self.__split}")
else:
raise NotImplementedError(f"Undefined dataset: {name}")
def __set_img_shape(self, image_size: int) -> None:
# Set channel
if self.__name == self.MNIST:
self.__channel = 1 if self.__channel == None else self.__channel
# self.__vmin = -1
# self.__vmax = 1
self.__cmap = "gray"
        elif self.__name in (self.CIFAR10, self.CELEBA, self.CELEBA_HQ, self.LSUN_CHURCH, self.LAION_COCO, self.LAION_COCO_1, self.LAION_COCO_200, self.LAION_COCO_20K, self.LAION_COCO_50K, self.POKEMON_CAPTION, self.CELEBA_HQ_DIALOG):
self.__channel = 3 if self.__channel == None else self.__channel
# self.__vmin = -1
# self.__vmax = 1
self.__cmap = None
else:
raise NotImplementedError(f"No dataset named as {self.__name}")
# Set image size
if image_size == None:
if self.__name == self.MNIST:
self.__image_size = 32
elif self.__name == self.CIFAR10:
self.__image_size = 32
elif self.__name == self.CELEBA:
self.__image_size = 64
elif self.__name == self.CELEBA_HQ or self.__name == self.LSUN_CHURCH:
self.__image_size = 256
            elif self.__name in (self.LAION_COCO, self.LAION_COCO_1, self.LAION_COCO_200, self.LAION_COCO_20K, self.LAION_COCO_50K, self.POKEMON_CAPTION, self.CELEBA_HQ_DIALOG):
self.__image_size = 512
else:
raise NotImplementedError(f"No dataset named as {self.__name}")
else:
self.__image_size = image_size
def __get_transform(self, prev_trans: List=[], next_trans: List=[]):
if self.__channel == 1:
channel_trans = transforms.Grayscale(num_output_channels=1)
elif self.__channel == 3:
channel_trans = transforms.Lambda(lambda x: x.convert("RGB"))
aug_trans = []
        if self.__name != DatasetLoader.LSUN_CHURCH:
aug_trans = [transforms.RandomHorizontalFlip()]
trans = [channel_trans,
transforms.Resize([self.__image_size, self.__image_size]),
transforms.ToTensor(), | transforms.Lambda(lambda x: normalize(vmin_in=0, vmax_in=1, vmin_out=self.__vmin, vmax_out=self.__vmax, x=x)), | 1 | 2023-10-17 19:57:37+00:00 | 4k |
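# --- Hedged usage sketch, not part of the dataset row above: the intended
# construct-then-poison call pattern of DatasetLoader, wrapped in a function
# because __init__ downloads the underlying dataset. The trigger/target/
# caption-trigger strings are placeholders for whatever Backdoor and
# CaptionBackdoor actually accept; they are assumptions, not names verified
# against the source.
def _demo_poisoned_loader(root: str):
    dsl = DatasetLoader(name=DatasetLoader.POKEMON_CAPTION, root=root,
                        batch_size=4, split='[:1%]')
    return dsl.set_poison(trigger_type='BOX_TRIGGER', target_type='TARGET_IMG',
                          caption_trigger_type='CAPTION_TRIGGER',
                          clean_rate=1.0, poison_rate=0.1)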
WHUlwb/Assisted_learning | train_t.py | [
{
"identifier": "Dice_loss",
"path": "loss.py",
"snippet": "def Dice_loss(inputs, target, beta=1, smooth = 1e-5):\r\n # inputs B, C, H, W, and target B, H, W, C. \r\n # There are C dimensions in total, each dimension representing a class.\r\n n, c, h, w = inputs.size()\r\n nt, ht, wt, ct = target.size()\r\n if h != ht and w != wt:\r\n inputs = F.interpolate(inputs, size=(ht, wt), mode=\"bilinear\", align_corners=True)\r\n \r\n temp_inputs = torch.softmax(inputs.transpose(1, 2).transpose(2, 3).contiguous().view(n, -1, c),-1)\r\n temp_target = target.view(n, -1, ct)\r\n #--------------------------------------------#\r\n # dice loss\r\n #--------------------------------------------#\r\n tp = torch.sum(temp_target * temp_inputs, axis=[0,1])\r\n fp = torch.sum(temp_inputs, axis=[0,1]) - tp\r\n fn = torch.sum(temp_target, axis=[0,1]) - tp\r\n score = ((1 + beta ** 2) * tp + smooth) / ((1 + beta ** 2) * tp + beta ** 2 * fn + fp + smooth)\r\n dice_loss = 1 - torch.mean(score)\r\n return dice_loss\r"
},
{
"identifier": "CE_Loss",
"path": "loss.py",
"snippet": "def CE_Loss(inputs, target, reduction='mean'):\r\n # The shape of the input for \"CrossEntropyLoss\" is N,C, target is N\r\n n, c, h, w = inputs.size()\r\n nt, ht, wt, ct = target.size()\r\n if h != ht and w != wt:\r\n inputs = F.interpolate(inputs, size=(ht, wt), mode=\"bilinear\", align_corners=True)\r\n temp_inputs = inputs.transpose(1, 2).transpose(2, 3).contiguous().view(-1, c)\r\n temp_target = target.view(-1, c)\r\n temp_target = torch.argmax(temp_target, dim=1).view(-1)\r\n CE_loss = nn.CrossEntropyLoss(reduction=reduction)(temp_inputs, temp_target)\r\n return CE_loss\r"
},
{
"identifier": "MyDataset",
"path": "dataset.py",
"snippet": "class MyDataset(Dataset):\n def __init__(self, root, is_training=False):\n self.is_training = is_training\n self.root = root\n self.files_A = sorted(glob.glob(os.path.join(root, 'optical') + '/*.tif')) #optical\n self.files_B = sorted(glob.glob(os.path.join(root, 'sar') + '/*.tif')) #SAR\n self.files_D = sorted(glob.glob(os.path.join(root, 'label') + '/*.tif')) #label\n self.trans = tf.Compose([\n tf.ToTensor(),\n tf.Normalize([0.5,0.5,0.5], [0.5,0.5,0.5])\n ])\n self.tans_gray = tf.Compose([\n tf.ToTensor(),\n tf.Normalize([0.5], [0.5])\n ])\n # self.img_size = [128,192,256,320,384,448]\n # self.img_size = [128,192,256]\n self.size = config.image_size\n self.num_classes = config.classnum\n def __getitem__(self, index):\n img1 = Image.open(self.files_A[index % len(self.files_A)])\n img2 = Image.open(self.files_B[index % len(self.files_B)])\n # mask = Image.open(self.files_D[index % len(self.files_D)])\n mask = Image.fromarray(cv2.imread(self.files_D[index % len(self.files_D)],0))\n \n if self.is_training:\n img1 = tf.Resize((self.size,self.size))(img1)\n img2 = tf.Resize((self.size,self.size))(img2)\n mask = tf.Resize((self.size,self.size))(mask)\n\n img1,img2,mask = random_roate(img1,img2, mask)\n img1 = enhance_feature(img1)\n # img2 = enhance_feature(img2)\n else:\n img1 = tf.Resize((self.size,self.size))(img1)\n img2 = tf.Resize((self.size,self.size))(img2)\n mask = tf.Resize((self.size,self.size))(mask)\n\n img_RGB = np.array(img1)[...,:-1]\n Nir = np.array(img1)[...,-1]\n img_RGB = self.trans(img_RGB)\n Nir = self.tans_gray(Nir)\n image1 = torch.cat([img_RGB,Nir],dim=0)\n\n image2 = self.tans_gray(img2)\n # mask_img = tf.ToTensor()(mask)\n # mask = np.array(mask) #dsm rgbn\n mask = np.array(mask)//10 #sar rgbn\n seg_labels = np.eye(self.num_classes)[mask.reshape([-1])]\n seg_labels = seg_labels.reshape((int(self.size), int(self.size), self.num_classes))\n mask = torch.from_numpy(np.array(mask)).long()\n seg_labels = torch.from_numpy(np.array(seg_labels)).type(torch.FloatTensor)\n # return image1, image2, mask, seg_labels\n return img_RGB, image2, mask, seg_labels #only RGB\n def __len__(self):\n return len(self.files_A)"
},
{
"identifier": "config",
"path": "config.py",
"snippet": ""
},
{
"identifier": "HRnet",
"path": "hrnet/hrnet.py",
"snippet": "class HRnet(nn.Module):\r\n def __init__(self, in_channel, num_classes = 21, backbone = 'hrnetv2_w18', pretrained = True):\r\n super(HRnet, self).__init__()\r\n self.backbone = HRnet_Backbone(in_channel, backbone = backbone, pretrained = pretrained)\r\n\r\n last_inp_channels = np.int(np.sum(self.backbone.model.pre_stage_channels))\r\n\r\n self.last_layer = nn.Sequential(\r\n nn.Conv2d(in_channels=last_inp_channels, out_channels=last_inp_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(last_inp_channels, momentum=BN_MOMENTUM),\r\n nn.ReLU(inplace=False),\r\n nn.Conv2d(in_channels=last_inp_channels, out_channels=num_classes, kernel_size=1, stride=1, padding=0)\r\n )\r\n\r\n def forward(self, inputs):\r\n H, W = inputs.size(2), inputs.size(3)\r\n x = self.backbone(inputs)\r\n \r\n # Upsampling\r\n x0_h, x0_w = x[0].size(2), x[0].size(3)\r\n x1 = F.interpolate(x[1], size=(x0_h, x0_w), mode='bilinear', align_corners=True)\r\n x2 = F.interpolate(x[2], size=(x0_h, x0_w), mode='bilinear', align_corners=True)\r\n x3 = F.interpolate(x[3], size=(x0_h, x0_w), mode='bilinear', align_corners=True)\r\n\r\n x = torch.cat([x[0], x1, x2, x3], 1)\r\n\r\n x = self.last_layer(x)\r\n x = F.interpolate(x, size=(H, W), mode='bilinear', align_corners=True)\r\n return x\r"
}
] | import torch
import numpy as np
import os
import torch.nn as nn
import metric
import time
from torch.utils.data import DataLoader
from loss import Dice_loss,CE_Loss
from torch.autograd import Variable
from dataset import MyDataset
from config import config
from hrnet.hrnet import HRnet
from torch.cuda.amp import GradScaler as Gradscaler
from torch.cuda.amp import autocast
from tqdm import tqdm | 2,152 | scaler = Gradscaler()
traindd = MyDataset(config.trainroot,is_training=True)
traindata = DataLoader(traindd,batch_size=config.batch_size, shuffle=True)
valdata = DataLoader(MyDataset(config.valroot,is_training=False), num_workers=0, batch_size=config.batch_size, shuffle=False)
net = HRnet(in_channel=3,num_classes=config.classnum,backbone='hrnetv2_w32').cuda()
optimizer = torch.optim.SGD(net.parameters(), lr=config.lr, momentum=0.9, weight_decay=1e-4)
scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.95)
iters = len(traindata)
train_size = len(traindata)
val_size = len(valdata)
print('train data size: %04d'%train_size)
print('val data size: %04d'%val_size)
global_Fb = 0
start = time.time()
cls_weights = np.ones([config.classnum], np.float32)
weights = torch.from_numpy(cls_weights)
weights = weights.cuda()
if __name__ == '__main__':
for epoch in range(config.epoch_start,config.n_epochs):
seg_loss_t = 0
kd_loss_t = 0
val_Loss = 0
score = 0
conf_mat_val = 0
conf_mat_tra = 0
loop = tqdm(enumerate(traindata), total = len(traindata))
for i,data in loop:
            rgbn, sar, m, seg = data
rgbn = Variable(rgbn).cuda()
sar = Variable(sar).cuda()
m = Variable(m).cuda()
seg = Variable(seg).cuda()
optimizer.zero_grad()
if config.amp:
with autocast():
rgbresult = net(rgbn)
ce = CE_Loss(rgbresult,seg)
| scaler = Gradscaler()
traindd = MyDataset(config.trainroot,is_training=True)
traindata = DataLoader(traindd,batch_size=config.batch_size, shuffle=True)
valdata = DataLoader(MyDataset(config.valroot,is_training=False), num_workers=0, batch_size=config.batch_size, shuffle=False)
net = HRnet(in_channel=3,num_classes=config.classnum,backbone='hrnetv2_w32').cuda()
optimizer = torch.optim.SGD(net.parameters(), lr=config.lr, momentum=0.9, weight_decay=1e-4)
scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.95)
iters = len(traindata)
train_size = len(traindata)
val_size = len(valdata)
print('train data size: %04d'%train_size)
print('val data size: %04d'%val_size)
global_Fb = 0
start = time.time()
cls_weights = np.ones([config.classnum], np.float32)
weights = torch.from_numpy(cls_weights)
weights = weights.cuda()
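# Illustrative note (assumption about intent, worked from the config above):
# with ExponentialLR(gamma=0.95), each scheduler.step() multiplies the
# learning rate by 0.95, i.e. lr_k = lr_0 * 0.95**k, so an initial 0.01
# becomes 0.0095 after one epoch and 0.009025 after two.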
if __name__ == '__main__':
for epoch in range(config.epoch_start,config.n_epochs):
seg_loss_t = 0
kd_loss_t = 0
val_Loss = 0
score = 0
conf_mat_val = 0
conf_mat_tra = 0
loop = tqdm(enumerate(traindata), total = len(traindata))
for i,data in loop:
            rgbn, sar, m, seg = data
rgbn = Variable(rgbn).cuda()
sar = Variable(sar).cuda()
m = Variable(m).cuda()
seg = Variable(seg).cuda()
optimizer.zero_grad()
if config.amp:
with autocast():
rgbresult = net(rgbn)
ce = CE_Loss(rgbresult,seg) | dice = Dice_loss(rgbresult,seg) | 0 | 2023-10-17 06:19:02+00:00 | 4k |
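# --- Hedged sketch, not code from the original file: how an autocast
# iteration like the one above is typically closed out with the GradScaler
# created earlier. The loss composition mirrors this row's next-line hint
# (ce + dice); the scale/step/update sequence is the standard torch.cuda.amp
# recipe.
def _amp_step(scaler, optimizer, ce, dice):
    loss = ce + dice
    scaler.scale(loss).backward()  # scale the loss to avoid fp16 underflow
    scaler.step(optimizer)         # unscales gradients, then optimizer.step()
    scaler.update()                # adapt the scale factor for the next iter
    return loss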
dagedarr/telegram-budget | handlers/registration_handler.py | [
{
"identifier": "get_by_id",
"path": "core/crud.py",
"snippet": "async def get_by_id(\n model: ModelType,\n obj_id: int,\n session: AsyncSession\n) -> ModelType:\n \"\"\"\n Получение объекта по ID.\n\n Parameters:\n - model (ModelType): Тип модели SQLAlchemy.\n - obj_id (int): Идентификатор объекта.\n - session (AsyncSession): Асинхронная сессия для взаимодействия с БД.\n\n Returns:\n ModelType: Объект модели, найденный по ID.\n \"\"\"\n\n get_obj_in_db = await session.execute(\n select(model).where(model.id == obj_id)\n )\n return get_obj_in_db.scalars().first()"
},
{
"identifier": "get_or_create",
"path": "core/crud.py",
"snippet": "async def get_or_create(\n session: AsyncSession,\n model: ModelType,\n **kwargs\n) -> Tuple[ModelType, bool]:\n \"\"\"\n Получение или создание объекта.\n\n Parameters:\n - session (AsyncSession): Асинхронная сессия для взаимодействия с БД.\n - model (ModelType): Тип модели SQLAlchemy.\n - **kwargs: Параметры для фильтрации или создания объекта.\n\n Returns:\n Tuple[ModelType, bool]: Кортеж, содержащий объект модели и флаг,\n указывающий на создание нового объекта.\n \"\"\"\n\n instance = await session.execute(select(model).filter_by(**kwargs))\n instance = instance.scalars().one_or_none()\n flag = True\n\n if not instance:\n instance = await session.execute(insert(model).values(**kwargs))\n await session.commit()\n flag = False\n return instance, flag"
},
{
"identifier": "update",
"path": "core/crud.py",
"snippet": "async def update(\n db_obj: ModelType,\n obj_in: dict,\n session: AsyncSession,\n) -> ModelType:\n \"\"\"\n Изменение значений полей объекта.\n\n Parameters:\n - db_obj (ModelType): Объект модели для обновления.\n - obj_in (dict): Словарь с новыми значениями полей.\n - session (AsyncSession): Асинхронная сессия для взаимодействия с БД.\n\n Returns:\n ModelType: Обновленный объект модели.\n \"\"\"\n\n for field in obj_in:\n setattr(db_obj, field, obj_in[field])\n session.add(db_obj)\n await session.commit()\n await session.refresh(db_obj)\n return db_obj"
},
{
"identifier": "RegistrationForm",
"path": "forms/user_form.py",
"snippet": "class RegistrationForm(StatesGroup):\n username = State()\n mail = State()"
},
{
"identifier": "set_info_keyboard",
"path": "keyboards/user_keyboards.py",
"snippet": "def set_info_keyboard(is_onboarding=False) -> InlineKeyboardMarkup:\n \"\"\"Клавиатура изменения данных пользователя.\"\"\"\n\n builder = InlineKeyboardBuilder()\n builder.add(InlineKeyboardButton(\n text='Ввести почту' if is_onboarding else 'Поменять почту',\n callback_data='get_mail')\n )\n builder.add(InlineKeyboardButton(\n text='Поменять имя',\n callback_data='get_username')\n )\n if is_onboarding:\n builder.add(InlineKeyboardButton(\n text='Завершить регистрацию',\n callback_data='registration_end')\n )\n else:\n builder.add(InlineKeyboardButton(\n text='Назад',\n callback_data='other')\n )\n builder.adjust(2)\n return builder.as_markup()"
},
{
"identifier": "universal_keyboard",
"path": "keyboards/user_keyboards.py",
"snippet": "def universal_keyboard(\n buttons: List[Tuple[str, Union[str, CallbackData]]],\n buttons_per_row: int = 1,\n) -> InlineKeyboardMarkup:\n \"\"\"Универсальная клавиатура с кнопками колбека.\"\"\"\n\n builder = InlineKeyboardBuilder()\n\n if len(buttons) == 1:\n text, data = buttons[0]\n builder.add(InlineKeyboardButton(text=text, callback_data=data))\n else:\n line = []\n for text, data in buttons:\n line.append(\n InlineKeyboardButton(text=text, callback_data=data)\n )\n builder.add(*line)\n builder.adjust(buttons_per_row)\n return builder.as_markup()"
},
{
"identifier": "User",
"path": "models/user.py",
"snippet": "class User(Base):\n \"\"\"Модель пользователя.\"\"\"\n\n username = Column(String(64), nullable=True)\n email = Column(String(254), unique=True, index=True, nullable=True)\n registration_time = Column(BigInteger) # Время в формате Unix.\n is_onboarding = Column(Boolean, default=False)\n\n categories = relationship(\n 'Category', back_populates='user',\n cascade='all, delete-orphan', lazy='selectin'\n )\n aliases = relationship(\n 'Alias', back_populates='user', cascade='all, delete-orphan'\n )\n transactions = relationship(\n 'Transaction', back_populates='user',\n cascade='all, delete-orphan', lazy='selectin'\n )"
},
{
"identifier": "callback_message",
"path": "utils/user_actions.py",
"snippet": "async def callback_message(\n target: Union[Message, CallbackQuery],\n text: str,\n reply_markup: InlineKeyboardMarkup = None,\n replace_message: bool = False,\n delete_reply: bool = True,\n **kwargs,\n):\n \"\"\"Редактировние сообщения.\"\"\"\n\n target = target if isinstance(target, Message) else target.message\n\n if replace_message:\n await target.edit_text(\n text=text,\n reply_markup=reply_markup,\n **kwargs\n )\n else:\n await target.answer(\n text=text,\n reply_markup=reply_markup,\n **kwargs\n )\n await target.delete_reply_markup() if delete_reply else None"
},
{
"identifier": "make_onboarding_end",
"path": "utils/user_actions.py",
"snippet": "async def make_onboarding_end(\n user_id: int,\n session: AsyncSession,\n default_username: str\n):\n \"\"\"\n Завершает процесс онбординга для пользователя, устанавливая значения\n по умолчанию, если они не были предварительно установлены.\n \"\"\"\n\n user: User = await get_by_id(\n model=User,\n obj_id=user_id,\n session=session\n )\n if not user.username:\n user.username = default_username\n if not user.registration_time:\n user.registration_time = datetime.now().timestamp()\n user.is_onboarding = True\n await session.commit()"
}
] | from aiogram import F, Router
from aiogram.fsm.context import FSMContext
from aiogram.types import CallbackQuery, Message
from sqlalchemy.ext.asyncio import AsyncSession
from core.crud import get_by_id, get_or_create, update
from forms import RegistrationForm
from keyboards import set_info_keyboard, universal_keyboard
from models import User
from utils.user_actions import callback_message, make_onboarding_end | 1,742 |
router = Router(name='registration_router')
# ------------------------ REGISTRATION ------------------------
@router.callback_query(F.data == 'registration')
async def registration(callback: CallbackQuery, session: AsyncSession):
"""Регистрация пользователя."""
|
router = Router(name='registration_router')
# ------------------------ REGISTRATION ------------------------
@router.callback_query(F.data == 'registration')
async def registration(callback: CallbackQuery, session: AsyncSession):
"""Регистрация пользователя."""
| await get_or_create( | 1 | 2023-10-23 17:30:24+00:00 | 4k |
nchen909/Pass-Tuning | evaluator/CodeBLEU/calc_code_bleu.py | [
{
"identifier": "bleu",
"path": "evaluator/CodeBLEU/bleu.py",
"snippet": "def sentence_bleu(\r\n references,\r\n hypothesis,\r\n weights=(0.25, 0.25, 0.25, 0.25),\r\n smoothing_function=None,\r\n auto_reweigh=False,\r\n):\r\ndef corpus_bleu(\r\n list_of_references,\r\n hypotheses,\r\n weights=(0.25, 0.25, 0.25, 0.25),\r\n smoothing_function=None,\r\n auto_reweigh=False,\r\n):\r\ndef modified_precision(references, hypothesis, n):\r\ndef closest_ref_length(references, hyp_len):\r\ndef brevity_penalty(closest_ref_len, hyp_len):\r\n def __init__(self, epsilon=0.1, alpha=5, k=5):\r\n def method0(self, p_n, *args, **kwargs):\r\n def method1(self, p_n, *args, **kwargs):\r\n def method2(self, p_n, *args, **kwargs):\r\n def method3(self, p_n, *args, **kwargs):\r\n def method4(self, p_n, references, hypothesis, hyp_len=None, *args, **kwargs):\r\n def method5(self, p_n, references, hypothesis, hyp_len=None, *args, **kwargs):\r\n def method6(self, p_n, references, hypothesis, hyp_len=None, *args, **kwargs):\r\n def method7(self, p_n, references, hypothesis, hyp_len=None, *args, **kwargs):\r\nclass SmoothingFunction:\r"
},
{
"identifier": "weighted_ngram_match",
"path": "evaluator/CodeBLEU/weighted_ngram_match.py",
"snippet": "def sentence_bleu(\r\n references,\r\n hypothesis,\r\n weights=(0.25, 0.25, 0.25, 0.25),\r\n smoothing_function=None,\r\n auto_reweigh=False,\r\n):\r\ndef corpus_bleu(\r\n list_of_references,\r\n hypotheses,\r\n weights=(0.25, 0.25, 0.25, 0.25),\r\n smoothing_function=None,\r\n auto_reweigh=False,\r\n):\r\ndef modified_recall(references, hypothesis, n):\r\n def weighted_sum(weights, counts):\r\ndef closest_ref_length(references, hyp_len):\r\ndef brevity_penalty(closest_ref_len, hyp_len):\r\n def __init__(self, epsilon=0.1, alpha=5, k=5):\r\n def method0(self, p_n, *args, **kwargs):\r\n def method1(self, p_n, *args, **kwargs):\r\n def method2(self, p_n, *args, **kwargs):\r\n def method3(self, p_n, *args, **kwargs):\r\n def method4(self, p_n, references, hypothesis, hyp_len=None, *args, **kwargs):\r\n def method5(self, p_n, references, hypothesis, hyp_len=None, *args, **kwargs):\r\n def method6(self, p_n, references, hypothesis, hyp_len=None, *args, **kwargs):\r\n def method7(self, p_n, references, hypothesis, hyp_len=None, *args, **kwargs):\r\nclass SmoothingFunction:\r"
},
{
"identifier": "syntax_match",
"path": "evaluator/CodeBLEU/syntax_match.py",
"snippet": "def calc_syntax_match(references, candidate, lang):\r\ndef corpus_syntax_match(references, candidates, lang):\r\n def get_all_sub_trees(root_node):\r\n JAVA_LANGUAGE = Language('build/my-languages.so', lang)\r"
},
{
"identifier": "dataflow_match",
"path": "evaluator/CodeBLEU/dataflow_match.py",
"snippet": "def calc_dataflow_match(references, candidate, lang):\r\ndef corpus_dataflow_match(references, candidates, lang):\r\ndef get_data_flow(code, parser):\r\ndef normalize_dataflow_item(dataflow_item):\r\ndef normalize_dataflow(dataflow):\r\n LANGUAGE = Language('build/my-languages.so', lang)\r\n DFG, _ = parser[1](root_node, index_to_code, {})\r\n DFG = []\r\n DFG = sorted(DFG, key=lambda x: x[1])\r\n DFG = []\r"
},
{
"identifier": "get_lang_by_task",
"path": "utils.py",
"snippet": "def get_lang_by_task(task, sub_task):\n if task in ['summarize','complete']:\n return sub_task\n elif task in ['refine','generate','clone']:\n return 'java'\n elif task == 'translate':\n if sub_task == 'cs-java':\n return 'c_sharp'\n else:\n return 'java'\n elif task == 'defect':\n return 'c'\n else:\n raise 'java'"
}
] | import argparse
import os
from evaluator.CodeBLEU import bleu, weighted_ngram_match, syntax_match, dataflow_match
from utils import get_lang_by_task
| 1,902 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
# -*- coding:utf-8 -*-
# import evaluator.CodeBLEU.weighted_ngram_match
# import evaluator.CodeBLEU.syntax_match
# import evaluator.CodeBLEU.dataflow_match
def get_codebleu(refs, hyp, lang, params='0.25,0.25,0.25,0.25', args=None):
if not isinstance(refs, list):
refs = [refs]
alpha, beta, gamma, theta = [float(x) for x in params.split(',')]
# preprocess inputs
pre_references = [[x.strip() for x in open(file, 'r', encoding='utf-8').readlines()] for file in refs]
hypothesis = [x.strip() for x in open(hyp, 'r', encoding='utf-8').readlines()]
for i in range(len(pre_references)):
assert len(hypothesis) == len(pre_references[i])
references = []
for i in range(len(hypothesis)):
ref_for_instance = []
for j in range(len(pre_references)):
ref_for_instance.append(pre_references[j][i])
references.append(ref_for_instance)
assert len(references) == len(pre_references) * len(hypothesis)
# calculate ngram match (BLEU)
tokenized_hyps = [x.split() for x in hypothesis]
tokenized_refs = [[x.split() for x in reference] for reference in references]
ngram_match_score = bleu.corpus_bleu(tokenized_refs, tokenized_hyps)
# calculate weighted ngram match
if args:
keywords_path=args.data_dir+"/../evaluator/CodeBLEU/keywords/"
keywords = [x.strip() for x in open(keywords_path + lang + '.txt', 'r', encoding='utf-8').readlines()]
else:
keywords = [x.strip() for x in open('/data/pretrain-attention/CodePrompt/evaluator/CodeBLEU/keywords/' + lang + '.txt', 'r', encoding='utf-8').readlines()]
def make_weights(reference_tokens, key_word_list):
return {token: 1 if token in key_word_list else 0.2 for token in reference_tokens}
tokenized_refs_with_weights = [[[reference_tokens, make_weights(reference_tokens, keywords)] \
for reference_tokens in reference] for reference in tokenized_refs]
weighted_ngram_match_score = weighted_ngram_match.corpus_bleu(tokenized_refs_with_weights, tokenized_hyps)
# calculate syntax match
syntax_match_score = syntax_match.corpus_syntax_match(references, hypothesis, lang)
# calculate dataflow match
dataflow_match_score = dataflow_match.corpus_dataflow_match(references, hypothesis, lang)
print('ngram match: {0}, weighted ngram match: {1}, syntax_match: {2}, dataflow_match: {3}'. \
format(ngram_match_score, weighted_ngram_match_score, syntax_match_score, dataflow_match_score))
code_bleu_score = alpha * ngram_match_score \
+ beta * weighted_ngram_match_score \
+ gamma * syntax_match_score \
+ theta * dataflow_match_score
return code_bleu_score
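# Illustrative worked example, not part of the original file: the weighted
# combination computed above, spelled out with the default params
# '0.25,0.25,0.25,0.25'; the component scores are made-up numbers.
def _demo_codebleu_combination():
    alpha, beta, gamma, theta = 0.25, 0.25, 0.25, 0.25
    ngram, weighted, syntax, dataflow = 0.40, 0.50, 0.70, 0.60
    return alpha * ngram + beta * weighted + gamma * syntax + theta * dataflow  # 0.55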
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--refs', type=str, nargs='+', required=True,
help='reference files')
parser.add_argument('--hyp', type=str, required=True,
help='hypothesis file')
parser.add_argument('--lang', type=str, required=True,
choices=['java', 'js', 'c_sharp', 'php', 'go', 'python', 'ruby'],
help='programming language')
parser.add_argument('--params', type=str, default='0.25,0.25,0.25,0.25',
                        help='alpha, beta, gamma and theta')
args = parser.parse_args()
| # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
# -*- coding:utf-8 -*-
# import evaluator.CodeBLEU.weighted_ngram_match
# import evaluator.CodeBLEU.syntax_match
# import evaluator.CodeBLEU.dataflow_match
def get_codebleu(refs, hyp, lang, params='0.25,0.25,0.25,0.25', args=None):
if not isinstance(refs, list):
refs = [refs]
alpha, beta, gamma, theta = [float(x) for x in params.split(',')]
# preprocess inputs
pre_references = [[x.strip() for x in open(file, 'r', encoding='utf-8').readlines()] for file in refs]
hypothesis = [x.strip() for x in open(hyp, 'r', encoding='utf-8').readlines()]
for i in range(len(pre_references)):
assert len(hypothesis) == len(pre_references[i])
references = []
for i in range(len(hypothesis)):
ref_for_instance = []
for j in range(len(pre_references)):
ref_for_instance.append(pre_references[j][i])
references.append(ref_for_instance)
assert len(references) == len(pre_references) * len(hypothesis)
# calculate ngram match (BLEU)
tokenized_hyps = [x.split() for x in hypothesis]
tokenized_refs = [[x.split() for x in reference] for reference in references]
ngram_match_score = bleu.corpus_bleu(tokenized_refs, tokenized_hyps)
# calculate weighted ngram match
if args:
keywords_path=args.data_dir+"/../evaluator/CodeBLEU/keywords/"
keywords = [x.strip() for x in open(keywords_path + lang + '.txt', 'r', encoding='utf-8').readlines()]
else:
keywords = [x.strip() for x in open('/data/pretrain-attention/CodePrompt/evaluator/CodeBLEU/keywords/' + lang + '.txt', 'r', encoding='utf-8').readlines()]
def make_weights(reference_tokens, key_word_list):
return {token: 1 if token in key_word_list else 0.2 for token in reference_tokens}
tokenized_refs_with_weights = [[[reference_tokens, make_weights(reference_tokens, keywords)] \
for reference_tokens in reference] for reference in tokenized_refs]
weighted_ngram_match_score = weighted_ngram_match.corpus_bleu(tokenized_refs_with_weights, tokenized_hyps)
# calculate syntax match
syntax_match_score = syntax_match.corpus_syntax_match(references, hypothesis, lang)
# calculate dataflow match
dataflow_match_score = dataflow_match.corpus_dataflow_match(references, hypothesis, lang)
print('ngram match: {0}, weighted ngram match: {1}, syntax_match: {2}, dataflow_match: {3}'. \
format(ngram_match_score, weighted_ngram_match_score, syntax_match_score, dataflow_match_score))
code_bleu_score = alpha * ngram_match_score \
+ beta * weighted_ngram_match_score \
+ gamma * syntax_match_score \
+ theta * dataflow_match_score
return code_bleu_score
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--refs', type=str, nargs='+', required=True,
help='reference files')
parser.add_argument('--hyp', type=str, required=True,
help='hypothesis file')
parser.add_argument('--lang', type=str, required=True,
choices=['java', 'js', 'c_sharp', 'php', 'go', 'python', 'ruby'],
help='programming language')
parser.add_argument('--params', type=str, default='0.25,0.25,0.25,0.25',
                        help='alpha, beta, gamma and theta')
args = parser.parse_args()
| args.lang = get_lang_by_task(args.task, args.sub_task)
| 4 | 2023-10-20 09:24:44+00:00 | 4k |
openfoodfacts/open-prices | app/tasks.py | [
{
"identifier": "crud",
"path": "app/crud.py",
"snippet": "def get_users_query(filters: ProductFilter | None = None):\ndef get_users(db: Session, filters: ProductFilter | None = None):\ndef get_user(db: Session, user_id: str):\ndef get_user_by_user_id(db: Session, user_id: str):\ndef get_user_by_token(db: Session, token: str):\ndef create_user(db: Session, user: UserCreate) -> User:\ndef get_or_create_user(db: Session, user: UserCreate):\ndef update_user(db: Session, user: UserCreate, update_dict: dict):\ndef update_user_last_used_field(db: Session, user: UserCreate) -> UserCreate | None:\ndef increment_user_price_count(db: Session, user: UserCreate):\ndef delete_user(db: Session, user_id: UserCreate):\ndef get_products_query(filters: ProductFilter | None = None):\ndef get_products(db: Session, filters: ProductFilter | None = None):\ndef get_product_by_id(db: Session, id: int):\ndef get_product_by_code(db: Session, code: str) -> Product:\ndef create_product(\n db: Session, product: ProductCreate, price_count: int = 0\n) -> Product:\ndef get_or_create_product(\n db: Session, product: ProductCreate, init_price_count: int = 0\n):\ndef update_product(db: Session, product: ProductFull, update_dict: dict):\ndef increment_product_price_count(db: Session, product: ProductFull):\ndef get_prices_query(\n with_join_product: bool = True,\n with_join_location: bool = True,\n with_join_proof: bool = True,\n filters: PriceFilter | None = None,\n):\ndef get_prices(db: Session, filters: PriceFilter | None = None):\ndef create_price(db: Session, price: PriceCreate, user: UserCreate):\ndef link_price_product(\n db: Session, price: PriceFull, product: ProductFull\n) -> PriceFull:\ndef set_price_location(db: Session, price: PriceFull, location: LocationFull):\ndef get_proof(db: Session, proof_id: int):\ndef get_user_proofs(db: Session, user: UserCreate):\ndef create_proof(\n db: Session,\n file_path: str,\n mimetype: str,\n type: ProofTypeEnum,\n user: UserCreate,\n is_public: bool = True,\n):\ndef create_proof_file(file: UploadFile) -> tuple[str, str]:\ndef get_locations_query(filters: LocationFilter | None = None):\ndef get_locations(db: Session, filters: LocationFilter | None = None):\ndef get_location_by_id(db: Session, id: int):\ndef get_location_by_osm_id_and_type(\n db: Session, osm_id: int, osm_type: LocationOSMEnum\n):\ndef create_location(\n db: Session, location: LocationCreate, price_count: int = 0\n) -> Location:\ndef get_or_create_location(\n db: Session, location: LocationCreate, init_price_count: int = 0\n):\ndef update_location(db: Session, location: LocationFull, update_dict: dict):\ndef increment_location_price_count(db: Session, location: LocationFull):"
},
{
"identifier": "Product",
"path": "app/models.py",
"snippet": "class Product(Base):\n id = Column(Integer, primary_key=True, index=True)\n\n code = Column(String, unique=True, index=True)\n\n source = Column(ChoiceType(Flavor))\n product_name = Column(String)\n product_quantity = Column(Integer)\n brands = Column(String)\n image_url = Column(String)\n unique_scans_n = Column(Integer, nullable=False, server_default=\"0\")\n\n prices: Mapped[list[\"Price\"]] = relationship(back_populates=\"product\")\n price_count = Column(Integer, nullable=False, server_default=\"0\", index=True)\n\n created = Column(DateTime(timezone=True), server_default=func.now())\n updated = Column(DateTime(timezone=True), onupdate=func.now())\n\n __tablename__ = \"products\""
},
{
"identifier": "LocationCreate",
"path": "app/schemas.py",
"snippet": "class LocationCreate(BaseModel):\n model_config = ConfigDict(from_attributes=True, arbitrary_types_allowed=True)\n\n osm_id: int = Field(gt=0)\n osm_type: LocationOSMEnum"
},
{
"identifier": "PriceFull",
"path": "app/schemas.py",
"snippet": "class PriceFull(PriceCreate):\n product_id: int | None\n location_id: int | None\n owner: str\n created: datetime.datetime"
},
{
"identifier": "ProductCreate",
"path": "app/schemas.py",
"snippet": "class ProductCreate(BaseModel):\n model_config = ConfigDict(from_attributes=True, arbitrary_types_allowed=True)\n\n code: str = Field(\n min_length=1,\n pattern=\"^[0-9]+$\",\n description=\"barcode (EAN) of the product, as a string.\",\n examples=[\"8001505005707\"],\n )"
},
{
"identifier": "UserCreate",
"path": "app/schemas.py",
"snippet": "class UserCreate(UserBase):\n token: str"
},
{
"identifier": "OFF_FIELDS",
"path": "app/utils.py",
"snippet": "OFF_FIELDS = [\n \"product_name\",\n \"product_quantity\",\n \"brands\",\n \"image_url\",\n \"unique_scans_n\",\n]"
},
{
"identifier": "fetch_location_openstreetmap_details",
"path": "app/utils.py",
"snippet": "def fetch_location_openstreetmap_details(location: LocationFull):\n location_openstreetmap_details = dict()\n try:\n response = openstreetmap_nominatim_search(\n osm_id=location.osm_id, osm_type=location.osm_type.value.lower()\n )\n if len(response):\n for osm_field in OSM_FIELDS:\n if osm_field in response[0]:\n location_openstreetmap_details[f\"osm_{osm_field}\"] = response[0][\n osm_field\n ]\n if \"address\" in response[0]:\n for osm_address_field in OSM_ADDRESS_FIELDS:\n if osm_address_field in response[0][\"address\"]:\n location_openstreetmap_details[\n f\"osm_address_{osm_address_field}\"\n ] = response[0][\"address\"][osm_address_field]\n # manage city\n location_openstreetmap_details[\"osm_address_city\"] = None\n for osm_address_place_field in OSM_ADDRESS_PLACE_FIELDS:\n if osm_address_place_field in response[0][\"address\"]:\n if not location_openstreetmap_details[\"osm_address_city\"]:\n location_openstreetmap_details[\n \"osm_address_city\"\n ] = response[0][\"address\"][osm_address_place_field]\n\n return location_openstreetmap_details\n except Exception:\n logger.exception(\"Error returned from OpenStreetMap\")\n return"
},
{
"identifier": "fetch_product_openfoodfacts_details",
"path": "app/utils.py",
"snippet": "def fetch_product_openfoodfacts_details(product: ProductFull) -> JSONType | None:\n product = {}\n try:\n response = openfoodfacts_product_search(code=product.code)\n if response[\"status\"]:\n product[\"source\"] = Flavor.off\n for off_field in OFF_FIELDS:\n if off_field in response[\"product\"]:\n product[off_field] = response[\"product\"][off_field]\n product = normalize_product_fields(product)\n return product\n except Exception:\n logger.exception(\"Error returned from Open Food Facts\")\n return"
},
{
"identifier": "generate_openfoodfacts_main_image_url",
"path": "app/utils.py",
"snippet": "def generate_openfoodfacts_main_image_url(\n code: str, images: JSONType, lang: str\n) -> str | None:\n \"\"\"Generate the URL of the main image of a product.\n\n :param code: The code of the product\n :param images: The images of the product\n :param lang: The main language of the product\n :return: The URL of the main image of the product or None if no image is\n available.\n \"\"\"\n image_key = None\n if f\"front_{lang}\" in images:\n image_key = f\"front_{lang}\"\n else:\n for key in (k for k in images if k.startswith(\"front_\")):\n image_key = key\n break\n\n if image_key:\n image_rev = images[image_key][\"rev\"]\n image_id = f\"{image_key}.{image_rev}.400\"\n return generate_image_url(\n code, image_id=image_id, flavor=Flavor.off, environment=settings.environment\n )\n\n return None"
},
{
"identifier": "normalize_product_fields",
"path": "app/utils.py",
"snippet": "def normalize_product_fields(product: JSONType) -> JSONType:\n \"\"\"Normalize product fields and return a product dict\n ready to be inserted in the database.\n\n :param product: the product to normalize\n :return: the normalized product\n \"\"\"\n product = product.copy()\n product_quantity = int(product.get(\"product_quantity\") or 0)\n if product_quantity >= 100_000:\n # If the product quantity is too high, it's probably an\n # error, and cause an OutOfRangeError in the database\n product[\"product_quantity\"] = None\n\n # Some products have null unique_scans_n\n if product[\"unique_scans_n\"] is None:\n product[\"unique_scans_n\"] = 0\n\n return product"
}
] | import datetime
import tqdm
from openfoodfacts import DatasetType, Flavor, ProductDataset
from openfoodfacts.types import JSONType
from openfoodfacts.utils import get_logger
from sqlalchemy import or_, select
from sqlalchemy.orm import Session
from app import crud
from app.models import Product
from app.schemas import LocationCreate, PriceFull, ProductCreate, UserCreate
from app.utils import (
OFF_FIELDS,
fetch_location_openstreetmap_details,
fetch_product_openfoodfacts_details,
generate_openfoodfacts_main_image_url,
normalize_product_fields,
) | 2,889 |
logger = get_logger(__name__)
# Users
# ------------------------------------------------------------------------------
def increment_user_price_count(db: Session, user: UserCreate):
crud.increment_user_price_count(db, user=user)
# Products
# ------------------------------------------------------------------------------
def create_price_product(db: Session, price: PriceFull):
# The price may not have a product code, if it's the price of a
# barcode-less product
if price.product_code:
# get or create the corresponding product
product = ProductCreate(code=price.product_code)
db_product, created = crud.get_or_create_product(
db, product=product, init_price_count=1
)
# link the product to the price
crud.link_price_product(db, price=price, product=db_product)
# fetch data from OpenFoodFacts if created
if created:
product_openfoodfacts_details = fetch_product_openfoodfacts_details(
product=db_product
)
if product_openfoodfacts_details:
crud.update_product(
db, product=db_product, update_dict=product_openfoodfacts_details
)
else:
# Increment the price count of the product
crud.increment_product_price_count(db, product=db_product)
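# A hedged usage sketch (names are hypothetical, not from the original file):
# after persisting a price row, the linker above would typically be called as
#
#   with SessionLocal() as session:
#       create_price_product(session, price=new_price)
#
# where SessionLocal is an assumed sessionmaker and new_price a PriceFull row.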
def import_product_db(db: Session, batch_size: int = 1000):
"""Import from DB JSONL dump to insert/update product table.
:param db: the session to use
:param batch_size: the number of products to insert/update in a single
transaction, defaults to 1000
"""
logger.info("Launching import_product_db")
existing_codes = set(db.execute(select(Product.code)).scalars())
logger.info("Number of existing codes: %d", len(existing_codes))
dataset = ProductDataset(
dataset_type=DatasetType.jsonl, force_download=True, download_newer=True
)
added_count = 0
updated_count = 0
buffer_len = 0
# the dataset was created after the start of the day, every product updated
# after should be skipped, as we don't know the exact creation time of the
# dump
start_datetime = datetime.datetime.now(tz=datetime.timezone.utc).replace(
hour=0, minute=0, second=0
)
seen_codes = set()
for product in tqdm.tqdm(dataset):
if "code" not in product:
continue
product_code = product["code"]
# Some products are duplicated in the dataset, we skip them
if product_code in seen_codes:
continue
seen_codes.add(product_code)
images: JSONType = product.get("images", {})
last_modified_t = product.get("last_modified_t")
last_modified = (
datetime.datetime.fromtimestamp(last_modified_t, tz=datetime.timezone.utc)
if last_modified_t
else None
)
if last_modified is None:
continue
# Skip products that have been modified today (more recent updates are
# possible)
if last_modified >= start_datetime:
logger.debug("Skipping %s", product_code)
continue
if product_code not in existing_codes:
item = {"code": product_code, "source": Flavor.off}
|
logger = get_logger(__name__)
# Users
# ------------------------------------------------------------------------------
def increment_user_price_count(db: Session, user: UserCreate):
crud.increment_user_price_count(db, user=user)
# Products
# ------------------------------------------------------------------------------
def create_price_product(db: Session, price: PriceFull):
# The price may not have a product code, if it's the price of a
# barcode-less product
if price.product_code:
# get or create the corresponding product
product = ProductCreate(code=price.product_code)
db_product, created = crud.get_or_create_product(
db, product=product, init_price_count=1
)
# link the product to the price
crud.link_price_product(db, price=price, product=db_product)
# fetch data from OpenFoodFacts if created
if created:
product_openfoodfacts_details = fetch_product_openfoodfacts_details(
product=db_product
)
if product_openfoodfacts_details:
crud.update_product(
db, product=db_product, update_dict=product_openfoodfacts_details
)
else:
# Increment the price count of the product
crud.increment_product_price_count(db, product=db_product)
def import_product_db(db: Session, batch_size: int = 1000):
"""Import from DB JSONL dump to insert/update product table.
:param db: the session to use
:param batch_size: the number of products to insert/update in a single
transaction, defaults to 1000
"""
logger.info("Launching import_product_db")
existing_codes = set(db.execute(select(Product.code)).scalars())
logger.info("Number of existing codes: %d", len(existing_codes))
dataset = ProductDataset(
dataset_type=DatasetType.jsonl, force_download=True, download_newer=True
)
added_count = 0
updated_count = 0
buffer_len = 0
# the dataset was created after the start of the day, every product updated
# after should be skipped, as we don't know the exact creation time of the
# dump
start_datetime = datetime.datetime.now(tz=datetime.timezone.utc).replace(
hour=0, minute=0, second=0
)
seen_codes = set()
for product in tqdm.tqdm(dataset):
if "code" not in product:
continue
product_code = product["code"]
# Some products are duplicated in the dataset, we skip them
if product_code in seen_codes:
continue
seen_codes.add(product_code)
images: JSONType = product.get("images", {})
last_modified_t = product.get("last_modified_t")
last_modified = (
datetime.datetime.fromtimestamp(last_modified_t, tz=datetime.timezone.utc)
if last_modified_t
else None
)
if last_modified is None:
continue
# Skip products that have been modified today (more recent updates are
# possible)
if last_modified >= start_datetime:
logger.debug("Skipping %s", product_code)
continue
if product_code not in existing_codes:
item = {"code": product_code, "source": Flavor.off} | for key in OFF_FIELDS: | 6 | 2023-10-21 14:02:15+00:00 | 4k |
krasnoukhov/homeassistant-smart-maic | custom_components/smart_maic/config_flow.py | [
{
"identifier": "DEVICE_NAME",
"path": "custom_components/smart_maic/const.py",
"snippet": "DEVICE_NAME = \"device_name\""
},
{
"identifier": "DEVICE_ID",
"path": "custom_components/smart_maic/const.py",
"snippet": "DEVICE_ID = \"devid\""
},
{
"identifier": "DEVICE_TYPE",
"path": "custom_components/smart_maic/const.py",
"snippet": "DEVICE_TYPE = \"devtype\""
},
{
"identifier": "DOMAIN",
"path": "custom_components/smart_maic/const.py",
"snippet": "DOMAIN = \"smart_maic\""
},
{
"identifier": "IP_ADDRESS",
"path": "custom_components/smart_maic/const.py",
"snippet": "IP_ADDRESS = CONF_IP_ADDRESS"
},
{
"identifier": "PIN",
"path": "custom_components/smart_maic/const.py",
"snippet": "PIN = CONF_PIN"
},
{
"identifier": "SmartMaic",
"path": "custom_components/smart_maic/smart_maic.py",
"snippet": "class SmartMaic:\n \"\"\"Smart MAIC instance.\"\"\"\n\n def __init__(self, data: dict[str, Any]) -> None:\n \"\"\"Init Smart MAIC.\"\"\"\n self._ip_address = data[IP_ADDRESS]\n self._pin = data[PIN]\n self._devid = data.get(DEVICE_ID)\n\n def get_wdata(self) -> dict[str, Any]:\n \"\"\"Get \"wdata\" for Smart MAIC component.\"\"\"\n self._login_request()\n return self._get_request(page=\"getwdata\").json()\n\n def get_config(self) -> dict[str, Any]:\n \"\"\"Get config for Smart MAIC component.\"\"\"\n self._login_request()\n return self._get_request(page=\"webinit\").json()\n\n def set_mqtt_config(self) -> dict[str, Any]:\n \"\"\"Set Smart MAIC MQTT config.\"\"\"\n config = self.get_config()\n\n self._get_request(\n page=\"mqtt\",\n serv=config[\"serv\"],\n port=config[\"port\"],\n uname=config[\"uname\"],\n **{\"pass\": config[\"pass\"]},\n mqtt_on=1,\n mqttint=5,\n separat=2,\n prefix=f\"{PREFIX}/\",\n )\n\n return self.get_config()\n\n def set_consumption(self, key: str, value: float) -> None:\n \"\"\"Set Smart MAIC consumption value.\"\"\"\n self._login_request()\n self._get_request(page=\"initval\", **{key: value})\n\n def set_dry_switch(self, value: int) -> dict[str, Any]:\n \"\"\"Set Smart MAIC dry switch.\"\"\"\n self._get_request(\n page=\"getdata\", devid=self._devid, devpass=self._pin, pout=value\n )\n\n def _login_request(self) -> None:\n self._get_request(page=\"devlogin\", devpass=self._pin)\n\n def _get_request(self, **kwargs) -> requests.Response:\n \"\"\"Make GET request to the Smart MAIC API.\"\"\"\n url = urlparse(f\"http://{self._ip_address}/\")\n url = url._replace(query=urlencode(kwargs))\n\n _LOGGER.debug(f\"Smart MAIC request: GET {url.geturl()}\")\n try:\n r = requests.get(url.geturl(), timeout=HTTP_TIMEOUT)\n r.raise_for_status()\n _LOGGER.debug(f\"Smart MAIC status: {r.status_code}\")\n _LOGGER.debug(f\"Smart MAIC response: {r.text}\")\n\n return r\n except TimeoutError as timeout_error:\n raise ConnectionError from timeout_error\n except requests.exceptions.ConnectionError as connection_error:\n raise ConnectionError from connection_error\n except requests.exceptions.HTTPError as http_error:\n if http_error.response.status_code == 400:\n return r\n raise ConnectionError from http_error"
},
{
"identifier": "SmartMaicCoordinator",
"path": "custom_components/smart_maic/coordinator.py",
"snippet": "class SmartMaicCoordinator(DataUpdateCoordinator[dict[str, Any]]):\n \"\"\"Smart MAIC Coordinator class.\"\"\"\n\n def __init__(self, smart_maic: SmartMaic, hass: HomeAssistant) -> None:\n \"\"\"Initialize.\"\"\"\n self._smart_maic = smart_maic\n\n super().__init__(\n hass,\n _LOGGER,\n name=DOMAIN,\n )\n\n def _get_config(self) -> None:\n \"\"\"Get Smart MAIC config.\"\"\"\n return self._smart_maic.set_mqtt_config()\n\n async def async_get_config(self) -> None:\n \"\"\"Get Smart MAIC config.\"\"\"\n return await self.hass.async_add_executor_job(self._get_config)\n\n def _set_mqtt_config(self) -> None:\n \"\"\"Set Smart MAIC MQTT config.\"\"\"\n return self._smart_maic.set_mqtt_config()\n\n async def async_set_mqtt_config(self) -> None:\n \"\"\"Set Smart MAIC MQTT config.\"\"\"\n return await self.hass.async_add_executor_job(self._set_mqtt_config)\n\n def _set_consumption(self, key: str, value: float) -> None:\n \"\"\"Set Smart MAIC consumption value.\"\"\"\n return self._smart_maic.set_consumption(key=key, value=value)\n\n async def async_set_consumption(self, key: str, value: float) -> None:\n \"\"\"Set Smart MAIC consumption value.\"\"\"\n return await self.hass.async_add_executor_job(self._set_consumption, key, value)\n\n def _set_dry_switch(self, value: int) -> None:\n \"\"\"Set Smart MAIC dry switch value.\"\"\"\n return self._smart_maic.set_dry_switch(value=value)\n\n async def async_set_dry_switch(self, value: int) -> None:\n \"\"\"Set Smart MAIC dry switch value.\"\"\"\n return await self.hass.async_add_executor_job(self._set_dry_switch, value)"
}
] | import logging
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from typing import Any
from homeassistant import config_entries
from homeassistant.components import mqtt
from homeassistant.core import HomeAssistant
from homeassistant.data_entry_flow import AbortFlow
from .const import (
DEVICE_NAME,
DEVICE_ID,
DEVICE_TYPE,
DOMAIN,
IP_ADDRESS,
PIN,
)
from .smart_maic import SmartMaic
from .coordinator import SmartMaicCoordinator | 1,627 | """Config flow for Smart MAIC integration."""
from __future__ import annotations
_LOGGER = logging.getLogger(__name__)
USER_SCHEMA = vol.Schema(
{
vol.Required(IP_ADDRESS): cv.string,
vol.Required(PIN): cv.string,
vol.Required(DEVICE_NAME, default="Energy"): cv.string,
}
)
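# A minimal sketch of input accepted by USER_SCHEMA above; the concrete values
# are assumptions, only the three required keys matter:
#
#   user_input = {
#       IP_ADDRESS: "192.168.1.50",
#       PIN: "1234",
#       DEVICE_NAME: "Energy",
#   }
#   USER_SCHEMA(user_input)  # raises vol.Invalid on missing/invalid keys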
async def validate_input(hass: HomeAssistant, data: dict) -> dict[str, Any]:
"""Validate the user input allows us to connect.
Data has the keys from USER_SCHEMA with values provided by the user.
"""
if not await mqtt.async_wait_for_mqtt_client(hass):
raise AbortFlow("mqtt_unavailable")
smart_maic = SmartMaic(data)
coordinator = SmartMaicCoordinator(smart_maic, hass)
config = await coordinator.async_get_config()
if not config["serv"]:
raise AbortFlow("mqtt_unconfigured")
config = await coordinator.async_set_mqtt_config()
additional = {
DEVICE_ID: config["about"][DEVICE_ID]["value"],
DEVICE_TYPE: config["about"][DEVICE_TYPE]["value"],
}
return {"title": data[DEVICE_NAME], "additional": additional}
| """Config flow for Smart MAIC integration."""
from __future__ import annotations
_LOGGER = logging.getLogger(__name__)
USER_SCHEMA = vol.Schema(
{
vol.Required(IP_ADDRESS): cv.string,
vol.Required(PIN): cv.string,
vol.Required(DEVICE_NAME, default="Energy"): cv.string,
}
)
async def validate_input(hass: HomeAssistant, data: dict) -> dict[str, Any]:
"""Validate the user input allows us to connect.
Data has the keys from USER_SCHEMA with values provided by the user.
"""
if not await mqtt.async_wait_for_mqtt_client(hass):
raise AbortFlow("mqtt_unavailable")
smart_maic = SmartMaic(data)
coordinator = SmartMaicCoordinator(smart_maic, hass)
config = await coordinator.async_get_config()
if not config["serv"]:
raise AbortFlow("mqtt_unconfigured")
config = await coordinator.async_set_mqtt_config()
additional = {
DEVICE_ID: config["about"][DEVICE_ID]["value"],
DEVICE_TYPE: config["about"][DEVICE_TYPE]["value"],
}
return {"title": data[DEVICE_NAME], "additional": additional}
| class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN): | 3 | 2023-10-16 17:24:45+00:00 | 4k |
JoaoPedro9674/django-ledger | django_ledger/models/customer.py | [
{
"identifier": "ContactInfoMixIn",
"path": "django_ledger/models/mixins.py",
"snippet": "class ContactInfoMixIn(models.Model):\n \"\"\"\n Implements a common set of fields used to document contact information.\n\n Attributes\n ----------\n address_1: str\n A string used to document the first line of an address. Mandatory. Max length is 70.\n address_2: str\n A string used to document the first line of an address. Optional.\n city: str\n A string used to document the city. Optional.\n state: str\n A string used to document the State of Province. Optional.\n zip_code: str\n A string used to document the ZIP code. Optional\n country: str\n A string used to document the country. Optional.\n email: str\n A string used to document the contact email. Uses django's EmailField for validation.\n website: str\n A string used to document the contact website. Uses django's URLField for validation.\n phone: str\n A string used to document the contact phone.\n \"\"\"\n address_1 = models.CharField(max_length=70, verbose_name=_('Address Line 1'))\n address_2 = models.CharField(null=True, blank=True, max_length=70, verbose_name=_('Address Line 2'))\n city = models.CharField(null=True, blank=True, max_length=70, verbose_name=_('City'))\n state = models.CharField(null=True, blank=True, max_length=70, verbose_name=_('State/Province'))\n zip_code = models.CharField(null=True, blank=True, max_length=20, verbose_name=_('Zip Code'))\n country = models.CharField(null=True, blank=True, max_length=70, verbose_name=_('Country'))\n email = models.EmailField(null=True, blank=True, verbose_name=_('Email'))\n website = models.URLField(null=True, blank=True, verbose_name=_('Website'))\n phone = models.CharField(max_length=30, null=True, blank=True, verbose_name=_('Phone Number'))\n\n class Meta:\n abstract = True\n\n def get_cszc(self):\n if all([\n self.city,\n self.state,\n self.zip_code,\n self.country,\n ]):\n return f'{self.city}, {self.state}. {self.zip_code}. {self.country}'"
},
{
"identifier": "CreateUpdateMixIn",
"path": "django_ledger/models/mixins.py",
"snippet": "class CreateUpdateMixIn(models.Model):\n \"\"\"\n Implements a created and an updated field to a base Django Model.\n\n Attributes\n ----------\n created: datetime\n A created timestamp. Defaults to now().\n updated: str\n An updated timestamp used to identify when models are updated.\n \"\"\"\n created = models.DateTimeField(auto_now_add=True)\n updated = models.DateTimeField(auto_now=True, null=True, blank=True)\n\n class Meta:\n abstract = True"
},
{
"identifier": "TaxCollectionMixIn",
"path": "django_ledger/models/mixins.py",
"snippet": "class TaxCollectionMixIn(models.Model):\n \"\"\"\n Implements functionality used to add tax collection rates and or withholding to a base Django Model.\n This field may be used to set a pre-defined withholding rate to a financial instrument, customer, vendor, etc.\n\n Attributes\n ----------\n sales_tax_rate: float\n The tax rate as a float. A Number between 0.00 and 1.00.\n \"\"\"\n sales_tax_rate = models.FloatField(default=0.00000,\n verbose_name=_('Sales Tax Rate'),\n null=True,\n blank=True,\n validators=[\n MinValueValidator(limit_value=0.00000),\n MaxValueValidator(limit_value=1.00000)\n ])\n\n class Meta:\n abstract = True"
},
{
"identifier": "lazy_loader",
"path": "django_ledger/models/utils.py",
"snippet": "class LazyLoader:\n ENTITY_MODEL = None\n ENTITY_STATE_MODEL = None\n UNIT_MODEL = None\n ACCOUNT_MODEL = None\n BANK_ACCOUNT_MODEL = None\n LEDGER_MODEL = None\n TXS_MODEL = None\n JE_MODEL = None\n ITEM_MODEL = None\n ITEM_TRANSACTION_MODEL = None\n CUSTOMER_MODEL = None\n INVOICE_MODEL = None\n BILL_MODEL = None\n UOM_MODEL = None\n VENDOR_MODEL = None\n TRANSACTION_MODEL = None\n ENTITY_UNIT_MODEL = None\n PURCHASE_ORDER_MODEL = None\n ESTIMATE_MODEL = None\n CLOSING_ENTRY_MODEL = None\n CLOSING_ENTRY_TRANSACTION_MODEL = None\n ENTITY_DATA_GENERATOR = None\n BALANCE_SHEET_REPORT_CLASS = None\n INCOME_STATEMENT_REPORT_CLASS = None\n CASH_FLOW_STATEMENT_REPORT_CLASS = None\n def get_entity_model(self):\n def get_entity_state_model(self):\n def get_bank_account_model(self):\n def get_account_model(self):\n def get_txs_model(self):\n def get_purchase_order_model(self):\n def get_ledger_model(self):\n def get_unit_model(self):\n def get_journal_entry_model(self):\n def get_item_model(self):\n def get_item_transaction_model(self):\n def get_customer_model(self):\n def get_bill_model(self):\n def get_invoice_model(self):\n def get_uom_model(self):\n def get_vendor_model(self):\n def get_transaction_model(self):\n def get_entity_unit_model(self):\n def get_estimate_model(self):\n def get_entity_data_generator(self):\n def get_closing_entry_model(self):\n def get_closing_entry_transaction_model(self):\n def get_balance_sheet_report_class(self):\n def get_income_statement_report_class(self):\n def get_cash_flow_statement_report_class(self):"
},
{
"identifier": "DJANGO_LEDGER_DOCUMENT_NUMBER_PADDING",
"path": "django_ledger/settings.py",
"snippet": "DJANGO_LEDGER_DOCUMENT_NUMBER_PADDING = getattr(settings, 'DJANGO_LEDGER_DOCUMENT_NUMBER_PADDING', 10)"
},
{
"identifier": "DJANGO_LEDGER_CUSTOMER_NUMBER_PREFIX",
"path": "django_ledger/settings.py",
"snippet": "DJANGO_LEDGER_CUSTOMER_NUMBER_PREFIX = getattr(settings, 'DJANGO_LEDGER_CUSTOMER_NUMBER_PREFIX', 'C')"
}
] | from uuid import uuid4
from django.core.exceptions import ObjectDoesNotExist
from django.db import models, transaction, IntegrityError
from django.db.models import Q, F, QuerySet
from django.utils.translation import gettext_lazy as _
from django_ledger.models.mixins import ContactInfoMixIn, CreateUpdateMixIn, TaxCollectionMixIn
from django_ledger.models.utils import lazy_loader
from django_ledger.settings import DJANGO_LEDGER_DOCUMENT_NUMBER_PADDING, DJANGO_LEDGER_CUSTOMER_NUMBER_PREFIX | 2,326 |
class CustomerModelQueryset(QuerySet):
"""
A custom defined QuerySet for the CustomerModel. This implements multiple methods or queries needed to get a
filtered QuerySet based on the CustomerModel status. For example, we might want to have a list of Customers that
are active or hidden. All these separate functions will assist in making such queries and building customized
reports.
"""
def active(self) -> QuerySet:
"""
Active customers can be assigned to new Invoices and show on dropdown menus and views.
Returns
-------
CustomerModelQueryset
A QuerySet of active Customers.
"""
return self.filter(active=True)
def inactive(self) -> QuerySet:
"""
Inactive customers cannot be assigned to new Invoices and do not show on dropdown menus and views.
Marking CustomerModels as inactive can help reduce Database load to populate select inputs and also inactivate
CustomerModels that are not relevant to the Entity anymore. Also, it makes the UI cleaner by not populating
unnecessary choices.
Returns
-------
CustomerModelQueryset
A QuerySet of inactive Customers.
"""
return self.filter(active=False)
def hidden(self) -> QuerySet:
"""
Hidden customers do not show on dropdown menus, but may be used via APIs or any other method that does not
involve the UI.
Returns
-------
CustomerModelQueryset
A QuerySet of hidden Customers.
"""
return self.filter(hidden=True)
def visible(self) -> QuerySet:
"""
Visible customers show on dropdown menus and views. Visible customers are active and not hidden.
Returns
-------
CustomerModelQueryset
A QuerySet of visible Customers.
"""
return self.filter(
Q(hidden=False) & Q(active=True)
)
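# Because the manager methods below return this QuerySet class, the status
# filters compose; a hypothetical chained call could look like:
#
#   CustomerModel.objects.for_entity(
#       entity_slug="my-company", user_model=request.user
#   ).visible()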
class CustomerModelManager(models.Manager):
"""
A custom defined CustomerModelManager that will act as an interface for handling the DB queries to the
CustomerModel.
"""
def for_user(self, user_model):
"""
Fetches a QuerySet of CustomerModels that the UserModel has access to.
May include CustomerModels from multiple Entities.
The user has access to customers if:
1. Is listed as Manager of Entity.
2. Is the Admin of the Entity.
Parameters
__________
user_model
Logged in and authenticated django UserModel instance.
Examples
________
>>> request_user = request.user
>>> customer_model_qs = CustomerModel.objects.for_user(user_model=request_user)
"""
qs = self.get_queryset()
return qs.filter(
Q(entity__admin=user_model) |
Q(entity__managers__in=[user_model])
)
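# The two Q objects above are OR-ed, so a user qualifies through either access
# path: being the Entity admin or being listed among its managers.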
def for_entity(self, entity_slug, user_model) -> CustomerModelQueryset:
"""
Fetches a QuerySet of CustomerModel associated with a specific EntityModel & UserModel.
May pass an instance of EntityModel or a String representing the EntityModel slug.
Parameters
__________
entity_slug: str or EntityModel
The entity slug or EntityModel used for filtering the QuerySet.
user_model
Logged in and authenticated django UserModel instance.
Examples
________
>>> request_user = request.user
>>> slug = kwargs['entity_slug'] # may come from request kwargs
>>> customer_model_qs = CustomerModel.objects.for_entity(user_model=request_user, entity_slug=slug)
Returns
-------
CustomerModelQueryset
A filtered CustomerModel QuerySet.
"""
qs = self.get_queryset()
| """
Django Ledger created by Miguel Sanda <[email protected]>.
Copyright© EDMA Group Inc licensed under the GPLv3 Agreement.
Contributions to this module:
* Miguel Sanda <[email protected]>
* Pranav P Tulshyan <[email protected]>
A Customer refers to the person or entity that buys product and services. When issuing Invoices, a Customer must be
created before it can be assigned to the InvoiceModel. Only customers who are active can be assigned to new Invoices.
"""
class CustomerModelQueryset(QuerySet):
"""
A custom defined QuerySet for the CustomerModel. This implements multiple methods or queries needed to get a
filtered QuerySet based on the CustomerModel status. For example, we might want to have a list of Customers that
are active or hidden. All these separate functions will assist in making such queries and building customized
reports.
"""
def active(self) -> QuerySet:
"""
Active customers can be assigned to new Invoices and show on dropdown menus and views.
Returns
-------
CustomerModelQueryset
A QuerySet of active Customers.
"""
return self.filter(active=True)
def inactive(self) -> QuerySet:
"""
Inactive customers cannot be assigned to new Invoices and do not show on dropdown menus and views.
Marking CustomerModels as inactive can help reduce Database load to populate select inputs and also inactivate
CustomerModels that are not relevant to the Entity anymore. Also, it makes the UI cleaner by not populating
unnecessary choices.
Returns
-------
CustomerModelQueryset
A QuerySet of inactive Customers.
"""
return self.filter(active=False)
def hidden(self) -> QuerySet:
"""
Hidden customers do not show on dropdown menus, but may be used via APIs or any other method that does not
involve the UI.
Returns
-------
CustomerModelQueryset
A QuerySet of hidden Customers.
"""
return self.filter(hidden=True)
def visible(self) -> QuerySet:
"""
Visible customers show on dropdown menus and views. Visible customers are active and not hidden.
Returns
-------
CustomerModelQueryset
A QuerySet of visible Customers.
"""
return self.filter(
Q(hidden=False) & Q(active=True)
)
class CustomerModelManager(models.Manager):
"""
A custom defined CustomerModelManager that will act as an interface for handling the DB queries to the
CustomerModel.
"""
def for_user(self, user_model):
"""
Fetches a QuerySet of CustomerModels that the UserModel has access to.
May include CustomerModels from multiple Entities.
The user has access to customers if:
1. Is listed as Manager of Entity.
2. Is the Admin of the Entity.
Parameters
__________
user_model
Logged in and authenticated django UserModel instance.
Examples
________
>>> request_user = request.user
>>> customer_model_qs = CustomerModel.objects.for_user(user_model=request_user)
"""
qs = self.get_queryset()
return qs.filter(
Q(entity__admin=user_model) |
Q(entity__managers__in=[user_model])
)
def for_entity(self, entity_slug, user_model) -> CustomerModelQueryset:
"""
Fetches a QuerySet of CustomerModel associated with a specific EntityModel & UserModel.
May pass an instance of EntityModel or a String representing the EntityModel slug.
Parameters
__________
entity_slug: str or EntityModel
The entity slug or EntityModel used for filtering the QuerySet.
user_model
Logged in and authenticated django UserModel instance.
Examples
________
>>> request_user = request.user
>>> slug = kwargs['entity_slug'] # may come from request kwargs
>>> customer_model_qs = CustomerModel.objects.for_entity(user_model=request_user, entity_slug=slug)
Returns
-------
CustomerModelQueryset
A filtered CustomerModel QuerySet.
"""
qs = self.get_queryset()
| if isinstance(entity_slug, lazy_loader.get_entity_model()): | 3 | 2023-10-20 01:07:20+00:00 | 4k |
HLTCHKUST/InstructAlign | run_t2t_finetuning.py | [
{
"identifier": "load_flores_datasets",
"path": "data_utils.py",
"snippet": "def load_flores_datasets(pivot_langs=['eng_Latn'], augmentation='multilingual', num_train_ratio=1.0):\n def inject_lang(row, lang1, lang2):\n row['lang1'] = lang_map[lang1]\n row['lang2'] = lang_map[lang2]\n return row\n\n dsets = {}\n if augmentation == 'monolingual':\n for lang1 in pivot_langs:\n # Load a single dataset from the pivot language as `lang1` and random `lang2`\n lang2 = 'bug_Latn' # This random `lang2` is not used for training\n subset = f'{lang1}-{lang2}'\n dset = datasets.load_dataset('facebook/flores', subset)\n dset = dset.rename_columns({f'sentence_{lang1}': 'sentence1', f'sentence_{lang2}': 'sentence2'})\n dset = dset.map(inject_lang, fn_kwargs={'lang1': lang1, 'lang2': lang2}, load_from_cache_file=True)\n dsets[subset] = dset\n \n for lang1 in pivot_langs:\n for lang2 in ['ind_Latn', 'sun_Latn', 'jav_Latn', 'bug_Latn', 'ace_Latn', 'bjn_Latn', 'ban_Latn', 'min_Latn']:\n if lang1 != lang2:\n if augmentation != 'monolingual':\n # If not monolingual take both directions\n subset = f'{lang1}-{lang2}'\n dset = datasets.load_dataset('facebook/flores', subset)\n dset = dset.rename_columns({f'sentence_{lang1}': 'sentence1', f'sentence_{lang2}': 'sentence2'})\n dset = dset.map(inject_lang, fn_kwargs={'lang1': lang1, 'lang2': lang2}, load_from_cache_file=True)\n dsets[subset] = dset\n\n subset = f'{lang2}-{lang1}'\n dset = datasets.load_dataset('facebook/flores', subset)\n dset = dset.rename_columns({f'sentence_{lang2}': 'sentence1', f'sentence_{lang1}': 'sentence2'})\n dset = dset.map(inject_lang, fn_kwargs={'lang1': lang2, 'lang2': lang1}, load_from_cache_file=True)\n dsets[subset] = dset\n \n dset_subsets = []\n for key in dsets.keys():\n for split in ['dev', 'devtest']:\n if 0 < num_train_ratio < 1:\n dset_subsets.append(dsets[key][split].train_test_split(test_size=num_train_ratio, seed=0)['test'])\n else:\n dset_subsets.append(dsets[key][split])\n \n combined_dset = datasets.concatenate_datasets(dset_subsets)\n\n return combined_dset.train_test_split(test_size=1000, seed=0)"
},
{
"identifier": "load_rehearsal_dataset",
"path": "data_utils.py",
"snippet": "def load_rehearsal_dataset(n_samples=1000, random_seed=42):\n en_dset = datasets.load_dataset('bigscience/xP3', 'en', split='train', streaming=True)\n # id_dset = datasets.load_dataset('bigscience/xP3', 'id', split='train', streaming=True)\n\n sample_en_dset = en_dset.shuffle(random_seed).take(n_samples)\n # sample_id_dset = id_dset.shuffle(random_seed).take(n_samples)\n \n # return datasets.concatenate_datasets([sample_en_dset, sample_id_dset])\n return sample_en_dset"
},
{
"identifier": "do_augment",
"path": "augmentation_utils.py",
"snippet": "def do_augment(text, aug_type):\n if aug_type == 'infilling':\n return random_infilling(text)\n elif aug_type == 'deletion':\n return random_deletion(text)\n elif aug_type == 'permutation':\n return random_permutation(text)"
},
{
"identifier": "prompt_monolingual",
"path": "prompt_utils.py",
"snippet": "def prompt_monolingual(src_text, tgt_text, src_lang, is_encoder_decoder):\n prompt = random.choice(MONOLINGUAL_PROMPTS)\n prompt = prompt.replace('[SOURCE_TEXT]', src_text)\n prompt = prompt.replace('[SOURCE_LANG]', src_lang) \n if is_encoder_decoder:\n prompt = prompt.replace('[TARGET_TEXT]', '')\n return (prompt, tgt_text)\n else:\n prompt = prompt.replace('[TARGET_TEXT]', tgt_text)\n return (prompt, prompt)"
},
{
"identifier": "prompt_translation",
"path": "prompt_utils.py",
"snippet": "def prompt_translation(src_text, tgt_text, src_lang, tgt_lang, is_encoder_decoder):\n prompt = random.choice(TRANSLATION_PROMPTS)\n prompt = prompt.replace('[SOURCE_LANG]', src_lang)\n prompt = prompt.replace('[TARGET_LANG]', tgt_lang)\n prompt = prompt.replace('[SOURCE_TEXT]', src_text)\n if is_encoder_decoder:\n prompt = prompt.replace('[TARGET_TEXT]', '')\n return (prompt, tgt_text)\n else:\n prompt = prompt.replace('[TARGET_TEXT]', tgt_text)\n return (prompt, prompt)"
},
{
"identifier": "prompt_xss",
"path": "prompt_utils.py",
"snippet": "def prompt_xss(src_text, tgt_text, src_lang, tgt_lang, label, is_encoder_decoder):\n prompt = random.choice(XSS_PROMPTS)\n prompt = prompt.replace('[SOURCE_LANG]', src_lang)\n prompt = prompt.replace('[TARGET_LANG]', tgt_lang)\n prompt = prompt.replace('[SOURCE_TEXT]', src_text)\n prompt = prompt.replace('[TARGET_TEXT]', tgt_text)\n if is_encoder_decoder:\n prompt = prompt.replace('[LABEL]', '')\n return (prompt, label)\n else:\n prompt = prompt.replace('[LABEL]', label)\n return (prompt, prompt)"
},
{
"identifier": "prompt_bilingual",
"path": "prompt_utils.py",
"snippet": "def prompt_bilingual(src_text, con_text, tgt_text, src_lang, con_lang, is_encoder_decoder):\n prompt = random.choice(BILINGUAL_PROMPTS)\n prompt = prompt.replace('[SOURCE_LANG]', src_lang)\n prompt = prompt.replace('[CONTEXT_LANG]', con_lang)\n prompt = prompt.replace('[SOURCE_TEXT]', src_text)\n prompt = prompt.replace('[CONTEXT]', con_text)\n if is_encoder_decoder:\n prompt = prompt.replace('[TARGET_TEXT]', '')\n return (prompt, tgt_text)\n else:\n prompt = prompt.replace('[TARGET_TEXT]', tgt_text)\n return (prompt, prompt)"
}
] | import logging
import os
import sys
import random
import numpy as np
import pandas as pd
import torch
import transformers
import datasets
from dataclasses import dataclass, field
from typing import Optional
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoModelForCausalLM,
AutoTokenizer,
DataCollatorWithPadding,
DataCollatorForLanguageModeling,
DataCollatorForSeq2Seq,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from peft import prepare_model_for_int8_training
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
from data_utils import load_flores_datasets, load_rehearsal_dataset
from augmentation_utils import do_augment
from prompt_utils import prompt_monolingual, prompt_translation, prompt_xss, prompt_bilingual | 3,399 | "than this will be truncated, sequences shorter will be padded."
)
},
)
max_target_length: Optional[int] = field(
default=128,
metadata={
"help": (
"The maximum total sequence length for target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
},
)
pad_to_max_length: bool = field(
default=False,
metadata={
"help": (
"Whether to pad all samples to model maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
)
},
)
num_beams: Optional[int] = field(
default=1,
metadata={
"help": (
"Number of beams to use for evaluation. This argument will be passed to ``model.generate``, "
"which is used during ``evaluate`` and ``predict``."
)
},
)
ignore_pad_token_for_loss: bool = field(
default=True,
metadata={
"help": "Whether to ignore the tokens corresponding to padded labels in the loss computation or not."
},
)
augmentation_type: str = field(
default='monolingual',
metadata={
"help": "Mode for data augmentation (monolingual / translation / bilingual / random)."
},
)
continual_type: str = field(
default=None,
metadata={
"help": "Mode for continual learning method (rehearsal / None)."
},
)
continual_size: int = field(
default=100,
metadata={
"help": "Mode for data (monolingual / translation / bilingual / random)."
},
)
num_train_ratio: float = field(
default=1.0,
metadata={
"help": "Number of samples to be taken from FLORES"
},
)
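# A hypothetical launch sketch showing how these dataclass fields surface as
# CLI flags through HfArgumentParser (the model name, paths and values below
# are placeholders, not taken from the original project):
#
#   python run_t2t_finetuning.py \
#       --model_name_or_path bigscience/mt0-small \
#       --augmentation_type bilingual \
#       --continual_type rehearsal --continual_size 100 \
#       --num_train_ratio 1.0 \
#       --output_dir ./outputs --do_train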
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
logger.info(f"Training/evaluation parameters {training_args}")
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Set seed before initializing model.
set_seed(training_args.seed)
# Load the datasets
| #!/usr/bin/env python
# coding=utf-8
# Copyright The HuggingFace Team and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for sequence to sequence.
"""
# You can also adapt this script on your own sequence to sequence task. Pointers for this are left as comments.
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Where to store the pretrained models downloaded from huggingface.co"},
)
use_fast_tokenizer: bool = field(
default=True,
metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
use_auth_token: bool = field(
default=False,
metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
},
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
dataset_name: Optional[str] = field(
default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
)
dataset_config_name: Optional[str] = field(
default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
preprocessing_num_workers: Optional[int] = field(
default=None,
metadata={"help": "The number of processes to use for the preprocessing."},
)
max_source_length: Optional[int] = field(
default=128,
metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
},
)
max_target_length: Optional[int] = field(
default=128,
metadata={
"help": (
"The maximum total sequence length for target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
},
)
pad_to_max_length: bool = field(
default=False,
metadata={
"help": (
"Whether to pad all samples to model maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
)
},
)
num_beams: Optional[int] = field(
default=1,
metadata={
"help": (
"Number of beams to use for evaluation. This argument will be passed to ``model.generate``, "
"which is used during ``evaluate`` and ``predict``."
)
},
)
ignore_pad_token_for_loss: bool = field(
default=True,
metadata={
"help": "Whether to ignore the tokens corresponding to padded labels in the loss computation or not."
},
)
augmentation_type: str = field(
default='monolingual',
metadata={
"help": "Mode for data augmentation (monolingual / translation / bilingual / random)."
},
)
continual_type: str = field(
default=None,
metadata={
"help": "Mode for continual learning method (rehearsal / None)."
},
)
continual_size: int = field(
default=100,
metadata={
"help": "Mode for data (monolingual / translation / bilingual / random)."
},
)
num_train_ratio: float = field(
default=1.0,
metadata={
"help": "Number of samples to be taken from FLORES"
},
)
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
logger.info(f"Training/evaluation parameters {training_args}")
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Set seed before initializing model.
set_seed(training_args.seed)
# Load the datasets | raw_datasets = load_flores_datasets(pivot_langs=['eng_Latn'], augmentation=data_args.augmentation_type, num_train_ratio=data_args.num_train_ratio) | 0 | 2023-10-24 07:46:05+00:00 | 4k |
acolas1/KGSimple | eval_KGSimp/eval_baselines.py | [
{
"identifier": "args",
"path": "cli.py",
"snippet": ""
},
{
"identifier": "SaliencyBERTScore",
"path": "scoring/saliency_scorer.py",
"snippet": "class SaliencyBERTScore:\n def __init__(self, lmscorer = \"bertscore\", lang=\"en\"):\n self.bertscore = evaluate.load(lmscorer)\n self.lang = lang\n\n\n def calc_BERT_score(self, predictions, references, sigmoid):\n results = self.bertscore.compute(predictions=predictions, references=references, lang=self.lang)\n if sigmoid:\n results = expit(results)\n return results\n\n def score_batched(self, generated_text, source_text=None, sigmoid=False, printing=False, **kwargs):\n gen_score, source_score = None, None\n bert_score = self.calc_BERT_score(generated_text, source_text, sigmoid)\n f1 = bert_score['f1']\n \n if printing:\n print(\"scores: \", str(f1))\n return {\"scores\": f1}\n\n def score(self, generated_text, source_text=None, sigmoid=False, printing=False, **kwargs):\n gen_score, source_score = None, None\n bert_score = self.calc_BERT_score([generated_text], [source_text], sigmoid)\n f1 = bert_score['f1']\n \n if printing:\n print(\"scores: \", str(f1))\n return {\"scores\": f1}"
},
{
"identifier": "FluencyScorer",
"path": "scoring/fluency_scorer.py",
"snippet": "class FluencyScorer:\n def __init__(self, batch_size=1, reduce=\"mean\", log=True, laplace_smooth=False, prob_dict_path=None):\n self.device = \"cuda:1\" if torch.cuda.is_available() else \"cpu\"\n self.batch_size = batch_size\n self.reduce = reduce\n self.log = log\n self.laplace_smooth = laplace_smooth\n self.tokenizer = GPT2Tokenizer.from_pretrained(\"gpt2\")\n self.scorer = LMScorer.from_pretrained(\"gpt2\", device=self.device, batch_size=batch_size)\n self.idf_df = pd.read_csv(prob_dict_path, ',', encoding='utf-8')\n self.freq_dict = pd.Series((self.idf_df.frequency.values), index=self.idf_df.token).to_dict()\n self.num_tokens = self.idf_df.total.values[0] \n \n def unigram_score(self, sentences):\n if self.freq_dict is None:\n raise Exception(\"Probability dictionary is not defined.\") \n unigram_scores = []\n for sent in sentences:\n unigram_prob = 1\n for token in word_tokenize(sent.lower()):\n if token in self.freq_dict:\n if self.laplace_smooth:\n curr_unigram_prob = (self.freq_dict[token]+1)/(self.num_tokens+len(self.freq_dict))\n else:\n curr_unigram_prob = self.freq_dict[token]/self.num_tokens\n \n \n\n else:\n if self.laplace_smooth:\n curr_unigram_prob = (1/(self.num_tokens+len(self.freq_dict)))\n else:\n curr_unigram_prob = 1\n # unigram_prob += curr_unigram_prob\n \n \n if self.log:\n unigram_prob +=np.log(curr_unigram_prob)\n else:\n unigram_prob *= curr_unigram_prob\n uni_score = unigram_prob/len(word_tokenize(sent))\n unigram_scores.append(uni_score)\n return unigram_scores\n \n def SLOR_score(self, sentence_list, lm_score, unigram_score):\n SLOR_scores = []\n for i in range(len(sentence_list)):\n SLOR_score = lm_score[i]-unigram_score[i]\n if self.log:\n SLOR_score = math.exp(lm_score[i]-unigram_score[i])\n SLOR_scores.append(SLOR_score)\n return SLOR_scores\n \n def score_batched(self, generated_texts, source_texts=None, printing=False, **kwargs):\n sources_SLOR_score, generateds_SLOR_score = None, None\n if source_texts:\n sources_lm_prob_scores = self.scorer.sentence_score(source_texts, reduce=self.reduce, log=self.log)\n sources_unigram_scores = self.unigram_score(source_texts)\n sources_SLOR_score = self.SLOR_score(source_texts, sources_lm_prob_scores, sources_unigram_scores)\n\n\n\n generateds_lm_prob_scores = self.scorer.sentence_score(generated_texts, reduce=self.reduce, log=self.log)\n generateds_unigram_scores = self.unigram_score(generated_texts)\n generateds_SLOR_score = self.SLOR_score(generated_texts, generateds_lm_prob_scores, generateds_unigram_scores)\n \n if printing:\n print(\"[source_sents]\", source_texts)\n print(\"[source_lm]\", sources_lm_prob_scores)\n print(\"[source_unigram]\", sources_unigram_scores)\n print(\"[source_scores]\", sources_SLOR_score)\n print(\"[generated_sents]\", generated_texts)\n print(\"[generated_lm]\", generateds_lm_prob_scores)\n print(\"[generated_unigram]\", generateds_unigram_scores)\n print(\"[generated_scores]\", generateds_SLOR_score)\n return {\"scores\": generateds_SLOR_score, \"source_scores\": sources_SLOR_score}\n\n def score(self, generated_text, source_text=None, printing=False, **kwargs):\n # sources_lm_prob_score = scorer.sentence_score(source_list, reduce=\"mean\")\n \n sources_SLOR_score, generateds_SLOR_score = None, None\n if source_text:\n source_list = [source_text]\n sources_lm_prob_scores = self.scorer.sentence_score(source_list, reduce=self.reduce, log=self.log)\n sources_unigram_scores = self.unigram_score(source_list)\n sources_SLOR_score = self.SLOR_score(source_list, 
sources_lm_prob_scores, sources_unigram_scores)\n \n \n \n generateds_list = [generated_text]\n generateds_lm_prob_scores = self.scorer.sentence_score(generateds_list, reduce=self.reduce, log=self.log)\n generateds_unigram_scores = self.unigram_score(generateds_list)\n generateds_SLOR_score = self.SLOR_score(generateds_list, generateds_lm_prob_scores, generateds_unigram_scores)\n \n if printing:\n print(\"[source_sents]\", source_text)\n print(\"[source_lm]\", sources_lm_prob_scores)\n print(\"[source_unigram]\", sources_unigram_scores)\n print(\"[source_scores]\", sources_SLOR_score)\n print(\"[generated_sents]\", generated_text)\n print(\"[generated_lm]\", generateds_lm_prob_scores)\n print(\"[generated_unigram]\", generateds_unigram_scores)\n print(\"[generated_scores]\", generateds_SLOR_score)\n return {\"scores\": generateds_SLOR_score, \"source_scores\": sources_SLOR_score}"
}
] | import os
import sys
import logging
import random
import numpy as np
import torch
import pandas as pd
import stanza
import sacrebleu.tokenizers.tokenizer_13a as tok
from ast import literal_eval
from eval_utils import *
from eval_batched import *
from cli import args
from scoring.saliency_scorer import SaliencyBERTScore
from scoring.fluency_scorer import FluencyScorer
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from transformers import pipeline
| 1,745 | #### read in result files, format, run eval functions
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# setting path
sys.path.append('../../')
sys.path.append('/blue/daisyw/acolas1/KGSimplification/')
def eval():
| #### read in result files, format, run eval functions
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# setting path
sys.path.append('../../')
sys.path.append('/blue/daisyw/acolas1/KGSimplification/')
def eval():
| eval_mod = args.eval_mod ## model + eval type
| 0 | 2023-10-24 13:24:23+00:00 | 4k |
yuanxy92/DANTE | train_electric_optical_kernel.py | [
{
"identifier": "train_complex",
"path": "optical_layer.py",
"snippet": "def train_complex(label_nopad, folder_prefix, epoch, whole_dim, phase_dim, wave_lambda, focal_length, pixel_size, compute_loss_region, factor):\n image = np.zeros((1, whole_dim, whole_dim))\n image[0, whole_dim//2, whole_dim//2] = 1\n image = torch.tensor(image, dtype=torch.complex64)\n label = padding(label_nopad, whole_dim)\n\n loss_slice = slice(whole_dim//2-compute_loss_region//2, whole_dim//2+compute_loss_region//2)\n\n def cropped_loss(output, target):\n diff = (output-target)[:, loss_slice, loss_slice]\n return torch.mean(torch.abs(diff)**2)\n\n onn = FourierConvComplex(whole_dim, phase_dim, pixel_size, focal_length, wave_lambda)\n onn, image, label = onn.cuda(), image.cuda(), label.cuda()\n optimizer = torch.optim.Adam(onn.parameters(), lr=0.5)\n # scheduler = LambdaLR(optimizer, lr_lambda=adjust_lr)\n scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(\n optimizer, mode='min', factor=0.5, patience=150)\n\n now = time.strftime('%m%d_%H%M%S', time.localtime(time.time()))\n folder = folder_prefix + now\n if not os.path.exists(folder):\n os.makedirs(folder)\n os.makedirs(folder+'/results')\n shutil.copy(__file__, folder)\n params = {\n 'whole_dim': whole_dim,\n 'phase_dim': phase_dim,\n 'wave_lambda': wave_lambda,\n 'focal_length': focal_length,\n 'pixel_size': pixel_size,\n 'compute_loss_region': compute_loss_region,\n 'factor': factor\n }\n torch.save(params, folder+'/params.pt')\n\n logging_file = folder+\"/Fitting_loss.log\"\n log_f = open(logging_file, 'w')\n\n start = torch.cuda.Event(enable_timing=True)\n end = torch.cuda.Event(enable_timing=True)\n start.record()\n for epoch in range(1, epoch+1):\n # Compute prediction and loss\n pred = onn(image*factor)\n loss = cropped_loss(pred, label*factor)\n\n # Backpropagation\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n loss = loss.item()\n scheduler.step(loss)\n\n log_f.write(f\"epoch: {epoch} loss: {loss:>8f} Current learning rate: {optimizer.param_groups[0]['lr']}\\r\\n\")\n\n if epoch % 3000 == 0:\n print(f\"epoch: {epoch} loss: {loss:>8f} Current learning rate: {optimizer.param_groups[0]['lr']}\")\n print(f\"Saving results for epoch {epoch}\")\n end.record()\n torch.cuda.synchronize()\n print(\n f'Running time for 3000 epoch: {start.elapsed_time(end) / 1000} s')\n\n # print(f\"Learning rate: {scheduler.get_lr()}\")\n temp_phase = onn.phase.w_p.cpu().detach().numpy()\n temp_scalar = onn.w_scalar.cpu().detach().numpy()\n weights_dict = {\n 'phase': temp_phase,\n 'scalar': temp_scalar,\n 'pred': pred,\n 'label': label\n }\n torch.save(weights_dict, folder +\n '/results/weights_%05d.pth' % epoch)\n pred_show = torch.abs(pred).cpu().detach().numpy()[\n 0, loss_slice, loss_slice]\n label_show = (torch.abs(label).cpu().detach().numpy()[\n 0, loss_slice, loss_slice])*factor\n plt.subplot(131)\n plt.imshow(pred_show)\n plt.colorbar()\n plt.subplot(132)\n plt.imshow(label_show)\n plt.colorbar()\n plt.subplot(133)\n plt.imshow(pred_show-label_show)\n plt.colorbar()\n plt.savefig(folder+'/results/output_%05d.png' % epoch)\n\n if loss < 0.01:\n break\n start.record()\n \n log_f.close()\n return folder, loss"
},
{
"identifier": "padding",
"path": "utils.py",
"snippet": "def padding(array, whole_dim):\n # pad square array\n array_size = array.shape[-1]\n pad_size1 = (whole_dim-array_size)//2\n pad_size2 = whole_dim-array_size-pad_size1\n array_pad = F.pad(array, (pad_size1, pad_size2, pad_size1, pad_size2))\n return array_pad"
},
{
"identifier": "tile_kernels",
"path": "utils.py",
"snippet": "def tile_kernels(array, numx, numy):\n # array should be [C*M*N]\n temp_list = []\n for i in range(numx):\n temp_list.append(torch.cat(\n [array[i*numx+j] for j in range(numy)], -1))\n\n newarray = torch.cat(temp_list, -2)\n return newarray"
}
] | import torch.nn.functional as F
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt
import os
import os.path
import time
import torch
import math
import platform
import torchsummary
import logging
import math
from optical_layer import train_complex
from utils import padding, tile_kernels
from importlib.machinery import SourceFileLoader | 2,523 | elif platform.system().lower() == 'linux':
server_dir = '/data/xiaoyun/Elec-Opt-D2NN/'
os.environ["CUDA_VISIBLE_DEVICES"] = '1'
device_cpu = torch.device('cpu')
device_gpu = torch.device('cuda:0')
model_idx = '3bs'
whole_dim = 2000
phase_dim = 1200
wave_lambda = 532e-9
focal_length = 14.5e-2
pixel_size = 8e-6
factor = 100
train_epoch = 12000
dataset_input_shape = (3, 32, 32) # for cifar-10 and imagenet32 dataset
fc_in = 324
fc_out = 10
################################################################################
########## fashionmnist and cifar-10 ONN #################
################################################################################
# load network
if model_idx == '3bs':
# Three layers best model
folder_to_fit = server_dir + 'results_nc/cifar_3layers_best_0713_091820'
test_epoch = 190
foo = SourceFileLoader(
"a", folder_to_fit+"/electric_network.py").load_module()
CNN = foo.CNN_complex_3layers_best_small
################################################################################
########## Load ONN, start to fit ############
################################################################################
with torch.no_grad():
net = CNN(fc_in, fc_out)
net.load_state_dict(torch.load(
folder_to_fit + '/models/%03d.pth' % test_epoch))
net.eval()
net_gpu = net.to(device_gpu)
# net = net.to(device_cpu)
netsummary = torchsummary.summary(net_gpu, dataset_input_shape)
now = time.strftime('%m%d_%H%M%S', time.localtime(time.time()))
logging_file = folder_to_fit+"/Elec_to_optical_fitting__" + now + ".log"
if os.path.isfile(logging_file):
os.remove(logging_file)
logging.basicConfig(
level=logging.INFO,
format="%(message)s",
handlers=[
logging.FileHandler(logging_file),
logging.StreamHandler()
])
layer_idx = 0
for layer_key, layer_value in netsummary.items():
if layer_key.startswith('ComplexConv2d'):
# get parameters of layers
num_group = layer_value['groups']
num_padding = layer_value['padding']
output_shape_total = layer_value['output_shape']
input_shape_total = layer_value['input_shape']
complex_kernel_total = layer_value['complex_kernel'].cpu().detach()
w = layer_value['w'].cpu().detach()
for group_idx in range(num_group):
# re-calculate shape
input_shape = (input_shape_total[0], int(
input_shape_total[1] / num_group), input_shape_total[2], input_shape_total[3])
output_shape = (output_shape_total[0], int(
output_shape_total[1] / num_group), output_shape_total[2], output_shape_total[3])
kernel_out_step = int(
complex_kernel_total.shape[0] / num_group)
complex_kernel = complex_kernel_total[group_idx * kernel_out_step:(
group_idx + 1) * kernel_out_step, :, :, :]
# get required size and padding kernel size
kernel_shape = complex_kernel.shape
imgsize = input_shape[2] + 2*num_padding
psf = padding(complex_kernel, imgsize)
psf_shape = psf.shape
print(input_shape, kernel_shape, psf_shape)
# get input and output channels
input_ch_num = input_shape[1]
output_ch_num = output_shape[1]
input_ch_len = int(math.ceil(math.sqrt(input_ch_num)))
output_ch_len = int(math.ceil(math.sqrt(output_ch_num)))
# padding kernel channels to square
if input_ch_len ** 2 - psf_shape[1] > 0:
psf_padding = torch.zeros(
psf_shape[0], input_ch_len ** 2 - psf_shape[1], psf_shape[2], psf_shape[3])
psf = torch.cat((psf, psf_padding), dim=1)
psf_shape = psf.shape
if output_ch_len ** 2 - psf_shape[0] > 0:
psf_padding = torch.zeros(
output_ch_len ** 2 - psf_shape[0], psf_shape[1], psf_shape[2], psf_shape[3])
psf = torch.cat((psf, psf_padding), dim=0)
psf_shape = psf.shape
# tile kernels
psf = torch.transpose(psf, 0, 1)
psf = tile_kernels(psf, input_ch_len, input_ch_len)
psf_to_fit = tile_kernels(
psf, output_ch_len, output_ch_len).unsqueeze(0).detach()
psf_fitting_size = psf_to_fit.shape[1] + psf.shape[1]
# rounded_size = int(math.ceil(psf_fitting_size / 50) * 50)
rounded_size = psf_fitting_size
# print layer fit
print(
f'Fitting layer {layer_key}, group {group_idx}, psf {tuple(psf_to_fit.shape)}, fitting size {psf_fitting_size}, rounded size {rounded_size}')
# start to fit
compute_loss_region = psf_fitting_size # rounded_size
out_layer_name = layer_key + '_group_%d_' % group_idx
folder_prefix = folder_to_fit + '/' + out_layer_name
print('Fitting layer %s group %d ...' % (layer_key, group_idx))
with torch.enable_grad():
| # -*- coding: utf-8 -*-
########################################################################
if platform.system().lower() == 'windows':
server_dir = './'
# os.environ["CUDA_VISIBLE_DEVICES"] = '0'
elif platform.system().lower() == 'linux':
server_dir = '/data/xiaoyun/Elec-Opt-D2NN/'
os.environ["CUDA_VISIBLE_DEVICES"] = '1'
device_cpu = torch.device('cpu')
device_gpu = torch.device('cuda:0')
model_idx = '3bs'
whole_dim = 2000
phase_dim = 1200
wave_lambda = 532e-9
focal_length = 14.5e-2
pixel_size = 8e-6
factor = 100
train_epoch = 12000
dataset_input_shape = (3, 32, 32) # for cifar-10 and imagenet32 dataset
fc_in = 324
fc_out = 10
################################################################################
########## fashionmnist and cifar-10 ONN #################
################################################################################
# load network
if model_idx == '3bs':
# Three layers best model
folder_to_fit = server_dir + 'results_nc/cifar_3layers_best_0713_091820'
test_epoch = 190
foo = SourceFileLoader(
"a", folder_to_fit+"/electric_network.py").load_module()
CNN = foo.CNN_complex_3layers_best_small
################################################################################
########## Load ONN, start to fit ############
################################################################################
with torch.no_grad():
net = CNN(fc_in, fc_out)
net.load_state_dict(torch.load(
folder_to_fit + '/models/%03d.pth' % test_epoch))
net.eval()
net_gpu = net.to(device_gpu)
# net = net.to(device_cpu)
netsummary = torchsummary.summary(net_gpu, dataset_input_shape)
now = time.strftime('%m%d_%H%M%S', time.localtime(time.time()))
logging_file = folder_to_fit+"/Elec_to_optical_fitting__" + now + ".log"
if os.path.isfile(logging_file):
os.remove(logging_file)
logging.basicConfig(
level=logging.INFO,
format="%(message)s",
handlers=[
logging.FileHandler(logging_file),
logging.StreamHandler()
])
layer_idx = 0
for layer_key, layer_value in netsummary.items():
if layer_key.startswith('ComplexConv2d'):
# get parameters of layers
num_group = layer_value['groups']
num_padding = layer_value['padding']
output_shape_total = layer_value['output_shape']
input_shape_total = layer_value['input_shape']
complex_kernel_total = layer_value['complex_kernel'].cpu().detach()
w = layer_value['w'].cpu().detach()
for group_idx in range(num_group):
# re-calculate shape
input_shape = (input_shape_total[0], int(
input_shape_total[1] / num_group), input_shape_total[2], input_shape_total[3])
output_shape = (output_shape_total[0], int(
output_shape_total[1] / num_group), output_shape_total[2], output_shape_total[3])
kernel_out_step = int(
complex_kernel_total.shape[0] / num_group)
complex_kernel = complex_kernel_total[group_idx * kernel_out_step:(
group_idx + 1) * kernel_out_step, :, :, :]
# get required size and padding kernel size
kernel_shape = complex_kernel.shape
imgsize = input_shape[2] + 2*num_padding
psf = padding(complex_kernel, imgsize)
psf_shape = psf.shape
print(input_shape, kernel_shape, psf_shape)
# get input and output channels
input_ch_num = input_shape[1]
output_ch_num = output_shape[1]
input_ch_len = int(math.ceil(math.sqrt(input_ch_num)))
output_ch_len = int(math.ceil(math.sqrt(output_ch_num)))
# padding kernel channels to square
if input_ch_len ** 2 - psf_shape[1] > 0:
psf_padding = torch.zeros(
psf_shape[0], input_ch_len ** 2 - psf_shape[1], psf_shape[2], psf_shape[3])
psf = torch.cat((psf, psf_padding), dim=1)
psf_shape = psf.shape
if output_ch_len ** 2 - psf_shape[0] > 0:
psf_padding = torch.zeros(
output_ch_len ** 2 - psf_shape[0], psf_shape[1], psf_shape[2], psf_shape[3])
psf = torch.cat((psf, psf_padding), dim=0)
psf_shape = psf.shape
# tile kernels
psf = torch.transpose(psf, 0, 1)
psf = tile_kernels(psf, input_ch_len, input_ch_len)
psf_to_fit = tile_kernels(
psf, output_ch_len, output_ch_len).unsqueeze(0).detach()
psf_fitting_size = psf_to_fit.shape[1] + psf.shape[1]
# rounded_size = int(math.ceil(psf_fitting_size / 50) * 50)
rounded_size = psf_fitting_size
# print layer fit
print(
f'Fitting layer {layer_key}, group {group_idx}, psf {tuple(psf_to_fit.shape)}, fitting size {psf_fitting_size}, rounded size {rounded_size}')
# start to fit
compute_loss_region = psf_fitting_size # rounded_size
out_layer_name = layer_key + '_group_%d_' % group_idx
folder_prefix = folder_to_fit + '/' + out_layer_name
print('Fitting layer %s group %d ...' % (layer_key, group_idx))
with torch.enable_grad(): | folder_fitting, loss = train_complex(psf_to_fit, folder_prefix, train_epoch, whole_dim, phase_dim, wave_lambda, focal_length, pixel_size, compute_loss_region, factor) | 0 | 2023-10-19 10:42:47+00:00 | 4k |
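Editor's aside: a minimal, runnable sketch (not part of the dataset row above) of the tile_kernels helper from that row's context, which lays a stack of C = numx * numy kernels of shape [C, M, N] out as a 2D mosaic of shape [numx * M, numy * N]; the toy shapes below are assumptions.
import torch

def tile_kernels(array, numx, numy):
    # concatenate numy kernels side by side per row, then stack the rows
    # (mirrors the snippet's i * numx + j indexing, which assumes numx == numy)
    rows = []
    for i in range(numx):
        rows.append(torch.cat([array[i * numx + j] for j in range(numy)], -1))
    return torch.cat(rows, -2)

kernels = torch.arange(4 * 3 * 3).reshape(4, 3, 3)  # C=4, M=N=3
mosaic = tile_kernels(kernels, 2, 2)
print(mosaic.shape)  # torch.Size([6, 6])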
CAMeL-Lab/camel_parser | src/data_preparation.py | [
{
"identifier": "ConllParams",
"path": "src/classes.py",
"snippet": "class ConllParams:\n file_path: str\n parse_model_path: str\n \n def __iter__(self):\n return iter(astuple(self))"
},
{
"identifier": "TextParams",
"path": "src/classes.py",
"snippet": "class TextParams:\n lines: List[str]\n parse_model_path: str\n arclean: CharMapper\n disambiguator_param: Union[BERTUnfactoredDisambiguator, MLEDisambiguator, str]\n clitic_feats_df: pd.DataFrame\n tagset: str\n morphology_db_type: str\n \n def __iter__(self):\n return iter(astuple(self))"
},
{
"identifier": "PreprocessedTextParams",
"path": "src/classes.py",
"snippet": "class PreprocessedTextParams:\n lines: List[str]\n parse_model_path: str\n disambiguator: Union[BERTUnfactoredDisambiguator, MLEDisambiguator, str]\n clitic_feats_df: pd.DataFrame\n tagset: str\n morphology_db_type: str\n \n def __iter__(self):\n return iter(astuple(self))"
},
{
"identifier": "TokenizedParams",
"path": "src/classes.py",
"snippet": "class TokenizedParams:\n lines: List[str]\n parse_model_path: str"
},
{
"identifier": "TokenizedTaggedParams",
"path": "src/classes.py",
"snippet": "class TokenizedTaggedParams:\n lines: List[str]\n parse_model_path: str"
},
{
"identifier": "parse_conll",
"path": "src/dependency_parser/biaff_parser.py",
"snippet": "def parse_conll(conll_path: str, parse_model) -> List[List[tuple]]:\n conll = parse(conll_path, parse_model=parse_model)\n for i, sent in enumerate(conll):\n conll[i].values[1] = [filter_tatweel(form) for form in sent.values[1]]\n return parser_conll_to_conll_tuples(conll)"
},
{
"identifier": "parse_text_tuples",
"path": "src/dependency_parser/biaff_parser.py",
"snippet": "def parse_text_tuples(sentence_tuples: List[List[tuple]], parse_model) -> List[List[tuple]]:\n sentence_tuples = [[val[1:4] for val in sent] for sent in sentence_tuples]\n form_lemma_pos_tuple = [[(filter_tatweel(dediac_ar(val[0])), filter_tatweel(dediac_ar(val[1])), val[2]) for val in sent] for sent in sentence_tuples]\n conll = parse(form_lemma_pos_tuple, parse_model=parse_model)\n return parser_conll_to_conll_tuples(conll)"
},
{
"identifier": "get_disambiguator",
"path": "src/initialize_disambiguator/disambiguator_interface.py",
"snippet": "@log\ndef get_disambiguator(model_name: str, morphology_db: str) -> Union[MLEDisambiguatorAdapter, BERTUnfactoredDisambiguator]:\n analyzer = set_up_analyzer(morphology_db)\n \n if model_name == 'mle':\n model = MLEDisambiguatorAdapter(analyzer)\n elif model_name == 'bert':\n model = create_bert_disambiguator(analyzer)\n else:\n raise ValueError('Invalid model')\n \n return model"
},
{
"identifier": "to_sentence_analysis_list",
"path": "src/parse_disambiguation/disambiguation_analysis.py",
"snippet": "def to_sentence_analysis_list(disambiguated_sentences: List[List[DisambiguatedWord]]) -> List[List[Token]]:\n return [get_sentence_analysis(disambiguated_sentence, 'top') for disambiguated_sentence in disambiguated_sentences]"
},
{
"identifier": "to_conll_fields_list",
"path": "src/parse_disambiguation/feature_extraction.py",
"snippet": "def to_conll_fields_list(sentence_analysis_list: List[List[dict]], clitic_feats, tagset):\n sentence_features_list = []\n \n for sentence_analysis in sentence_analysis_list:\n sentence_features = {'tokens': [], 'lemmas': [], 'pos_tags': [], 'feats': []}\n for word_analysis in sentence_analysis:\n word_features_df = get_word_features_df(word_analysis, clitic_feats)\n \n word_features = join_feats(word_features_df, tagset)\n sentence_features = update_sentence_features(sentence_features, word_features)\n token_list = build_token_list(sentence_features)\n sentence_features_list.append(token_list)\n \n return sentence_features_list"
},
{
"identifier": "clean_lines",
"path": "src/utils/text_cleaner.py",
"snippet": "def clean_lines(lines, arclean):\n return [clean_line(line, arclean) for line in lines]"
},
{
"identifier": "split_lines_words",
"path": "src/utils/text_cleaner.py",
"snippet": "def split_lines_words(lines):\n return [line.strip().split() for line in lines]"
},
{
"identifier": "log",
"path": "src/logger.py",
"snippet": "def log(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n try:\n\n start_time = time.time()\n result = func(*args, **kwargs)\n end_time = time.time()\n \n with open(log_path, 'a') as f:\n f.write(f'{map_function_to_phrase(func.__name__)}: {round(end_time - start_time, 2)}s\\n')\n return result\n except Exception as e:\n logger.exception(f\"Exception raised in {func.__name__}. exception: {str(e)}\")\n raise e\n\n return wrapper"
}
] | import re
import pandas as pd
from typing import List, Union
from camel_tools.disambig.common import DisambiguatedWord
from src.classes import ConllParams, TextParams, PreprocessedTextParams, TokenizedParams, TokenizedTaggedParams
from src.dependency_parser.biaff_parser import parse_conll, parse_text_tuples
from src.initialize_disambiguator.disambiguator_interface import get_disambiguator
from src.parse_disambiguation.disambiguation_analysis import to_sentence_analysis_list
from src.parse_disambiguation.feature_extraction import to_conll_fields_list
from src.utils.text_cleaner import clean_lines, split_lines_words
from src.logger import log | 2,083 |
FileTypeParams = Union[ConllParams, TextParams, PreprocessedTextParams, TokenizedParams, TokenizedTaggedParams]
def get_feats_from_text_tuples(text_tuples: List[List[tuple]]) -> List[List[str]]:
"""Extract the FEATS columns from the unparsed data.
FEATS will exist only for text and pre-processed text inputs.
Args:
text_tuples (List[List[tuple]]): unparsed data
Returns:
List[List[str]]: the FEATS column (or _ if it does not exist)
"""
try:
return [[col_items[5] for col_items in tup_list] for tup_list in text_tuples]
except Exception as e:
print(e)
print('Not enough elements in tuple.')
def add_feats(text_tuples: List[List[tuple]], text_feats: List[List[str]]) -> List[List[tuple]]:
"""Add FEATS data to the text tuples.
The parent list (text_tuples) is a list of sentences.
Each sentence is a list of tuples.
Each tuple represents a token.
Args:
text_tuples (List[List[tuple]]): list of list of tuples
text_feats (List[List[str]]): list of list of FEATS
Returns:
List[List[tuple]]: text_tuples but with the FEATS column filled
"""
text_tuples_with_feats = []
for sentence_tuples, sentence_feats in zip(text_tuples, text_feats):
# get first 5 and last 4 items from parsed tuple using lists, and add features.
# Convert the list of fields to a tuple
merged_tuples = [
tuple(list(token_tuple[:5]) + [token_feats] + list(token_tuple[6:]))
for token_tuple, token_feats in zip(sentence_tuples, sentence_feats)
]
text_tuples_with_feats.append(merged_tuples)
return text_tuples_with_feats
def string_to_tuple_list(string_of_tuples: str) -> List[tuple[str, str]]:
"""Take a string of space-separated tuples and convert it to a tuple list.
Example input: '(جامعة, NOM) (نيويورك, PROP)'
Example output: [(جامعة, NOM), (نيويورك, PROP)]
Args:
string_of_tuples (str): string of tuples
Returns:
        List[tuple[str, str]]: list of token-pos tuple pairs
"""
sentence_tuples = []
# split on space, and using positive lookbehind and lookahead
# to detect parentheses around the space
for tup in re.split(r'(?<=\)) (?=\()', string_of_tuples.strip()):
# tup = (جامعة, NOM)
tup_items = tup[1:-1] # removes parens
form = (','.join(tup_items.split(',')[:-1])).strip() # account for comma tokens
pos = (tup_items.split(',')[-1]).strip()
sentence_tuples.append((form, pos))
return sentence_tuples
def get_tree_tokens(tok_pos_tuples):
sentences = []
for sentence_tuples in tok_pos_tuples:
sentence = ' '.join([tok_pos_tuple[0] for tok_pos_tuple in sentence_tuples])
sentences.append(sentence)
return sentences
def handle_conll(file_type_params):
file_path, parse_model_path = file_type_params
# pass the path to the text file and the model path and name, and get the tuples
return parse_conll(file_path, parse_model=parse_model_path)
def handle_preprocessed_text(file_type_params):
lines, _, disambiguator_param, clitic_feats_df, tagset, morphology_db_type = file_type_params
|
FileTypeParams = Union[ConllParams, TextParams, PreprocessedTextParams, TokenizedParams, TokenizedTaggedParams]
def get_feats_from_text_tuples(text_tuples: List[List[tuple]]) -> List[List[str]]:
"""Extract the FEATS columns from the unparsed data.
FEATS will exist only for text and pre-processed text inputs.
Args:
text_tuples (List[List[tuple]]): unparsed data
Returns:
List[List[str]]: the FEATS column (or _ if it does not exist)
"""
try:
return [[col_items[5] for col_items in tup_list] for tup_list in text_tuples]
except Exception as e:
print(e)
print('Not enough elements in tuple.')
def add_feats(text_tuples: List[List[tuple]], text_feats: List[List[str]]) -> List[List[tuple]]:
"""Add FEATS data to the text tuples.
The parent list (text_tuples) is a list of sentences.
Each sentence is a list of tuples.
Each tuple represents a token.
Args:
text_tuples (List[List[tuple]]): list of list of tuples
text_feats (List[List[str]]): list of list of FEATS
Returns:
List[List[tuple]]: text_tuples but with the FEATS column filled
"""
text_tuples_with_feats = []
for sentence_tuples, sentence_feats in zip(text_tuples, text_feats):
# get first 5 and last 4 items from parsed tuple using lists, and add features.
# Convert the list of fields to a tuple
merged_tuples = [
tuple(list(token_tuple[:5]) + [token_feats] + list(token_tuple[6:]))
for token_tuple, token_feats in zip(sentence_tuples, sentence_feats)
]
text_tuples_with_feats.append(merged_tuples)
return text_tuples_with_feats
def string_to_tuple_list(string_of_tuples: str) -> List[tuple[str, str]]:
"""Take a string of space-separated tuples and convert it to a tuple list.
Example input: '(جامعة, NOM) (نيويورك, PROP)'
Example output: [(جامعة, NOM), (نيويورك, PROP)]
Args:
string_of_tuples (str): string of tuples
Returns:
        List[tuple[str, str]]: list of token-pos tuple pairs
"""
sentence_tuples = []
# split on space, and using positive lookbehind and lookahead
# to detect parentheses around the space
for tup in re.split(r'(?<=\)) (?=\()', string_of_tuples.strip()):
# tup = (جامعة, NOM)
tup_items = tup[1:-1] # removes parens
form = (','.join(tup_items.split(',')[:-1])).strip() # account for comma tokens
pos = (tup_items.split(',')[-1]).strip()
sentence_tuples.append((form, pos))
return sentence_tuples
def get_tree_tokens(tok_pos_tuples):
sentences = []
for sentence_tuples in tok_pos_tuples:
sentence = ' '.join([tok_pos_tuple[0] for tok_pos_tuple in sentence_tuples])
sentences.append(sentence)
return sentences
def handle_conll(file_type_params):
file_path, parse_model_path = file_type_params
# pass the path to the text file and the model path and name, and get the tuples
return parse_conll(file_path, parse_model=parse_model_path)
def handle_preprocessed_text(file_type_params):
lines, _, disambiguator_param, clitic_feats_df, tagset, morphology_db_type = file_type_params
| token_lines = split_lines_words(lines) | 11 | 2023-10-21 10:39:28+00:00 | 4k |
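A standalone sketch of the tuple-string parsing helper defined in the row above, using the docstring's own example as input (the commented output is what the regex split should produce):
import re

def string_to_tuple_list(string_of_tuples):
    # split on a space only when it sits between ')' and '(' (lookbehind/lookahead),
    # then strip the parentheses and split each pair on its last comma
    sentence_tuples = []
    for tup in re.split(r'(?<=\)) (?=\()', string_of_tuples.strip()):
        tup_items = tup[1:-1]  # removes parens
        form = (','.join(tup_items.split(',')[:-1])).strip()  # comma tokens survive
        pos = (tup_items.split(',')[-1]).strip()
        sentence_tuples.append((form, pos))
    return sentence_tuples

print(string_to_tuple_list('(جامعة, NOM) (نيويورك, PROP)'))
# [('جامعة', 'NOM'), ('نيويورك', 'PROP')]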
aiueola/neurips2023-future-dependent-ope | src/ope/value_based.py | [
{
"identifier": "DiscreteStateLSTMVfunction",
"path": "src/ope/v_func.py",
"snippet": "class DiscreteStateLSTMVfunction(nn.Module):\n def __init__(\n self,\n n_states: int = 500,\n n_actions: int = 6,\n memory_length: int = 0,\n future_length: int = 0,\n lstm_hidden_dim: int = 10,\n linear_hidden_dim: int = 100,\n ):\n super().__init__()\n self.n_states = n_states\n self.n_actions = n_actions\n self.is_memory_dependent = memory_length > 0\n self.is_future_dependent = future_length > 0\n\n self.lstm = nn.LSTM(\n (n_states * n_actions),\n lstm_hidden_dim,\n batch_first=True,\n bidirectional=True,\n )\n\n self.history_based_state_predictor = nn.Linear(lstm_hidden_dim * 2, n_states)\n self.memory_based_state_predictor = nn.Linear(lstm_hidden_dim * 2, n_states)\n self.future_based_state_predictor = nn.Linear(lstm_hidden_dim * 2, n_states)\n\n self.fc1 = nn.Linear(\n n_states * (1 + self.is_memory_dependent + self.is_future_dependent),\n linear_hidden_dim,\n )\n self.fc2 = nn.Linear(linear_hidden_dim, 1)\n\n self.ce_loss = nn.CrossEntropyLoss()\n\n def _encode_state(\n self,\n state: torch.Tensor,\n ):\n return F.one_hot(state, num_classes=self.n_states)\n\n def _encode_state_action(\n self,\n states: torch.Tensor,\n actions: torch.Tensor,\n ):\n indexes = states * self.n_actions + actions\n return F.one_hot(indexes, num_classes=self.n_states * self.n_actions)\n\n def _encode_sequence(\n self,\n states: torch.Tensor,\n actions: torch.Tensor,\n ):\n input = self._encode_state_action(states, actions).to(torch.float32)\n out = self.lstm(input)[1][0]\n return torch.cat([out[0], out[1]], dim=1)\n\n def _predict_state(\n self,\n states: torch.Tensor,\n actions: torch.Tensor,\n input_type: str,\n ):\n input = self._encode_sequence(states, actions)\n\n if input_type == \"history\":\n x = self.history_based_state_predictor(input)\n elif input_type == \"memory\":\n x = self.memory_based_state_predictor(input)\n else:\n x = self.future_based_state_predictor(input)\n return F.softmax(x)\n\n def forward(\n self,\n state: torch.Tensor, # O\n memory_states: Optional[torch.Tensor] = None, # Z\n memory_actions: Optional[torch.Tensor] = None, # Z\n future_states: Optional[torch.Tensor] = None, # F \\ O\n future_actions: Optional[torch.Tensor] = None, # F\n ):\n state = self._encode_state(state).to(torch.float32)\n\n if self.is_memory_dependent:\n with torch.no_grad():\n state_ = self._predict_state(\n memory_states, memory_actions, input_type=\"memory\"\n )\n state = torch.cat((state, state_), dim=1)\n\n if self.is_future_dependent:\n with torch.no_grad():\n state_ = self._predict_state(\n future_states, future_actions, input_type=\"future\"\n )\n state = torch.cat((state, state_), dim=1)\n\n x = F.relu(self.fc1(state))\n x = self.fc2(x)\n return x.squeeze()\n\n def state_prediction_loss(\n self,\n history_states: torch.Tensor, # H\n history_actions: torch.Tensor, # H\n memory_states: torch.Tensor, # M\n memory_actions: torch.Tensor, # M\n state: torch.Tensor, # O\n future_states: Optional[torch.Tensor] = None, # F \\ O\n future_actions: Optional[torch.Tensor] = None, # F\n ):\n state_history = self._predict_state(\n history_states, history_actions, input_type=\"history\"\n )\n loss = self.ce_loss(state_history, state)\n\n if self.is_memory_dependent:\n state_memory = self._predict_state(\n memory_states, memory_actions, input_type=\"memory\"\n )\n loss += self.ce_loss(state_memory, state)\n\n if self.is_future_dependent:\n state_future = self._predict_state(\n future_states, future_actions, input_type=\"future\"\n )\n loss += self.ce_loss(state_future, state)\n\n return loss"
},
{
"identifier": "ContinuousStateLSTMVfunction",
"path": "src/ope/v_func.py",
"snippet": "class ContinuousStateLSTMVfunction(nn.Module):\n def __init__(\n self,\n state_dim: int = 4,\n n_actions: int = 2,\n memory_length: int = 0,\n future_length: int = 0,\n emb_dim: int = 4,\n lstm_hidden_dim: int = 10,\n linear_hidden_dim: int = 100,\n ):\n super().__init__()\n self.state_dim = state_dim\n self.n_actions = n_actions\n self.is_memory_dependent = memory_length > 0\n self.is_future_dependent = future_length > 0\n\n # self.emb = nn.Embedding(num_embeddings=n_actions, embedding_dim=emb_dim)\n\n self.lstm = nn.LSTM(\n (state_dim + n_actions),\n lstm_hidden_dim,\n batch_first=True,\n bidirectional=True,\n )\n\n self.history_based_state_predictor = nn.Linear(lstm_hidden_dim * 2, state_dim)\n self.memory_based_state_predictor = nn.Linear(lstm_hidden_dim * 2, state_dim)\n self.future_based_state_predictor = nn.Linear(lstm_hidden_dim * 2, state_dim)\n\n # self.fc1 = nn.Linear(\n # state_dim * (1 + self.is_memory_dependent + self.is_future_dependent),\n # linear_hidden_dim,\n # )\n self.fc1 = nn.Linear(\n state_dim + (state_dim + n_actions) * (memory_length + future_length),\n linear_hidden_dim,\n )\n self.fc2 = nn.Linear(linear_hidden_dim, 1)\n\n self.mse_loss = nn.MSELoss()\n\n def _encode_state_action(\n self,\n states: torch.Tensor,\n actions: torch.Tensor,\n ):\n # action_embs = self.emb(actions)\n action_embs = F.one_hot(actions, num_classes=self.n_actions)\n return torch.cat([states, action_embs], dim=2)\n\n def _encode_sequence(\n self,\n states: torch.Tensor,\n actions: torch.Tensor,\n use_lstm: bool = False,\n ):\n input = self._encode_state_action(states, actions)\n\n if use_lstm:\n out = self.lstm(input)[1][0]\n out = torch.cat([out[0], out[1]], dim=1)\n else:\n out = input.reshape((input.shape[0], -1))\n\n return out\n\n def _predict_state(\n self,\n states: torch.Tensor,\n actions: torch.Tensor,\n input_type: str,\n ):\n input = self._encode_sequence(states, actions, use_lstm=True)\n\n if input_type == \"history\":\n x = self.history_based_state_predictor(input)\n elif input_type == \"memory\":\n x = self.memory_based_state_predictor(input)\n else:\n x = self.future_based_state_predictor(input)\n\n return F.softmax(x)\n\n def forward(\n self,\n state: torch.Tensor, # O\n memory_states: Optional[torch.Tensor] = None, # Z\n memory_actions: Optional[torch.Tensor] = None, # Z\n future_states: Optional[torch.Tensor] = None, # F \\ O\n future_actions: Optional[torch.Tensor] = None, # F\n ):\n if self.is_memory_dependent:\n with torch.no_grad():\n state_ = self._encode_sequence(\n memory_states,\n memory_actions,\n )\n state = torch.cat((state, state_), dim=1)\n\n if self.is_future_dependent:\n with torch.no_grad():\n state_ = self._encode_sequence(\n future_states,\n future_actions,\n )\n state = torch.cat((state, state_), dim=1)\n\n x = F.relu(self.fc1(state))\n x = self.fc2(x)\n return x.squeeze()\n\n def state_prediction_loss(\n self,\n history_states: torch.Tensor, # H\n history_actions: torch.Tensor, # H\n memory_states: torch.Tensor, # M\n memory_actions: torch.Tensor, # M\n state: torch.Tensor, # O\n future_states: Optional[torch.Tensor] = None, # F \\ O\n future_actions: Optional[torch.Tensor] = None, # F\n ):\n state_history = self._predict_state(\n history_states, history_actions, input_type=\"history\"\n )\n loss = self.mse_loss(state_history, state)\n\n # if self.is_memory_dependent:\n # state_memory = self._predict_state(\n # memory_states, memory_actions, input_type=\"memory\"\n # )\n # loss += self.mse_loss(state_memory, state)\n\n # if 
self.is_future_dependent:\n # state_future = self._predict_state(\n # future_states, future_actions, input_type=\"future\"\n # )\n # loss += self.mse_loss(state_future, state)\n\n return loss"
},
{
"identifier": "BaseNeuralValueBasedOffPolicyEstimator",
"path": "src/ope/base.py",
"snippet": "class BaseNeuralValueBasedOffPolicyEstimator(BaseValueBasedOffPolicyEstimator):\n \"\"\"Base class for neural value-based OPE estimators.\"\"\"\n\n def save(self, path: Path):\n torch.save(self.v_function.state_dict(), path)\n\n def save_learning_process(self, path: Path):\n np.save(path + \"_prediction\", self.predictions)\n np.save(path + \"_test_loss\", self.losses)\n\n def load(self, path: Path):\n self.v_function.load_state_dict(torch.load(path))\n\n def load_learning_process(self, path: Path):\n self.predictions = np.load(path + \"_prediction.npy\")\n self.losses = np.load(path + \"_test_loss.npy\")\n\n def _mu(\n self,\n memory_states: Optional[np.ndarray], # Z\n state: np.ndarray, # O\n action: np.ndarray, # A\n ):\n if state.ndim == 1:\n if memory_states is None:\n states = state.reshape((-1, 1))\n else:\n states = np.concatenate((memory_states, state.reshape((-1, 1))), axis=1)\n else:\n state_dim = state.shape[1]\n if memory_states is None:\n states = state.reshape((-1, state_dim))\n else:\n memory_length = memory_states.shape[1]\n states = np.concatenate(\n (memory_states, state.reshape((-1, 1, state_dim))), axis=1\n )\n states = states.reshape((-1, (memory_length + 1) * state_dim))\n\n behavior_policy = self.behavior_policy.calc_action_choice_prob_given_action(\n states,\n action,\n is_batch=True,\n )\n evaluation_policy = self.evaluation_policy.calc_action_choice_prob_given_action(\n states,\n action,\n is_batch=True,\n )\n return evaluation_policy / behavior_policy\n\n def _inverse(\n self,\n symmetric_matrix: torch.Tensor,\n regularization: float = 1e-50,\n ):\n symmetric_matrix = symmetric_matrix + regularization * torch.eye(\n len(symmetric_matrix)\n )\n return torch.linalg.pinv(symmetric_matrix, hermitian=True)\n\n def _gaussian_kernel(\n self,\n states: torch.Tensor, # H\n actions: Optional[torch.Tensor] = None, # H\n ):\n # (x - x') ** 2 = x ** 2 + x' ** 2 - 2 x x'\n with torch.no_grad():\n if actions is not None:\n emb = self.v_function._predict_state(\n states, actions, input_type=\"history\"\n )\n elif states.ndim == 1:\n emb = self.v_function._encode_state(states)\n else:\n emb = states\n\n x_2 = (emb**2).sum(dim=1)\n x_y = emb @ emb.T\n distance = x_2[:, None] + x_2[None, :] - 2 * x_y\n kernel = torch.exp(-distance / self.sigma)\n\n return kernel # shape (n_samples, n_samples)\n\n def _onehot_kernel(\n self,\n states: torch.Tensor, # H\n actions: Optional[torch.Tensor] = None, # H\n ):\n with torch.no_grad():\n if actions is not None:\n if states.shape[1] > 1:\n emb = self.v_function._predict_state_action(\n states, actions, input_type=\"history\"\n ) # one-hot\n else:\n emb = self.v_function._encode_state_action(states, actions)[:, 0, :]\n\n else:\n emb = self.v_function._encode_state(states) # one-hot\n\n emb = emb.to(torch.float32)\n return emb @ emb.T # shape (n_samples, n_samples)\n\n def _middle_term(\n self,\n kernel: torch.Tensor,\n ):\n n_samples = len(kernel)\n inverse_kernel = self._inverse(\n self.alpha * torch.eye(n_samples) + self.lambda_ * kernel\n )\n return kernel @ inverse_kernel # shape (n_samples, n_samples)"
}
] | from dataclasses import dataclass
from typing import Tuple, Optional, Union
from torch import optim
from sklearn.utils import check_random_state
from policy.policy import BasePolicy
from .v_func import DiscreteStateLSTMVfunction, ContinuousStateLSTMVfunction
from .base import BaseNeuralValueBasedOffPolicyEstimator
from utils import to_tensor
import torch
import numpy as np
import matplotlib.pyplot as plt | 3,512 | """Value-Based Estimator."""
@dataclass
class NeuralFutureDependentValueBasedOPE(BaseNeuralValueBasedOffPolicyEstimator):
behavior_policy: BasePolicy
evaluation_policy: BasePolicy
| """Value-Based Estimator."""
@dataclass
class NeuralFutureDependentValueBasedOPE(BaseNeuralValueBasedOffPolicyEstimator):
behavior_policy: BasePolicy
evaluation_policy: BasePolicy | v_function: Union[DiscreteStateLSTMVfunction, ContinuousStateLSTMVfunction] | 0 | 2023-10-24 06:09:37+00:00 | 4k |
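A minimal sketch of the pairwise squared-distance identity used by _gaussian_kernel in the context above, (x - x')**2 = x**2 + x'**2 - 2*x*x'; the embedding size and sigma are toy values, not taken from the estimator:
import torch

emb = torch.randn(5, 3)
sigma = 1.0

x_2 = (emb ** 2).sum(dim=1)
x_y = emb @ emb.T
distance = x_2[:, None] + x_2[None, :] - 2 * x_y
kernel = torch.exp(-distance / sigma)  # shape (5, 5), Gaussian kernel matrix

# sanity check against the direct elementwise computation
direct = torch.exp(-((emb[:, None, :] - emb[None, :, :]) ** 2).sum(-1) / sigma)
assert torch.allclose(kernel, direct, atol=1e-5)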
JerBouma/FinancePortfolio | financeportfolio/portfolio_controller.py | [
{
"identifier": "excel_model",
"path": "financeportfolio/excel_model.py",
"snippet": "def create_portfolio_performance_excel_report(\n writer: pd.ExcelWriter, dataset: pd.DataFrame, sheet_name: str, currency: str = \"$\"\n):\ndef create_transactions_performance_excel_report(\n writer: pd.ExcelWriter, dataset: pd.DataFrame, sheet_name: str, currency: str = \"$\"\n):\ndef create_portfolio_overview_excel_report(\n writer: pd.ExcelWriter, dataset: pd.DataFrame, sheet_name: str, currency: str = \"$\"\n):\ndef create_positions_overview_excel_report(\n writer: pd.ExcelWriter, dataset: pd.DataFrame, sheet_name: str, currency: str = \"$\"\n):"
},
{
"identifier": "helpers",
"path": "financeportfolio/helpers.py",
"snippet": "BASE_URL = \"https://raw.githubusercontent.com/JerBouma/FinancePortfolio/main/\"\nVALID_CODE = 200\n RED = \"\\033[91m\"\n GREEN = \"\\033[92m\"\n YELLOW = \"\\033[93m\"\n BLUE = \"\\033[94m\"\n BOLD = \"\\033[1m\"\n UNDERLINE = \"\\033[4m\"\n RESET = \"\\033[0m\"\nclass Style:\ndef read_excel(location: str):\ndef read_yaml_file(location: str):\ndef download_example_datasets(base_url: str | None = None):\ndef download_yaml_configuration(example: bool = False, name: str | None = None):"
},
{
"identifier": "portfolio_model",
"path": "financeportfolio/portfolio_model.py",
"snippet": "CURRENCY_CODE_LENGTH = 3\r\ndef read_portfolio_dataset(\r\n excel_location: list,\r\n adjust_duplicates: bool,\r\n date_column: list[str],\r\n date_format: str,\r\n name_columns: list[str],\r\n ticker_columns: list[str],\r\n price_columns: list[str],\r\n volume_columns: list[str],\r\n column_mapping: dict[str, str],\r\n currency_columns: list[str] | str | None = None,\r\n costs_columns: list[str] | None = None,\r\n) -> tuple[pd.DataFrame, str, str, str, str, str, str]:\r\ndef format_portfolio_dataset(\r\n dataset: pd.DataFrame,\r\n date_columns: list[str],\r\n date_format: str,\r\n name_columns: list[str],\r\n tickers_columns: list[str],\r\n price_columns: list[str],\r\n volume_columns: list[str],\r\n column_mapping: dict[str, str],\r\n currency_columns: list[str] | str | None = None,\r\n costs_columns: list[str] | None = None,\r\n) -> tuple[pd.DataFrame, str, str, str, str, str, str, str]:\r\ndef create_transactions_overview(\r\n portfolio_volume: pd.Series,\r\n portfolio_price: pd.Series,\r\n portfolio_costs: pd.Series,\r\n latest_returns: pd.Series,\r\n):\r\ndef create_portfolio_overview(\r\n portfolio_name: pd.Series,\r\n portfolio_volume: pd.Series,\r\n portfolio_price: pd.Series,\r\n portfolio_costs: pd.Series,\r\n latest_returns: pd.Series,\r\n benchmark_prices: pd.Series,\r\n benchmark_latest_prices: pd.Series,\r\n):\r\ndef create_transactions_performance(\r\n portfolio_dataset: pd.DataFrame,\r\n ticker_column: str,\r\n date_column: str,\r\n volume_column: str,\r\n price_column: str,\r\n costs_column: str,\r\n period_prices: pd.DataFrame,\r\n period_string: str,\r\n original_ticker_combinations: dict,\r\n benchmark_per_ticker: dict,\r\n benchmark_specific_prices: pd.Series,\r\n benchmark_period_prices: pd.DataFrame,\r\n):\r\ndef create_positions_overview(\r\n portfolio_tickers: list[str],\r\n period_dates: pd.DatetimeIndex,\r\n portfolio_dataset: pd.DataFrame,\r\n historical_prices: pd.Series,\r\n columns: list[str] | None = None,\r\n):\r\ndef create_portfolio_performance(\r\n positions_dataset: pd.DataFrame,\r\n date_column: str,\r\n ticker_column: str,\r\n period_string: str,\r\n):\r"
}
] | import pandas as pd
from financetoolkit import Toolkit
from financeportfolio import excel_model, helpers, portfolio_model
| 3,123 |
Returns:
DataFrame: A DataFrame containing transaction performance metrics.
Raises:
ValueError: If an invalid or unsupported period_string is provided.
"""
if self._daily_historical_data.empty:
try:
self.collect_historical_data()
except ValueError as error:
raise ValueError(
f"Failed to collect historical data: {error}"
) from error
if self._daily_benchmark_data.empty:
try:
self.collect_benchmark_historical_data()
except ValueError as error:
raise ValueError(
f"Failed to collect benchmark historical data: {error}"
) from error
if not period:
raise ValueError(
"Please provide a period. This can be 'yearly', 'quarterly', 'monthly', 'weekly', or 'daily'"
)
period_string = period.lower()
if period_string == "yearly":
historical_dataset = self._yearly_historical_data["Adj Close"]
benchmark_dataset = self._yearly_benchmark_data["Adj Close"]
period_symbol = "Y"
elif period_string == "quarterly":
historical_dataset = self._quarterly_historical_data["Adj Close"]
benchmark_dataset = self._quarterly_benchmark_data["Adj Close"]
period_symbol = "Q"
elif period_string == "monthly":
historical_dataset = self._monthly_historical_data["Adj Close"]
benchmark_dataset = self._monthly_benchmark_data["Adj Close"]
period_symbol = "M"
elif period_string == "weekly":
historical_dataset = self._weekly_historical_data["Adj Close"]
benchmark_dataset = self._weekly_benchmark_data["Adj Close"]
period_symbol = "W"
elif period_string == "daily":
historical_dataset = self._daily_historical_data["Adj Close"]
benchmark_dataset = self._daily_benchmark_data["Adj Close"]
period_symbol = "D"
else:
raise ValueError(
"Please provide a valid period. This can be "
"'yearly', 'quarterly', 'monthly', 'weekly', "
"or 'daily'"
)
try:
self._transactions_performance = (
portfolio_model.create_transactions_performance(
portfolio_dataset=self._portfolio_dataset,
ticker_column=self._ticker_column,
date_column=self._date_column,
volume_column=self._volume_column,
price_column=self._price_column,
costs_column=self._costs_column,
period_prices=historical_dataset,
period_string=period_symbol,
original_ticker_combinations=self._original_ticker_combinations,
benchmark_per_ticker=self._benchmark_tickers,
benchmark_specific_prices=self._benchmark_specific_prices,
benchmark_period_prices=benchmark_dataset,
)
)
except ValueError as error:
raise ValueError(
f"Failed to create transaction performance metrics: {error}"
) from error
return self._transactions_performance
def create_excel_report(
self,
excel_file_name: str | None = None,
currency: str | None = None,
):
"""
Create an Excel report file with specified data sheets.
This function creates an Excel file with multiple data sheets, including monthly,
quarterly, and yearly overviews if the corresponding data is available. The data
sheets are populated with dataframes provided by the class attributes
_monthly_overview, _quarterly_overview, and _yearly_overview.
The Excel file is saved with the specified name or the default name from the
configuration. The date and datetime formats in the Excel file are set to
"yyyy-mm-dd" for consistency.
Args:
excel_file_name (str | None): The name of the Excel file to be created. If None,
the default file name specified in the configuration will be used.
currency (str | None): The currency to be used for formatting in the Excel file.
If None, the default currency from the configuration will be used.
"""
excel_file_name = (
excel_file_name if excel_file_name else self._cfg["excel"]["file_name"]
)
currency = currency if currency else self._cfg["excel"]["currency"]
writer = pd.ExcelWriter(
excel_file_name,
engine="xlsxwriter",
date_format="yyyy-mm-dd",
datetime_format="yyyy-mm-dd",
)
try:
# Try to create and save Portfolio Overview
self._portfolio_overview = self.get_portfolio_overview()
| """Portfolio Module"""
# pylint: disable=too-many-instance-attributes,abstract-class-instantiated,
# pylint: disable=too-few-public-methods,protected-access,too-many-lines
class Portfolio:
"""
A class for managing and analyzing your portfolio.
    This class provides functionality for loading, preprocessing, and analyzing your
    portfolio transactions based on a specified configuration file. It offers methods to
    read and format the dataset, collect historical and benchmark data, and create
    portfolio, positions and transactions overviews.
    Parameters:
        configuration_file (str): The file path to the configuration file in YAML format. The
            configuration file should define various settings and columns used in the
            portfolio analysis.
    Attributes:
        _configuration_file (str): The file path to the configuration file.
        _portfolio_dataset (pd.DataFrame): The portfolio dataset as a pandas DataFrame.
    Note:
        - The configuration file should be in YAML format and contain settings for date,
          name, ticker, price, volume and costs columns.
        - Initialize an instance of this class to begin portfolio analysis.
"""
def __init__(
self,
configuration_file: str | None = None,
portfolio_dataset: pd.DataFrame = pd.DataFrame(),
example: bool = False,
):
"""
        Initialize a Portfolio instance with the provided configuration file.
        This constructor sets up the Portfolio instance by loading the configuration file, defining
        default attributes, and initializing the portfolio dataset as an empty DataFrame.
Parameters:
configuration_file (str): The file path to the configuration file in YAML format.
Raises:
ValueError: If the provided configuration file does not have a '.yaml' extension.
Only '.yaml' configuration files are supported.
"""
if example:
configuration_file = helpers.download_yaml_configuration(example=True)
helpers.download_example_datasets()
print(
f"Creating new Portfolio Configuration file at {configuration_file} and "
"downloading example datasets.\nRunning the Portfolio class with this example "
"dataset which illustrates the functionality of the Portfolio class."
)
elif configuration_file is None:
configuration_file = helpers.download_yaml_configuration(example=False)
print(
f"Creating new Portfolio file at {configuration_file}. Please provide this file "
"path to the Portfolio class to prevent overwriting the existing file."
)
self._configuration_file = str(configuration_file)
self._custom_dataset = portfolio_dataset
self._yearly_overview: pd.DataFrame = pd.DataFrame()
self._quarterly_overview: pd.DataFrame = pd.DataFrame()
self._monthly_overview: pd.DataFrame = pd.DataFrame()
self._yearly_cash_flow_dataset: pd.DataFrame = pd.DataFrame()
self._quarterly_cash_flow_dataset: pd.DataFrame = pd.DataFrame()
self._monthly_cash_flow_dataset: pd.DataFrame = pd.DataFrame()
# Tickers
self._ticker_combinations: dict[str, str] = {}
self._original_ticker_combinations: dict[str, str] = {}
# Historical Data
self._daily_historical_data: pd.DataFrame = pd.DataFrame()
self._weekly_historical_data: pd.DataFrame = pd.DataFrame()
self._monthly_historical_data: pd.DataFrame = pd.DataFrame()
self._quarterly_historical_data: pd.DataFrame = pd.DataFrame()
self._yearly_historical_data: pd.DataFrame = pd.DataFrame()
self._historical_statistics: pd.DataFrame = pd.DataFrame()
# Benchmark Historical Data
self._benchmark_tickers: dict[str, str] = {}
self._daily_benchmark_data: pd.DataFrame = pd.DataFrame()
self._weekly_benchmark_data: pd.DataFrame = pd.DataFrame()
self._monthly_benchmark_data: pd.DataFrame = pd.DataFrame()
self._quarterly_benchmark_data: pd.DataFrame = pd.DataFrame()
self._yearly_benchmark_data: pd.DataFrame = pd.DataFrame()
self._benchmark_prices: pd.DataFrame = pd.DataFrame()
self._benchmark_specific_prices: pd.Series = pd.Series()
self._benchmark_prices_per_ticker: pd.DataFrame = pd.DataFrame()
self._latest_benchmark_price: pd.Series = pd.Series()
        # Portfolio Overview
self._portfolio_overview: pd.DataFrame = pd.DataFrame()
self._portfolio_performance: pd.DataFrame = pd.DataFrame()
self._transactions_performance: pd.DataFrame = pd.DataFrame()
self._portfolio_dataset: pd.DataFrame = pd.DataFrame()
self._positions_overview: pd.DataFrame = pd.DataFrame()
self._transactions_overview: pd.DataFrame = pd.DataFrame()
# Finance Toolkit Initialization
self._tickers: list | None = None
self._toolkit: Toolkit | None = None
self._benchmark_toolkit: Toolkit | None = None
self._currency_toolkit: Toolkit | None = None
self._latest_price: pd.Series = pd.Series()
self._daily_currency_data: pd.DataFrame = pd.DataFrame()
if self._configuration_file.endswith(".yaml"):
self._cfg: dict[str, dict] = helpers.read_yaml_file(
location=self._configuration_file
)
else:
raise ValueError("File type not supported. Please use .yaml")
if (
self._cfg["general"]["file_location"] == "REPLACE_ME"
and self._custom_dataset.empty
):
print(
f"{helpers.Style.BOLD}Please provide a file location in the configuration file (change "
f"'REPLACE_ME' within the general section) or provide a custom dataset.{helpers.Style.RESET}"
"\nSee https://github.com/JerBouma/FinancePortfolio for instructions"
)
else:
# Column Names
self._date_column: str = self._cfg["general"]["date_columns"]
self._name_column: str = self._cfg["general"]["name_columns"]
self._ticker_column: str = self._cfg["general"]["ticker_columns"]
self._price_column: str = self._cfg["general"]["price_columns"]
self._volume_column: str = self._cfg["general"]["volume_columns"]
self._costs_column: str = self._cfg["general"]["costs_columns"]
self.read_portfolio_dataset()
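    # Hypothetical sketch of the YAML layout this class expects. The key names
    # below are taken from the self._cfg lookups in this file; the example
    # values are assumptions, not taken from the repository's documentation:
    #
    #   general:
    #     file_location: "transactions.xlsx"
    #     adjust_duplicates: true
    #     date_columns: [...]
    #     date_format: "%Y-%m-%d"
    #     name_columns: [...]
    #     ticker_columns: [...]
    #     price_columns: [...]
    #     volume_columns: [...]
    #     costs_columns: [...]
    #     column_mapping: {}
    #     benchmark_ticker: "^GSPC"
    #     benchmark_per_ticker: {}
    #   adjustments:
    #     currency_columns: [...]
    #     isin_to_ticker: {}
    #   excel:
    #     file_name: "report.xlsx"
    #     currency: "$"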
def to_toolkit(
self,
api_key: str | None = None,
quarterly: bool = False,
custom_ratios: dict | None = None,
rounding: int = 4,
remove_invalid_tickers: bool = False,
sleep_timer: bool = False,
progress_bar: bool = True,
) -> Toolkit:
"""
Converts the Portfolio to a Finance Toolkit object.
This method allows you to convert your Portfolio to a Finance Toolkit object,
giving access to 30+ years of fundamental and historical data, 130+ financial
        metrics and much more. It intelligently understands the assets you have
        purchased and automatically generates a "Portfolio" column based on
        your portfolio weights. This allows you to easily calculate portfolio
        metrics such as the Sharpe Ratio, Sortino Ratio, Treynor Ratio, Value
        at Risk and many more, tailored precisely to your
portfolio.
Args:
api_key (str, optional):
Your API key for access to additional data. If not provided, only historical
data and indicators are available.
            quarterly (bool, optional):
                Set to True to retrieve quarterly data. Defaults to False.
custom_ratios (dict, optional):
Custom ratios to calculate. Should be a dictionary of ratio names and formulas.
rounding (int, optional):
The number of decimal places to round data. Defaults to 4.
remove_invalid_tickers (bool, optional):
                Remove invalid tickers from the toolkit. Defaults to False.
sleep_timer (bool, optional):
Enable a sleep timer to avoid rate limiting. Defaults to False.
progress_bar (bool, optional):
Show a progress bar during data retrieval. Defaults to True.
Returns:
Toolkit:
A Finance Toolkit object.
"""
if api_key is None:
print(
"The parameter api_key is not set. Therefore, only historical data and "
"indicators are available. Consider obtaining a key with the following link: "
"https://intelligence.financialmodelingprep.com/pricing-plans?couponCode=jeroen"
"\nThe free plan has a limit of 5 years fundamental data and has no quarterly data. "
"You can get 15% off by using the above affiliate link to get access to 30+ years "
"of (quarterly) data which also supports the project."
)
if self._daily_historical_data.empty:
self.collect_historical_data()
if self._daily_benchmark_data.empty:
self.collect_benchmark_historical_data()
if self._positions_overview.empty:
self.get_positions_overview()
symbols = list(self._tickers) + ["Portfolio"] # type: ignore
historical_columns = self._daily_historical_data.columns.get_level_values(
0
).unique()
benchmark_ticker = self._cfg["general"]["benchmark_ticker"]
benchmark_data = self._daily_benchmark_data.xs(
benchmark_ticker, axis=1, level=1
)
for column in historical_columns:
self._daily_historical_data[column, "Benchmark"] = benchmark_data[column]
self._daily_historical_data[column, "Portfolio"] = (
self._positions_overview["Current Weight"]
.mul(self._daily_historical_data[column], axis=1)
.sum(axis=1)
)
historical = (
self._daily_historical_data.sort_index(axis=1)
.reindex(historical_columns, axis=1, level=0)
.reindex(list(self._tickers) + ["Portfolio", "Benchmark"], axis=1, level=1) # type: ignore
)
historical = historical.round(rounding)
toolkit = Toolkit(
tickers=symbols,
api_key=api_key,
historical=historical,
start_date=self._start_date,
quarterly=quarterly,
benchmark_ticker=benchmark_ticker,
custom_ratios=custom_ratios,
rounding=rounding,
remove_invalid_tickers=remove_invalid_tickers,
sleep_timer=sleep_timer,
progress_bar=progress_bar,
)
return toolkit
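    # Hypothetical usage sketch (file name and API key are assumptions, not
    # taken from the repository's documentation):
    #
    #   portfolio = Portfolio(configuration_file="portfolio.yaml")
    #   toolkit = portfolio.to_toolkit(api_key="YOUR_FMP_KEY", quarterly=False)
    #   toolkit.get_historical_data()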
def read_portfolio_dataset(
self,
excel_location: str | list | None = None,
adjust_duplicates: bool | None = None,
date_column: list[str] | None = None,
date_format: str | None = None,
name_columns: list[str] | None = None,
ticker_columns: list[str] | None = None,
price_columns: list[str] | None = None,
volume_columns: list[str] | None = None,
currency_columns: list[str] | None = None,
costs_columns: list[str] | None = None,
column_mapping: dict[str, str] | None = None,
):
"""
        Read and consolidate portfolio data from Excel or CSV files into a single DataFrame.
        This function reads portfolio data from one or more Excel or CSV files specified by the
'excel_location' parameter. It can accept a single file path as a string or a list of file
paths. If 'excel_location' is not provided, it will use the default file location from the
configuration ('self._cfg["general"]["file_location"]').
The function identifies additional files within directories specified in 'excel_location'
and includes them in the data consolidation. It supports Excel (.xlsx) and CSV (.csv) file
formats.
        If the portfolio dataset is initially empty, it reads and consolidates the data, performs
optional adjustments for duplicated rows, and sets column names to lowercase. The resulting
dataset is sorted by index in descending order and has its index converted to daily frequency
('D').
        In addition, this function performs various formatting and preprocessing steps to ensure
        data consistency and facilitate analysis. It includes options to customize column names
        for dates, names, tickers, prices, volumes, currencies and costs.
Parameters:
excel_location (str | list | None): A file path or a list of file paths to Excel or CSV
                files containing portfolio data. If None, the default file location from the
configuration is used.
adjust_duplicates (bool | None): A boolean value indicating whether to adjust duplicated
rows in the dataset. If None, it defaults to the value specified in the configuration
('self._cfg["general"]["adjust_duplicates"]').
date_column (list[str] | None): A list of column names representing date information
in the dataset. If None, it defaults to the date columns specified in the
configuration ('self._cfg["general"]["date_columns"]').
date_format (str | None): A string representing the date format in the dataset. If None,
it defaults to the date format specified in the configuration ('self._cfg["general"]["date_format"]').
            name_columns (list[str] | None): A list of column names representing the asset
                names in the dataset. If None, it defaults to the name columns specified in
                the configuration ('self._cfg["general"]["name_columns"]').
            ticker_columns (list[str] | None): A list of column names representing the asset
                tickers in the dataset. If None, it defaults to the ticker columns specified
                in the configuration ('self._cfg["general"]["ticker_columns"]').
            price_columns (list[str] | None): A list of column names representing transaction
                prices in the dataset. If None, it defaults to the price columns specified in
                the configuration ('self._cfg["general"]["price_columns"]').
            volume_columns (list[str] | None): A list of column names representing transaction
                volumes in the dataset. If None, it defaults to the volume columns specified
                in the configuration ('self._cfg["general"]["volume_columns"]').
            currency_columns (list[str] | None): A list of column names representing the
                transaction currencies. If None, it defaults to the currency columns specified
                in the configuration ('self._cfg["adjustments"]["currency_columns"]').
            costs_columns (list[str] | None): A list of column names representing transaction
                costs in the dataset. If None, it defaults to the costs columns specified in
                the configuration ('self._cfg["general"]["costs_columns"]').
            column_mapping (dict[str, str] | None): A mapping from the original column names to
                the internally used column names. If None, it defaults to the mapping specified
                in the configuration ('self._cfg["general"]["column_mapping"]').
Returns:
            pd.DataFrame: A DataFrame containing the consolidated portfolio data.
Raises:
FileNotFoundError: If any of the specified files or directories in 'excel_location'
cannot be found.
            ValueError: If essential columns (date, name, ticker, price, volume) are not found in the dataset.
                - For missing columns, specify them in the configuration or provide them explicitly.
                - For currency or costs columns, raise an exception if not found and the configuration is empty.
Note:
- Duplicates in individual datasets are adjusted based on configuration settings
('self._cfg["general"]["adjust_duplicates"]').
- If duplicates are found in the combination of datasets, they are removed to prevent
double-counting.
- The function handles formatting of date columns, converting them to datetime objects.
            - Name and ticker columns are converted to categorical data.
            - Price and volume columns are converted to float.
            - Currency and costs columns are handled according to the configuration, with optional customization.
"""
date_column = (
date_column if date_column else self._cfg["general"]["date_columns"]
)
date_format = (
date_format if date_format else self._cfg["general"]["date_format"]
)
name_columns = (
name_columns if name_columns else self._cfg["general"]["name_columns"]
)
ticker_columns = (
ticker_columns if ticker_columns else self._cfg["general"]["ticker_columns"]
)
price_columns = (
price_columns if price_columns else self._cfg["general"]["price_columns"]
)
volume_columns = (
volume_columns if volume_columns else self._cfg["general"]["volume_columns"]
)
currency_columns = (
currency_columns
if currency_columns
else self._cfg["adjustments"]["currency_columns"]
)
costs_columns = (
costs_columns if costs_columns else self._cfg["general"]["costs_columns"]
)
column_mapping = (
column_mapping if column_mapping else self._cfg["general"]["column_mapping"]
)
if self._portfolio_dataset.empty:
if not self._custom_dataset.empty:
(
self._portfolio_dataset,
self._date_column,
self._name_column,
self._ticker_column,
self._price_column,
self._volume_column,
self._currency_column,
self._costs_column,
) = portfolio_model.format_portfolio_dataset(
dataset=self._portfolio_dataset,
date_columns=date_column,
date_format=date_format,
name_columns=name_columns,
tickers_columns=ticker_columns,
price_columns=price_columns,
volume_columns=volume_columns,
column_mapping=column_mapping,
currency_columns=currency_columns,
costs_columns=costs_columns,
)
else:
excel_location = (
excel_location
if excel_location
else self._cfg["general"]["file_location"]
)
if isinstance(excel_location, str):
excel_location = [excel_location]
adjust_duplicates = (
adjust_duplicates
if adjust_duplicates
else self._cfg["general"]["adjust_duplicates"]
)
(
self._portfolio_dataset,
self._date_column,
self._name_column,
self._ticker_column,
self._price_column,
self._volume_column,
self._currency_column,
self._costs_column,
) = portfolio_model.read_portfolio_dataset( # type: ignore
excel_location=excel_location,
adjust_duplicates=adjust_duplicates,
date_column=date_column,
date_format=date_format,
name_columns=name_columns,
ticker_columns=ticker_columns,
price_columns=price_columns,
volume_columns=volume_columns,
currency_columns=currency_columns,
costs_columns=costs_columns,
column_mapping=column_mapping,
)
self._original_tickers = list(
self._portfolio_dataset[self._ticker_column].unique()
)
if self._cfg["adjustments"]["isin_to_ticker"]:
self._portfolio_dataset = self._portfolio_dataset.replace(
self._cfg["adjustments"]["isin_to_ticker"]
)
self._portfolio_dataset = self._portfolio_dataset.sort_values(
by=self._date_column, ascending=True
)
self._tickers = list(self._portfolio_dataset[self._ticker_column].unique())
self._start_date = (
self._portfolio_dataset[self._date_column].min().strftime("%Y-%m-%d")
)
self._transactions_currencies = list(
self._portfolio_dataset[self._currency_column].unique() # type: ignore
)
self._portfolio_dataset = self._portfolio_dataset.set_index(
[self._date_column, self._ticker_column]
)
return self._portfolio_dataset
def collect_benchmark_historical_data(
self,
benchmark_ticker: str | None = None,
benchmark_per_ticker: dict[str, str] | None = None,
):
"""
Collect historical benchmark data for the portfolio.
This method retrieves historical benchmark data, such as daily, weekly, monthly, quarterly,
and yearly prices, for the specified benchmark ticker or per-ticker mapping. It matches the
benchmark data to the dates of the portfolio's historical data.
Args:
benchmark_ticker (str | None): The benchmark ticker symbol to use if no per-ticker mapping
is provided. If None, the default benchmark ticker from the configuration is used.
benchmark_per_ticker (dict[str, str] | None): A dictionary that maps original portfolio
tickers to their corresponding benchmark tickers. If not provided, it defaults to the
mapping specified in the configuration.
Returns:
DataFrame: A DataFrame containing the historical benchmark data.
"""
if self._daily_historical_data.empty:
self.collect_historical_data()
benchmark_ticker = (
benchmark_ticker
if benchmark_ticker
else self._cfg["general"]["benchmark_ticker"]
)
benchmark_per_ticker = (
benchmark_per_ticker
if benchmark_per_ticker
else self._cfg["general"]["benchmark_per_ticker"]
)
if not self._benchmark_toolkit:
self._benchmark_tickers = {}
for ticker in self._original_tickers:
self._benchmark_tickers[ticker] = benchmark_per_ticker.get(
ticker, benchmark_ticker
)
self._benchmark_toolkit = Toolkit(
tickers=list(set(self._benchmark_tickers.values())),
benchmark_ticker=None,
start_date=self._start_date,
)
# Reindex the benchmark data to the dates of the historical dataset so that they are matched up.
self._daily_benchmark_data = self._benchmark_toolkit.get_historical_data(
period="daily"
).reindex(self._daily_historical_data.index, method="backfill")
self._weekly_benchmark_data = self._benchmark_toolkit.get_historical_data(
period="weekly"
)
self._monthly_benchmark_data = self._benchmark_toolkit.get_historical_data(
period="monthly"
)
self._quarterly_benchmark_data = self._benchmark_toolkit.get_historical_data(
period="quarterly"
)
self._yearly_benchmark_data = self._benchmark_toolkit.get_historical_data(
period="yearly"
)
        # It could be that a specific date does not exist for the given benchmark. In that case,
        # the next available value is used instead (method="backfill").
self._benchmark_prices = self._daily_benchmark_data["Adj Close"].iloc[
self._daily_benchmark_data["Adj Close"].index.get_indexer(
self._portfolio_dataset.index.get_level_values(0), method="backfill"
)
]
# The index of the benchmark prices is set to the dates of the portfolio dataset
# so that they are matched up again.
self._benchmark_prices = self._benchmark_prices.set_index(
self._portfolio_dataset.index
)
self._benchmark_prices = self._benchmark_prices.sort_index()
benchmark_specific_prices = []
benchmark_latest_price = {}
benchmark_prices_per_ticker = pd.DataFrame(
columns=self._tickers, index=self._daily_benchmark_data.index
)
for (date, ticker), _ in self._portfolio_dataset.iterrows():
original_ticker = self._original_ticker_combinations[ticker]
benchmark_ticker = self._benchmark_tickers[original_ticker]
# Add the specific benchmark price and, if multiple orders of the same ticker are made on the same day
# (e.g. buying and selling), only report the benchmark price once.
benchmark_specific_prices.append(
self._benchmark_prices.loc[
(date, ticker), benchmark_ticker
].drop_duplicates()
)
benchmark_latest_price[ticker] = self._daily_benchmark_data["Adj Close"][
benchmark_ticker
].iloc[-1]
benchmark_prices_per_ticker[ticker] = self._daily_benchmark_data[
"Adj Close"
][benchmark_ticker]
self._benchmark_specific_prices = pd.concat(benchmark_specific_prices)
self._latest_benchmark_price = pd.Series(benchmark_latest_price)
self._benchmark_prices_per_ticker = benchmark_prices_per_ticker
return self._daily_benchmark_data
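    # Illustrative sketch (not part of the original module) of the backfill
    # alignment used above; a date missing from the benchmark is matched to
    # the next available quote:
    #
    #     import pandas as pd
    #
    #     bench = pd.Series(
    #         [100.0, 103.0],
    #         index=pd.to_datetime(["2023-01-02", "2023-01-04"]),
    #     )
    #     wanted = pd.to_datetime(["2023-01-02", "2023-01-03"])
    #     aligned = bench.iloc[bench.index.get_indexer(wanted, method="backfill")]
    #     # 2023-01-03 has no quote, so it receives the 2023-01-04 value (103.0).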
def collect_historical_data(
self,
historical_columns: list[str] | None = None,
isin_to_ticker: dict[str, str] | None = None,
):
"""
Collect historical price and currency adjustment data.
This method retrieves historical price data for the portfolio's tickers and performs
currency adjustments if necessary. It collects daily, weekly, monthly, quarterly, and
yearly price data and stores it in separate DataFrames.
Args:
historical_columns (list[str] | None): A list of column names representing historical price data.
If None, it defaults to the columns specified in the configuration
('self._cfg["adjustments"]["currency_adjustment_columns"]').
isin_to_ticker (dict[str, str] | None): A dictionary that maps ISIN codes to ticker symbols.
If provided, ISIN codes in the portfolio dataset will be matched to the corresponding
tickers. If None, it defaults to the mapping specified in the configuration.
Returns:
pd.DataFrame: A DataFrame containing the daily historical price data for the portfolio.
Note:
- This method uses the Toolkit class to fetch historical price data from a data source.
- Currency conversions are performed if there is a mismatch between the currency of transactions
and the currency of historical data. Currency conversion rates are fetched using the Currency Toolkit.
- The resulting historical price data is stored in separate DataFrames for different periods.
"""
historical_columns = (
historical_columns
if historical_columns
else self._cfg["adjustments"]["currency_adjustment_columns"]
)
isin_to_ticker = (
isin_to_ticker
if isin_to_ticker
else self._cfg["adjustments"]["isin_to_ticker"]
)
if not self._toolkit:
self._toolkit = Toolkit(
tickers=self._tickers,
benchmark_ticker=None,
start_date=self._start_date,
)
# This is used in case ISIN codes are provided and therefore ISIN codes need to
# be matched to the corresponding tickers
self._ticker_combinations = dict(zip(self._toolkit._tickers, self._tickers)) # type: ignore
self._original_ticker_combinations = dict(
zip(self._tickers, self._original_tickers) # type: ignore
)
self._daily_historical_data = self._toolkit.get_historical_data(period="daily")
self._daily_historical_data = self._daily_historical_data.rename(
columns=self._ticker_combinations, level=1
)
currency_conversions = {}
if self._currency_column: # type: ignore
self._historical_statistics = self._toolkit.get_historical_statistics()
self._historical_statistics = self._historical_statistics.rename(
columns=self._ticker_combinations, level=0
)
for (_, ticker), currency in self._portfolio_dataset[
self._currency_column # type: ignore
].items():
data_currency = self._historical_statistics.loc["Currency", ticker]
if self._historical_statistics.loc["Currency", ticker] != currency:
currency_conversions[
ticker
] = f"{currency}{data_currency}=X".upper()
if currency_conversions:
print(
"Found a mismatch between the currency of the transaction and the currency of the historical data. "
"This is usually due to working with ISIN codes.\nConsider filling the 'isin_to_ticker' parameter to "
"correct this by finding the correct ticker on Yahoo Finance (e.g. VUSA.AS). The currencies are "
"automatically converted but this does lead to some inaccuracies."
)
self._currency_toolkit = Toolkit(
tickers=list(set(currency_conversions.values())),
benchmark_ticker=None,
start_date=self._start_date,
)
self._daily_currency_data = self._currency_toolkit.get_historical_data(
period="daily"
)
for ticker, currency in currency_conversions.items():
for column in historical_columns:
self._daily_historical_data.loc[:, (column, ticker)] = (
self._daily_historical_data.loc[:, (column, ticker)]
/ self._daily_currency_data.loc[:, (column, currency)]
)
self._weekly_historical_data = self._toolkit.get_historical_data(
period="weekly"
)
self._weekly_historical_data = self._weekly_historical_data.rename(
columns=self._ticker_combinations, level=1
)
self._monthly_historical_data = self._toolkit.get_historical_data(
period="monthly"
)
self._monthly_historical_data = self._monthly_historical_data.rename(
columns=self._ticker_combinations, level=1
)
self._quarterly_historical_data = self._toolkit.get_historical_data(
period="quarterly"
)
self._quarterly_historical_data = self._quarterly_historical_data.rename(
columns=self._ticker_combinations, level=1
)
self._yearly_historical_data = self._toolkit.get_historical_data(
period="yearly"
)
self._yearly_historical_data = self._yearly_historical_data.rename(
columns=self._ticker_combinations, level=1
)
self._latest_price = self._daily_historical_data["Adj Close"].iloc[-1]
return self._daily_historical_data
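    # Illustrative sketch (not part of the original module): the
    # rename(..., level=1) calls above only touch the ticker level of the
    # MultiIndex columns, e.g.:
    #
    #     import pandas as pd
    #
    #     cols = pd.MultiIndex.from_product([["Adj Close"], ["US0378331005"]])
    #     df = pd.DataFrame([[1.0]], columns=cols)
    #     df = df.rename(columns={"US0378331005": "AAPL"}, level=1)
    #     # columns are now [("Adj Close", "AAPL")]; "Adj Close" is untouched.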
def get_positions_overview(self):
"""
Calculate and provide an overview of the portfolio positions.
This method calculates an overview of the portfolio's positions, including
key statistics and performance metrics. It returns a DataFrame summarizing these
metrics.
If the necessary historical data has not been collected, this method will first
trigger data collection using the `collect_historical_data` and
`collect_benchmark_historical_data` methods.
Returns:
DataFrame: A DataFrame containing an overview of the portfolio's positions.
Raises:
Exception: If data collection for historical or benchmark data fails.
"""
if self._daily_historical_data.empty:
try:
self.collect_historical_data()
except ValueError as error:
raise ValueError(
f"Failed to collect historical data due to {error}"
) from error
if self._daily_benchmark_data.empty:
try:
self.collect_benchmark_historical_data()
except ValueError as error:
raise ValueError(
f"Failed to collect benchmark historical data due to {error}"
) from error
if self._transactions_overview.empty:
try:
self.get_transactions_overview()
except ValueError as error:
raise ValueError(
f"Failed to get transactions overview due to {error}"
) from error
if self._positions_overview.empty:
try:
self._positions_overview = portfolio_model.create_positions_overview(
portfolio_tickers=self._tickers,
period_dates=self._daily_historical_data.index.get_level_values(0),
portfolio_dataset=self._transactions_overview,
historical_prices=self._daily_historical_data,
)
except ValueError as error:
raise ValueError(
f"Failed to create positions overview due to {error}"
) from error
return self._positions_overview
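    # Illustrative sketch (not part of the original module) of the lazy
    # "collect on first use" pattern the overview methods above share:
    #
    #     class LazyReport:
    #         def __init__(self):
    #             self._data = None
    #
    #         def collect(self):
    #             self._data = [1.0, 2.0, 3.0]  # stand-in for an expensive fetch
    #
    #         def overview(self):
    #             if self._data is None:  # trigger collection only when needed
    #                 self.collect()
    #             return sum(self._data)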
def get_portfolio_overview(self):
"""
Calculate and provide an overview of the portfolio's key statistics.
This method calculates various key statistics for the portfolio, including
performance metrics and cost-related information. It returns a DataFrame
summarizing these metrics.
If the necessary historical data has not been collected, this method will first
trigger data collection using the `collect_historical_data` and
`collect_benchmark_historical_data` methods.
Returns:
DataFrame: A DataFrame containing key statistics and an overview of the portfolio.
Raises:
Exception: If data collection for historical or benchmark data fails.
Exception: If the creation of portfolio overview fails.
"""
if self._daily_historical_data.empty:
try:
self.collect_historical_data()
except ValueError as error:
raise ValueError(
f"Failed to collect historical data: {error}"
) from error
if self._daily_benchmark_data.empty:
try:
self.collect_benchmark_historical_data()
except ValueError as error:
raise ValueError(
f"Failed to collect benchmark historical data: {error}"
) from error
if self._portfolio_overview.empty:
try:
self._portfolio_overview = portfolio_model.create_portfolio_overview(
portfolio_name=self._portfolio_dataset[self._name_column],
portfolio_volume=self._portfolio_dataset[self._volume_column],
portfolio_price=self._portfolio_dataset[self._price_column],
portfolio_costs=self._portfolio_dataset[self._costs_column],
latest_returns=self._latest_price,
benchmark_prices=self._benchmark_specific_prices,
benchmark_latest_prices=self._latest_benchmark_price,
)
except ValueError as error:
raise ValueError(
f"Failed to create portfolio overview: {error}"
) from error
return self._portfolio_overview
def get_portfolio_performance(self, period: str | None = None):
"""
Calculate portfolio performance metrics for a specified period.
This method calculates various portfolio performance metrics, such as returns,
for the specified period. It uses the positions overview dataset for these
calculations.
Args:
    period (str | None): The time period for which portfolio performance
        metrics should be calculated. This can be 'yearly', 'quarterly', 'monthly',
        'weekly', or 'daily'. A period must be provided; if None, a ValueError is raised.
Returns:
DataFrame: A DataFrame containing portfolio performance metrics.
Raises:
ValueError: If an invalid or unsupported period_string is provided.
"""
if self._daily_historical_data.empty:
try:
self.collect_historical_data()
except ValueError as error:
raise ValueError(
f"Failed to collect historical data: {error}"
) from error
if self._daily_benchmark_data.empty:
try:
self.collect_benchmark_historical_data()
except ValueError as error:
raise ValueError(
f"Failed to collect benchmark historical data: {error}"
) from error
if self._positions_overview.empty:
try:
self.get_positions_overview()
except ValueError as error:
raise ValueError(
f"Failed to get positions overview: {error}"
) from error
if not period:
raise ValueError(
"Please provide a period. This can be 'yearly', 'quarterly', 'monthly', 'weekly', or 'daily'"
)
period_string = period.lower()
if period_string == "yearly":
period_symbol = "Y"
elif period_string == "quarterly":
period_symbol = "Q"
elif period_string == "monthly":
period_symbol = "M"
elif period_string == "weekly":
period_symbol = "W"
elif period_string == "daily":
period_symbol = "D"
else:
raise ValueError(
"Please provide a valid period. This can be 'yearly', 'quarterly', 'monthly', 'weekly', or 'daily'"
)
try:
self._portfolio_performance = portfolio_model.create_portfolio_performance(
positions_dataset=self._positions_overview,
date_column=self._date_column,
ticker_column=self._ticker_column,
period_string=period_symbol,
)
except ValueError as error:
raise ValueError(
f"Failed to create portfolio performance: {error}"
) from error
return self._portfolio_performance
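    # Illustrative alternative (not part of the original module): the
    # if/elif chain above expressed as a lookup table with the same symbols:
    #
    #     PERIOD_SYMBOLS = {"yearly": "Y", "quarterly": "Q", "monthly": "M",
    #                       "weekly": "W", "daily": "D"}
    #     try:
    #         period_symbol = PERIOD_SYMBOLS[period.lower()]
    #     except KeyError as error:
    #         raise ValueError(f"Invalid period: {period}") from error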
def get_transactions_overview(self):
"""
Calculate and collect transaction overview ratios based on the provided data.
This method calculates various transaction overview ratios, such as returns and costs,
based on the transaction dataset. It adds these ratios as new columns to the
portfolio dataset.
Returns:
DataFrame: The portfolio dataset with added transaction overview ratios.
Raises:
ValueError: If there is an issue with collecting historical data or creating the transaction overview.
"""
if self._daily_historical_data.empty:
try:
self.collect_historical_data()
except ValueError as error:
raise ValueError(
f"Failed to collect historical data: {error}"
) from error
if self._daily_benchmark_data.empty:
try:
self.collect_benchmark_historical_data()
except ValueError as error:
raise ValueError(
f"Failed to collect benchmark historical data: {error}"
) from error
try:
new_columns = portfolio_model.create_transactions_overview(
portfolio_volume=self._portfolio_dataset[self._volume_column],
portfolio_price=self._portfolio_dataset[self._price_column],
portfolio_costs=self._portfolio_dataset[self._costs_column],
latest_returns=self._latest_price.loc[self._tickers],
)
except ValueError as error:
raise ValueError(
f"Failed to create transaction overview: {error}"
) from error
try:
self._transactions_overview = pd.concat(
[self._portfolio_dataset, new_columns], axis=1
)
except ValueError as error:
raise ValueError(
f"Failed to add transaction overview to portfolio dataset: {error}"
) from error
return self._transactions_overview
def get_transactions_performance(self, period: str | None = None):
"""
Calculate transaction performance metrics for a specified period.
This method calculates various transaction performance metrics, such as returns,
costs, and benchmarks, for the specified period. It uses historical price data
from the corresponding period for these calculations.
Args:
    period (str | None): The time period for which transaction performance
        metrics should be calculated. This can be 'yearly', 'quarterly', 'monthly',
        'weekly', or 'daily'. A period must be provided; if None, a ValueError is raised.
Returns:
DataFrame: A DataFrame containing transaction performance metrics.
Raises:
ValueError: If an invalid or unsupported period_string is provided.
"""
if self._daily_historical_data.empty:
try:
self.collect_historical_data()
except ValueError as error:
raise ValueError(
f"Failed to collect historical data: {error}"
) from error
if self._daily_benchmark_data.empty:
try:
self.collect_benchmark_historical_data()
except ValueError as error:
raise ValueError(
f"Failed to collect benchmark historical data: {error}"
) from error
if not period:
raise ValueError(
"Please provide a period. This can be 'yearly', 'quarterly', 'monthly', 'weekly', or 'daily'"
)
period_string = period.lower()
if period_string == "yearly":
historical_dataset = self._yearly_historical_data["Adj Close"]
benchmark_dataset = self._yearly_benchmark_data["Adj Close"]
period_symbol = "Y"
elif period_string == "quarterly":
historical_dataset = self._quarterly_historical_data["Adj Close"]
benchmark_dataset = self._quarterly_benchmark_data["Adj Close"]
period_symbol = "Q"
elif period_string == "monthly":
historical_dataset = self._monthly_historical_data["Adj Close"]
benchmark_dataset = self._monthly_benchmark_data["Adj Close"]
period_symbol = "M"
elif period_string == "weekly":
historical_dataset = self._weekly_historical_data["Adj Close"]
benchmark_dataset = self._weekly_benchmark_data["Adj Close"]
period_symbol = "W"
elif period_string == "daily":
historical_dataset = self._daily_historical_data["Adj Close"]
benchmark_dataset = self._daily_benchmark_data["Adj Close"]
period_symbol = "D"
else:
raise ValueError(
"Please provide a valid period. This can be "
"'yearly', 'quarterly', 'monthly', 'weekly', "
"or 'daily'"
)
try:
self._transactions_performance = (
portfolio_model.create_transactions_performance(
portfolio_dataset=self._portfolio_dataset,
ticker_column=self._ticker_column,
date_column=self._date_column,
volume_column=self._volume_column,
price_column=self._price_column,
costs_column=self._costs_column,
period_prices=historical_dataset,
period_string=period_symbol,
original_ticker_combinations=self._original_ticker_combinations,
benchmark_per_ticker=self._benchmark_tickers,
benchmark_specific_prices=self._benchmark_specific_prices,
benchmark_period_prices=benchmark_dataset,
)
)
except ValueError as error:
raise ValueError(
f"Failed to create transaction performance metrics: {error}"
) from error
return self._transactions_performance
def create_excel_report(
self,
excel_file_name: str | None = None,
currency: str | None = None,
):
"""
Create an Excel report file with specified data sheets.
This function creates an Excel file with multiple data sheets, including monthly,
quarterly, and yearly overviews if the corresponding data is available. The data
sheets are populated with dataframes provided by the class attributes
_monthly_overview, _quarterly_overview, and _yearly_overview.
The Excel file is saved with the specified name or the default name from the
configuration. The date and datetime formats in the Excel file are set to
"yyyy-mm-dd" for consistency.
Args:
excel_file_name (str | None): The name of the Excel file to be created. If None,
the default file name specified in the configuration will be used.
currency (str | None): The currency to be used for formatting in the Excel file.
If None, the default currency from the configuration will be used.
"""
excel_file_name = (
excel_file_name if excel_file_name else self._cfg["excel"]["file_name"]
)
currency = currency if currency else self._cfg["excel"]["currency"]
writer = pd.ExcelWriter(
excel_file_name,
engine="xlsxwriter",
date_format="yyyy-mm-dd",
datetime_format="yyyy-mm-dd",
)
try:
# Try to create and save Portfolio Overview
self._portfolio_overview = self.get_portfolio_overview()
next_line: excel_model.create_portfolio_overview_excel_report(
gold_snippet_index: 0 | created_at: 2023-10-15 09:16:04+00:00 | level: 4k
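The trailer above carries the record's last three fields in the order given by the schema at the top of this dump. A minimal parsing sketch (ours, not part of the dataset) for such a trailer line; the function name is an assumption:

    def parse_trailer(line: str) -> dict:
        parts = [p.strip() for p in line.strip().strip("|").split("|")]
        return {
            "gold_snippet_index": int(parts[0]),
            "created_at": parts[1],
            "level": parts[2],
        }

    print(parse_trailer("| 0 | 2023-10-15 09:16:04+00:00 | 4k |"))
    # {'gold_snippet_index': 0, 'created_at': '2023-10-15 09:16:04+00:00', 'level': '4k'}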
repo_name: gschramm/2023-MIC-ImageRecon-Shortcourse | file_path: 07_osem_varnet_evaluation.py
context: [
{
"identifier": "EMUpdateModule",
"path": "layers.py",
"snippet": "class EMUpdateModule(torch.nn.Module):\n\n def __init__(\n self,\n projector: parallelproj.LinearOperator,\n ) -> None:\n\n super().__init__()\n self._projector = projector\n\n self._fwd_op_layer = LinearSingleChannelOperator.apply\n self._adjoint_op_layer = AdjointLinearSingleChannelOperator.apply\n\n def forward(self, x: torch.Tensor, data: torch.Tensor,\n corrections: torch.Tensor, contamination: torch.Tensor,\n adjoint_ones: torch.Tensor) -> torch.Tensor:\n \"\"\"forward pass of the EM update module\n\n Parameters\n ----------\n x : torch.Tensor\n mini batch of images with shape (batch_size, 1, *img_shape)\n data : torch.Tensor\n mini batch of emission data with shape (batch_size, *data_shape)\n corrections : torch.Tensor\n mini batch of multiplicative corrections with shape (batch_size, *data_shape)\n contamination : torch.Tensor\n mini batch of additive contamination with shape (batch_size, *data_shape)\n adjoint_ones : torch.Tensor\n mini batch of adjoint ones (back projection of multiplicative corrections) with shape (batch_size, 1, *img_shape)\n\n Returns\n -------\n torch.Tensor\n mini batch of EM updates with shape (batch_size, 1, *img_shape)\n \"\"\"\n\n # remember that all variables contain a mini batch of images / data arrays\n # and that the fwd / adjoint operator layers directly operate on mini batches\n\n y = data / (corrections * self._fwd_op_layer(x, self._projector) +\n contamination)\n\n return x * self._adjoint_op_layer(corrections * y,\n self._projector) / adjoint_ones"
},
{
"identifier": "Unet3D",
"path": "models.py",
"snippet": "class Unet3D(torch.nn.Module):\n \"\"\"3D Unet with 3D downsampling and upsampling blocks\"\"\"\n def __init__(self, num_features: int = 8, num_input_channels: int = 1):\n super().__init__()\n self._num_features = num_features\n self._num_input_channels = num_input_channels\n\n self.first_double_conv = (DoubleConv3DBlock(self._num_input_channels,\n self._num_features))\n self.down1 = (Unet3DDownBlock(self._num_features,\n 2 * self._num_features))\n self.down2 = (Unet3DDownBlock(2 * self._num_features,\n 4 * self._num_features))\n self.down3 = (Unet3DDownBlock(4 * self._num_features,\n 4 * self._num_features))\n self.up1 = (Unet3DUpBlock(8 * self._num_features,\n 2 * self._num_features))\n self.up2 = (Unet3DUpBlock(4 * self._num_features,\n 1 * self._num_features))\n self.up3 = (Unet3DUpBlock(2 * self._num_features, self._num_features))\n self.final_conv = Unet3dFinalConv(self._num_features, 1)\n\n def forward(self, x):\n x1 = self.first_double_conv(x)\n x2 = self.down1(x1)\n x3 = self.down2(x2)\n x4 = self.down3(x3)\n y = self.up1(x4, x3)\n y = self.up2(y, x2)\n y = self.up3(y, x1)\n return self.final_conv(y)"
},
{
"identifier": "SimpleOSEMVarNet",
"path": "models.py",
"snippet": "class SimpleOSEMVarNet(torch.nn.Module):\n \"\"\"dummy cascaded model that includes layers combining projections and convolutions\"\"\"\n def __init__(self, osem_update_modules: torch.nn.Module,\n neural_net: torch.nn.Module, depth: int, device: str, fusion_mode : str = 'simple') -> None:\n\n super().__init__()\n\n self._osem_update_modules = osem_update_modules\n\n self._num_subsets = len(osem_update_modules)\n self._subset_order = distributed_subset_order(self._num_subsets)\n\n self._neural_net = neural_net\n self._depth = depth\n\n self._neural_net_weight = torch.nn.Parameter(torch.tensor(0.5, device = device))\n\n if fusion_mode in {'de_pierro', 'simple'}:\n self._fusion_mode = fusion_mode\n else:\n raise ValueError('fusion_mode must be \"de_pierro\" or \"simple\"')\n\n @property\n def neural_net_weight(self) -> torch.Tensor:\n return self._neural_net_weight\n\n @property\n def neural_net(self) -> torch.nn.Module:\n return self._neural_net\n\n @property\n def fusion_mode(self) -> str:\n return self._fusion_mode\n\n def forward(self, x: torch.Tensor, emission_data_batch: torch.Tensor,\n correction_batch: torch.Tensor,\n contamination_batch: torch.Tensor,\n adjoint_ones_batch: torch.Tensor) -> torch.Tensor:\n\n for j in range(self._depth):\n subset = self._subset_order[j % self._num_subsets]\n x_em = self._osem_update_modules[subset](\n x, emission_data_batch[subset, ...], correction_batch[subset,\n ...],\n contamination_batch[subset, ...], adjoint_ones_batch[subset,\n ...])\n\n if self._fusion_mode == 'de_pierro':\n # De Pierro fusion which is guaranteed to be non-negative\n x_sm = x + self._neural_net(x)\n beta_nu = self._neural_net_weight/adjoint_ones_batch[subset,...]\n denom = (1 - beta_nu*x_sm) + torch.sqrt((1 - beta_nu*x_sm)**2 + 4*beta_nu*x_em)\n x = 2*x_em / denom\n else:\n # fusion of EM update and neural net update with trainable weight\n # we use an ReLU activation to ensure that the output of each block is non-negative\n x = torch.nn.ReLU()(x_em + self._neural_net_weight * self._neural_net(x))\n\n return x"
},
{
"identifier": "PostReconNet",
"path": "models.py",
"snippet": "class PostReconNet(torch.nn.Module):\n \"\"\"dummy cascaded model that includes layers combining projections and convolutions\"\"\"\n def __init__(self, neural_net: torch.nn.Module) -> None:\n super().__init__()\n self._neural_net = neural_net\n\n @property\n def neural_net(self) -> torch.nn.Module:\n return self._neural_net\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n # fusion of EM update and neural net update with trainable weight\n # we use an ReLU activation to ensure that the output of each block is non-negative\n return torch.nn.ReLU()(x + self._neural_net(x))"
},
{
"identifier": "load_brain_image_batch",
"path": "data.py",
"snippet": "def load_brain_image_batch(ids, xp, dev, **kwargs):\n for i, ii in enumerate(ids):\n em_img, att_img = load_brain_image(ii, xp, dev, **kwargs)\n\n if i == 0:\n img_shape = em_img.shape\n em_img_batch = xp.zeros((len(ids), 1) + img_shape,\n device=dev,\n dtype=xp.float32)\n att_img_batch = xp.zeros((len(ids), 1) + img_shape,\n device=dev,\n dtype=xp.float32)\n\n em_img_batch[i, 0, ...] = em_img\n att_img_batch[i, 0, ...] = att_img\n\n return em_img_batch, att_img_batch"
},
{
"identifier": "simulate_data_batch",
"path": "data.py",
"snippet": "def simulate_data_batch(\n emission_image_batch: npt.NDArray,\n attenuation_image_batch: npt.NDArray,\n subset_projectors: npt.NDArray,\n sens: float = 1.,\n contam_fraction: float = 0.4,\n random_seed: int | None = None\n) -> tuple[npt.NDArray, npt.NDArray, npt.NDArray, npt.NDArray]:\n \"\"\"Simulate a batch of emission data from a batch of emission and attenuation images\n\n Parameters\n ----------\n emission_image_batch : npt.NDArray\n batch of emission images with shape (batch_size, 1, *image_shape)\n attenuation_image_batch : npt.NDArray\n batch of attenuation images with shape (batch_size, 1, *image_shape)\n subset_projectors : npt.NDArray\n subset projectors\n sens : float, optional\n sensitivity value that determines number of prompts, by default 1.\n contam_fraction : float, optional\n contamination fraction, by default 0.4\n random_seed : int | None, optional\n random seed for reproducibility, by default None -> not set\n\n Returns\n -------\n npt.NDArray, npt.NDArray, npt.NDArray, npt.NDArray\n emission_data_batch, correction_batch, contamination_batch, adjoint_ones_batch\n \"\"\"\n\n xp = get_namespace(emission_image_batch)\n dev = device(emission_image_batch)\n\n if 'torch' in xp.__name__:\n xp.manual_seed(random_seed)\n else:\n xp.random.seed(random_seed)\n\n num_subsets = subset_projectors.num_subsets\n batch_size = emission_image_batch.shape[0]\n\n # mini batch of multiplicative corrections (attenuation and normalization)\n correction_batch = xp.zeros(\n (num_subsets, batch_size) + subset_projectors.out_shapes[0],\n device=dev,\n dtype=xp.float32)\n\n # mini batch of emission data\n emission_data_batch = xp.zeros(\n (num_subsets, batch_size) + subset_projectors.out_shapes[0],\n device=dev,\n dtype=xp.float32)\n\n # calculate the adjoint ones (back projection of the multiplicative corrections) - sensitivity images\n adjoint_ones_batch = xp.zeros(\n (num_subsets, batch_size, 1) + subset_projectors.in_shape,\n device=dev,\n dtype=xp.float32)\n\n # mini batch of additive contamination (scatter)\n contamination_batch = xp.zeros(\n (num_subsets, batch_size) + subset_projectors.out_shapes[0],\n device=dev,\n dtype=xp.float32)\n\n for j in range(num_subsets):\n for i in range(batch_size):\n correction_batch[\n j, i, ...] = sens * xp.exp(-subset_projectors.apply_subset(\n attenuation_image_batch[i, 0, ...], j))\n\n adjoint_ones_batch[j, i, 0,\n ...] = subset_projectors.adjoint_subset(\n correction_batch[j, i, ...], j)\n\n emission_data_batch[j, i, ...] = correction_batch[\n j, i, ...] * subset_projectors.apply_subset(\n emission_image_batch[i, 0, ...], j)\n\n contamination_batch[j, i, ...] = (\n 1 /\n (1 - contam_fraction)) * emission_data_batch[j, i, ...].mean()\n emission_data_batch[j, i, ...] += contamination_batch[j, i, ...]\n\n if 'torch' in xp.__name__:\n emission_data_batch[j, i,\n ...] = xp.poisson(emission_data_batch[j, i,\n ...])\n else:\n emission_data_batch[j, i, ...] = xp.random.poisson(\n emission_data_batch[j, i, ...])\n\n return emission_data_batch, correction_batch, contamination_batch, adjoint_ones_batch"
}
]
import_statement:
import argparse
import json
import utils
import parallelproj
import array_api_compat.torch as torch
import array_api_compat.numpy as np
import pymirc.viewer as pv
from layers import EMUpdateModule
from models import Unet3D, SimpleOSEMVarNet, PostReconNet
from data import load_brain_image_batch, simulate_data_batch
from pathlib import Path
token_num: 3,351
cropped_code:
"""minimal script that evaluates trained OSEM varnets
"""
from __future__ import annotations
parser = argparse.ArgumentParser(description='OSEM-VARNet evaluation')
parser.add_argument('--run_dir')
parser.add_argument('--sens', type=float, default=1)
args = parser.parse_args()
run_dir = Path(args.run_dir)
sens = args.sens
with open(run_dir / 'input_cfg.json', 'r') as f:
cfg = json.load(f)
num_datasets = cfg['num_datasets']
num_training = cfg['num_training']
num_validation = cfg['num_validation']
num_subsets = cfg['num_subsets']
depth = cfg['depth']
num_epochs = cfg['num_epochs']
num_epochs_post = cfg['num_epochs_post']
batch_size = cfg['batch_size']
num_features = cfg['num_features']
num_rings = cfg['num_rings']
radial_trim = cfg['radial_trim']
random_seed = cfg['random_seed']
voxel_size = tuple(cfg['voxel_size'])
if 'fusion_mode' in cfg:
fusion_mode = cfg['fusion_mode']
else:
fusion_mode = 'simple'
# device variable (cpu or cuda) that determines whether calculations
# are performed on the cpu or cuda gpu
if parallelproj.cuda_present:
dev = 'cuda'
else:
dev = 'cpu'
#----------------------------------------------------------------------------
#----------------------------------------------------------------------------
#--- setup the scanner / LOR geometry ---------------------------------------
#----------------------------------------------------------------------------
#----------------------------------------------------------------------------
# setup a line of response descriptor that describes the LOR start / endpoints of
# a "narrow" clinical PET scanner with 9 rings
lor_descriptor = utils.DemoPETScannerLORDescriptor(torch,
dev,
num_rings=num_rings,
radial_trim=radial_trim)
axial_fov_mm = float(lor_descriptor.scanner.num_rings *
(lor_descriptor.scanner.ring_positions[1] -
lor_descriptor.scanner.ring_positions[0]))
#----------------------------------------------------------------------------
#----------------------------------------------------------------------------
#--- load the brainweb images -----------------------------------------------
#----------------------------------------------------------------------------
#----------------------------------------------------------------------------
# image properties
dataset_ids = tuple(
[i for i in range(num_training, num_training + num_validation)])
val_loss_post = {}
val_loss = {}
for i, dataset_id in enumerate(dataset_ids):
| """minimal script that evaluates trained OSEM varnets
"""
from __future__ import annotations
parser = argparse.ArgumentParser(description='OSEM-VARNet evaluation')
parser.add_argument('--run_dir')
parser.add_argument('--sens', type=float, default=1)
args = parser.parse_args()
run_dir = Path(args.run_dir)
sens = args.sens
with open(run_dir / 'input_cfg.json', 'r') as f:
cfg = json.load(f)
num_datasets = cfg['num_datasets']
num_training = cfg['num_training']
num_validation = cfg['num_validation']
num_subsets = cfg['num_subsets']
depth = cfg['depth']
num_epochs = cfg['num_epochs']
num_epochs_post = cfg['num_epochs_post']
batch_size = cfg['batch_size']
num_features = cfg['num_features']
num_rings = cfg['num_rings']
radial_trim = cfg['radial_trim']
random_seed = cfg['random_seed']
voxel_size = tuple(cfg['voxel_size'])
if 'fusion_mode' in cfg:
fusion_mode = cfg['fusion_mode']
else:
fusion_mode = 'simple'
# device variable (cpu or cuda) that determines whether calculations
# are performed on the cpu or cuda gpu
if parallelproj.cuda_present:
dev = 'cuda'
else:
dev = 'cpu'
#----------------------------------------------------------------------------
#----------------------------------------------------------------------------
#--- setup the scanner / LOR geometry ---------------------------------------
#----------------------------------------------------------------------------
#----------------------------------------------------------------------------
# setup a line of response descriptor that describes the LOR start / endpoints of
# a "narrow" clinical PET scanner with 9 rings
lor_descriptor = utils.DemoPETScannerLORDescriptor(torch,
dev,
num_rings=num_rings,
radial_trim=radial_trim)
axial_fov_mm = float(lor_descriptor.scanner.num_rings *
(lor_descriptor.scanner.ring_positions[1] -
lor_descriptor.scanner.ring_positions[0]))
#----------------------------------------------------------------------------
#----------------------------------------------------------------------------
#--- load the brainweb images -----------------------------------------------
#----------------------------------------------------------------------------
#----------------------------------------------------------------------------
# image properties
dataset_ids = tuple(
[i for i in range(num_training, num_training + num_validation)])
val_loss_post = {}
val_loss = {}
for i, dataset_id in enumerate(dataset_ids): | emission_image_database, attenuation_image_database = load_brain_image_batch( | 4 | 2023-10-16 07:18:26+00:00 | 4k |
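The next_line target calls load_brain_image_batch, whose context snippet stacks single images into a (batch, 1, *img_shape) array. A self-contained numpy sketch (ours, outside the record) of that stacking pattern:

    import numpy as np

    def stack_batch(images):
        """Stack single-channel images into a (batch, 1, *img_shape) array."""
        batch = np.zeros((len(images), 1) + images[0].shape, dtype=np.float32)
        for i, img in enumerate(images):
            batch[i, 0, ...] = img
        return batch

    imgs = [np.ones((4, 4), dtype=np.float32) for _ in range(3)]
    print(stack_batch(imgs).shape)  # (3, 1, 4, 4)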
repo_name: ZiaWang/jqtrade | file_path: jqtrade/account/api.py
context: [
{
"identifier": "InvalidParam",
"path": "jqtrade/common/exceptions.py",
"snippet": "class InvalidParam(UserError):\n \"\"\" 用户参数错误 \"\"\"\n pass"
},
{
"identifier": "sys_logger",
"path": "jqtrade/common/log.py",
"snippet": "class SystemLogFormatter(logging.Formatter):\n class ContextFilter(logging.Filter):\n def formatTime(self, record, datefmt=None):\ndef setup_logger(level=\"INFO\"):\ndef setup_file_logger(file, level=\"INFO\"):\ndef set_log_context(context):\n def filter(self, record):"
},
{
"identifier": "Context",
"path": "jqtrade/scheduler/context.py",
"snippet": "class Context(object):\n \"\"\"\n Usage:\n 上下文对象,方便各对象之间调用\n \"\"\"\n\n _instance = None\n\n def __init__(self, task_name, event_bus, loop, scheduler, loader, debug, config, out, start=None, end=None):\n self._task_name = task_name\n self._event_bus = event_bus\n self._event_loop = loop\n self._scheduler = scheduler\n self._loader = loader\n self._debug = debug\n self._config = config\n self._out = out\n\n self._start = start or datetime.datetime.now()\n self._end = end\n\n self._account = None\n self._trade_gate = None\n self._portfolio = None\n self._strategy = None\n\n self._use_account = None\n\n self.__class__._instance = self\n\n @property\n def event_bus(self):\n return self._event_bus\n\n @property\n def event_loop(self):\n return self._event_loop\n\n @property\n def scheduler(self):\n return self._scheduler\n\n @property\n def loader(self):\n return self._loader\n\n @property\n def debug(self):\n return self._debug\n\n @property\n def loop(self):\n return self._event_loop\n\n @classmethod\n def get_instance(cls):\n if not cls._instance:\n raise InternalError(\"Context not initialized\")\n return cls._instance\n\n @property\n def start(self):\n return self._start\n\n @property\n def end(self):\n return self._end\n\n @property\n def current_dt(self):\n \"\"\" 当前真实无力时间 \"\"\"\n return self._event_loop.current_dt\n\n @property\n def strategy_dt(self):\n \"\"\" 策略中当前逻辑时间,每次处理某个事件时更新,用于方便了解处理到哪个事件了 \"\"\"\n return self._event_loop.strategy_dt\n\n @property\n def account(self):\n if not self._use_account:\n raise InvalidCall(\"检测到use_account=False,程序未加载账户组件,无法调用账户模块相关API,\"\n \"请在set_options中设置use_account=True后再试\")\n return self._account\n\n @account.setter\n def account(self, acc):\n self._account = acc\n\n @property\n def trade_gate(self):\n if not self._use_account:\n raise InvalidCall(\"检测到use_account=False,程序未加载账户组件,无法调用账户模块相关API,\"\n \"请在set_options中设置use_account=True后再试\")\n\n return self._trade_gate\n\n @trade_gate.setter\n def trade_gate(self, gate):\n self._trade_gate = gate\n\n @property\n def portfolio(self):\n if not self._use_account:\n raise InvalidCall(\"检测到use_account=False,程序未加载账户组件,无法调用账户模块相关API,\"\n \"请在set_options中设置use_account=True后再试\")\n\n return self._portfolio\n\n @portfolio.setter\n def portfolio(self, p):\n self._portfolio = p\n\n @property\n def task_name(self):\n return self._task_name\n\n @property\n def strategy(self):\n return self._strategy\n\n @strategy.setter\n def strategy(self, s):\n self._strategy = s\n\n @property\n def config(self):\n return self._config\n\n @property\n def use_account(self):\n return self._use_account\n\n @use_account.setter\n def use_account(self, val):\n self._use_account = val\n\n @property\n def out(self):\n return self._out"
},
{
"identifier": "OrderSide",
"path": "jqtrade/account/order.py",
"snippet": "class OrderSide(Enum):\n # 多仓\n long = \"long\"\n\n # 空仓\n short = \"short\"\n\n @classmethod\n def is_valid_side(cls, side):\n return side in cls.__members__\n\n @classmethod\n def get_side(cls, side):\n if isinstance(side, cls):\n return side\n\n try:\n return cls.__members__[side]\n except KeyError:\n raise ValueError(f\"invalid side: {side}\")"
},
{
"identifier": "OrderStatus",
"path": "jqtrade/account/order.py",
"snippet": "class OrderStatus(Enum):\n # 用户提交委托订单,尚未收到柜台委托确认\n new = \"new\"\n\n # 订单收到委托确认\n open = \"open\"\n\n # 订单部分成交\n filling = \"filling\"\n\n # 订单全部成交\n filled = \"filled\"\n\n # 订单撤销中\n canceling = \"canceling\"\n\n # 部成部撤\n partly_canceled = \"partly_canceled\"\n\n # 订单已撤销\n canceled = \"canceled\"\n\n # 订单被废单\n rejected = \"rejected\"\n\n @classmethod\n def is_valid_status(cls, status):\n return status in cls.__members__\n\n @classmethod\n def finished_status(cls):\n return cls.filled, cls.partly_canceled, cls.canceled, cls.rejected\n\n @classmethod\n def get_status(cls, status):\n if isinstance(status, cls):\n return status\n\n try:\n return cls.__members__[status]\n except KeyError:\n raise ValueError(f\"bad status: {status}\")"
},
{
"identifier": "OrderStyle",
"path": "jqtrade/account/order.py",
"snippet": "class OrderStyle(object):\n def __init__(self, price):\n self._price = price\n\n @property\n def price(self):\n return self._price\n\n @classmethod\n def is_valid_style(cls, style):\n return isinstance(style, (MarketOrderStyle, LimitOrderStyle))\n\n @classmethod\n def get_style(cls, style, price):\n if isinstance(style, cls):\n return style\n\n if style not in (\"market\", \"limit\"):\n raise ValueError(f\"invalid style: {style}\")\n\n if style == \"market\":\n return MarketOrderStyle(price)\n else:\n return LimitOrderStyle(price)\n\n def __str__(self):\n return f\"{self.__class__.__name__}(price={self._price})\""
},
{
"identifier": "MarketOrderStyle",
"path": "jqtrade/account/order.py",
"snippet": "class MarketOrderStyle(OrderStyle):\n def __init__(self, price=0):\n super(MarketOrderStyle, self).__init__(price)\n\n @property\n def value(self):\n return \"market\""
},
{
"identifier": "LimitOrderStyle",
"path": "jqtrade/account/order.py",
"snippet": "class LimitOrderStyle(OrderStyle):\n @property\n def value(self):\n return \"limit\""
},
{
"identifier": "Position",
"path": "jqtrade/account/position.py",
"snippet": "class Position(AbsPosition):\n def __init__(self, code, amount, available_amount, avg_cost, side, **kwargs):\n\n # 持仓标的代码\n self._code = code\n\n # 持仓数量\n self._amount = amount\n\n # 可用数量\n self._available_amount = available_amount\n\n # 持仓成本\n self._avg_cost = avg_cost\n\n # 持仓方向\n self._side = side\n\n # 标的最新价格\n self._last_price = kwargs.get(\"last_price\", None)\n\n # 持仓市值\n self._position_value = kwargs.get(\"position_value\", None)\n\n def on_order_created(self, order):\n # 订单平仓时,调整持仓可用数量,避免用户下超,方便用户查询到当前可用数量,等sync_balance同步回有些延迟\n if order.action == OrderAction.close:\n self._available_amount = max(self._available_amount - order.amount, 0)\n\n def on_order_rejected(self, order):\n # 平仓单被拒绝时,不尝试调整可用数量,因为同步订单之前会同步持仓,这里处理可用数量可能会将用户刚下的同标的单子冻结数量释放掉\n pass\n\n def on_deal(self, price, amount):\n # 全量同步,do nothing\n pass\n\n @property\n def code(self):\n return self._code\n\n @property\n def amount(self):\n return self._amount\n\n @property\n def available_amount(self):\n return self._available_amount\n\n @property\n def locked_amount(self):\n return self._amount - self._available_amount\n\n @property\n def avg_cost(self):\n return self._avg_cost\n\n @property\n def side(self):\n return self._side\n\n @property\n def last_price(self):\n return self._last_price\n\n @property\n def position_value(self):\n return self._position_value"
}
]
import_statement:
from ..common.exceptions import InvalidParam
from ..common.log import sys_logger
from ..scheduler.context import Context
from .order import OrderSide, OrderStatus, OrderStyle, MarketOrderStyle, LimitOrderStyle
from .position import Position
token_num: 2,969
cropped_code:
# -*- coding: utf-8 -*-
logger = sys_logger.getChild("account.api")
def _check_code(code):
    if not (code[-4:] in ("XSHE", "XSHG") and code[:-5].isdigit()):
        raise InvalidParam(f"Invalid security code: {code}")
def _check_amount(amount):
    if not isinstance(amount, int) or amount == 0:
        raise InvalidParam(f"Invalid order amount, must be a non-zero integer: {amount}")
def _check_style(style):
    if not OrderStyle.is_valid_style(style):
        raise InvalidParam(f"Invalid style parameter, must be an instance of MarketOrderStyle or LimitOrderStyle: {style}")
def _check_side(side):
    if not OrderSide.is_valid_side(side):
        raise InvalidParam(f"Invalid side parameter, must be one of {list(OrderSide.__members__)}")
def _check_status(status):
    if not OrderStatus.is_valid_status(status):
        raise InvalidParam(f"Invalid status parameter, must be one of {list(OrderStatus.__members__)}")
def order(code, amount, style=None, side='long'):
    """ Place an order
    Args:
        code: security code string; currently only Shanghai (SSE) and Shenzhen (SZSE) listings are supported
            SSE example: 600000.XSHG
            SZSE example: 000001.XSHE
        amount: order quantity; a positive number means buy, a negative number means sell
        style: order style, supports MarketOrderStyle (market order) and LimitOrderStyle (limit order)
        side: trade direction; long: 'long', short: 'short'
    Return:
        the internal order id string
    """
_check_code(code)
_check_amount(amount)
if style:
_check_style(style)
else:
style = MarketOrderStyle(0)
if side:
_check_side(side)
else:
side = "long"
side = OrderSide.get_side(side)
all_code: identical to the cropped_code above
next_line: ctx = Context.get_instance()
gold_snippet_index: 2 | created_at: 2023-10-24 01:34:27+00:00 | level: 4k
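A self-contained restatement (ours, outside the record) of the validation rule _check_code enforces above: the code must end in .XSHE or .XSHG and the part before the suffix must be numeric.

    def check_code(code: str) -> None:
        if not (code[-4:] in ("XSHE", "XSHG") and code[:-5].isdigit()):
            raise ValueError(f"invalid security code: {code}")

    check_code("600000.XSHG")  # passes
    try:
        check_code("AAPL")     # wrong suffix -> raises
    except ValueError as err:
        print(err)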
repo_name: Glasgow-AI4BioMed/GenKIE | file_path: data/pretrain_data/unify_dataset.py
context: [
{
"identifier": "data_utils",
"path": "data/data_utils.py",
"snippet": "def infer_language_pair(path):\ndef collate_tokens(\n values,\n pad_idx,\n eos_idx=None,\n left_pad=False,\n move_eos_to_beginning=False,\n pad_to_length=None,\n pad_to_multiple=1,\n pad_to_bsz=None,\n):\n def copy_tensor(src, dst):\ndef load_indexed_dataset(\n path, dictionary=None, dataset_impl=None, combine=False, default=\"cached\"\n):\ndef numpy_seed(seed, *addl_seeds):\ndef collect_filtered(function, iterable, filtered):\ndef _filter_by_size_dynamic(indices, size_fn, max_positions, raise_exception=False):\n def compare_leq(a, b):\n def check_size(idx):\ndef filter_by_size(indices, dataset, max_positions, raise_exception=False):\ndef filter_paired_dataset_indices_by_size(src_sizes, tgt_sizes, indices, max_sizes):\ndef batch_by_size(\n indices,\n num_tokens_fn,\n num_tokens_vec=None,\n max_tokens=None,\n max_sentences=None,\n required_batch_size_multiple=1,\n fixed_shapes=None,\n):\ndef post_process(sentence: str, symbol: str):\ndef compute_mask_indices(\n shape: Tuple[int, int],\n padding_mask: Optional[torch.Tensor],\n mask_prob: float,\n mask_length: int,\n mask_type: str = \"static\",\n mask_other: float = 0.0,\n min_masks: int = 0,\n no_overlap: bool = False,\n min_space: int = 0,\n) -> np.ndarray:\n def arrange(s, e, length, keep_length):\ndef get_mem_usage():\ndef lengths_to_padding_mask(lens):\ndef lengths_to_mask(lens):\ndef get_buckets(sizes, num_buckets):\ndef get_bucketed_sizes(orig_sizes, buckets):\ndef _find_extra_valid_paths(dataset_path: str) -> set:\ndef raise_if_valid_subsets_unintentionally_ignored(train_cfg) -> None:"
},
{
"identifier": "OFADataset",
"path": "data/ofa_dataset.py",
"snippet": "class OFADataset(FairseqDataset):\n def __init__(self, split, dataset, bpe, src_dict, tgt_dict):\n self.split = split\n self.dataset = dataset\n self.bpe = bpe\n self.src_dict = src_dict\n self.tgt_dict = tgt_dict\n\n self.bos = src_dict.bos()\n self.eos = src_dict.eos()\n self.pad = src_dict.pad()\n self.bos_item = torch.LongTensor([self.bos])\n self.eos_item = torch.LongTensor([self.eos])\n\n def __len__(self):\n return len(self.dataset)\n\n def encode_text(self, text, length=None, append_bos=False, append_eos=False, use_bpe=True):\n s = self.tgt_dict.encode_line(\n line=self.bpe.encode(text) if use_bpe else text,\n add_if_not_exist=False,\n append_eos=False\n ).long()\n if length is not None:\n s = s[:length]\n if append_bos:\n s = torch.cat([self.bos_item, s])\n if append_eos:\n s = torch.cat([s, self.eos_item])\n return s\n\n def pre_question(self, question, max_ques_words=None):\n question = question.lower().lstrip(\",.!?*#:;~\").replace('-', ' ').replace('/', ' ')\n\n question = re.sub(\n r\"\\s{2,}\",\n ' ',\n question,\n )\n question = question.rstrip('\\n')\n question = question.strip(' ')\n\n # truncate question\n question_words = question.split(' ')\n if max_ques_words is not None and len(question_words) > max_ques_words:\n question = ' '.join(question_words[:max_ques_words])\n\n return question\n\n def pre_caption(self, caption, max_words=None):\n caption = caption.lower().lstrip(\",.!?*#:;~\").replace('-', ' ').replace('/', ' ').replace('<person>', 'person')\n\n caption = re.sub(\n r\"\\s{2,}\",\n ' ',\n caption,\n )\n caption = caption.rstrip('\\n')\n caption = caption.strip(' ')\n\n # truncate caption\n caption_words = caption.split(' ')\n if max_words is not None and len(caption_words) > max_words:\n caption = ' '.join(caption_words[:max_words])\n\n return caption"
},
{
"identifier": "RandomAugment",
"path": "utils/vision_helper.py",
"snippet": "class RandomAugment(object):\n\n def __init__(self, N=2, M=10, isPIL=False, augs=[]):\n self.N = N\n self.M = M\n self.isPIL = isPIL\n if augs:\n self.augs = augs\n else:\n self.augs = list(arg_dict.keys())\n\n def get_random_ops(self):\n sampled_ops = np.random.choice(self.augs, self.N)\n return [(op, 0.5, self.M) for op in sampled_ops]\n\n def __call__(self, img):\n if self.isPIL:\n img = np.array(img)\n ops = self.get_random_ops()\n for name, prob, level in ops:\n if np.random.random() > prob:\n continue\n args = arg_dict[name](level)\n img = func_dict[name](img, *args)\n return img"
}
]
import_statement:
from io import BytesIO
from torchvision import transforms
from PIL import Image, ImageFile
from data import data_utils
from data.ofa_dataset import OFADataset
from utils.vision_helper import RandomAugment
import math
import logging
import random
import warnings
import numpy as np
import torch
import base64
import utils.transforms as T
token_num: 2,902
cropped_code:
batch = {
"id": id,
"nsentences": len(samples),
"ntokens": ntokens,
"net_input": {
"src_tokens": src_tokens,
"src_lengths": src_lengths,
"patch_images": patch_images,
"patch_masks": patch_masks,
"code_masks": code_masks,
"prev_output_tokens": prev_output_tokens
},
"target": target,
"conf": conf
}
return batch
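# Illustrative note (not part of the original file): the dict above follows
# fairseq's convention - everything the forward pass needs sits under
# "net_input", targets stay top-level. A toy right-padding sketch mirroring
# what data_utils.collate_tokens does for "source":
#
#     import torch
#     seqs = [torch.tensor([5, 6]), torch.tensor([7])]
#     pad = 1
#     out = torch.full((len(seqs), 2), pad)
#     for i, s in enumerate(seqs):
#         out[i, : len(s)] = s  # left_pad=False
#     # out -> tensor([[5, 6], [7, 1]])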
class UnifyDataset(OFADataset):
def __init__(
self,
split,
dataset,
bpe,
src_dict,
tgt_dict=None,
max_src_length=128,
max_tgt_length=30,
seed=7,
code_dict_size=8192,
num_bins=1000,
patch_image_size=384,
code_image_size=128,
pure_text_dataset=None,
pure_image_dataset=None,
detection_dataset=None,
all_object_list=None,
all_caption_list=None,
type2ans_dict=None,
ans2type_dict=None,
max_image_size=512,
mask_ratio=0.3,
random_ratio=0.0,
keep_ratio=0.0,
mask_length="span-poisson",
poisson_lambda=3.0,
replace_length=1
):
super().__init__(split, dataset, bpe, src_dict, tgt_dict)
self.max_src_length = max_src_length
self.max_tgt_length = max_tgt_length
self.seed = seed
self.code_dict_size = code_dict_size
self.num_bins = num_bins
self.patch_image_size = patch_image_size
self.code_image_size = code_image_size
self.pure_text_dataset = pure_text_dataset
self.pure_image_dataset = pure_image_dataset
self.detection_dataset = detection_dataset
self.epoch = 0
self.all_object_list = all_object_list
self.all_caption_list = all_caption_list
self.type2ans_dict = type2ans_dict
self.ans2type_dict = ans2type_dict
self.mask_ratio = mask_ratio
self.random_ratio = random_ratio
self.keep_ratio = keep_ratio
self.mask_length = mask_length
self.poisson_lambda = poisson_lambda
self.replace_length = replace_length
if self.replace_length not in [-1, 0, 1]:
raise ValueError(f"invalid arg: replace_length={self.replace_length}")
if self.mask_length not in ["subword", "word", "span-poisson"]:
raise ValueError(f"invalid arg: mask-length={self.mask_length}")
if self.mask_length == "subword" and self.replace_length not in [0, 1]:
raise ValueError(f"if using subwords, use replace-length=1 or 0")
self.mask_idx = src_dict.index("<mask>")
self.mask_whole_word = (
get_whole_word_mask(self.bpe, self.src_dict)
if self.mask_length != "subword"
else None
)
self.mask_span_distribution = None
if self.mask_length == "span-poisson":
_lambda = self.poisson_lambda
lambda_to_the_k = 1
e_to_the_minus_lambda = math.exp(-_lambda)
k_factorial = 1
ps = []
for k in range(0, 128):
ps.append(e_to_the_minus_lambda * lambda_to_the_k / k_factorial)
lambda_to_the_k *= _lambda
k_factorial *= k + 1
if ps[-1] < 0.0000001:
break
ps = torch.FloatTensor(ps)
self.mask_span_distribution = torch.distributions.Categorical(ps)
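        # Illustrative note (not part of the original file): the loop above
        # accumulates the truncated Poisson pmf p(k) = exp(-lambda) * lambda**k / k!,
        # stopping once the tail mass drops below 1e-7; Categorical then draws
        # mask span lengths k with probability proportional to p(k). Per-term
        # closed form:
        #
        #     import math
        #     def poisson_pmf(k: int, lam: float) -> float:
        #         return math.exp(-lam) * lam ** k / math.factorial(k)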
self.pos_tgt_item = self.encode_text(" yes")
self.neg_tgt_item = self.encode_text(" no")
self.mask_left = self.mask_top = int(0.5 * self.code_image_size)
self.mask_right = self.mask_bottom = int(1.5 * self.code_image_size)
self.mask_ids = [
i*self.code_image_size*2+j
for i in range(self.code_image_size*2) for j in range(self.code_image_size*2)
if not (self.mask_left <= i < self.mask_right and self.mask_top <= j < self.mask_bottom)
]
scales = np.arange(patch_image_size, 481).tolist()
# for image-text pair
self.patch_resize_transform = transforms.Compose([
T.RandomResize(scales, max_size=672),
transforms.CenterCrop(patch_image_size),
all_code:
# Copyright 2022 The OFA-Sys Team.
# All rights reserved.
# This source code is licensed under the Apache 2.0 license
# found in the LICENSE file in the root directory.
ImageFile.LOAD_TRUNCATED_IMAGES = True
ImageFile.MAX_IMAGE_PIXELS = None
Image.MAX_IMAGE_PIXELS = None
logger = logging.getLogger(__name__)
warnings.filterwarnings("ignore", "(Possibly )?corrupt EXIF data", UserWarning)
def get_whole_word_mask(bpe, dictionary):
if bpe is not None:
def is_beginning_of_word(i):
if i < dictionary.nspecial:
# special elements are always considered beginnings
return True
tok = dictionary[i]
if tok.startswith("madeupword"):
return True
try:
return bpe.is_beginning_of_word(tok)
except ValueError:
return True
mask_whole_words = torch.ByteTensor(
list(map(is_beginning_of_word, range(len(dictionary))))
)
return mask_whole_words
return None
def collate(samples, pad_idx, eos_idx):
if len(samples) == 0:
return {}
def merge(key):
return data_utils.collate_tokens(
[s[key] for s in samples],
pad_idx,
eos_idx=eos_idx,
)
id = np.array([s["id"] for s in samples])
src_tokens = merge("source")
src_lengths = torch.LongTensor([s["source"].ne(pad_idx).long().sum() for s in samples])
patch_images = torch.stack([sample['patch_image'] for sample in samples], dim=0)
patch_masks = torch.cat([sample['patch_mask'] for sample in samples])
code_masks = None
if samples[0].get("code_mask", None) is not None:
code_masks = torch.cat([sample['code_mask'] for sample in samples])
conf = torch.cat([s['conf'] for s in samples], dim=0)
prev_output_tokens = None
target = None
if samples[0].get("target", None) is not None:
target = merge("target")
tgt_lengths = torch.LongTensor([s["target"].ne(pad_idx).long().sum() for s in samples])
ntokens = tgt_lengths.sum().item()
if samples[0].get("prev_output_tokens", None) is not None:
prev_output_tokens = merge("prev_output_tokens")
else:
ntokens = src_lengths.sum().item()
(all_code continues by repeating the cropped_code above verbatim, from "batch = {" through "transforms.CenterCrop(patch_image_size),")
next_line: RandomAugment(2, 7, isPIL=True, augs=['Identity', 'AutoContrast', 'Equalize', 'Brightness', 'Sharpness',
gold_snippet_index: 2 | created_at: 2023-10-20 20:01:42+00:00 | level: 4k
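A toy sketch (ours, outside the record) of the policy the RandomAugment entry above implements: sample N op names with replacement, then apply each with probability 0.5 at magnitude M.

    import random

    def random_augment(img, ops, n=2, m=7):
        """Apply up to n randomly chosen ops at magnitude m, each with p=0.5."""
        for name in random.choices(list(ops), k=n):
            if random.random() < 0.5:
                img = ops[name](img, m)
        return img

    ops = {"identity": lambda x, m: x, "brightness": lambda x, m: x + m}
    print(random_augment(10, ops))  # 10, 17, or 24 depending on the draw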
repo_name: ArnaudParant/sel | file_path: tests/test_parser_n_formator.py
context: [
{
"identifier": "query_string_parser",
"path": "sel/query_string_parser.py",
"snippet": "AGGREG_TYPES = [\"aggreg\", \"histogram\", \"count\", \"distinct\", \"min\", \"max\", \"sum\", \"average\", \"stats\"]\nAGGREG_PARAMETER_MAPPING = {\n \"subaggreg\": None,\n \"interval\": None,\n \"size\": int,\n \"under\": None,\n \"where\": None,\n}\ndef split_if_contains(keywords, name):\ndef syntaxerror_parser(parser, text, pos=None, name=None, expected_keywords=None):\n def __init__(self, context):\n def parse(self, parser, text, pos=None):\n def __init__(self, name):\n def parse(self, parser, text, pos=None):\ndef unexpect_manager(input_string, remaining):\ndef parse(input_string, grammar=Query):\nclass SyntaxErrorChecker(str):\nclass Error(str):\nclass Value(str):\nclass Values(str):\nclass Comparator(str):\nclass NumericalComparator(str):\nclass InComparator(str):\nclass RangeComparator(str):\nclass Operator(str):\nclass Order(str):\nclass Name(str):\nclass Integer(str):\nclass FieldPath(str):\nclass QueryString(str):\nclass RangeValue(str):\nclass Filter(str):\nclass RangeFilter(str):\nclass Not(str):\nclass Context(str):\nclass Group(List):\nclass NoBracketGroup(List):\nclass QueryElement(List):\nclass AggregType(str):\nclass BracketAggreg(str):\nclass SubAggreg(str):\nclass AggregParameter(str):\nclass Aggreg(str):\nclass SortParameter(str):\nclass Sort(str):\nclass Query(List):"
},
{
"identifier": "Value",
"path": "sel/query_string_parser.py",
"snippet": "class Value(str):\n \"\"\" General value definition \"\"\"\n grammar = [\n re.compile(r'\"\"\"((?!\"\"\").)*\"\"\"'),\n re.compile(r'\"\"((?!\"\").)*\"\"'),\n re.compile(r'\"((?!\").)*\"'),\n re.compile(r\"'''((?!''').)*'''\"),\n re.compile(r\"''((?!'').)*''\"),\n re.compile(r\"'((?!').)*'\"),\n re.compile(r'[\\w\\d\\-\\_\\.\\#\\@/*]+')\n ]"
},
{
"identifier": "QueryString",
"path": "sel/query_string_parser.py",
"snippet": "class QueryString(str):\n \"\"\" Use as shorcut to query content with elastic query_string syntax \"\"\"\n grammar = [\n re.compile(r'\"\"\"((?!\"\"\").)*\"\"\"'),\n re.compile(r'\"\"((?!\"\").)*\"\"'),\n re.compile(r'\"((?!\").)*\"'),\n re.compile(r\"'''((?!''').)*'''\"),\n re.compile(r\"''((?!'').)*''\"),\n re.compile(r\"'((?!').)*'\")\n ]"
},
{
"identifier": "Comparator",
"path": "sel/query_string_parser.py",
"snippet": "class Comparator(str):\n \"\"\" Allow comparator in filters \"\"\"\n grammar = [\n re.compile(r'(!=|!~|>=|>|<=|<|=|~)'),\n (re.compile(\"prefix\", re.IGNORECASE), blank),\n (re.compile(\"nprefix\", re.IGNORECASE), blank),\n (re.compile(\"not\", re.IGNORECASE), blank, re.compile(\"prefix\", re.IGNORECASE), blank)\n ]"
},
{
"identifier": "Not",
"path": "sel/query_string_parser.py",
"snippet": "class Not(str):\n \"\"\" Defined 'not' grammar bellow \"\"\"\n pass"
},
{
"identifier": "RangeFilter",
"path": "sel/query_string_parser.py",
"snippet": "class RangeFilter(str):\n \"\"\" Defined range filter grammar bellow \"\"\"\n pass"
},
{
"identifier": "Filter",
"path": "sel/query_string_parser.py",
"snippet": "class Filter(str):\n \"\"\" Defined filter grammar bellow \"\"\"\n pass"
},
{
"identifier": "Context",
"path": "sel/query_string_parser.py",
"snippet": "class Context(str):\n \"\"\" Defined context grammar bellow \"\"\"\n pass"
},
{
"identifier": "Aggreg",
"path": "sel/query_string_parser.py",
"snippet": "class Aggreg(str):\n \"\"\" Aggregations grammar, parameters can be placed in any order \"\"\"\n grammar = (\n attr(\"aggreg_type\", AggregType),\n blank,\n optional(attr(\"name\", Name)),\n ignore(re.compile(\":\")),\n attr(\"field\", [FieldPath, Error(\"field path for aggregation\")]),\n attr(\"parameters\", maybe_some(blank, AggregParameter)),\n optional(SyntaxErrorChecker(\"aggreg\"))\n )"
},
{
"identifier": "Sort",
"path": "sel/query_string_parser.py",
"snippet": "class Sort(str):\n \"\"\" Sort grammar \"\"\"\n grammar = (\n re.compile(\"sort\", re.IGNORECASE),\n ignore(re.compile(\":\")),\n attr(\"field\", [FieldPath, Error(\"field path for sort query\")]),\n optional(blank, attr(\"order\", Order)),\n attr(\"parameters\", maybe_some(blank, SortParameter)),\n optional(SyntaxErrorChecker(\"sort\"))\n )"
},
{
"identifier": "Group",
"path": "sel/query_string_parser.py",
"snippet": "class Group(List):\n \"\"\" Defined group of filter grammar bellow \"\"\"\n pass"
},
{
"identifier": "NoBracketGroup",
"path": "sel/query_string_parser.py",
"snippet": "class NoBracketGroup(List):\n \"\"\" Group without bracket for main level query part \"\"\"\n pass"
},
{
"identifier": "Query",
"path": "sel/query_string_parser.py",
"snippet": "class Query(List):\n \"\"\" Full query grammar \"\"\"\n grammar = (\n optional(attr(\"query\", NoBracketGroup)),\n optional(attr(\"aggreg\", maybe_some(blank, Aggreg))),\n optional(attr(\"sort\", maybe_some(blank, Sort)))\n )"
},
{
"identifier": "query_object_formator",
"path": "sel/query_object_formator.py",
"snippet": "ALLOW_QUOTES = ['\"\"\"', '\"\"', '\"', \"'''\", \"''\", \"'\"]\nSPECIAL_COMPARATORS = {\n \"nin\": [\"not\", \"in\"],\n \"nrange\": [\"not\", \"range\"],\n \"nprefix\": [\"not\", \"prefix\"],\n}\nREVERT_NUMERICAL_COMPARATORS = {\n \">\": \"<\",\n \">=\": \"<=\",\n \"<\": \">\",\n \"<=\": \">=\",\n}\nSORT_PARAMETER_MAPPING = {\n \"seed\": int,\n \"mode\": None,\n \"under\": None,\n \"where\": None,\n}\nTYPE_FORMAT_MAPPING = {\n Value: format_value,\n QueryString: format_query_string,\n\n Filter: format_filter,\n RangeFilter: format_range_filter,\n Context: format_context,\n QueryElement: format_class_container,\n Not: format_not,\n Group: format_group,\n NoBracketGroup: format_group,\n\n Comparator: format_string,\n Name: format_string,\n FieldPath: format_string,\n\n Aggreg: format_aggreg,\n SubAggreg: format_subaggreg,\n BracketAggreg: format_class_container,\n\n Sort: format_sort,\n\n Query: format_query,\n}\ndef format_string(obj):\ndef format_value(obj):\ndef format_query_string(obj):\ndef format_filter(obj):\ndef format_range_filter(obj):\ndef format_context(obj):\ndef format_class_container(obj):\ndef format_not(obj):\ndef format_group(obj):\n def to_group(operator, items):\ndef to_int(value, name=None):\ndef format_parameters(parameters, mapping):\ndef format_aggreg(obj):\ndef format_subaggreg(obj):\ndef format_sort(obj):\ndef format_query(obj):\ndef formator(obj, name=None):"
}
] | import json
import pytest
import traceback
from sel import query_string_parser
from sel.query_string_parser import (
Value, QueryString, Comparator, Not, RangeFilter, Filter, Context,
Aggreg, Sort, Group, NoBracketGroup, Query
)
from sel import query_object_formator | 1,743 |
class TestParserNFormator:
@pytest.mark.parametrize(["query", "expected"], [
["toto", "toto"],
['"toto tata titi"', "toto tata titi"],
["toto tata titi", None], # Exception, does not match type Value
])
def test_value(self, query, expected):
try:
|
class TestParserNFormator:
@pytest.mark.parametrize(["query", "expected"], [
["toto", "toto"],
['"toto tata titi"', "toto tata titi"],
["toto tata titi", None], # Exception, does not match type Value
])
def test_value(self, query, expected):
try: | res = query_string_parser.parse(query, grammar=Value) | 0 | 2023-10-16 09:03:13+00:00 | 4k |
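Given the context snippets above (parse defaults to grammar=Query, and formator flattens the parsed pypeg objects), a hedged sketch of the round trip this test file exercises; the exact structure returned by formator is an assumption, not shown in the snippets:

from sel import query_string_parser, query_object_formator
from sel.query_string_parser import Value

res = query_string_parser.parse('"toto tata titi"', grammar=Value)  # pypeg parse of a quoted Value
formatted = query_object_formator.formator(res)  # expected to strip the quotes, yielding "toto tata titi" (assumed)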
Qualcomm-AI-research/outlier-free-transformers | quantization/autoquant_utils.py | [
{
"identifier": "FP32Acts",
"path": "quantization/base_quantized_classes.py",
"snippet": "class FP32Acts(nn.Module):\n def forward(self, x):\n return x\n\n def reset_ranges(self):\n pass"
},
{
"identifier": "QuantizedActivation",
"path": "quantization/base_quantized_classes.py",
"snippet": "class QuantizedActivation(QuantizedModule):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.activation_quantizer = QuantizationManager(\n qmethod=self.act_method,\n qparams=self.act_qparams,\n init=self.act_range_method,\n init_params=self.act_range_options,\n )\n\n def quantize_activations(self, x):\n if self._quant_a:\n return self.activation_quantizer(x)\n else:\n return x\n\n def forward(self, x):\n return self.quantize_activations(x)"
},
{
"identifier": "QuantizedModule",
"path": "quantization/base_quantized_classes.py",
"snippet": "class QuantizedModule(nn.Module):\n \"\"\"\n Parent class for a quantized module. It adds the basic functionality of switching the module\n between quantized and full precision mode. It also defines the cached parameters and handles\n the reset of the cache properly.\n \"\"\"\n\n def __init__(\n self,\n *args,\n method: QuantizerBase = AsymmetricUniformQuantizer,\n act_method=None,\n weight_range_method: RangeEstimatorBase = CurrentMinMaxEstimator,\n act_range_method: RangeEstimatorBase = RunningMinMaxEstimator,\n n_bits=8,\n n_bits_act=None,\n per_channel_weights=False,\n percentile=None,\n weight_range_options=None,\n act_range_options=None,\n scale_domain=\"linear\",\n bayesian_bits_kwargs=None,\n prune_method=None,\n prune_kwargs=None,\n **kwargs\n ):\n kwargs.pop(\"act_quant_dict\", None)\n kwargs.pop(\"quant_dict\", None)\n\n super().__init__(*args, **kwargs)\n\n self.method = method\n self.act_method = act_method or method\n self.n_bits = n_bits\n self.n_bits_act = n_bits_act or n_bits\n self.per_channel_weights = per_channel_weights\n self.percentile = percentile\n self.weight_range_method = weight_range_method\n self.weight_range_options = weight_range_options if weight_range_options else {}\n self.act_range_method = act_range_method\n self.act_range_options = act_range_options if act_range_options else {}\n self.scale_domain = scale_domain\n\n self.bayesian_bits_kwargs = bayesian_bits_kwargs or {}\n self.prune_method = prune_method\n self.prune_kwargs = prune_kwargs\n\n self.cached_params = None\n self._caching = True\n\n self.quant_params = None\n self.register_buffer(\"_quant_w\", torch.BoolTensor([False]))\n self.register_buffer(\"_quant_a\", torch.BoolTensor([False]))\n\n self.act_qparams = dict(\n n_bits=self.n_bits_act,\n scale_domain=self.scale_domain,\n act_quant=True,\n **self.bayesian_bits_kwargs\n )\n self.weight_qparams = dict(\n n_bits=self.n_bits,\n scale_domain=self.scale_domain,\n act_quant=False,\n **self.bayesian_bits_kwargs\n )\n\n @property\n def caching(self):\n return self._caching\n\n @caching.setter\n def caching(self, value: bool):\n self._caching = value\n if not value:\n self.cached_params = None\n\n def quantized_weights(self):\n self.cached_params = None\n self._quant_w = torch.BoolTensor([True])\n\n def full_precision_weights(self):\n self.cached_params = None\n self._quant_w = torch.BoolTensor([False])\n\n def quantized_acts(self):\n self._quant_a = torch.BoolTensor([True])\n\n def full_precision_acts(self):\n self._quant_a = torch.BoolTensor([False])\n\n def quantized(self):\n self.quantized_weights()\n self.quantized_acts()\n\n def full_precision(self):\n self.full_precision_weights()\n self.full_precision_acts()\n\n def get_quantizer_status(self):\n return dict(quant_a=self._quant_a.item(), quant_w=self._quant_w.item())\n\n def set_quantizer_status(self, quantizer_status):\n if quantizer_status[\"quant_a\"]:\n self.quantized_acts()\n else:\n self.full_precision_acts()\n\n if quantizer_status[\"quant_w\"]:\n self.quantized_weights()\n else:\n self.full_precision_weights()\n\n def learn_ranges(self):\n self.apply(_set_layer_learn_ranges)\n\n def fix_ranges(self):\n self.apply(_set_layer_fix_ranges)\n\n def estimate_ranges(self):\n self.apply(_set_layer_estimate_ranges)\n\n def estimate_ranges_train(self):\n self.apply(_set_layer_estimate_ranges_train)\n\n def train(self, mode=True):\n super().train(mode)\n if mode:\n self.cached_params = None\n return self\n\n def _apply(self, *args, **kwargs):\n self.cached_params = None\n return 
super(QuantizedModule, self)._apply(*args, **kwargs)\n\n def extra_repr(self):\n quant_state = \"weight_quant={}, act_quant={}\".format(\n self._quant_w.item(), self._quant_a.item()\n )\n parent_repr = super().extra_repr()\n return \"{},\\n{}\".format(parent_repr, quant_state) if parent_repr else quant_state"
},
{
"identifier": "QuantizationHijacker",
"path": "quantization/hijacker.py",
"snippet": "class QuantizationHijacker(QuantizedModule):\n def __init__(self, *args, activation: nn.Module = None, **kwargs):\n def forward(self, x, offsets=None):\n def get_params(self):\n def quantize_weights(self, weights):\n def get_weight_bias(self):\n def run_forward(self, x, weight, bias, offsets=None):\n def quantize_activations(self, activations):"
},
{
"identifier": "QuantizationManager",
"path": "quantization/quantization_manager.py",
"snippet": "class QuantizationManager(nn.Module):\n \"\"\"Implementation of Quantization and Quantization Range Estimation\n\n Parameters\n ----------\n n_bits: int\n Number of bits for the quantization.\n qmethod: QMethods member (Enum)\n The quantization scheme to use, e.g. symmetric_uniform, asymmetric_uniform,\n qmn_uniform etc.\n init: RangeEstimators member (Enum)\n Initialization method for the grid from\n per_channel: bool\n If true, will use a separate quantization grid for each kernel/channel.\n x_min: float or PyTorch Tensor\n The minimum value which needs to be represented.\n x_max: float or PyTorch Tensor\n The maximum value which needs to be represented.\n \"\"\"\n\n def __init__(\n self,\n qmethod: QuantizerBase = QMethods.symmetric_uniform.cls,\n init: RangeEstimatorBase = RangeEstimators.current_minmax.cls,\n per_channel=False,\n x_min=None,\n x_max=None,\n qparams=None,\n init_params=None,\n ):\n super().__init__()\n self.state = Qstates.estimate_ranges\n self.qmethod = qmethod\n self.init = init\n self.per_channel = per_channel\n self.qparams = qparams if qparams else {}\n self.init_params = init_params if init_params else {}\n self.range_estimator = None\n\n # define quantizer\n self.quantizer = self.qmethod(per_channel=self.per_channel, **qparams)\n self.quantizer.state = self.state\n\n # define range estimation method for quantizer initialisation\n if x_min is not None and x_max is not None:\n self.set_quant_range(x_min, x_max)\n self.fix_ranges()\n else:\n # set up the collector function to set the ranges\n self.range_estimator = self.init(\n per_channel=self.per_channel, quantizer=self.quantizer, **self.init_params\n )\n\n @property\n def n_bits(self):\n return self.quantizer.n_bits\n\n def estimate_ranges(self):\n self.state = Qstates.estimate_ranges\n self.quantizer.state = self.state\n\n def fix_ranges(self):\n if self.quantizer.is_initialized:\n self.state = Qstates.fix_ranges\n self.quantizer.state = self.state\n self.quantizer.fix_ranges()\n else:\n raise QuantizerNotInitializedError()\n\n def learn_ranges(self):\n self.quantizer.make_range_trainable()\n self.state = Qstates.learn_ranges\n self.quantizer.state = self.state\n\n def estimate_ranges_train(self):\n self.state = Qstates.estimate_ranges_train\n self.quantizer.state = self.state\n\n def reset_ranges(self):\n self.range_estimator.reset()\n self.quantizer.reset()\n self.estimate_ranges()\n\n def forward(self, x):\n if self.state == Qstates.estimate_ranges or (\n self.state == Qstates.estimate_ranges_train and self.training\n ):\n # Note this can be per tensor or per channel\n cur_xmin, cur_xmax = self.range_estimator(x)\n self.set_quant_range(cur_xmin, cur_xmax)\n\n return self.quantizer(x)\n\n def set_quant_range(self, x_min, x_max):\n self.quantizer.set_quant_range(x_min, x_max)\n\n def extra_repr(self):\n return \"state={}\".format(self.state.name)"
}
] | import copy
import warnings
from torch import nn
from torch.nn import functional as F
from torch.nn.modules.pooling import _AdaptiveAvgPoolNd, _AvgPoolNd
from quantization.base_quantized_classes import (
FP32Acts,
QuantizedActivation,
QuantizedModule,
)
from quantization.hijacker import QuantizationHijacker, activations_set
from quantization.quantization_manager import QuantizationManager | 3,541 | def run_forward(self, x, weight, bias, offsets=None):
return F.layer_norm(
input=x.contiguous(),
normalized_shape=self.normalized_shape,
weight=weight.contiguous(),
bias=bias.contiguous(),
eps=self.eps,
)
class QuantEmbedding(QuantizationHijacker, nn.Embedding):
def __init__(self, *args, activation=None, **kwargs):
super().__init__(*args, activation=activation, **kwargs)
# NB: We should not (re-)quantize activations of this module, as it is a
# lookup table (=weights), which is already quantized
self.activation_quantizer = FP32Acts()
def run_forward(self, x, weight, bias, offsets=None):
return F.embedding(
input=x.contiguous(),
weight=weight.contiguous(),
padding_idx=self.padding_idx,
max_norm=self.max_norm,
norm_type=self.norm_type,
scale_grad_by_freq=self.scale_grad_by_freq,
sparse=self.sparse,
)
# Modules Map
module_map = {nn.Linear: QuantLinear, nn.LayerNorm: QuantLayerNorm, nn.Embedding: QuantEmbedding}
non_param_modules = (_AdaptiveAvgPoolNd, _AvgPoolNd)
def next_bn(module, i):
return len(module) > i + 1 and isinstance(module[i + 1], (nn.BatchNorm2d, nn.BatchNorm1d))
def get_act(module, i):
# Case 1: conv + act
if len(module) - i > 1 and isinstance(module[i + 1], tuple(activations_set)):
return module[i + 1], i + 1
# Case 2: conv + bn + act
if (
len(module) - i > 2
and next_bn(module, i)
and isinstance(module[i + 2], tuple(activations_set))
):
return module[i + 2], i + 2
# Case 3: conv + bn + X -> return false
# Case 4: conv + X -> return false
return None, None
def get_linear_args(module):
args = dict(
in_features=module.in_features,
out_features=module.out_features,
bias=module.bias is not None,
)
return args
def get_layernorm_args(module):
args = dict(normalized_shape=module.normalized_shape, eps=module.eps)
return args
def get_embedding_args(module):
args = dict(
num_embeddings=module.num_embeddings,
embedding_dim=module.embedding_dim,
padding_idx=module.padding_idx,
max_norm=module.max_norm,
norm_type=module.norm_type,
scale_grad_by_freq=module.scale_grad_by_freq,
sparse=module.sparse,
)
return args
def get_module_args(mod, act):
if isinstance(mod, nn.Linear):
kwargs = get_linear_args(mod)
elif isinstance(mod, nn.LayerNorm):
kwargs = get_layernorm_args(mod)
elif isinstance(mod, nn.Embedding):
kwargs = get_embedding_args(mod)
else:
raise ValueError
kwargs["activation"] = act
return kwargs
def quant_module(module, i, **quant_params):
act, _ = get_act(module, i)
modtype = module_map[type(module[i])]
kwargs = get_module_args(module[i], act)
new_module = modtype(**kwargs, **quant_params)
new_module.weight.data = module[i].weight.data.clone()
if module[i].bias is not None:
new_module.bias.data = module[i].bias.data.clone()
return new_module, i + int(bool(act)) + 1
def quantize_sequential(model, specials=None, tie_activation_quantizers=False, **quant_params):
specials = specials or dict()
i = 0
quant_modules = []
while i < len(model):
| # Copyright (c) 2023 Qualcomm Technologies, Inc.
# All Rights Reserved.
class QuantLinear(QuantizationHijacker, nn.Linear):
def run_forward(self, x, weight, bias, offsets=None):
return F.linear(x.contiguous(), weight.contiguous(), bias=bias)
class QuantizedActivationWrapper(QuantizedActivation):
"""
    Wraps a layer and quantizes the activation.
    It also allows tying the input and output quantizers, which is helpful
    for layers such as Average Pooling.
"""
def __init__(
self,
layer,
*args,
tie_activation_quantizers=False,
input_quantizer: QuantizationManager = None,
**kwargs,
):
super().__init__(*args, **kwargs)
self.tie_activation_quantizers = tie_activation_quantizers
if input_quantizer:
assert isinstance(input_quantizer, QuantizationManager)
self.activation_quantizer = input_quantizer
self.layer = layer
def quantize_activations_no_range_update(self, x):
if self._quant_a:
return self.activation_quantizer.quantizer(x)
else:
return x
def forward(self, x):
x = self.layer(x)
if self.tie_activation_quantizers:
# The input activation quantizer is used to quantize the activation
# but without updating the quantization range
return self.quantize_activations_no_range_update(x)
else:
return self.quantize_activations(x)
def extra_repr(self):
return f"tie_activation_quantizers={self.tie_activation_quantizers}"
class QuantLayerNorm(QuantizationHijacker, nn.LayerNorm):
def run_forward(self, x, weight, bias, offsets=None):
return F.layer_norm(
input=x.contiguous(),
normalized_shape=self.normalized_shape,
weight=weight.contiguous(),
bias=bias.contiguous(),
eps=self.eps,
)
class QuantEmbedding(QuantizationHijacker, nn.Embedding):
def __init__(self, *args, activation=None, **kwargs):
super().__init__(*args, activation=activation, **kwargs)
# NB: We should not (re-)quantize activations of this module, as it is a
# lookup table (=weights), which is already quantized
self.activation_quantizer = FP32Acts()
def run_forward(self, x, weight, bias, offsets=None):
return F.embedding(
input=x.contiguous(),
weight=weight.contiguous(),
padding_idx=self.padding_idx,
max_norm=self.max_norm,
norm_type=self.norm_type,
scale_grad_by_freq=self.scale_grad_by_freq,
sparse=self.sparse,
)
# Modules Map
module_map = {nn.Linear: QuantLinear, nn.LayerNorm: QuantLayerNorm, nn.Embedding: QuantEmbedding}
non_param_modules = (_AdaptiveAvgPoolNd, _AvgPoolNd)
def next_bn(module, i):
return len(module) > i + 1 and isinstance(module[i + 1], (nn.BatchNorm2d, nn.BatchNorm1d))
def get_act(module, i):
# Case 1: conv + act
if len(module) - i > 1 and isinstance(module[i + 1], tuple(activations_set)):
return module[i + 1], i + 1
# Case 2: conv + bn + act
if (
len(module) - i > 2
and next_bn(module, i)
and isinstance(module[i + 2], tuple(activations_set))
):
return module[i + 2], i + 2
# Case 3: conv + bn + X -> return false
# Case 4: conv + X -> return false
return None, None
def get_linear_args(module):
args = dict(
in_features=module.in_features,
out_features=module.out_features,
bias=module.bias is not None,
)
return args
def get_layernorm_args(module):
args = dict(normalized_shape=module.normalized_shape, eps=module.eps)
return args
def get_embedding_args(module):
args = dict(
num_embeddings=module.num_embeddings,
embedding_dim=module.embedding_dim,
padding_idx=module.padding_idx,
max_norm=module.max_norm,
norm_type=module.norm_type,
scale_grad_by_freq=module.scale_grad_by_freq,
sparse=module.sparse,
)
return args
def get_module_args(mod, act):
if isinstance(mod, nn.Linear):
kwargs = get_linear_args(mod)
elif isinstance(mod, nn.LayerNorm):
kwargs = get_layernorm_args(mod)
elif isinstance(mod, nn.Embedding):
kwargs = get_embedding_args(mod)
else:
raise ValueError
kwargs["activation"] = act
return kwargs
def quant_module(module, i, **quant_params):
act, _ = get_act(module, i)
modtype = module_map[type(module[i])]
kwargs = get_module_args(module[i], act)
new_module = modtype(**kwargs, **quant_params)
new_module.weight.data = module[i].weight.data.clone()
if module[i].bias is not None:
new_module.bias.data = module[i].bias.data.clone()
return new_module, i + int(bool(act)) + 1
def quantize_sequential(model, specials=None, tie_activation_quantizers=False, **quant_params):
specials = specials or dict()
i = 0
quant_modules = []
while i < len(model): | if isinstance(model[i], QuantizedModule): | 2 | 2023-10-23 15:59:50+00:00 | 4k |
QgZhan/ESVAE | main_snn_ae.py | [
{
"identifier": "aboutCudaDevices",
"path": "utils.py",
"snippet": "class aboutCudaDevices():\r\n def __init__(self):\r\n pass\r\n\r\n def num_devices(self):\r\n \"\"\"Return number of devices connected.\"\"\"\r\n return cuda.Device.count()\r\n\r\n def devices(self):\r\n \"\"\"Get info on all devices connected.\"\"\"\r\n num = cuda.Device.count()\r\n print(\"%d device(s) found:\" % num)\r\n for i in range(num):\r\n print(cuda.Device(i).name(), \"(Id: %d)\" % i)\r\n\r\n def mem_info(self):\r\n \"\"\"Get available and total memory of all devices.\"\"\"\r\n available, total = cuda.mem_get_info()\r\n print(\"Available: %.2f GB\\nTotal: %.2f GB\" % (available / 1e9, total / 1e9))\r\n\r\n def attributes(self, device_id=0):\r\n \"\"\"Get attributes of device with device Id = device_id\"\"\"\r\n return cuda.Device(device_id).get_attributes()\r\n\r\n def info(self):\r\n \"\"\"Class representation as number of devices connected and about them.\"\"\"\r\n num = cuda.Device.count()\r\n string = \"\"\r\n string += (\"%d device(s) found:\\n\" % num)\r\n for i in range(num):\r\n string += (\" %d) %s (Id: %d)\\n\" % ((i + 1), cuda.Device(i).name(), i))\r\n string += (\" Memory: %.2f GB\\n\" % (cuda.Device(i).total_memory() / 1e9))\r\n return string\r"
},
{
"identifier": "AverageMeter",
"path": "utils.py",
"snippet": "class AverageMeter(object):\r\n \"\"\"Computes and stores the average and current value\"\"\"\r\n def __init__(self):\r\n self.reset()\r\n\r\n def reset(self):\r\n self.val = 0\r\n self.avg = 0\r\n self.sum = 0\r\n self.count = 0\r\n\r\n def update(self, val, n=1):\r\n self.val = val\r\n self.sum += val * n\r\n self.count += n\r\n self.avg = self.sum / self.count\r"
},
{
"identifier": "aboutCudaDevices",
"path": "utils.py",
"snippet": "class aboutCudaDevices():\r\n def __init__(self):\r\n pass\r\n\r\n def num_devices(self):\r\n \"\"\"Return number of devices connected.\"\"\"\r\n return cuda.Device.count()\r\n\r\n def devices(self):\r\n \"\"\"Get info on all devices connected.\"\"\"\r\n num = cuda.Device.count()\r\n print(\"%d device(s) found:\" % num)\r\n for i in range(num):\r\n print(cuda.Device(i).name(), \"(Id: %d)\" % i)\r\n\r\n def mem_info(self):\r\n \"\"\"Get available and total memory of all devices.\"\"\"\r\n available, total = cuda.mem_get_info()\r\n print(\"Available: %.2f GB\\nTotal: %.2f GB\" % (available / 1e9, total / 1e9))\r\n\r\n def attributes(self, device_id=0):\r\n \"\"\"Get attributes of device with device Id = device_id\"\"\"\r\n return cuda.Device(device_id).get_attributes()\r\n\r\n def info(self):\r\n \"\"\"Class representation as number of devices connected and about them.\"\"\"\r\n num = cuda.Device.count()\r\n string = \"\"\r\n string += (\"%d device(s) found:\\n\" % num)\r\n for i in range(num):\r\n string += (\" %d) %s (Id: %d)\\n\" % ((i + 1), cuda.Device(i).name(), i))\r\n string += (\" Memory: %.2f GB\\n\" % (cuda.Device(i).total_memory() / 1e9))\r\n return string\r"
},
{
"identifier": "load_dataset_snn",
"path": "datasets/load_dataset_snn.py",
"snippet": "def load_mnist(data_path, batch_size=None, input_size=None, small=False):\r\ndef load_fashionmnist(data_path, batch_size=None, input_size=None, small=False):\r\ndef load_cifar10(data_path, batch_size=None, input_size=None, small=False):\r\ndef load_celebA(data_path, batch_size=None, input_size=None, small=False):\r"
}
] | import os
import os.path
import numpy as np
import logging
import argparse
import pycuda.driver as cuda
import torch
import torchvision
import svae_models.sae as sae
from torch.utils.tensorboard import SummaryWriter
from utils import aboutCudaDevices
from utils import AverageMeter
from utils import aboutCudaDevices
from datasets import load_dataset_snn
| 1,921 |
max_accuracy = 0
min_loss = 1000
def train(network, trainloader, opti, epoch, n_step):
loss_meter = AverageMeter()
network = network.train()
for batch_idx, (real_img, label) in enumerate(trainloader):
opti.zero_grad()
real_img = real_img.to(device)
spike_input = real_img.unsqueeze(-1).repeat(1, 1, 1, 1, n_step) # (N, C, H, W, T)
recons, latent = network(spike_input)
loss = network.loss_function(recons, real_img)
loss.backward()
opti.step()
loss_meter.update(loss.detach().cpu().item())
print(f'Train[{epoch}/{max_epoch}] [{batch_idx}/{len(trainloader)}] Loss: {loss_meter.avg}')
if batch_idx == len(trainloader)-1:
os.makedirs(f'checkpoint/{args.name}/imgs/train/', exist_ok=True)
torchvision.utils.save_image((real_img+1)/2, f'checkpoint/{args.name}/imgs/train/epoch{epoch}_input.png')
torchvision.utils.save_image((recons+1)/2, f'checkpoint/{args.name}/imgs/train/epoch{epoch}_recons.png')
writer.add_images('Train/input_img', (real_img+1)/2, epoch)
writer.add_images('Train/recons_img', (recons+1)/2, epoch)
logging.info(f"Train [{epoch}] Loss: {loss_meter.avg}")
writer.add_scalar('Train/loss', loss_meter.avg, epoch)
return loss_meter.avg
def test(network, trainloader, epoch, n_step):
loss_meter = AverageMeter()
network = network.eval()
with torch.no_grad():
for batch_idx, (real_img, label) in enumerate(trainloader):
real_img = real_img.to(device)
#normalized_img = normalized_img.to(device)
spike_input = real_img.unsqueeze(-1).repeat(1, 1, 1, 1, n_step) # (N, C, H, W, T)
recons, latent = network(spike_input)
loss = network.loss_function(recons, real_img)
loss_meter.update(loss.detach().cpu().item())
print(f'Test[{epoch}/{max_epoch}] [{batch_idx}/{len(trainloader)}] Loss: {loss_meter.avg}')
if batch_idx == len(trainloader)-1:
os.makedirs(f'checkpoint/{args.name}/imgs/test/', exist_ok=True)
torchvision.utils.save_image((real_img+1)/2, f'checkpoint/{args.name}/imgs/test/epoch{epoch}_input.png')
torchvision.utils.save_image((recons+1)/2, f'checkpoint/{args.name}/imgs/test/epoch{epoch}_recons.png')
writer.add_images('Test/input_img', (real_img+1)/2, epoch)
writer.add_images('Test/recons_img', (recons+1)/2, epoch)
logging.info(f"Test [{epoch}] Loss: {loss_meter.avg}")
writer.add_scalar('Test/loss', loss_meter.avg, epoch)
return loss_meter.avg
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('name', type=str)
parser.add_argument('-dataset', type=str, required=True)
parser.add_argument('-batch_size', type=int, default=128)
parser.add_argument('-latent_dim', type=int, default=128)
parser.add_argument('-n_step', type=int, default=16)
parser.add_argument('-checkpoint', action='store', dest='checkpoint', help='The path of checkpoint, if use checkpoint')
parser.add_argument('-device', type=int, default=0)
try:
args = parser.parse_args()
except:
parser.print_help()
exit(0)
if args.device is None:
device = torch.device("cuda:0")
else:
device = torch.device(f"cuda:{args.device}")
logging.info("dataset loading...")
if args.dataset == "MNIST":
data_path = os.path.expanduser("/data/zhan/CV_data/mnist")
in_channels = 1
input_size = 32
|
max_accuracy = 0
min_loss = 1000
def train(network, trainloader, opti, epoch, n_step):
loss_meter = AverageMeter()
network = network.train()
for batch_idx, (real_img, label) in enumerate(trainloader):
opti.zero_grad()
real_img = real_img.to(device)
spike_input = real_img.unsqueeze(-1).repeat(1, 1, 1, 1, n_step) # (N, C, H, W, T)
recons, latent = network(spike_input)
loss = network.loss_function(recons, real_img)
loss.backward()
opti.step()
loss_meter.update(loss.detach().cpu().item())
print(f'Train[{epoch}/{max_epoch}] [{batch_idx}/{len(trainloader)}] Loss: {loss_meter.avg}')
if batch_idx == len(trainloader)-1:
os.makedirs(f'checkpoint/{args.name}/imgs/train/', exist_ok=True)
torchvision.utils.save_image((real_img+1)/2, f'checkpoint/{args.name}/imgs/train/epoch{epoch}_input.png')
torchvision.utils.save_image((recons+1)/2, f'checkpoint/{args.name}/imgs/train/epoch{epoch}_recons.png')
writer.add_images('Train/input_img', (real_img+1)/2, epoch)
writer.add_images('Train/recons_img', (recons+1)/2, epoch)
logging.info(f"Train [{epoch}] Loss: {loss_meter.avg}")
writer.add_scalar('Train/loss', loss_meter.avg, epoch)
return loss_meter.avg
def test(network, trainloader, epoch, n_step):
loss_meter = AverageMeter()
network = network.eval()
with torch.no_grad():
for batch_idx, (real_img, label) in enumerate(trainloader):
real_img = real_img.to(device)
#normalized_img = normalized_img.to(device)
spike_input = real_img.unsqueeze(-1).repeat(1, 1, 1, 1, n_step) # (N, C, H, W, T)
recons, latent = network(spike_input)
loss = network.loss_function(recons, real_img)
loss_meter.update(loss.detach().cpu().item())
print(f'Test[{epoch}/{max_epoch}] [{batch_idx}/{len(trainloader)}] Loss: {loss_meter.avg}')
if batch_idx == len(trainloader)-1:
os.makedirs(f'checkpoint/{args.name}/imgs/test/', exist_ok=True)
torchvision.utils.save_image((real_img+1)/2, f'checkpoint/{args.name}/imgs/test/epoch{epoch}_input.png')
torchvision.utils.save_image((recons+1)/2, f'checkpoint/{args.name}/imgs/test/epoch{epoch}_recons.png')
writer.add_images('Test/input_img', (real_img+1)/2, epoch)
writer.add_images('Test/recons_img', (recons+1)/2, epoch)
logging.info(f"Test [{epoch}] Loss: {loss_meter.avg}")
writer.add_scalar('Test/loss', loss_meter.avg, epoch)
return loss_meter.avg
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('name', type=str)
parser.add_argument('-dataset', type=str, required=True)
parser.add_argument('-batch_size', type=int, default=128)
parser.add_argument('-latent_dim', type=int, default=128)
parser.add_argument('-n_step', type=int, default=16)
parser.add_argument('-checkpoint', action='store', dest='checkpoint', help='The path of checkpoint, if use checkpoint')
parser.add_argument('-device', type=int, default=0)
try:
args = parser.parse_args()
except:
parser.print_help()
exit(0)
if args.device is None:
device = torch.device("cuda:0")
else:
device = torch.device(f"cuda:{args.device}")
logging.info("dataset loading...")
if args.dataset == "MNIST":
data_path = os.path.expanduser("/data/zhan/CV_data/mnist")
in_channels = 1
input_size = 32
| train_loader, test_loader = load_dataset_snn.load_mnist(data_path, args.batch_size, input_size, True)
| 3 | 2023-10-23 07:33:27+00:00 | 4k |
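Both loops in the record above turn a static image batch into a constant spike train by repeating it along a trailing time axis; a quick shape check of that expansion:

import torch

n_step = 16
real_img = torch.randn(8, 1, 32, 32)                             # (N, C, H, W)
spike_input = real_img.unsqueeze(-1).repeat(1, 1, 1, 1, n_step)  # constant over time
assert spike_input.shape == (8, 1, 32, 32, n_step)               # (N, C, H, W, T)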
iesl/softmax_CPR_recommend | run_hyper.py | [
{
"identifier": "HyperTuning",
"path": "recbole/trainer/hyper_tuning.py",
"snippet": "class HyperTuning(object):\n r\"\"\"HyperTuning Class is used to manage the parameter tuning process of recommender system models.\n Given objective funciton, parameters range and optimization algorithm, using HyperTuning can find\n the best result among these parameters\n\n Note:\n HyperTuning is based on the hyperopt (https://github.com/hyperopt/hyperopt)\n\n Thanks to sbrodeur for the exhaustive search code.\n https://github.com/hyperopt/hyperopt/issues/200\n \"\"\"\n\n def __init__(\n self,\n objective_function,\n space=None,\n params_file=None,\n params_dict=None,\n fixed_config_file_list=None,\n algo='exhaustive',\n max_evals=100\n ):\n\n self.best_score = None\n self.best_params = None\n self.best_test_result = None\n self.params2result = {}\n #self.seed = 1\n self.neg_sampling = None\n\n self.objective_function = objective_function\n self.max_evals = max_evals\n self.fixed_config_file_list = fixed_config_file_list\n if space:\n self.space = space\n elif params_file:\n self.space = self._build_space_from_file(params_file)\n elif params_dict:\n self.space = self._build_space_from_dict(params_dict)\n else:\n raise ValueError('at least one of `space`, `params_file` and `params_dict` is provided')\n if isinstance(algo, str):\n if algo == 'exhaustive':\n self.algo = partial(exhaustive_search, nbMaxSucessiveFailures=1000)\n self.max_evals = _spacesize(self.space)\n else:\n raise ValueError('Illegal algo [{}]'.format(algo))\n else:\n self.algo = algo\n\n @staticmethod\n def _build_space_from_file(file):\n from hyperopt import hp\n space = {}\n with open(file, 'r') as fp:\n for line in fp:\n para_list = line.strip().split(' ')\n if len(para_list) < 3:\n continue\n para_name, para_type, para_value = para_list[0], para_list[1], \"\".join(para_list[2:])\n if para_type == 'choice':\n para_value = eval(para_value)\n space[para_name] = hp.choice(para_name, para_value)\n elif para_type == 'uniform':\n low, high = para_value.strip().split(',')\n space[para_name] = hp.uniform(para_name, float(low), float(high))\n elif para_type == 'quniform':\n low, high, q = para_value.strip().split(',')\n space[para_name] = hp.quniform(para_name, float(low), float(high), float(q))\n elif para_type == 'loguniform':\n low, high = para_value.strip().split(',')\n space[para_name] = hp.loguniform(para_name, float(low), float(high))\n else:\n raise ValueError('Illegal param type [{}]'.format(para_type))\n return space\n\n @staticmethod\n def _build_space_from_dict(config_dict):\n from hyperopt import hp\n space = {}\n for para_type in config_dict:\n if para_type == 'choice':\n for para_name in config_dict['choice']:\n para_value = config_dict['choice'][para_name]\n space[para_name] = hp.choice(para_name, para_value)\n elif para_type == 'uniform':\n for para_name in config_dict['uniform']:\n para_value = config_dict['uniform'][para_name]\n low = para_value[0]\n high = para_value[1]\n space[para_name] = hp.uniform(para_name, float(low), float(high))\n elif para_type == 'quniform':\n for para_name in config_dict['quniform']:\n para_value = config_dict['quniform'][para_name]\n low = para_value[0]\n high = para_value[1]\n q = para_value[2]\n space[para_name] = hp.quniform(para_name, float(low), float(high), float(q))\n elif para_type == 'loguniform':\n for para_name in config_dict['loguniform']:\n para_value = config_dict['loguniform'][para_name]\n low = para_value[0]\n high = para_value[1]\n space[para_name] = hp.loguniform(para_name, float(low), float(high))\n else:\n raise ValueError('Illegal param type 
[{}]'.format(para_type))\n return space\n\n @staticmethod\n def params2str(params):\n r\"\"\" convert dict to str\n\n Args:\n params (dict): parameters dict\n Returns:\n str: parameters string\n \"\"\"\n params_str = ''\n for param_name in params:\n params_str += param_name + ':' + str(params[param_name]) + ', '\n return params_str[:-2]\n\n @staticmethod\n def _print_result(result_dict: dict):\n print('current best valid score: %.4f' % result_dict['best_valid_score'])\n print('current best valid result:')\n print(result_dict['best_valid_result'])\n print('current test result:')\n print(result_dict['test_result'])\n print()\n\n def export_result(self, output_file=None):\n r\"\"\" Write the searched parameters and corresponding results to the file\n\n Args:\n output_file (str): the output file\n\n \"\"\"\n with open(output_file, 'w') as fp:\n for params in self.params2result:\n fp.write(params + '\\n')\n fp.write('Valid result:\\n' + dict2str(self.params2result[params]['best_valid_result']) + '\\n')\n fp.write('Test result:\\n' + dict2str(self.params2result[params]['test_result']) + '\\n\\n')\n\n def trial(self, params):\n r\"\"\"Given a set of parameters, return results and optimization status\n\n Args:\n params (dict): the parameter dictionary\n \"\"\"\n import hyperopt\n config_dict = params.copy()\n \n config_dict['neg_sampling'] = self.neg_sampling #HS hack\n #saved = False #HS hack\n \n params_str = self.params2str(params)\n print('running parameters:', config_dict)\n result_dict = self.objective_function(config_dict, self.fixed_config_file_list)\n #result_dict = self.objective_function(config_dict, self.fixed_config_file_list, saved=saved) #HS hack\n self.params2result[params_str] = result_dict\n score, bigger = result_dict['best_valid_score'], result_dict['valid_score_bigger']\n\n if not self.best_score:\n self.best_score = score\n self.best_params = params\n self._print_result(result_dict)\n else:\n if bigger:\n if score > self.best_score:\n self.best_score = score\n self.best_params = params\n self._print_result(result_dict)\n else:\n if score < self.best_score:\n self.best_score = score\n self.best_params = params\n self._print_result(result_dict)\n\n if bigger:\n score = -score\n return {'loss': score, 'status': hyperopt.STATUS_OK}\n\n def run(self):\n r\"\"\" begin to search the best parameters\n\n \"\"\"\n from hyperopt import fmin\n #fmin(self.trial, self.space, algo=self.algo, max_evals=self.max_evals, rstate=np.random.default_rng(self.seed))\n fmin(self.trial, self.space, algo=self.algo, max_evals=self.max_evals)"
},
{
"identifier": "objective_function",
"path": "recbole/quick_start/quick_start.py",
"snippet": "def objective_function(config_dict=None, config_file_list=None, saved=True):\n r\"\"\" The default objective_function used in HyperTuning\n\n Args:\n config_dict (dict, optional): Parameters dictionary used to modify experiment parameters. Defaults to ``None``.\n config_file_list (list, optional): Config files used to modify experiment parameters. Defaults to ``None``.\n saved (bool, optional): Whether to save the model. Defaults to ``True``.\n \"\"\"\n\n config = Config(config_dict=config_dict, config_file_list=config_file_list)\n init_seed(config['seed'], config['reproducibility'])\n logging.basicConfig(level=logging.ERROR)\n dataset = create_dataset(config)\n train_data, valid_data, test_data = data_preparation(config, dataset)\n model = get_model(config['model'])(config, train_data.dataset).to(config['device'])\n trainer = get_trainer(config['MODEL_TYPE'], config['model'])(config, model)\n best_valid_score, best_valid_result = trainer.fit(train_data, valid_data, verbose=False, saved=saved)\n test_result = trainer.evaluate(test_data, load_best_model=saved)\n\n return {\n 'best_valid_score': best_valid_score,\n 'valid_score_bigger': config['valid_metric_bigger'],\n 'best_valid_result': best_valid_result,\n 'test_result': test_result\n }"
}
] | import argparse
from recbole.trainer import HyperTuning
from recbole.quick_start import objective_function | 2,441 | # -*- coding: utf-8 -*-
# @Time : 2020/7/24 15:57
# @Author : Shanlei Mu
# @Email : [email protected]
# @File : run_hyper.py
# UPDATE:
# @Time : 2020/8/20 21:17, 2020/8/29
# @Author : Zihan Lin, Yupeng Hou
# @Email : [email protected], [email protected]
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--config_files', type=str, default=None, help='fixed config files')
parser.add_argument('--params_file', type=str, default=None, help='parameters file')
parser.add_argument('--hyper_results', type=str, default=None, help='the result file name of hyperparameter search')
args, _ = parser.parse_known_args()
print(args.hyper_results)
#parameter_dict = {'neg_sampling': None}
# plz set algo='exhaustive' to use exhaustive search, in this case, max_evals is auto set
config_file_list = args.config_files.strip().split(' ') if args.config_files else None
| # -*- coding: utf-8 -*-
# @Time : 2020/7/24 15:57
# @Author : Shanlei Mu
# @Email : [email protected]
# @File : run_hyper.py
# UPDATE:
# @Time : 2020/8/20 21:17, 2020/8/29
# @Author : Zihan Lin, Yupeng Hou
# @Email : [email protected], [email protected]
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--config_files', type=str, default=None, help='fixed config files')
parser.add_argument('--params_file', type=str, default=None, help='parameters file')
parser.add_argument('--hyper_results', type=str, default=None, help='the result file name of hyperparameter search')
args, _ = parser.parse_known_args()
print(args.hyper_results)
#parameter_dict = {'neg_sampling': None}
# plz set algo='exhaustive' to use exhaustive search, in this case, max_evals is auto set
config_file_list = args.config_files.strip().split(' ') if args.config_files else None | hp = HyperTuning(objective_function, algo='exhaustive', | 1 | 2023-10-21 16:31:44+00:00 | 4k |
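With algo='exhaustive', the HyperTuning constructor above swaps in exhaustive_search and resizes max_evals to the full grid, so run() visits every parameter combination in the params file. A minimal usage sketch; the file paths are placeholders, not taken from the record:

from recbole.trainer import HyperTuning
from recbole.quick_start import objective_function

hp = HyperTuning(objective_function, algo='exhaustive',
                 params_file='model.hyper',               # placeholder path
                 fixed_config_file_list=['config.yaml'])  # placeholder path
hp.run()                                      # fmin over the exhaustive grid
hp.export_result(output_file='hyper.result')  # per-trial valid/test results
print('best params:', hp.best_params)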
timapage/pyqt6-yolov8 | src/models/tracking/deep_sort/deep_sort.py | [
{
"identifier": "Extractor",
"path": "src/models/tracking/deep_sort/deep/feature_extractor.py",
"snippet": "class Extractor(object):\n def __init__(self, model_path, use_cuda=True):\n self.net = Net(reid=True)\n self.device = \"cuda\" if torch.cuda.is_available() and use_cuda else \"cpu\"\n state_dict = torch.load(model_path, map_location=lambda storage, loc: storage)['net_dict']\n self.net.load_state_dict(state_dict)\n logger = logging.getLogger(\"root.tracker\")\n logger.info(\"Loading weights from {}... Done!\".format(model_path))\n self.net.to(self.device)\n self.size = (64, 128)\n self.norm = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),\n ])\n \n\n\n def _preprocess(self, im_crops):\n \"\"\"\n TODO:\n 1. to float with scale from 0 to 1\n 2. resize to (64, 128) as Market1501 dataset did\n 3. concatenate to a numpy array\n 3. to torch Tensor\n 4. normalize\n \"\"\"\n def _resize(im, size):\n return cv2.resize(im.astype(np.float32)/255., size)\n\n im_batch = torch.cat([self.norm(_resize(im, self.size)).unsqueeze(0) for im in im_crops], dim=0).float()\n return im_batch\n\n\n def __call__(self, im_crops):\n im_batch = self._preprocess(im_crops)\n with torch.no_grad():\n im_batch = im_batch.to(self.device)\n features = self.net(im_batch)\n return features.cpu().numpy()"
},
{
"identifier": "NearestNeighborDistanceMetric",
"path": "src/models/tracking/deep_sort/sort/nn_matching.py",
"snippet": "class NearestNeighborDistanceMetric(object):\n \"\"\"\n A nearest neighbor distance metric that, for each target, returns\n the closest distance to any sample that has been observed so far.\n\n Parameters\n ----------\n metric : str\n Either \"euclidean\" or \"cosine\".\n matching_threshold: float\n The matching threshold. Samples with larger distance are considered an\n invalid match.\n budget : Optional[int]\n If not None, fix samples per class to at most this number. Removes\n the oldest samples when the budget is reached.\n\n Attributes\n ----------\n samples : Dict[int -> List[ndarray]]\n A dictionary that maps from target identities to the list of samples\n that have been observed so far.\n\n \"\"\"\n\n def __init__(self, metric, matching_threshold, budget=None):\n\n\n if metric == \"euclidean\":\n self._metric = _nn_euclidean_distance\n elif metric == \"cosine\":\n self._metric = _nn_cosine_distance\n else:\n raise ValueError(\n \"Invalid metric; must be either 'euclidean' or 'cosine'\")\n self.matching_threshold = matching_threshold\n self.budget = budget\n self.samples = {}\n\n def partial_fit(self, features, targets, active_targets):\n \"\"\"Update the distance metric with new data.\n\n Parameters\n ----------\n features : ndarray\n An NxM matrix of N features of dimensionality M.\n targets : ndarray\n An integer array of associated target identities.\n active_targets : List[int]\n A list of targets that are currently present in the scene.\n\n \"\"\"\n for feature, target in zip(features, targets):\n self.samples.setdefault(target, []).append(feature)\n if self.budget is not None:\n self.samples[target] = self.samples[target][-self.budget:]\n self.samples = {k: self.samples[k] for k in active_targets}\n\n def distance(self, features, targets):\n \"\"\"Compute distance between features and targets.\n\n Parameters\n ----------\n features : ndarray\n An NxM matrix of N features of dimensionality M.\n targets : List[int]\n A list of targets to match the given `features` against.\n\n Returns\n -------\n ndarray\n Returns a cost matrix of shape len(targets), len(features), where\n element (i, j) contains the closest squared distance between\n `targets[i]` and `features[j]`.\n\n \"\"\"\n cost_matrix = np.zeros((len(targets), len(features)))\n for i, target in enumerate(targets):\n cost_matrix[i, :] = self._metric(self.samples[target], features)\n return cost_matrix"
},
{
"identifier": "Detection",
"path": "src/models/tracking/deep_sort/sort/detection.py",
"snippet": "class Detection(object):\n\n def __init__(self, tlwh, cls_, confidence, feature, kpt_, seg_):\n self.tlwh = np.asarray(tlwh, dtype=np.float64)\n self.cls_ = cls_\n self.confidence = float(confidence)\n self.feature = np.asarray(feature, dtype=np.float32)\n self.keypoint = kpt_\n self.segmentation = seg_\n\n def to_tlbr(self):\n \"\"\"Convert bounding box to format `(min x, min y, max x, max y)`, i.e.,\n `(top left, bottom right)`.\n \"\"\"\n ret = self.tlwh.copy()\n ret[2:] += ret[:2]\n return ret\n\n def to_xyah(self):\n \"\"\"Convert bounding box to format `(center x, center y, aspect ratio,\n height)`, where the aspect ratio is `width / height`.\n \"\"\"\n ret = self.tlwh.copy()\n ret[:2] += ret[2:] / 2\n ret[2] /= ret[3]\n return ret"
},
{
"identifier": "Tracker",
"path": "src/models/tracking/deep_sort/sort/tracker.py",
"snippet": "class Tracker:\n\n def __init__(self, metric, max_iou_distance=0.7, max_age=70, n_init=3):\n self.metric = metric\n self.max_iou_distance = max_iou_distance\n self.max_age = max_age\n self.n_init = n_init\n\n self.kf = kalman_filter.KalmanFilter()\n self.tracks = []\n self._next_id = 1\n\n def predict(self):\n \"\"\"Propagate track state distributions one time step forward.\n\n This function should be called once every time step, before `update`.\n \"\"\"\n for track in self.tracks:\n track.predict(self.kf)\n\n def update(self, detections):\n \"\"\"Perform measurement update and track management.\n\n Parameters\n ----------\n detections : List[deep_sort.detection.Detection]\n A list of detections at the current time step.\n\n \"\"\"\n # Run matching cascade.\n matches, unmatched_tracks, unmatched_detections = \\\n self._match(detections)\n\n # Update track set.\n for track_idx, detection_idx in matches:\n self.tracks[track_idx].update(\n self.kf, detections[detection_idx])\n for track_idx in unmatched_tracks:\n self.tracks[track_idx].mark_missed()\n for detection_idx in unmatched_detections:\n self._initiate_track(detections[detection_idx])\n self.tracks = [t for t in self.tracks if not t.is_deleted()]\n\n # Update distance metric.\n active_targets = [t.track_id for t in self.tracks if t.is_confirmed()]\n features, targets = [], []\n for track in self.tracks:\n if not track.is_confirmed():\n continue\n features += track.features\n targets += [track.track_id for _ in track.features]\n track.features = []\n self.metric.partial_fit(\n np.asarray(features), np.asarray(targets), active_targets)\n\n def _match(self, detections):\n\n def gated_metric(tracks, dets, track_indices, detection_indices):\n features = np.array([dets[i].feature for i in detection_indices])\n targets = np.array([tracks[i].track_id for i in track_indices])\n cost_matrix = self.metric.distance(features, targets)\n cost_matrix = linear_assignment.gate_cost_matrix(\n self.kf, cost_matrix, tracks, dets, track_indices,\n detection_indices)\n\n return cost_matrix\n\n # Split track set into confirmed and unconfirmed tracks.\n confirmed_tracks = [\n i for i, t in enumerate(self.tracks) if t.is_confirmed()]\n unconfirmed_tracks = [\n i for i, t in enumerate(self.tracks) if not t.is_confirmed()]\n\n # Associate confirmed tracks using appearance features.\n matches_a, unmatched_tracks_a, unmatched_detections = \\\n linear_assignment.matching_cascade(\n gated_metric, self.metric.matching_threshold, self.max_age,\n self.tracks, detections, confirmed_tracks)\n\n # Associate remaining tracks together with unconfirmed tracks using IOU.\n iou_track_candidates = unconfirmed_tracks + [\n k for k in unmatched_tracks_a if\n self.tracks[k].time_since_update == 1]\n unmatched_tracks_a = [\n k for k in unmatched_tracks_a if\n self.tracks[k].time_since_update != 1]\n matches_b, unmatched_tracks_b, unmatched_detections = \\\n linear_assignment.min_cost_matching(\n iou_matching.iou_cost, self.max_iou_distance, self.tracks,\n detections, iou_track_candidates, unmatched_detections)\n matches = matches_a + matches_b\n unmatched_tracks = list(set(unmatched_tracks_a + unmatched_tracks_b))\n return matches, unmatched_tracks, unmatched_detections\n\n def _initiate_track(self, detection):\n mean, covariance = self.kf.initiate(detection.to_xyah())\n self.tracks.append(Track(\n mean, detection.cls_, detection.confidence, detection.keypoint, detection.segmentation, covariance, self._next_id, self.n_init, self.max_age,\n detection.feature))\n 
self._next_id += 1"
}
] | import numpy as np
import torch
from .deep.feature_extractor import Extractor
from .sort.nn_matching import NearestNeighborDistanceMetric
from .sort.detection import Detection
from .sort.tracker import Tracker | 2,484 |
__all__ = ['DeepSort']
class DeepSort(object):
def __init__(self, model_path, max_dist=0.2, max_iou_distance=0.7, max_age=70, n_init=3, nn_budget=100, use_cuda=True):
self.extractor = Extractor(model_path, use_cuda=use_cuda)
max_cosine_distance = max_dist
nn_budget = 100
|
__all__ = ['DeepSort']
class DeepSort(object):
def __init__(self, model_path, max_dist=0.2, max_iou_distance=0.7, max_age=70, n_init=3, nn_budget=100, use_cuda=True):
self.extractor = Extractor(model_path, use_cuda=use_cuda)
max_cosine_distance = max_dist
nn_budget = 100 | metric = NearestNeighborDistanceMetric( | 1 | 2023-10-18 09:21:01+00:00 | 4k |
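Note that the constructor above re-binds nn_budget to a hard-coded 100, overriding the keyword argument of the same name. Per the next_line field it then builds the cosine nearest-neighbor metric; a sketch of how the wiring presumably continues (the Tracker line follows the standard deep_sort pattern and is an assumption here):

metric = NearestNeighborDistanceMetric("cosine", max_cosine_distance, nn_budget)
self.tracker = Tracker(metric, max_iou_distance=max_iou_distance,
                       max_age=max_age, n_init=n_init)  # Kalman-filter based track manager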
OthersideAI/self-operating-computer | operate/dialog.py | [
{
"identifier": "ModelNotRecognizedException",
"path": "operate/exceptions.py",
"snippet": "class ModelNotRecognizedException(Exception):\n \"\"\"Exception raised for unrecognized models.\n\n Attributes:\n model -- the unrecognized model\n message -- explanation of the error\n \"\"\"\n\n def __init__(self, model, message=\"Model not recognized\"):\n self.model = model\n self.message = message\n super().__init__(self.message)\n\n def __str__(self):\n return f\"{self.message} : {self.model} \""
},
{
"identifier": "USER_QUESTION",
"path": "operate/prompts.py",
"snippet": "USER_QUESTION = \"Hello, I can help you with anything. What would you like done?\""
},
{
"identifier": "Config",
"path": "operate/settings.py",
"snippet": "class Config:\n \"\"\"\n Configuration class for managing settings.\n\n Attributes:\n debug (bool): Flag indicating whether debug mode is enabled.\n openai_api_key (str): API key for OpenAI.\n google_api_key (str): API key for Google.\n monitor_size (dict): Dictionary containing the width and height of the monitor.\n \"\"\"\n\n def __init__(self):\n load_dotenv()\n self.debug = False\n self.openai_api_key = os.getenv(\"OPENAI_API_KEY\")\n self.google_api_key = os.getenv(\"GOOGLE_API_KEY\")\n self.monitor_size = {\n \"width\": 1920,\n \"height\": 1080,\n }\n\n def initialize_openai_client(self):\n \"\"\"\n Initializes and returns an OpenAI client with the configured API key.\n\n Returns:\n OpenAI or None: An instance of the OpenAI client if the API key is provided, else None.\n \"\"\"\n if self.openai_api_key:\n client = OpenAI()\n client.api_key = self.openai_api_key\n client.base_url = os.getenv(\"OPENAI_API_BASE_URL\", client.base_url)\n return client\n return None"
},
{
"identifier": "ANSI_GREEN",
"path": "operate/utils/style.py",
"snippet": "def supports_ansi():\nANSI_GREEN = \"\\033[32m\" if supports_ansi() else \"\" # Standard green text\nANSI_BRIGHT_GREEN = \"\\033[92m\" if supports_ansi() else \"\" # Bright/bold green text\nANSI_RESET = \"\\033[0m\" if supports_ansi() else \"\" # Reset to default text color\nANSI_BLUE = \"\\033[94m\" if supports_ansi() else \"\" # Bright blue\nANSI_YELLOW = \"\\033[33m\" if supports_ansi() else \"\" # Standard yellow text\nANSI_RED = \"\\033[31m\" if supports_ansi() else \"\"\nANSI_BRIGHT_MAGENTA = \"\\033[95m\" if supports_ansi() else \"\" # Bright magenta text"
},
{
"identifier": "keyboard_type",
"path": "operate/utils/os.py",
"snippet": "def keyboard_type(text):\n \"\"\"\n Types the given text using the keyboard.\n\n Args:\n text (str): The text to be typed.\n\n Returns:\n str: A message indicating the typed text.\n \"\"\"\n text = text.replace(\"\\\\n\", \"\\n\")\n for char in text:\n pyautogui.write(char)\n pyautogui.press(\"enter\")\n return \"Type: \" + text"
},
{
"identifier": "search",
"path": "operate/utils/os.py",
"snippet": "def search(text):\n \"\"\"\n Searches for a program or file by typing the given text in the search bar and pressing Enter.\n\n Args:\n text (str): The text to be searched.\n\n Returns:\n str: A message indicating that the program or file has been opened.\n \"\"\"\n if platform.system() == \"Windows\":\n pyautogui.press(\"win\")\n elif platform.system() == \"Linux\":\n pyautogui.press(\"win\")\n else:\n # Press and release Command and Space separately\n pyautogui.keyDown(\"command\")\n pyautogui.press(\"space\")\n pyautogui.keyUp(\"command\")\n\n time.sleep(1)\n\n # Now type the text\n for char in text:\n pyautogui.write(char)\n\n pyautogui.press(\"enter\")\n return \"Open program: \" + text"
},
{
"identifier": "click",
"path": "operate/utils/os.py",
"snippet": "def click(click_detail):\n \"\"\"\n Perform a mouse click at the specified coordinates.\n\n Args:\n click_detail (dict): A dictionary containing the coordinates of the click.\n\n Returns:\n str: The description of the click if successful, otherwise \"We failed to click\".\n \"\"\"\n try:\n x = convert_percent_to_decimal(click_detail[\"x\"])\n y = convert_percent_to_decimal(click_detail[\"y\"])\n\n if click_detail and isinstance(x, float) and isinstance(y, float):\n click_at_percentage(x, y)\n return click_detail[\"description\"]\n else:\n return \"We failed to click\"\n\n except Exception as e:\n print(f\"Error parsing JSON: {e}\")\n return \"We failed to click\""
},
{
"identifier": "get_next_action",
"path": "operate/actions.py",
"snippet": "async def get_next_action(model, messages, objective):\n if model == \"gpt-4\":\n return call_gpt_4_v(messages, objective)\n if model == \"gpt-4-with-som\":\n return await call_gpt_4_v_labeled(messages, objective)\n elif model == \"agent-1\":\n return \"coming soon\"\n elif model == \"gemini-pro-vision\":\n return call_gemini_pro_vision(messages, objective)\n\n raise ModelNotRecognizedException(model)"
},
{
"identifier": "summarize",
"path": "operate/actions.py",
"snippet": "def summarize(model, messages, objective):\n try:\n screenshots_dir = \"screenshots\"\n if not os.path.exists(screenshots_dir):\n os.makedirs(screenshots_dir)\n\n screenshot_filename = os.path.join(screenshots_dir, \"summary_screenshot.png\")\n # Call the function to capture the screen with the cursor\n capture_screen_with_cursor(screenshot_filename)\n\n summary_prompt = format_summary_prompt(objective)\n\n if model == \"gpt-4-vision-preview\":\n with open(screenshot_filename, \"rb\") as img_file:\n img_base64 = base64.b64encode(img_file.read()).decode(\"utf-8\")\n\n summary_message = {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"text\", \"text\": summary_prompt},\n {\n \"type\": \"image_url\",\n \"image_url\": {\"url\": f\"data:image/jpeg;base64,{img_base64}\"},\n },\n ],\n }\n \n messages.append(summary_message)\n\n response = client.chat.completions.create(\n model=\"gpt-4-vision-preview\",\n messages=messages,\n max_tokens=500,\n )\n\n content = response.choices[0].message.content\n elif model == \"gemini-pro-vision\":\n model = genai.GenerativeModel(\"gemini-pro-vision\")\n summary_message = model.generate_content(\n [summary_prompt, Image.open(screenshot_filename)]\n )\n content = summary_message.text\n return content\n\n except Exception as e:\n print(f\"Error in summarize: {e}\")\n return \"Failed to summarize the workflow\""
},
{
"identifier": "parse_response",
"path": "operate/utils/misc.py",
"snippet": "def parse_response(response):\n \"\"\"\n Parses the given response and returns a dictionary with the type and data.\n\n Args:\n response (str): The response to parse.\n\n Returns:\n dict: A dictionary with the type and data extracted from the response.\n The dictionary has the following structure:\n {\n \"type\": <response_type>,\n \"data\": <response_data>\n }\n If the response is \"DONE\", the type is \"DONE\" and the data is None.\n If the response starts with \"CLICK\", the type is \"CLICK\" and the data is a JSON object.\n If the response starts with \"TYPE\", the type is \"TYPE\" and the data is the text to type.\n If the response starts with \"SEARCH\", the type is \"SEARCH\" and the data is the search query.\n If the response doesn't match any of the above patterns, the type is \"UNKNOWN\" and the data is the original response.\n \"\"\"\n if response == \"DONE\":\n return {\"type\": \"DONE\", \"data\": None}\n elif response.startswith(\"CLICK\"):\n # Adjust the regex to match the correct format\n click_data = re.search(r\"CLICK \\{ (.+) \\}\", response).group(1)\n click_data_json = json.loads(f\"{{{click_data}}}\")\n return {\"type\": \"CLICK\", \"data\": click_data_json}\n\n elif response.startswith(\"TYPE\"):\n # Extract the text to type\n try:\n type_data = re.search(r\"TYPE (.+)\", response, re.DOTALL).group(1)\n except:\n type_data = re.search(r'TYPE \"(.+)\"', response, re.DOTALL).group(1)\n return {\"type\": \"TYPE\", \"data\": type_data}\n\n elif response.startswith(\"SEARCH\"):\n # Extract the search query\n try:\n search_data = re.search(r'SEARCH \"(.+)\"', response).group(1)\n except:\n search_data = re.search(r\"SEARCH (.+)\", response).group(1)\n return {\"type\": \"SEARCH\", \"data\": search_data}\n\n return {\"type\": \"UNKNOWN\", \"data\": response}"
}
] | import sys
import os
import platform
import asyncio
from prompt_toolkit.shortcuts import message_dialog
from prompt_toolkit import prompt
from operate.exceptions import ModelNotRecognizedException
from operate.prompts import USER_QUESTION
from operate.settings import Config
from operate.utils.style import (
ANSI_GREEN,
ANSI_RESET,
ANSI_BLUE,
ANSI_YELLOW,
ANSI_RED,
ANSI_BRIGHT_MAGENTA,
style,
)
from operate.utils.os import (
keyboard_type,
search,
click,
)
from operate.actions import get_next_action, summarize
from operate.utils.misc import parse_response
from whisper_mic import WhisperMic | 2,962 |
# Load configuration
config = Config()
def main(model, terminal_prompt, voice_mode=False):
"""
Main function for the Self-Operating Computer.
Parameters:
- model: The model used for generating responses.
- terminal_prompt: A string representing the prompt provided in the terminal.
- voice_mode: A boolean indicating whether to enable voice mode.
Returns:
None
"""
mic = None
# Initialize `WhisperMic`, if `voice_mode` is True
validation(model, voice_mode)
if voice_mode:
try:
# Initialize WhisperMic if import is successful
mic = WhisperMic()
except ImportError:
print(
"Voice mode requires the 'whisper_mic' module. Please install it using 'pip install -r requirements-audio.txt'"
)
sys.exit(1)
# Skip message dialog if prompt was given directly
if not terminal_prompt:
message_dialog(
title="Self-Operating Computer",
text="Ask a computer to do anything.",
style=style,
).run()
else:
print("Running direct prompt...")
print("SYSTEM", platform.system())
# Clear the console
if platform.system() == "Windows":
os.system("cls")
else:
print("\033c", end="")
if terminal_prompt: # Skip objective prompt if it was given as an argument
objective = terminal_prompt
elif voice_mode:
print(
f"{ANSI_GREEN}[Self-Operating Computer]{ANSI_RESET} Listening for your command... (speak now)"
)
try:
objective = mic.listen()
except Exception as e:
print(f"{ANSI_RED}Error in capturing voice input: {e}{ANSI_RESET}")
return # Exit if voice input fails
else:
print(f"{ANSI_GREEN}[Self-Operating Computer]\n{ANSI_RESET}{USER_QUESTION}")
print(f"{ANSI_YELLOW}[User]{ANSI_RESET}")
objective = prompt(style=style)
assistant_message = {"role": "assistant", "content": USER_QUESTION}
user_message = {
"role": "user",
"content": f"Objective: {objective}",
}
messages = [assistant_message, user_message]
loop_count = 0
while True:
if config.debug:
print("[loop] messages before next action:\n\n\n", messages[1:])
try:
response = asyncio.run(get_next_action(model, messages, objective))
action = parse_response(response)
action_type = action.get("type")
action_detail = action.get("data")
except ModelNotRecognizedException as e:
print(
f"{ANSI_GREEN}[Self-Operating Computer]{ANSI_RED}[Error] -> {e} {ANSI_RESET}"
)
break
except Exception as e:
print(
f"{ANSI_GREEN}[Self-Operating Computer]{ANSI_RED}[Error] -> {e} {ANSI_RESET}"
)
break
if action_type == "DONE":
print(
|
# Load configuration
config = Config()
def main(model, terminal_prompt, voice_mode=False):
"""
Main function for the Self-Operating Computer.
Parameters:
- model: The model used for generating responses.
- terminal_prompt: A string representing the prompt provided in the terminal.
- voice_mode: A boolean indicating whether to enable voice mode.
Returns:
None
"""
mic = None
# Initialize `WhisperMic`, if `voice_mode` is True
validation(model, voice_mode)
if voice_mode:
try:
# Initialize WhisperMic if import is successful
mic = WhisperMic()
except ImportError:
print(
"Voice mode requires the 'whisper_mic' module. Please install it using 'pip install -r requirements-audio.txt'"
)
sys.exit(1)
# Skip message dialog if prompt was given directly
if not terminal_prompt:
message_dialog(
title="Self-Operating Computer",
text="Ask a computer to do anything.",
style=style,
).run()
else:
print("Running direct prompt...")
print("SYSTEM", platform.system())
# Clear the console
if platform.system() == "Windows":
os.system("cls")
else:
print("\033c", end="")
if terminal_prompt: # Skip objective prompt if it was given as an argument
objective = terminal_prompt
elif voice_mode:
print(
f"{ANSI_GREEN}[Self-Operating Computer]{ANSI_RESET} Listening for your command... (speak now)"
)
try:
objective = mic.listen()
except Exception as e:
print(f"{ANSI_RED}Error in capturing voice input: {e}{ANSI_RESET}")
return # Exit if voice input fails
else:
print(f"{ANSI_GREEN}[Self-Operating Computer]\n{ANSI_RESET}{USER_QUESTION}")
print(f"{ANSI_YELLOW}[User]{ANSI_RESET}")
objective = prompt(style=style)
assistant_message = {"role": "assistant", "content": USER_QUESTION}
user_message = {
"role": "user",
"content": f"Objective: {objective}",
}
messages = [assistant_message, user_message]
loop_count = 0
while True:
if config.debug:
print("[loop] messages before next action:\n\n\n", messages[1:])
try:
response = asyncio.run(get_next_action(model, messages, objective))
action = parse_response(response)
action_type = action.get("type")
action_detail = action.get("data")
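# --- Hedged illustration (comments only; examples follow the parse_response
# contract quoted in the context snippet above) ---
#   "DONE"                      -> {"type": "DONE",    "data": None}
#   'CLICK { "x": "0.5", ... }' -> {"type": "CLICK",   "data": <parsed JSON dict>}
#   'TYPE hello world'          -> {"type": "TYPE",    "data": "hello world"}
#   'SEARCH "terminal"'         -> {"type": "SEARCH",  "data": "terminal"}
#   anything else               -> {"type": "UNKNOWN", "data": <raw response>}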
except ModelNotRecognizedException as e:
print(
f"{ANSI_GREEN}[Self-Operating Computer]{ANSI_RED}[Error] -> {e} {ANSI_RESET}"
)
break
except Exception as e:
print(
f"{ANSI_GREEN}[Self-Operating Computer]{ANSI_RED}[Error] -> {e} {ANSI_RESET}"
)
break
if action_type == "DONE":
print( | f"{ANSI_GREEN}[Self-Operating Computer]{ANSI_BLUE} Objective complete {ANSI_RESET}" | 3 | 2023-11-04 03:13:45+00:00 | 4k |
netease-youdao/EmotiVoice | demo_page_databaker.py | [
{
"identifier": "g2p_cn_en",
"path": "frontend.py",
"snippet": "def g2p_cn_en(text, g2p, lexicon):\ndef contains_chinese(text):"
},
{
"identifier": "JETSGenerator",
"path": "models/prompt_tts_modified/jets.py",
"snippet": "class JETSGenerator(nn.Module):\n def __init__(self, config) -> None:\n\n super().__init__()\n \n self.upsample_factor=int(np.prod(config.model.upsample_rates))\n\n self.segment_size = config.segment_size\n\n self.am = PromptTTS(config)\n\n self.generator = HiFiGANGenerator(config.model)\n\n # try:\n # model_CKPT = torch.load(config.pretrained_am, map_location=\"cpu\")\n # self.am.load_state_dict(model_CKPT['model'])\n # state_dict_g = torch.load(config.pretrained_vocoder,map_location=\"cpu\")\n # self.generator.load_state_dict(state_dict_g['generator'])\n # print(\"pretrained generator is loaded\")\n # except:\n # print(\"pretrained generator is not loaded for training\")\n self.config=config\n\n\n def forward(self, inputs_ling, input_lengths, inputs_speaker, inputs_style_embedding , inputs_content_embedding, mel_targets=None, output_lengths=None, pitch_targets=None, energy_targets=None, alpha=1.0, cut_flag=True):\n \n outputs = self.am(inputs_ling, input_lengths, inputs_speaker, inputs_style_embedding , inputs_content_embedding, mel_targets , output_lengths , pitch_targets , energy_targets , alpha)\n\n\n if mel_targets is not None and cut_flag:\n z_segments, z_start_idxs, segment_size = get_random_segments(\n outputs[\"dec_outputs\"].transpose(1,2),\n output_lengths,\n self.segment_size,\n )\n else:\n z_segments = outputs[\"dec_outputs\"].transpose(1,2)\n z_start_idxs=None\n segment_size=self.segment_size\n\n wav = self.generator(z_segments)\n\n outputs[\"wav_predictions\"] = wav\n outputs[\"z_start_idxs\"]= z_start_idxs\n outputs[\"segment_size\"] = segment_size\n return outputs"
},
{
"identifier": "StyleEncoder",
"path": "models/prompt_tts_modified/simbert.py",
"snippet": "class StyleEncoder(nn.Module):\n def __init__(self, config) -> None:\n super().__init__()\n\n self.bert = AutoModel.from_pretrained(config.bert_path)\n\n self.pitch_clf = ClassificationHead(config.bert_hidden_size, config.pitch_n_labels)\n self.speed_clf = ClassificationHead(config.bert_hidden_size, config.speed_n_labels)\n self.energy_clf = ClassificationHead(config.bert_hidden_size, config.energy_n_labels)\n self.emotion_clf = ClassificationHead(config.bert_hidden_size, config.emotion_n_labels)\n self.style_embed_proj = nn.Linear(config.bert_hidden_size, config.style_dim)\n\n \n \n\n def forward(self, input_ids, token_type_ids, attention_mask):\n outputs = self.bert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n ) # return a dict having ['last_hidden_state', 'pooler_output']\n\n pooled_output = outputs[\"pooler_output\"]\n\n pitch_outputs = self.pitch_clf(pooled_output)\n speed_outputs = self.speed_clf(pooled_output)\n energy_outputs = self.energy_clf(pooled_output)\n emotion_outputs = self.emotion_clf(pooled_output)\n pred_style_embed = self.style_embed_proj(pooled_output)\n\n res = {\n \"pooled_output\":pooled_output,\n \"pitch_outputs\":pitch_outputs,\n \"speed_outputs\":speed_outputs,\n \"energy_outputs\":energy_outputs,\n \"emotion_outputs\":emotion_outputs,\n # \"pred_style_embed\":pred_style_embed,\n }\n\n return res"
}
] | import streamlit as st
import os, glob
import numpy as np
import torch
import re
import base64
from yacs import config as CONFIG
from frontend import g2p_cn_en, ROOT_DIR, read_lexicon, G2p
from exp.DataBaker.config.config import Config
from models.prompt_tts_modified.jets import JETSGenerator
from models.prompt_tts_modified.simbert import StyleEncoder
from transformers import AutoTokenizer
from pathlib import Path | 1,692 | # Copyright 2023, YOUDAO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
MAX_WAV_VALUE = 32768.0
config = Config()
def create_download_link():
pdf_path = Path("EmotiVoice_UserAgreement_易魔声用户协议.pdf")
base64_pdf = base64.b64encode(pdf_path.read_bytes()).decode("utf-8")  # base64 text of the PDF bytes
return f'<a href="data:application/octet-stream;base64,{base64_pdf}" download="EmotiVoice_UserAgreement_易魔声用户协议.pdf">EmotiVoice_UserAgreement_易魔声用户协议.pdf</a>'
html=create_download_link()
st.set_page_config(
page_title="demo page",
page_icon="📕",
)
st.write("# Text-To-Speech")
st.markdown(f"""
### How to use:
- Simply select a **Speaker ID**, type in the **text** you want to convert and the emotion **Prompt**, like a single word or even a sentence. Then click on the **Synthesize** button below to start voice synthesis.
- You can download the audio by clicking on the vertical three points next to the displayed audio widget.
- For more information on **'Speaker ID'**, please consult the [EmotiVoice voice wiki page](https://github.com/netease-youdao/EmotiVoice/tree/main/data/youdao/text)
- This interactive demo page is provided under the {html} file. The audio is synthesized by AI. 音频由AI合成,仅供参考。
""", unsafe_allow_html=True)
def scan_checkpoint(cp_dir, prefix, c=8):
pattern = os.path.join(cp_dir, prefix + '?'*c)
cp_list = glob.glob(pattern)
if len(cp_list) == 0:
return None
return sorted(cp_list)[-1]
@st.cache_resource
def get_models():
am_checkpoint_path = scan_checkpoint(f'{config.output_directory}/ckpt', 'g_')
style_encoder_checkpoint_path = config.style_encoder_ckpt
with open(config.model_config_path, 'r') as fin:
conf = CONFIG.load_cfg(fin)
conf.n_vocab = config.n_symbols
conf.n_speaker = config.speaker_n_labels
style_encoder = StyleEncoder(config)
model_CKPT = torch.load(style_encoder_checkpoint_path, map_location="cpu")
model_ckpt = {}
for key, value in model_CKPT['model'].items():
new_key = key[7:]  # drop the leading "module." prefix (7 chars) added by DataParallel when saving
model_ckpt[new_key] = value
style_encoder.load_state_dict(model_ckpt, strict=False)
| # Copyright 2023, YOUDAO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
MAX_WAV_VALUE = 32768.0
config = Config()
def create_download_link():
pdf_path = Path("EmotiVoice_UserAgreement_易魔声用户协议.pdf")
base64_pdf = base64.b64encode(pdf_path.read_bytes()).decode("utf-8")  # base64 text of the PDF bytes
return f'<a href="data:application/octet-stream;base64,{base64_pdf}" download="EmotiVoice_UserAgreement_易魔声用户协议.pdf">EmotiVoice_UserAgreement_易魔声用户协议.pdf</a>'
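# --- Hedged round-trip check (added for illustration; not part of the original app) ---
# The link above embeds the whole PDF as a base64 data URI, so the page HTML grows by
# roughly 4/3 of the file size. Decoding the URI should recover the exact bytes:
def _verify_embedded_pdf(html_link: str, original: Path) -> bool:
    encoded = html_link.split("base64,", 1)[1].split('"', 1)[0]
    return base64.b64decode(encoded) == original.read_bytes()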
html=create_download_link()
st.set_page_config(
page_title="demo page",
page_icon="📕",
)
st.write("# Text-To-Speech")
st.markdown(f"""
### How to use:
- Simply select a **Speaker ID**, type in the **text** you want to convert and the emotion **Prompt**, like a single word or even a sentence. Then click on the **Synthesize** button below to start voice synthesis.
- You can download the audio by clicking on the vertical three points next to the displayed audio widget.
- For more information on **'Speaker ID'**, please consult the [EmotiVoice voice wiki page](https://github.com/netease-youdao/EmotiVoice/tree/main/data/youdao/text)
- This interactive demo page is provided under the {html} file. The audio is synthesized by AI. 音频由AI合成,仅供参考。
""", unsafe_allow_html=True)
def scan_checkpoint(cp_dir, prefix, c=8):
pattern = os.path.join(cp_dir, prefix + '?'*c)
cp_list = glob.glob(pattern)
if len(cp_list) == 0:
return None
return sorted(cp_list)[-1]
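# --- Hedged usage sketch (added for illustration; checkpoint names are assumptions) ---
# scan_checkpoint globs for `prefix` plus exactly `c` wildcard characters and returns
# the lexicographically last match, i.e. the newest step for zero-padded names:
def _scan_checkpoint_demo():
    import pathlib
    import tempfile
    with tempfile.TemporaryDirectory() as d:
        for name in ("g_00000100", "g_00000200"):
            pathlib.Path(d, name).touch()
        assert scan_checkpoint(d, "g_") == os.path.join(d, "g_00000200")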
@st.cache_resource
def get_models():
am_checkpoint_path = scan_checkpoint(f'{config.output_directory}/ckpt', 'g_')
style_encoder_checkpoint_path = config.style_encoder_ckpt
with open(config.model_config_path, 'r') as fin:
conf = CONFIG.load_cfg(fin)
conf.n_vocab = config.n_symbols
conf.n_speaker = config.speaker_n_labels
style_encoder = StyleEncoder(config)
model_CKPT = torch.load(style_encoder_checkpoint_path, map_location="cpu")
model_ckpt = {}
for key, value in model_CKPT['model'].items():
new_key = key[7:]  # drop the leading "module." prefix (7 chars) added by DataParallel when saving
model_ckpt[new_key] = value
style_encoder.load_state_dict(model_ckpt, strict=False) | generator = JETSGenerator(conf).to(DEVICE) | 1 | 2023-11-08 10:15:27+00:00 | 4k |
daveshap/OpenAI_Agent_Swarm | agents/tool_maker/unit_manager.py | [
{
"identifier": "AssistantManager",
"path": "agents/tool_maker/assistant_manager.py",
"snippet": "class AssistantManager:\n\n def __init__(self, client):\n self.client = client\n self.assistant = None\n self.agent_builder = AgentBuilder(client=self.client)\n Path(__file__).absolute().parent\n tools_path = os.path.join(\n Path(__file__).absolute().parent, \"tool_creator_metadata.json\"\n )\n with open(tools_path, \"r\") as file:\n self.assistant_package = json.load(file)\n\n def get_assistant(self):\n \"\"\"Retrieve or create an assistant for testing this functionality\"\"\"\n name = self.assistant_package[\"creator\"][\"name\"]\n self.agent_builder.create_assistant(name)\n if not name in [\n assistant.name for assistant in self.client.beta.assistants.list()\n ]:\n raise ValueError(f'{name} needs to be created using create.py in /agents/agent_builder/')\n else:\n assistant_dict = {\n assistant.name: assistant.id\n for assistant in self.client.beta.assistants.list()\n }\n assistant = self.client.beta.assistants.retrieve(\n assistant_id=assistant_dict[name]\n )\n self.assistant = assistant\n return assistant\n\n def get_coding_assistant(self):\n \"\"\"Retrieve or create an assistant for testing this functionality\"\"\"\n name = self.assistant_package[\"writer\"][\"name\"]\n self.agent_builder.create_assistant(name)\n if not name in [\n assistant.name for assistant in self.client.beta.assistants.list()\n ]:\n raise ValueError(f'{name} needs to be created using create.py in /agents/agent_builder/')\n else:\n assistant_dict = {\n assistant.name: assistant.id\n for assistant in self.client.beta.assistants.list()\n }\n assistant = self.client.beta.assistants.retrieve(\n assistant_id=assistant_dict[name]\n )\n self.assistant = assistant\n return assistant"
},
{
"identifier": "ChatManager",
"path": "agents/tool_maker/chat_manager.py",
"snippet": "class ChatManager:\n def __init__(self, client: OpenAI):\n self.client = client\n functions_path = os.path.join(\n Path(__file__).absolute().parent, \"python_functions\"\n )\n self.functions_path = functions_path\n print(self.functions_path)\n\n def create_thread_from_user_input(self):\n return self.client.beta.threads.create(\n messages=[{\"role\": \"user\", \"content\": input(\"Begin\\n\")}]\n )\n\n def create_empty_thread(self):\n return self.client.beta.threads.create()\n\n def run_python_from_function_name(self, call):\n print(\"CALLING FUNCTION\")\n base = \".\".join(__name__.split(\".\")[:-1])\n try:\n function_name = call.function.name\n\n fn = getattr(\n importlib.reload(\n importlib.import_module(f\"{base}.python_functions.{function_name}\")\n ),\n function_name,\n )\n print(fn)\n result = fn(**json.loads(call.function.arguments))\n response = {\"tool_call_id\": call.id, \"output\": f\"result:{result}\"}\n except Exception as error:\n response = {\n \"tool_call_id\": call.id,\n \"output\": f\"{{{type(error)}:{error.args}}}\",\n }\n print(response)\n return response\n \n def get_existing_functions(self):\n print(\"Get Built Functions\")\n results = []\n if os.path.exists(self.functions_path):\n for filename in os.listdir(self.functions_path):\n if filename.endswith(\".json\"):\n file_path = os.path.join(self.functions_path,filename)\n with open(file_path, \"r\") as file:\n results.append(file)\n return results\n\n def handle_fucntion_request(\n self,\n call,\n interface_assistant: Assistant,\n interface_thread: Thread,\n functional_assistant: Assistant,\n functional_thread: Thread,\n ):\n try:\n # Create Function Tool\n schema = ToolManager.schema_from_response(call.function.arguments)\n tool = ToolManager.tool_from_function_schema(schema)\n filtered_interface_assistant_tools = list(filter(lambda tool: tool.type == \"function\" ,interface_assistant.tools))\n if tool[\"function\"][\"name\"] in [\n previous_tool.function.name\n for previous_tool in filtered_interface_assistant_tools\n ]:\n tools = [\n previous_tool\n for previous_tool in filtered_interface_assistant_tools\n if previous_tool.function.name != tool[\"function\"][\"name\"]\n ]\n interface_assistant = self.client.beta.assistants.update(\n assistant_id=interface_assistant.id,\n tools=[*tools, tool],\n )\n else:\n interface_assistant = self.client.beta.assistants.update(\n assistant_id=interface_assistant.id,\n tools=[*interface_assistant.tools, tool],\n )\n\n # Generate Python Function\n self.client.beta.threads.messages.create(\n thread_id=functional_thread.id, content=str(tool), role=\"user\"\n )\n functional_run = self.client.beta.threads.runs.create(\n thread_id=functional_thread.id,\n assistant_id=functional_assistant.id,\n )\n \n functional_response = self.simple_run(\n run=functional_run,\n thread=functional_thread,\n )\n function_lines = functional_response.split(\"```python\")[1].split(\"```\")[0]\n name = tool[\"function\"][\"name\"]\n if not os.path.exists(self.functions_path):\n os.mkdir(self.functions_path)\n with open(f\"{self.functions_path}/{name}.py\", \"w\") as file:\n file.writelines(function_lines)\n with open(f\"{self.functions_path}/{name}.json\", \"w\") as file:\n file.writelines(str(schema))\n\n response = {\"tool_call_id\": call.id, \"output\": \"{success}\"}\n\n except Exception as error:\n # If error, pass details back to assistant for next steps\n response = {\n \"tool_call_id\": call.id,\n \"output\": f\"{{{type(error)}:{error.args}}}\",\n }\n\n return interface_assistant, 
response\n\n def simple_run(self, run, thread):\n \"\"\"Supply context to assistant and await for next user response\"\"\"\n while run.status != \"completed\":\n run = self.client.beta.threads.runs.retrieve(\n run_id=run.id, thread_id=thread.id\n )\n if run.status == \"requires_action\":\n responses = []\n for call in run.required_action.submit_tool_outputs.tool_calls:\n print(f\"calling: {call.function.name}\")\n if call.function.name == \"get_existing_functions\":\n available_functions = self.get_existing_functions()\n response = {\"tool_call_id\": call.id, \"output\": f\"result:{available_functions}\"}\n responses.append(response)\n else:\n response = {\"tool_call_id\": call.id, \"output\": f\"result:None\"}\n responses.append(response)\n try:\n run = self.client.beta.threads.runs.submit_tool_outputs(\n run_id=run.id,\n thread_id=thread.id,\n tool_outputs=responses,\n )\n except:\n print(run.status)\n print(run)\n print(call)\n print(responses)\n\n response = (\n self.client.beta.threads.messages.list(thread_id=thread.id)\n .data[0]\n .content[0]\n .text.value\n )\n return response\n\n def begin_run(\n self,\n run,\n interface_assistant,\n interface_thread,\n functional_assistant,\n functional_thread,\n ):\n while run.status != \"completed\":\n run = self.client.beta.threads.runs.retrieve(\n run_id=run.id, thread_id=interface_thread.id\n )\n if run.status == \"requires_action\":\n tools = []\n responses = []\n for call in run.required_action.submit_tool_outputs.tool_calls:\n print(f\"calling: {call.function.name}\")\n if call.function.name == \"function_request\":\n interface_assistant, response = self.handle_fucntion_request(\n call=call,\n interface_assistant=interface_assistant,\n interface_thread=interface_thread,\n functional_assistant=functional_assistant,\n functional_thread=functional_thread,\n )\n else:\n response = self.run_python_from_function_name(call)\n responses.append(response)\n try:\n run = self.client.beta.threads.runs.submit_tool_outputs(\n run_id=run.id,\n thread_id=interface_thread.id,\n tool_outputs=responses,\n )\n except:\n print(run.status)\n print(run)\n print(call)\n print(responses)\n if run.status == \"failed\" or run.status == \"expired\":\n print(\"DIED\")\n run.status = \"completed\"\n response = (\n self.client.beta.threads.messages.list(thread_id=interface_thread.id)\n .data[0]\n .content[0]\n .text.value\n )\n return interface_assistant, response\n\n def run_unit(\n self,\n interface_assistant: Assistant,\n interface_thread: Thread,\n functional_assistant: Assistant,\n functional_thread: Thread,\n ):\n self.client.beta.threads.messages.create(\n thread_id=interface_thread.id, content=input(\"type: \"), role=\"user\"\n )\n print()\n interface_run = self.client.beta.threads.runs.create(\n thread_id=interface_thread.id,\n assistant_id=interface_assistant.id,\n instructions=\"please remember you are talking to an API, minimize output text tokens for cost saving. 
You are also able to communicate with the function ai using the description property of function_request.\",\n )\n interface_assistant, response = self.begin_run(\n run=interface_run,\n interface_assistant=interface_assistant,\n interface_thread=interface_thread,\n functional_assistant=functional_assistant,\n functional_thread=functional_thread,\n )\n interface_thread = self.client.beta.threads.retrieve(\n thread_id=interface_thread.id\n )\n functional_thread = self.client.beta.threads.retrieve(\n thread_id=functional_thread.id\n )\n print(response)\n print()\n return interface_assistant, interface_thread, functional_thread"
}
] | from agents.tool_maker.assistant_manager import AssistantManager
from agents.tool_maker.chat_manager import ChatManager
from shared.openai_config import get_openai_client | 2,723 |
class Unit:
"""
A class which creates and exposes chat functionality for a Unit Agent.
A Unit is a first prototype for a Minimum Viable Agent (MVA).
A `Unit` is two `Assistant`s in a symbiotic relationship.
One `Assistant` is the Interface with a thread sharing input with the contents passed via the `chat` method,
the other `Assistant` is a functional one which shares a thread with `submit_tool` requests during runs and is responsible for writing python functions.
:param AssistantManager assistant_manager: Creates and retrieves different `Assistant` types
:param ChatManager chat_manager: provides functionality for managing `Threads`
:param Assistant interface_assistant: talks with `chat` method
:param Assistant functional_assistant: writes python functions when `OpenAI.beta.threads.runs.submit_tools` is called in `chat`
:param Thread interface_thread: `Thread` between `interface_assistant` and `chat`
:param Thread functional_thread: `Thread` between `functional_assistant` and `OpenAI.beta.threads.runs.submit_tools`
:returns: A `Unit` instance wiring the two assistants and their threads together
"""
def __init__(self, client):
"""
Instantiates a Unit object
:param Client client: OpenAI instance
"""
self.assistant_manager = AssistantManager(client=client)
|
class Unit:
"""
A class which creates and exposes chat functionality for a Unit Agent.
A Unit is a first prototype for a Minimum Viable Agent (MVA).
A `Unit` is two `Assistant`s in a symbiotic relationship.
One `Assistant` is the Interface with a thread sharing input with the contents passed via the `chat` method,
the other `Assistant` is a functional one which shares a thread with `submit_tool` requests during runs and is responsible for writing python functions.
:param AssistantManager assistant_manager: Creates and retrieves different `Assistant` types
:param ChatManager chat_manager: provides functionality for managing `Threads`
:param Assistant interface_assistant: talks with `chat` method
:param Assistant functional_assistant: writes python functions when `OpenAI.beta.threads.runs.submit_tools` is called in `chat`
:param Thread interface_thread: `Thread` between `interface_assistant` and `chat`
:param Thread functional_thread: `Thread` between `functional_assistant` and `OpenAI.beta.threads.runs.submit_tools`
:returns: A `Unit` instance wiring the two assistants and their threads together
"""
def __init__(self, client):
"""
Instantiates a Unit object
:param Client client: OpenAI instance
"""
self.assistant_manager = AssistantManager(client=client) | self.chat_manager = ChatManager(client=client) | 1 | 2023-11-07 23:12:05+00:00 | 4k |
S-LoRA/S-LoRA | slora/models/llama/layer_infer/post_layer_infer.py | [
{
"identifier": "LlamaPreAndPostLayerWeight",
"path": "slora/models/llama/layer_weights/pre_and_post_layer_weight.py",
"snippet": "class LlamaPreAndPostLayerWeight(PreAndPostLayerWeight):\n def __init__(self, tp_rank, world_size, data_type, network_config, mode):\n super().__init__(tp_rank, world_size, data_type, network_config, mode)\n return\n\n\n def load_dummy_weights(self):\n vob_size = self.network_config_[\"vocab_size\"]\n split_vob_size = vob_size // self.world_size_\n n_embed = self.network_config_[\"hidden_size\"]\n self.wte_weight_ = (torch.rand((split_vob_size, n_embed), \n dtype=self.data_type_, device=\"cuda\").contiguous() * 2 - 1) * 1e-3\n self.lm_head_weight_ = (torch.rand((split_vob_size, n_embed), \n dtype=self.data_type_, device=\"cuda\").contiguous() * 2 - 1) * 1e-3\n self.final_norm_weight_ = (torch.rand((n_embed), \n dtype=self.data_type_, device=\"cuda\") * 2 - 1) * 1e-3\n \n\n def load_hf_weights(self, weights, dummy=False):\n if dummy:\n self.load_dummy_weights()\n return\n\n vob_size = self.network_config_[\"vocab_size\"]\n split_vob_size = vob_size // self.world_size_\n n_embed = self.network_config_[\"hidden_size\"]\n if \"model.embed_tokens.weight\" in weights:\n # print(weights['model.embed_tokens.weight'].shape)\n self.wte_weight_ = self._cuda(weights['model.embed_tokens.weight'][split_vob_size *\n self.tp_rank_: split_vob_size * (self.tp_rank_ + 1), :])\n if 'lm_head.weight' in weights:\n # print(weights['lm_head.weight'].shape)\n self.lm_head_weight_ = self._cuda(weights['lm_head.weight'][split_vob_size * self.tp_rank_: split_vob_size *\n (self.tp_rank_ + 1), :])\n if 'model.norm.weight' in weights:\n self.final_norm_weight_ = self._cuda(weights['model.norm.weight'])\n\n return\n \n def verify_load(self):\n errors = \"weights load not ok\"\n weights = [self.wte_weight_, \n self.lm_head_weight_, \n self.final_norm_weight_]\n for i in range(len(weights)):\n assert weights[i] is not None, \"index:\" + str(i) + \" \" + errors\n return "
},
{
"identifier": "LlamaInferStateInfo",
"path": "slora/models/llama/infer_struct.py",
"snippet": "class LlamaInferStateInfo(InferStateInfo):\n def __init__(self):\n super().__init__()\n self.position_cos = None\n self.position_sin = None\n self.other_kv_index = None\n \n def init_some_extra_state(self, \n model, \n batch_size, \n total_token_num,\n max_len_in_batch,\n input_ids : torch.Tensor,\n b_loc : torch.Tensor,\n b_start_loc : torch.Tensor,\n b_seq_len : torch.Tensor,\n is_prefill):\n if is_prefill:\n b_seq_len_numpy = b_seq_len.cpu().numpy()\n position_ids = torch.from_numpy(np.concatenate([np.arange(0, b_seq_len_numpy[i])\n for i in range(len(b_seq_len_numpy))], axis=0)).cuda()\n self.position_cos = torch.index_select(model._cos_cached, 0, position_ids).view(position_ids.shape[0], -1)\n self.position_sin = torch.index_select(model._sin_cached, 0, position_ids).view(position_ids.shape[0], -1)\n position_ids = None\n else:\n self.position_cos = torch.index_select(model._cos_cached, 0, b_seq_len - 1).view(b_seq_len.shape[0], -1)\n self.position_sin = torch.index_select(model._sin_cached, 0, b_seq_len - 1).view(b_seq_len.shape[0], -1)\n self.other_kv_index = b_loc[0, max_len_in_batch - 1].item()\n return"
},
{
"identifier": "rmsnorm_forward",
"path": "slora/models/llama/triton_kernel/rmsnorm.py",
"snippet": "def rmsnorm_forward(x, weight, eps):\n # allocate output\n y = torch.empty_like(x)\n # reshape input data into 2D tensor\n x_arg = x.view(-1, x.shape[-1])\n M, N = x_arg.shape\n # Less than 64KB per feature: enqueue fused kernel\n MAX_FUSED_SIZE = 65536 // x.element_size()\n BLOCK_SIZE = min(MAX_FUSED_SIZE, triton.next_power_of_2(N))\n # print(\"BLOCK_SIZE:\", BLOCK_SIZE)\n if N > BLOCK_SIZE:\n raise RuntimeError(\"This layer norm doesn't support feature dim >= 64KB.\")\n # heuristics for number of warps\n num_warps = min(max(BLOCK_SIZE // 256, 1), 8)\n # print(BLOCK_SIZE, num_warps, \"block_size, numwarps\")\n BLOCK_SIZE = 128 * 2 * 2 * 2 * 2 * 2 * 2 * 2\n num_warps = 8\n # enqueue kernel\n _rms_norm_fwd_fused[(M,)](x_arg, y, weight,\n x_arg.stride(0), N, eps,\n BLOCK_SIZE=BLOCK_SIZE, num_warps=num_warps)\n return y"
},
{
"identifier": "PostLayerInferTpl",
"path": "slora/common/basemodel/layer_infer/template/post_layer_infer_template.py",
"snippet": "class PostLayerInferTpl(PostLayerInfer):\n \"\"\"\n \"\"\"\n def __init__(self, tp_rank, world_size, network_config, mode):\n super().__init__(tp_rank, world_size, network_config, mode)\n self.eps_ = 1e-5\n self.vocab_size_ = network_config[\"vocab_size\"]\n self.embed_dim_ = network_config[\"n_embed\"]\n return\n \n def _norm(self, input, infer_state, layer_weight)->torch.Tensor:\n raise Exception(\"need to impl\")"
}
] | import torch
import torch.functional as F
import torch.distributed as dist
import numpy as np
from slora.models.llama.layer_weights.pre_and_post_layer_weight import LlamaPreAndPostLayerWeight
from einops import rearrange
from slora.models.llama.infer_struct import LlamaInferStateInfo
from slora.models.llama.triton_kernel.rmsnorm import rmsnorm_forward
from slora.common.basemodel import PostLayerInferTpl | 1,649 |
class LlamaPostLayerInfer(PostLayerInferTpl):
"""
"""
def __init__(self, tp_rank, world_size, network_config, mode):
super().__init__(tp_rank, world_size, network_config, mode)
self.eps_ = network_config["rms_norm_eps"]
self.vocab_size_ = network_config["vocab_size"]
self.embed_dim_ = network_config["n_embed"]
return
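    # --- Hedged note (added for illustration) ---
    # The _norm hook (declared in PostLayerInferTpl, implemented below this excerpt)
    # plausibly calls rmsnorm_forward(input, layer_weight.final_norm_weight_, self.eps_),
    # which keeps the input shape and normalizes over the last (embedding) dimension
    # before the lm_head projection; see the rmsnorm snippet quoted in the context above.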
|
class LlamaPostLayerInfer(PostLayerInferTpl):
"""
"""
def __init__(self, tp_rank, world_size, network_config, mode):
super().__init__(tp_rank, world_size, network_config, mode)
self.eps_ = network_config["rms_norm_eps"]
self.vocab_size_ = network_config["vocab_size"]
self.embed_dim_ = network_config["n_embed"]
return
| def _norm(self, input, infer_state, layer_weight:LlamaPreAndPostLayerWeight) -> torch.Tensor: | 0 | 2023-11-05 04:08:36+00:00 | 4k |
Yuliang-Liu/Monkey | data_generation/grit/grit/modeling/backbone/vit.py | [
{
"identifier": "PatchEmbed",
"path": "data_generation/grit/grit/modeling/backbone/utils.py",
"snippet": "class PatchEmbed(nn.Module):\n \"\"\"\n Image to Patch Embedding.\n \"\"\"\n\n def __init__(\n self, kernel_size=(16, 16), stride=(16, 16), padding=(0, 0), in_chans=3, embed_dim=768\n ):\n \"\"\"\n Args:\n kernel_size (Tuple): kernel size of the projection layer.\n stride (Tuple): stride of the projection layer.\n padding (Tuple): padding size of the projection layer.\n in_chans (int): Number of input image channels.\n embed_dim (int): embed_dim (int): Patch embedding dimension.\n \"\"\"\n super().__init__()\n\n self.proj = nn.Conv2d(\n in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding\n )\n\n def forward(self, x):\n x = self.proj(x)\n # B C H W -> B H W C\n x = x.permute(0, 2, 3, 1)\n return x"
},
{
"identifier": "add_decomposed_rel_pos",
"path": "data_generation/grit/grit/modeling/backbone/utils.py",
"snippet": "def add_decomposed_rel_pos(attn, q, rel_pos_h, rel_pos_w, q_size, k_size):\n \"\"\"\n Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`.\n https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py # noqa B950\n Args:\n attn (Tensor): attention map.\n q (Tensor): query q in the attention layer with shape (B, q_h * q_w, C).\n rel_pos_h (Tensor): relative position embeddings (Lh, C) for height axis.\n rel_pos_w (Tensor): relative position embeddings (Lw, C) for width axis.\n q_size (Tuple): spatial sequence size of query q with (q_h, q_w).\n k_size (Tuple): spatial sequence size of key k with (k_h, k_w).\n\n Returns:\n attn (Tensor): attention map with added relative positional embeddings.\n \"\"\"\n q_h, q_w = q_size\n k_h, k_w = k_size\n Rh = get_rel_pos(q_h, k_h, rel_pos_h)\n Rw = get_rel_pos(q_w, k_w, rel_pos_w)\n\n B, _, dim = q.shape\n r_q = q.reshape(B, q_h, q_w, dim)\n rel_h = torch.einsum(\"bhwc,hkc->bhwk\", r_q, Rh)\n rel_w = torch.einsum(\"bhwc,wkc->bhwk\", r_q, Rw)\n\n attn = (\n attn.view(B, q_h, q_w, k_h, k_w) + rel_h[:, :, :, :, None] + rel_w[:, :, :, None, :]\n ).view(B, q_h * q_w, k_h * k_w)\n\n return attn"
},
{
"identifier": "get_abs_pos",
"path": "data_generation/grit/grit/modeling/backbone/utils.py",
"snippet": "def get_abs_pos(abs_pos, has_cls_token, hw):\n \"\"\"\n Calculate absolute positional embeddings. If needed, resize embeddings and remove cls_token\n dimension for the original embeddings.\n Args:\n abs_pos (Tensor): absolute positional embeddings with (1, num_position, C).\n has_cls_token (bool): If true, has 1 embedding in abs_pos for cls token.\n hw (Tuple): size of input image tokens.\n\n Returns:\n Absolute positional embeddings after processing with shape (1, H, W, C)\n \"\"\"\n h, w = hw\n if has_cls_token:\n abs_pos = abs_pos[:, 1:]\n xy_num = abs_pos.shape[1]\n size = int(math.sqrt(xy_num))\n assert size * size == xy_num\n\n if size != h or size != w:\n new_abs_pos = F.interpolate(\n abs_pos.reshape(1, size, size, -1).permute(0, 3, 1, 2),\n size=(h, w),\n mode=\"bicubic\",\n align_corners=False,\n )\n\n return new_abs_pos.permute(0, 2, 3, 1)\n else:\n return abs_pos.reshape(1, h, w, -1)"
},
{
"identifier": "window_partition",
"path": "data_generation/grit/grit/modeling/backbone/utils.py",
"snippet": "def window_partition(x, window_size):\n \"\"\"\n Partition into non-overlapping windows with padding if needed.\n Args:\n x (tensor): input tokens with [B, H, W, C].\n window_size (int): window size.\n\n Returns:\n windows: windows after partition with [B * num_windows, window_size, window_size, C].\n (Hp, Wp): padded height and width before partition\n \"\"\"\n B, H, W, C = x.shape\n\n pad_h = (window_size - H % window_size) % window_size\n pad_w = (window_size - W % window_size) % window_size\n if pad_h > 0 or pad_w > 0:\n x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h))\n Hp, Wp = H + pad_h, W + pad_w\n\n x = x.view(B, Hp // window_size, window_size, Wp // window_size, window_size, C)\n windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)\n return windows, (Hp, Wp)"
},
{
"identifier": "window_unpartition",
"path": "data_generation/grit/grit/modeling/backbone/utils.py",
"snippet": "def window_unpartition(windows, window_size, pad_hw, hw):\n \"\"\"\n Window unpartition into original sequences and removing padding.\n Args:\n x (tensor): input tokens with [B * num_windows, window_size, window_size, C].\n window_size (int): window size.\n pad_hw (Tuple): padded height and width (Hp, Wp).\n hw (Tuple): original height and width (H, W) before padding.\n\n Returns:\n x: unpartitioned sequences with [B, H, W, C].\n \"\"\"\n Hp, Wp = pad_hw\n H, W = hw\n B = windows.shape[0] // (Hp * Wp // window_size // window_size)\n x = windows.view(B, Hp // window_size, Wp // window_size, window_size, window_size, -1)\n x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, Hp, Wp, -1)\n\n if Hp > H or Wp > W:\n x = x[:, :H, :W, :].contiguous()\n return x"
}
] | import logging
import math
import fvcore.nn.weight_init as weight_init
import torch
import torch.nn as nn
import sys
import torch.utils.checkpoint as checkpoint
from functools import partial
from detectron2.layers import CNNBlockBase, Conv2d, get_norm
from detectron2.modeling.backbone.build import BACKBONE_REGISTRY
from detectron2.layers import ShapeSpec
from centernet.modeling.backbone.fpn_p5 import LastLevelP6P7_P5
from timm.models.layers import DropPath, Mlp, trunc_normal_
from detectron2.modeling.backbone.backbone import Backbone
from .utils import (
PatchEmbed,
add_decomposed_rel_pos,
get_abs_pos,
window_partition,
window_unpartition,
) | 3,506 | bottleneck_channels,
norm="LN",
act_layer=nn.GELU,
):
"""
Args:
in_channels (int): Number of input channels.
out_channels (int): Number of output channels.
bottleneck_channels (int): number of output channels for the 3x3
"bottleneck" conv layers.
norm (str or callable): normalization for all conv layers.
See :func:`layers.get_norm` for supported format.
act_layer (callable): activation for all conv layers.
"""
super().__init__(in_channels, out_channels, 1)
self.conv1 = Conv2d(in_channels, bottleneck_channels, 1, bias=False)
self.norm1 = get_norm(norm, bottleneck_channels)
self.act1 = act_layer()
self.conv2 = Conv2d(
bottleneck_channels,
bottleneck_channels,
3,
padding=1,
bias=False,
)
self.norm2 = get_norm(norm, bottleneck_channels)
self.act2 = act_layer()
self.conv3 = Conv2d(bottleneck_channels, out_channels, 1, bias=False)
self.norm3 = get_norm(norm, out_channels)
for layer in [self.conv1, self.conv2, self.conv3]:
weight_init.c2_msra_fill(layer)
for layer in [self.norm1, self.norm2]:
layer.weight.data.fill_(1.0)
layer.bias.data.zero_()
# zero init last norm layer.
self.norm3.weight.data.zero_()
self.norm3.bias.data.zero_()
def forward(self, x):
out = x
for layer in self.children():
out = layer(out)
out = x + out
return out
class Block(nn.Module):
"""Transformer blocks with support of window attention and residual propagation blocks"""
def __init__(
self,
dim,
num_heads,
mlp_ratio=4.0,
qkv_bias=True,
drop_path=0.0,
norm_layer=nn.LayerNorm,
act_layer=nn.GELU,
use_rel_pos=False,
rel_pos_zero_init=True,
window_size=0,
use_residual_block=False,
input_size=None,
):
"""
Args:
dim (int): Number of input channels.
num_heads (int): Number of attention heads in each ViT block.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool): If True, add a learnable bias to query, key, value.
drop_path (float): Stochastic depth rate.
norm_layer (nn.Module): Normalization layer.
act_layer (nn.Module): Activation layer.
use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
window_size (int): Window size for window attention blocks. If it equals 0, then not
use window attention.
use_residual_block (bool): If True, use a residual block after the MLP block.
input_size (tuple(int, int) or None): Input resolution for calculating the relative positional
parameter size.
"""
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
use_rel_pos=use_rel_pos,
rel_pos_zero_init=rel_pos_zero_init,
input_size=input_size if window_size == 0 else (window_size, window_size),
)
self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
self.norm2 = norm_layer(dim)
self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer)
self.window_size = window_size
self.use_residual_block = use_residual_block
if use_residual_block:
# Use a residual block with bottleneck channel as dim // 2
self.residual = ResBottleneckBlock(
in_channels=dim,
out_channels=dim,
bottleneck_channels=dim // 2,
norm="LN",
act_layer=act_layer,
)
def forward(self, x):
shortcut = x
x = self.norm1(x)
# Window partition
if self.window_size > 0:
H, W = x.shape[1], x.shape[2]
| # Modified by Jialian Wu from https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/backbone/vit.py
sys.path.insert(0, 'models/grit_src/third_party/CenterNet2/projects/CenterNet2/')
logger = logging.getLogger(__name__)
__all__ = ["ViT"]
class Attention(nn.Module):
"""Multi-head Attention block with relative position embeddings."""
def __init__(
self,
dim,
num_heads=8,
qkv_bias=True,
use_rel_pos=False,
rel_pos_zero_init=True,
input_size=None,
):
"""
Args:
dim (int): Number of input channels.
num_heads (int): Number of attention heads.
qkv_bias (bool): If True, add a learnable bias to query, key, value.
use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
input_size (tuple(int, int) or None): Input resolution for calculating the relative positional
parameter size.
"""
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = head_dim**-0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.proj = nn.Linear(dim, dim)
self.use_rel_pos = use_rel_pos
if self.use_rel_pos:
# initialize relative positional embeddings
self.rel_pos_h = nn.Parameter(torch.zeros(2 * input_size[0] - 1, head_dim))
self.rel_pos_w = nn.Parameter(torch.zeros(2 * input_size[1] - 1, head_dim))
if not rel_pos_zero_init:
trunc_normal_(self.rel_pos_h, std=0.02)
trunc_normal_(self.rel_pos_w, std=0.02)
def forward(self, x):
B, H, W, _ = x.shape
# qkv with shape (3, B, nHead, H * W, C)
qkv = self.qkv(x).reshape(B, H * W, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
# q, k, v with shape (B * nHead, H * W, C)
q, k, v = qkv.reshape(3, B * self.num_heads, H * W, -1).unbind(0)
attn = (q * self.scale) @ k.transpose(-2, -1)
if self.use_rel_pos:
attn = add_decomposed_rel_pos(attn, q, self.rel_pos_h, self.rel_pos_w, (H, W), (H, W))
attn = attn.softmax(dim=-1)
x = (attn @ v).view(B, self.num_heads, H, W, -1).permute(0, 2, 3, 1, 4).reshape(B, H, W, -1)
x = self.proj(x)
return x
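# --- Hedged smoke test (added for illustration; sizes are assumptions) ---
# Attention consumes channel-last maps of shape (B, H, W, C) and returns the same
# shape; with use_rel_pos=True, input_size must match the (H, W) it will see:
def _attention_smoke_test():
    attn = Attention(dim=64, num_heads=8, use_rel_pos=True, input_size=(14, 14))
    x = torch.randn(2, 14, 14, 64)
    assert attn(x).shape == x.shape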
class ResBottleneckBlock(CNNBlockBase):
"""
The standard bottleneck residual block without the last activation layer.
It contains 3 conv layers with kernels 1x1, 3x3, 1x1.
"""
def __init__(
self,
in_channels,
out_channels,
bottleneck_channels,
norm="LN",
act_layer=nn.GELU,
):
"""
Args:
in_channels (int): Number of input channels.
out_channels (int): Number of output channels.
bottleneck_channels (int): number of output channels for the 3x3
"bottleneck" conv layers.
norm (str or callable): normalization for all conv layers.
See :func:`layers.get_norm` for supported format.
act_layer (callable): activation for all conv layers.
"""
super().__init__(in_channels, out_channels, 1)
self.conv1 = Conv2d(in_channels, bottleneck_channels, 1, bias=False)
self.norm1 = get_norm(norm, bottleneck_channels)
self.act1 = act_layer()
self.conv2 = Conv2d(
bottleneck_channels,
bottleneck_channels,
3,
padding=1,
bias=False,
)
self.norm2 = get_norm(norm, bottleneck_channels)
self.act2 = act_layer()
self.conv3 = Conv2d(bottleneck_channels, out_channels, 1, bias=False)
self.norm3 = get_norm(norm, out_channels)
for layer in [self.conv1, self.conv2, self.conv3]:
weight_init.c2_msra_fill(layer)
for layer in [self.norm1, self.norm2]:
layer.weight.data.fill_(1.0)
layer.bias.data.zero_()
# zero init last norm layer.
self.norm3.weight.data.zero_()
self.norm3.bias.data.zero_()
def forward(self, x):
out = x
for layer in self.children():
out = layer(out)
out = x + out
return out
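# --- Hedged smoke test (added for illustration; sizes are assumptions) ---
# The bottleneck block preserves spatial size, and with in_channels == out_channels
# the residual add is well-formed, so it can sit anywhere in the ViT trunk:
def _res_bottleneck_smoke_test():
    block = ResBottleneckBlock(in_channels=64, out_channels=64, bottleneck_channels=32)
    x = torch.randn(2, 64, 14, 14)  # NCHW, as the Conv2d layers expect
    assert block(x).shape == x.shape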
class Block(nn.Module):
"""Transformer blocks with support of window attention and residual propagation blocks"""
def __init__(
self,
dim,
num_heads,
mlp_ratio=4.0,
qkv_bias=True,
drop_path=0.0,
norm_layer=nn.LayerNorm,
act_layer=nn.GELU,
use_rel_pos=False,
rel_pos_zero_init=True,
window_size=0,
use_residual_block=False,
input_size=None,
):
"""
Args:
dim (int): Number of input channels.
num_heads (int): Number of attention heads in each ViT block.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool): If True, add a learnable bias to query, key, value.
drop_path (float): Stochastic depth rate.
norm_layer (nn.Module): Normalization layer.
act_layer (nn.Module): Activation layer.
use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
window_size (int): Window size for window attention blocks. If it equals 0, then not
use window attention.
use_residual_block (bool): If True, use a residual block after the MLP block.
input_size (tuple(int, int) or None): Input resolution for calculating the relative positional
parameter size.
"""
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
use_rel_pos=use_rel_pos,
rel_pos_zero_init=rel_pos_zero_init,
input_size=input_size if window_size == 0 else (window_size, window_size),
)
self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
self.norm2 = norm_layer(dim)
self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer)
self.window_size = window_size
self.use_residual_block = use_residual_block
if use_residual_block:
# Use a residual block with bottleneck channel as dim // 2
self.residual = ResBottleneckBlock(
in_channels=dim,
out_channels=dim,
bottleneck_channels=dim // 2,
norm="LN",
act_layer=act_layer,
)
def forward(self, x):
shortcut = x
x = self.norm1(x)
# Window partition
if self.window_size > 0:
H, W = x.shape[1], x.shape[2] | x, pad_hw = window_partition(x, self.window_size) | 3 | 2023-11-09 14:31:48+00:00 | 4k |
disler/multi-agent-postgres-data-analytics | postgres_da_ai_agent/agents/agents.py | [
{
"identifier": "PostgresAgentInstruments",
"path": "postgres_da_ai_agent/agents/instruments.py",
"snippet": "class PostgresAgentInstruments(AgentInstruments):\n \"\"\"\n Unified Toolset for the Postgres Data Analytics Multi-Agent System\n\n Advantages:\n - All agents have access to the same state and functions\n - Gives agent functions awareness of changing context\n - Clear and concise capabilities for agents\n - Clean database connection management\n\n Guidelines:\n - Agent Functions should not call other agent functions directly\n - Instead Agent Functions should call external lower level modules\n - Prefer 1 to 1 mapping of agents and their functions\n - The state lifecycle lives between all agent orchestrations\n \"\"\"\n\n def __init__(self, db_url: str, session_id: str) -> None:\n super().__init__()\n\n self.db_url = db_url\n self.db = None\n self.session_id = session_id\n self.messages = []\n self.innovation_index = 0\n\n def __enter__(self):\n \"\"\"\n Support entering the 'with' statement\n \"\"\"\n self.reset_files()\n self.db = PostgresManager()\n self.db.connect_with_url(self.db_url)\n return self, self.db\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n \"\"\"\n Support exiting the 'with' statement\n \"\"\"\n self.db.close()\n\n def sync_messages(self, messages: list):\n \"\"\"\n Syncs messages with the orchestrator\n \"\"\"\n self.messages = messages\n\n def reset_files(self):\n \"\"\"\n Clear everything in the root_dir\n \"\"\"\n\n # if it does not exist create it\n if not os.path.exists(self.root_dir):\n os.makedirs(self.root_dir)\n\n for fname in os.listdir(self.root_dir):\n os.remove(os.path.join(self.root_dir, fname))\n\n def get_file_path(self, fname: str):\n \"\"\"\n Get the full path to a file in the root_dir\n \"\"\"\n return os.path.join(self.root_dir, fname)\n\n # -------------------------- Agent Properties -------------------------- #\n\n @property\n def run_sql_results_file(self):\n return self.get_file_path(\"run_sql_results.json\")\n\n @property\n def sql_query_file(self):\n return self.get_file_path(\"sql_query.sql\")\n\n # -------------------------- Agent Functions -------------------------- #\n\n def run_sql(self, sql: str) -> str:\n \"\"\"\n Run a SQL query against the postgres database\n \"\"\"\n results_as_json = self.db.run_sql(sql)\n\n fname = self.run_sql_results_file\n\n # dump these results to a file\n with open(fname, \"w\") as f:\n f.write(results_as_json)\n\n with open(self.sql_query_file, \"w\") as f:\n f.write(sql)\n\n return \"Successfully delivered results to json file\"\n\n def validate_run_sql(self):\n \"\"\"\n validate that the run_sql results file exists and has content\n \"\"\"\n fname = self.run_sql_results_file\n\n with open(fname, \"r\") as f:\n content = f.read()\n\n if not content:\n return False, f\"File {fname} is empty\"\n\n return True, \"\"\n\n def write_file(self, content: str):\n fname = self.get_file_path(f\"write_file.txt\")\n return file.write_file(fname, content)\n\n def write_json_file(self, json_str: str):\n fname = self.get_file_path(f\"write_json_file.json\")\n return file.write_json_file(fname, json_str)\n\n def write_yml_file(self, json_str: str):\n fname = self.get_file_path(f\"write_yml_file.yml\")\n return file.write_yml_file(fname, json_str)\n\n def write_innovation_file(self, content: str):\n fname = self.get_file_path(f\"{self.innovation_index}_innovation_file.json\")\n file.write_file(fname, content)\n self.innovation_index += 1\n return f\"Successfully wrote innovation file. 
You can check my work.\"\n\n def validate_innovation_files(self):\n \"\"\"\n loop from 0 to innovation_index and verify file exists with content\n \"\"\"\n for i in range(self.innovation_index):\n fname = self.get_file_path(f\"{i}_innovation_file.json\")\n with open(fname, \"r\") as f:\n content = f.read()\n if not content:\n return False, f\"File {fname} is empty\"\n\n return True, \"\""
},
{
"identifier": "orchestrator",
"path": "postgres_da_ai_agent/modules/orchestrator.py",
"snippet": "class Orchestrator:\n def __init__(\n self,\n name: str,\n agents: List[autogen.ConversableAgent],\n instruments: AgentInstruments,\n validate_results_func: callable = None,\n ):\n def total_agents(self):\n def last_message_is_dict(self):\n def last_message_is_string(self):\n def last_message_is_func_call(self):\n def last_message_is_content(self):\n def latest_message(self) -> Optional[str]:\n def last_message_always_string(self):\n def handle_validate_func(self) -> Tuple[bool, str]:\n def send_message(\n self,\n from_agent: autogen.ConversableAgent,\n to_agent: autogen.ConversableAgent,\n message: str,\n ):\n def add_message(self, message: str):\n def get_message_as_str(self):\n def get_cost_and_tokens(self):\n def has_functions(self, agent: autogen.ConversableAgent):\n def basic_chat(\n self,\n agent_a: autogen.ConversableAgent,\n agent_b: autogen.ConversableAgent,\n message: str,\n ):\n def memory_chat(\n self,\n agent_a: autogen.ConversableAgent,\n agent_b: autogen.ConversableAgent,\n message: str,\n ):\n def function_chat(\n self,\n agent_a: autogen.ConversableAgent,\n agent_b: autogen.ConversableAgent,\n message: str,\n ):\n def self_function_chat(self, agent: autogen.ConversableAgent, message: str):\n def spy_on_agents(self, append_to_file: bool = True):\n def sequential_conversation(self, prompt: str) -> ConversationResult:\n def broadcast_conversation(self, prompt: str) -> ConversationResult:\n def round_robin_conversation(\n self, prompt: str, loops: int = 1\n ) -> ConversationResult:"
},
{
"identifier": "agent_config",
"path": "postgres_da_ai_agent/agents/agent_config.py",
"snippet": ""
}
] | from typing import Optional, List, Dict, Any
from postgres_da_ai_agent.agents.instruments import PostgresAgentInstruments
from postgres_da_ai_agent.modules import orchestrator
from postgres_da_ai_agent.agents import agent_config
import autogen
import guidance | 3,173 | sr_data_analyst = autogen.AssistantAgent(
name="Sr_Data_Analyst",
llm_config=agent_config.run_sql_config,
system_message=SR_DATA_ANALYST_PROMPT,
code_execution_config=False,
human_input_mode="NEVER",
function_map={
"run_sql": instruments.run_sql,
},
)
return [
user_proxy,
data_engineer,
sr_data_analyst,
]
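# --- Hedged usage sketch (added for illustration; the db_url is a placeholder) ---
# PostgresAgentInstruments is a context manager (see its __enter__/__exit__ in the
# context snippet above), so a data engineering run is typically scoped to one
# `with` block against a reachable Postgres instance:
def _example_data_eng_team(db_url: str):
    with PostgresAgentInstruments(db_url=db_url, session_id="example") as (
        instruments,
        db,  # live PostgresManager; unused in this sketch
    ):
        return build_data_eng_team(instruments)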
def build_data_viz_team(instruments: PostgresAgentInstruments):
# admin user proxy agent - takes in the prompt and manages the group chat
user_proxy = autogen.UserProxyAgent(
name="Admin",
system_message=USER_PROXY_PROMPT,
code_execution_config=False,
human_input_mode="NEVER",
)
# text report analyst - writes a summary report of the results and saves them to a local text file
text_report_analyst = autogen.AssistantAgent(
name="Text_Report_Analyst",
llm_config=agent_config.write_file_config,
system_message=TEXT_REPORT_ANALYST_PROMPT,
human_input_mode="NEVER",
function_map={
"write_file": instruments.write_file,
},
)
# json report analyst - writes a summary report of the results and saves them to a local json file
json_report_analyst = autogen.AssistantAgent(
name="Json_Report_Analyst",
llm_config=agent_config.write_json_file_config,
system_message=JSON_REPORT_ANALYST_PROMPT,
human_input_mode="NEVER",
function_map={
"write_json_file": instruments.write_json_file,
},
)
yaml_report_analyst = autogen.AssistantAgent(
name="Yml_Report_Analyst",
llm_config=agent_config.write_yaml_file_config,
system_message=YML_REPORT_ANALYST_PROMPT,
human_input_mode="NEVER",
function_map={
"write_yml_file": instruments.write_yml_file,
},
)
return [
user_proxy,
text_report_analyst,
json_report_analyst,
yaml_report_analyst,
]
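# --- Hedged note (added for illustration) ---
# Each viz analyst maps exactly one write_* function, matching the "1 to 1 mapping
# of agents and their functions" guideline in PostgresAgentInstruments, so a bad
# report can be traced back to a single agent and its single function.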
def build_scrum_master_team(instruments: PostgresAgentInstruments):
user_proxy = autogen.UserProxyAgent(
name="Admin",
system_message=USER_PROXY_PROMPT,
code_execution_config=False,
human_input_mode="NEVER",
)
scrum_agent = DefensiveScrumMasterAgent(
name="Scrum_Master",
llm_config=agent_config.base_config,
system_message=GUIDANCE_SCRUM_MASTER_SQL_NLQ_PROMPT,
human_input_mode="NEVER",
)
return [user_proxy, scrum_agent]
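# --- Hedged note (added for illustration) ---
# The Scrum_Master scores incoming text 1-5 with the guidance prompt above; a simple
# downstream gate (an assumption, not shown in this excerpt) might be:
#   proceed = int(rank) >= 3  # treat "neutral or better" as a SQL NLQ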
def build_insights_team(instruments: PostgresAgentInstruments):
user_proxy = autogen.UserProxyAgent(
name="Admin",
system_message=USER_PROXY_PROMPT,
code_execution_config=False,
human_input_mode="NEVER",
)
insights_agent = InsightsAgent(
name="Insights",
llm_config=agent_config.base_config,
system_message=DATA_INSIGHTS_GUIDANCE_PROMPT,
human_input_mode="NEVER",
)
insights_data_reporter = autogen.AssistantAgent(
name="Insights_Data_Reporter",
llm_config=agent_config.write_innovation_file_config,
system_message=INSIGHTS_FILE_REPORTER_PROMPT,
human_input_mode="NEVER",
function_map={
"write_innovation_file": instruments.write_innovation_file,
},
)
return [user_proxy, insights_agent, insights_data_reporter]
# ------------------------ ORCHESTRATION ------------------------
def build_team_orchestrator(
team: str,
agent_instruments: PostgresAgentInstruments,
validate_results: callable = None,
|
# ------------------------ PROMPTS ------------------------
USER_PROXY_PROMPT = "A human admin. Interact with the Product Manager to discuss the plan. Plan execution needs to be approved by this admin."
DATA_ENGINEER_PROMPT = "A Data Engineer. Generate the initial SQL based on the requirements provided. Send it to the Sr Data Analyst to be executed. "
SR_DATA_ANALYST_PROMPT = "Sr Data Analyst. You run the SQL query using the run_sql function, send the raw response to the data viz team. You use the run_sql function exclusively."
GUIDANCE_SCRUM_MASTER_SQL_NLQ_PROMPT = """
Is the following block of text a SQL Natural Language Query (NLQ)? Please rank from 1 to 5, where:
1: Definitely not NLQ
2: Likely not NLQ
3: Neutral / Unsure
4: Likely NLQ
5: Definitely NLQ
Return the rank as a number exclusively using the rank variable to be casted as an integer.
Block of Text: {{potential_nlq}}
{{#select "rank" logprobs='logprobs'}} 1{{or}} 2{{or}} 3{{or}} 4{{or}} 5{{/select}}
"""
DATA_INSIGHTS_GUIDANCE_PROMPT = """
You're a data innovator. You analyze SQL databases table structure and generate 3 novel insights for your team to reflect on and query.
Format your insights in JSON format.
```json
[{{#geneach 'insight' num_iterations=3 join=','}}
{
"insight": "{{gen 'insight' temperature=0.7}}",
"actionable_business_value": "{{gen 'actionable_value' temperature=0.7}}",
"sql": "{{gen 'new_query' temperature=0.7}}"
}
{{/geneach}}]
```"""
INSIGHTS_FILE_REPORTER_PROMPT = "You're a data reporter. You write json data you receive directly into a file using the write_innovation_file function."
# unused prompts
COMPLETION_PROMPT = "If everything looks good, respond with APPROVED"
PRODUCT_MANAGER_PROMPT = (
"Product Manager. Validate the response to make sure it's correct"
+ COMPLETION_PROMPT
)
TEXT_REPORT_ANALYST_PROMPT = "Text File Report Analyst. You exclusively use the write_file function on a summarized report."
JSON_REPORT_ANALYST_PROMPT = "Json Report Analyst. You exclusively use the write_json_file function on the report."
YML_REPORT_ANALYST_PROMPT = "Yaml Report Analyst. You exclusively use the write_yml_file function on the report."
# ------------------------ BUILD AGENT TEAMS ------------------------
def build_data_eng_team(instruments: PostgresAgentInstruments):
"""
Build a team of agents that can generate, execute, and report an SQL query
"""
# create a set of agents with specific roles
# admin user proxy agent - takes in the prompt and manages the group chat
user_proxy = autogen.UserProxyAgent(
name="Admin",
system_message=USER_PROXY_PROMPT,
code_execution_config=False,
human_input_mode="NEVER",
)
# data engineer agent - generates the sql query
data_engineer = autogen.AssistantAgent(
name="Engineer",
llm_config=agent_config.base_config,
system_message=DATA_ENGINEER_PROMPT,
code_execution_config=False,
human_input_mode="NEVER",
)
sr_data_analyst = autogen.AssistantAgent(
name="Sr_Data_Analyst",
llm_config=agent_config.run_sql_config,
system_message=SR_DATA_ANALYST_PROMPT,
code_execution_config=False,
human_input_mode="NEVER",
function_map={
"run_sql": instruments.run_sql,
},
)
return [
user_proxy,
data_engineer,
sr_data_analyst,
]
def build_data_viz_team(instruments: PostgresAgentInstruments):
# admin user proxy agent - takes in the prompt and manages the group chat
user_proxy = autogen.UserProxyAgent(
name="Admin",
system_message=USER_PROXY_PROMPT,
code_execution_config=False,
human_input_mode="NEVER",
)
# text report analyst - writes a summary report of the results and saves them to a local text file
text_report_analyst = autogen.AssistantAgent(
name="Text_Report_Analyst",
llm_config=agent_config.write_file_config,
system_message=TEXT_REPORT_ANALYST_PROMPT,
human_input_mode="NEVER",
function_map={
"write_file": instruments.write_file,
},
)
# json report analyst - writes a summary report of the results and saves them to a local json file
json_report_analyst = autogen.AssistantAgent(
name="Json_Report_Analyst",
llm_config=agent_config.write_json_file_config,
system_message=JSON_REPORT_ANALYST_PROMPT,
human_input_mode="NEVER",
function_map={
"write_json_file": instruments.write_json_file,
},
)
yaml_report_analyst = autogen.AssistantAgent(
name="Yml_Report_Analyst",
llm_config=agent_config.write_yaml_file_config,
system_message=YML_REPORT_ANALYST_PROMPT,
human_input_mode="NEVER",
function_map={
"write_yml_file": instruments.write_yml_file,
},
)
return [
user_proxy,
text_report_analyst,
json_report_analyst,
yaml_report_analyst,
]
def build_scrum_master_team(instruments: PostgresAgentInstruments):
user_proxy = autogen.UserProxyAgent(
name="Admin",
system_message=USER_PROXY_PROMPT,
code_execution_config=False,
human_input_mode="NEVER",
)
scrum_agent = DefensiveScrumMasterAgent(
name="Scrum_Master",
llm_config=agent_config.base_config,
system_message=GUIDANCE_SCRUM_MASTER_SQL_NLQ_PROMPT,
human_input_mode="NEVER",
)
return [user_proxy, scrum_agent]
def build_insights_team(instruments: PostgresAgentInstruments):
user_proxy = autogen.UserProxyAgent(
name="Admin",
system_message=USER_PROXY_PROMPT,
code_execution_config=False,
human_input_mode="NEVER",
)
insights_agent = InsightsAgent(
name="Insights",
llm_config=agent_config.base_config,
system_message=DATA_INSIGHTS_GUIDANCE_PROMPT,
human_input_mode="NEVER",
)
insights_data_reporter = autogen.AssistantAgent(
name="Insights_Data_Reporter",
llm_config=agent_config.write_innovation_file_config,
system_message=INSIGHTS_FILE_REPORTER_PROMPT,
human_input_mode="NEVER",
function_map={
"write_innovation_file": instruments.write_innovation_file,
},
)
return [user_proxy, insights_agent, insights_data_reporter]
# ------------------------ ORCHESTRATION ------------------------
def build_team_orchestrator(
team: str,
agent_instruments: PostgresAgentInstruments,
validate_results: callable = None, | ) -> orchestrator.Orchestrator: | 1 | 2023-11-04 20:15:46+00:00 | 4k |
OpenBMB/ProAgent | ProAgent/running_recorder.py | [
{
"identifier": "CONFIG",
"path": "ProAgent/config.py",
"snippet": "CONFIG = RPAgentConfig.get_default_config()"
},
{
"identifier": "ENVIRONMENT",
"path": "ProAgent/router/utils.py",
"snippet": "class ENVIRONMENT(Enum):\n '''\n 决定了 record cache 的访问形式\n - Development:不访问缓存,从头开始\n - Refine:访问缓存,但 user messages 必须一致,若不一致(例如节点返回值变化)则停止访问缓存\n - Production:无条件访问缓存,将 record 重播一遍\n '''\n # how to handle with different query?now it's loading the query defined in code instead of loading from cache.\n Development = auto() # ok\n Refine = auto() # ok\n Production = auto() # ok"
},
{
"identifier": "Action",
"path": "ProAgent/utils.py",
"snippet": "class Action():\n content: str = \"\"\n thought: str = \"\"\n plan: List[str] = field(default_factory=lambda: [])\n criticism: str = \"\"\n tool_name: str = \"\"\n tool_input: dict = field(default_factory=lambda: {})\n\n tool_output_status: ToolCallStatus = ToolCallStatus.ToolCallSuccess\n tool_output: str = \"\"\n\n def to_json(self):\n try:\n tool_output = json.loads(self.tool_output)\n except:\n tool_output = self.tool_output\n return {\n \"thought\": self.thought,\n \"plan\": self.plan,\n \"criticism\": self.criticism,\n \"tool_name\": self.tool_name,\n \"tool_input\": self.tool_input,\n \"tool_output_status\": self.tool_output_status.name,\n \"tool_output\": tool_output,\n }"
},
{
"identifier": "logger",
"path": "ProAgent/loggers/logs.py",
"snippet": "class JsonFileHandler(logging.FileHandler):\nclass JsonFormatter(logging.Formatter):\nclass Logger(metaclass=Singleton):\nclass TypingConsoleHandler(logging.StreamHandler):\nclass ConsoleHandler(logging.StreamHandler):\nclass AutoGptFormatter(logging.Formatter):\n def __init__(self, filename, mode=\"a\", encoding=None, delay=False):\n def emit(self, record):\n def format(self, record):\n def __init__(self):\n def typewriter_log(\n self, title=\"\", title_color=\"\", content=\"\", speak_text=False, level=logging.INFO\n ):\n def debug(\n self,\n message,\n title=\"\",\n title_color=\"\",\n ):\n def info(\n self,\n message,\n title=\"\",\n title_color=\"\",\n ):\n def warn(\n self,\n message,\n title=\"\",\n title_color=\"\",\n ):\n def error(self, title, message=\"\"):\n def _log(\n self,\n title: str = \"\",\n title_color: str = \"\",\n message: str = \"\",\n level=logging.INFO,\n ):\n def set_level(self, level):\n def double_check(self, additionalText=None):\n def log_json(self, data: Any, file_name: str) -> None:\n def get_log_directory(self):\n def emit(self, record):\n def emit(self, record) -> None:\n def format(self, record: LogRecord) -> str:\ndef remove_color_codes(s: str) -> str:\ndef print_action_base(action: Action):\ndef print_action_tool(action: Action):"
}
] | import os
import time
import json
from colorama import Fore
from termcolor import colored
from ProAgent.config import CONFIG
from ProAgent.router.utils import ENVIRONMENT
from ProAgent.utils import Action
from ProAgent.loggers.logs import logger | 2,556 | Fore.RED,
record_dir,
)
self.newly_start = False
for dir_name in os.listdir(record_dir):
if dir_name == "LLM_inout_pair":
inout_pair_list = os.listdir(os.path.join(record_dir,dir_name))
inout_pair_list.sort()
for file_name in inout_pair_list:
with open(os.path.join(record_dir,dir_name,file_name), "r", encoding="utf-8") as reader:
llm_pair = json.load(reader)
self.llm_record_cache.append(llm_pair)
elif dir_name == "meta.meta":
with open(os.path.join(record_dir, "meta.meta"), "r", encoding="utf-8") as reader:
tool_call_log = json.load(reader)
def regist_llm_inout(self, base_kwargs, messages, functions, function_call, stop, other_args, output_data, uuid=""):
"""
Registers the LLM input and output data for the specified function call.
Args:
base_kwargs (dict): The base keyword arguments for the function call.
messages (list): The list of messages associated with the function call.
functions (list): The list of functions called during the function call.
function_call (str): The function call being registered.
stop (bool): A flag indicating whether the function call should stop.
other_args (list): The list of other arguments for the function call.
output_data (Any): The output data for the function call.
uuid (str, optional): The UUID associated with the function call. Defaults to "".
Returns:
None
Raises:
None
"""
with open(os.path.join(self.record_root_dir, "LLM_inout_pair", f"{self.llm_interface_id:05d}.json"), "w", encoding="utf-8") as writer:
llm_inout_record = {
"input": {
"base_kwargs": dump_common_things(base_kwargs),
"messages":dump_common_things(messages),
"functions":dump_common_things(functions),
"function_call":dump_common_things(function_call),
"stop":dump_common_things(stop),
"other_args":dump_common_things(other_args),
# 'uuid': dump_common_things(uuid)
},
"output": dump_common_things(output_data),
"llm_interface_id": self.llm_interface_id,
}
json.dump(llm_inout_record,writer,indent=2, ensure_ascii=False)
self.llm_server_cache.append(llm_inout_record)
self.llm_interface_id += 1
self.save_meta()
def query_llm_inout(self, restrict_cache_query, base_kwargs, messages, functions, function_call, stop, other_args, uuid=""):
"""
Query the LLM server for input and output data based on the given parameters.
Parameters:
- restrict_cache_query (bool): Whether to restrict the cache query.
- base_kwargs (dict): A dictionary of base keyword arguments.
- messages (list): A list of messages.
- functions (list): A list of functions.
- function_call (dict): A dictionary representing the function call.
- stop (bool): Whether to stop the query.
- other_args (dict): A dictionary of other arguments.
- uuid (str): A string representing the UUID (optional).
Returns:
- object: The output data from the LLM server, or None if not found.
"""
if CONFIG.environment == ENVIRONMENT.Development or self.newly_start:
self.is_cached = False
return None
elif CONFIG.environment == ENVIRONMENT.Refine:
input_data = {
"base_kwargs": dump_common_things(base_kwargs),
"messages":dump_common_things(messages),
"functions":dump_common_things(functions),
"function_call":dump_common_things(function_call),
"stop":dump_common_things(stop),
"other_args":dump_common_things(other_args),
}
for cache in self.llm_record_cache:
# compare user messages only
input_data_user_messages = [item for item in input_data['messages'] if item['role'] == 'user']
cache_data_user_messages = [item for item in cache["input"]['messages'] if item['role'] == 'user']
if input_data_user_messages == cache_data_user_messages:
if restrict_cache_query and self.llm_interface_id != cache["llm_interface_id"]:
continue
logger.typewriter_log(
f"get a llm_server response from Record {cache['llm_interface_id']}",
Fore.RED,
)
self.is_cached = True
return cache["output"]
self.is_cached = False
return None
elif CONFIG.environment == ENVIRONMENT.Production:
if self.llm_interface_id < len(self.llm_record_cache):
logger.typewriter_log(
"get a llm_server response from Record",
Fore.RED,
)
self.is_cached = True
return self.llm_record_cache[self.llm_interface_id]['output']
else:
self.is_cached = False
return None
else:
self.is_cached = False
return None
|
def dump_common_things(object):
"""
Generates a function comment for the given function body.
Args:
object: The object to be processed.
Returns:
The processed object.
"""
if type(object) in [str,int,float, bool]:
return object
if type(object) == dict:
return {dump_common_things(key): dump_common_things(value) for key,value in object.items()}
if type(object) == list:
return [dump_common_things(cont) for cont in object]
method = getattr(object, 'to_json', None)
if callable(method):
return method()
class RunningRecoder():
def __init__(self, record_base_dir = "./records"):
"""
Initializes the object with the given record base directory.
Parameters:
record_base_dir (str): The base directory for the records. Defaults to "./records".
Returns:
None
"""
self.llm_record_cache = [] # Get cached records
self.llm_interface_id = 0
self.llm_server_cache = [] # Runtime records
self.tool_call_id = 0
self.tool_call_cache = []
self.is_cached = True # Assume to be true at first
self.newly_start = True
now = int(round(time.time()*1000))
strip = time.strftime('%Y_%m_%d_%H_%M_%S',time.localtime(now/1000))
self.record_root_dir = os.path.join(record_base_dir,strip)
os.makedirs(self.record_root_dir,exist_ok=True)
print(colored(f"Recorder Mode: {CONFIG.environment.name}", color='yellow'))
for subdir_name in ["LLM_inout_pair","tool_call_logs"]:
os.makedirs(os.path.join(self.record_root_dir,subdir_name),exist_ok=True)
def save_meta(self):
"""
Saves the meta information of the record.
This function writes the meta information of the record to a file in the
record root directory. The meta information includes the tool call ID and
the LLM inference ID.
Parameters:
None
Returns:
None
"""
with open(os.path.join(self.record_root_dir, "meta.meta"), "w", encoding="utf-8") as writer:
tool_call_log = {
"tool_call_id": self.tool_call_id,
"llm_inference_id": self.llm_interface_id,
}
json.dump(tool_call_log,writer,indent=2, ensure_ascii=False)
def load_from_disk(self, record_dir: str, cfg):
"""
Load data from disk into memory cache.
Args:
record_dir (str): The directory path where the data is stored.
cfg: The configuration object.
Returns:
None
"""
logger.typewriter_log(
"load from a disk record",
Fore.RED,
record_dir,
)
self.newly_start = False
for dir_name in os.listdir(record_dir):
if dir_name == "LLM_inout_pair":
inout_pair_list = os.listdir(os.path.join(record_dir,dir_name))
inout_pair_list.sort()
for file_name in inout_pair_list:
with open(os.path.join(record_dir,dir_name,file_name), "r", encoding="utf-8") as reader:
llm_pair = json.load(reader)
self.llm_record_cache.append(llm_pair)
elif dir_name == "meta.meta":
with open(os.path.join(record_dir, "meta.meta"), "r", encoding="utf-8") as reader:
tool_call_log = json.load(reader)
def regist_llm_inout(self, base_kwargs, messages, functions, function_call, stop, other_args, output_data, uuid=""):
"""
Registers the LLM input and output data for the specified function call.
Args:
base_kwargs (dict): The base keyword arguments for the function call.
messages (list): The list of messages associated with the function call.
functions (list): The list of functions called during the function call.
function_call (str): The function call being registered.
stop (bool): A flag indicating whether the function call should stop.
other_args (list): The list of other arguments for the function call.
output_data (Any): The output data for the function call.
uuid (str, optional): The UUID associated with the function call. Defaults to "".
Returns:
None
Raises:
None
"""
with open(os.path.join(self.record_root_dir, "LLM_inout_pair", f"{self.llm_interface_id:05d}.json"), "w", encoding="utf-8") as writer:
llm_inout_record = {
"input": {
"base_kwargs": dump_common_things(base_kwargs),
"messages":dump_common_things(messages),
"functions":dump_common_things(functions),
"function_call":dump_common_things(function_call),
"stop":dump_common_things(stop),
"other_args":dump_common_things(other_args),
# 'uuid': dump_common_things(uuid)
},
"output": dump_common_things(output_data),
"llm_interface_id": self.llm_interface_id,
}
json.dump(llm_inout_record,writer,indent=2, ensure_ascii=False)
self.llm_server_cache.append(llm_inout_record)
self.llm_interface_id += 1
self.save_meta()
def query_llm_inout(self, restrict_cache_query, base_kwargs, messages, functions, function_call, stop, other_args, uuid=""):
"""
Query the LLM server for input and output data based on the given parameters.
Parameters:
- restrict_cache_query (bool): Whether to restrict the cache query.
- base_kwargs (dict): A dictionary of base keyword arguments.
- messages (list): A list of messages.
- functions (list): A list of functions.
- function_call (dict): A dictionary representing the function call.
- stop (bool): Whether to stop the query.
- other_args (dict): A dictionary of other arguments.
- uuid (str): A string representing the UUID (optional).
Returns:
- object: The output data from the LLM server, or None if not found.
"""
if CONFIG.environment == ENVIRONMENT.Development or self.newly_start:
self.is_cached = False
return None
elif CONFIG.environment == ENVIRONMENT.Refine:
input_data = {
"base_kwargs": dump_common_things(base_kwargs),
"messages":dump_common_things(messages),
"functions":dump_common_things(functions),
"function_call":dump_common_things(function_call),
"stop":dump_common_things(stop),
"other_args":dump_common_things(other_args),
}
for cache in self.llm_record_cache:
# compare user messages only
input_data_user_messages = [item for item in input_data['messages'] if item['role'] == 'user']
cache_data_user_messages = [item for item in cache["input"]['messages'] if item['role'] == 'user']
if input_data_user_messages == cache_data_user_messages:
if restrict_cache_query and self.llm_interface_id != cache["llm_interface_id"]:
continue
logger.typewriter_log(
f"get a llm_server response from Record {cache['llm_interface_id']}",
Fore.RED,
)
self.is_cached = True
return cache["output"]
self.is_cached = False
return None
elif CONFIG.environment == ENVIRONMENT.Production:
if self.llm_interface_id < len(self.llm_record_cache):
logger.typewriter_log(
"get a llm_server response from Record",
Fore.RED,
)
self.is_cached = True
return self.llm_record_cache[self.llm_interface_id]['output']
else:
self.is_cached = False
return None
else:
self.is_cached = False
return None
| def regist_tool_call(self, action: Action, now_code: str): | 2 | 2023-11-03 01:20:14+00:00 | 4k |
LLaVA-VL/LLaVA-Plus-Codebase | llava/eval/run_llava.py | [
{
"identifier": "IMAGE_TOKEN_INDEX",
"path": "llava/constants.py",
"snippet": "IMAGE_TOKEN_INDEX = -200"
},
{
"identifier": "DEFAULT_IMAGE_TOKEN",
"path": "llava/constants.py",
"snippet": "DEFAULT_IMAGE_TOKEN = \"<image>\""
},
{
"identifier": "DEFAULT_IM_START_TOKEN",
"path": "llava/constants.py",
"snippet": "DEFAULT_IM_START_TOKEN = \"<im_start>\""
},
{
"identifier": "DEFAULT_IM_END_TOKEN",
"path": "llava/constants.py",
"snippet": "DEFAULT_IM_END_TOKEN = \"<im_end>\""
},
{
"identifier": "IMAGE_PLACEHOLDER",
"path": "llava/constants.py",
"snippet": "IMAGE_PLACEHOLDER = \"<image-placeholder>\""
},
{
"identifier": "conv_templates",
"path": "llava/conversation.py",
"snippet": "def parse_tool_output(text):\ndef make_it_small_html(text):\ndef get_hr_html():\ndef get_placehold(text):\ndef parse_msg(msg):\n def get_prompt(self):\n def wrap_sys(msg): return f\"<<SYS>>\\n{msg}\\n<</SYS>>\\n\\n\"\n def wrap_inst(msg): return f\"[INST] {msg} [/INST]\"\n def append_message(self, role, message):\n def get_images(self, return_pil=False):\n def expand2square(pil_img, background_color=(122, 116, 104)):\n def get_raw_images(self, return_pil=False, image_process_mode=None):\n def tools_filter_msg(self, msg):\n def merge_output(self, ret, with_debug_parameter=False):\n def image_to_url(self, image):\n def to_gradio_chatbot(self, with_debug_parameter=False):\n def copy(self):\n def dict(self, force_str=False):\n def remove_pil(x, force_str):\nclass SeparatorStyle(Enum):\nclass Conversation:\n SINGLE = auto()\n TWO = auto()\n MPT = auto()\n PLAIN = auto()\n LLAMA_2 = auto()\n W, H = image.size\n H, W = longest_edge, shortest_edge\n H, W = shortest_edge, longest_edge\n W, H = image.size\n H, W = longest_edge, shortest_edge\n H, W = shortest_edge, longest_edge"
},
{
"identifier": "load_pretrained_model",
"path": "llava/model/builder.py",
"snippet": "def load_pretrained_model(model_path, model_base, model_name, load_8bit=False, load_4bit=False, device_map=\"auto\", device=\"cuda\"):\n kwargs = {\"device_map\": device_map}\n\n if device != \"cuda\":\n kwargs['device_map'] = {\"\": device}\n\n if load_8bit:\n kwargs['load_in_8bit'] = True\n elif load_4bit:\n kwargs['load_in_4bit'] = True\n kwargs['quantization_config'] = BitsAndBytesConfig(\n load_in_4bit=True,\n bnb_4bit_compute_dtype=torch.float16,\n bnb_4bit_use_double_quant=True,\n bnb_4bit_quant_type='nf4'\n )\n else:\n kwargs['torch_dtype'] = torch.float16\n\n if 'llava' in model_name.lower():\n # Load LLaVA model\n if 'lora' in model_name.lower() and model_base is None:\n warnings.warn('There is `lora` in model name but no `model_base` is provided. If you are loading a LoRA model, please provide the `model_base` argument. Detailed instruction: https://github.com/haotian-liu/LLaVA#launch-a-model-worker-lora-weights-unmerged.')\n if 'lora' in model_name.lower() and model_base is not None:\n lora_cfg_pretrained = AutoConfig.from_pretrained(model_path)\n tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)\n print('Loading LLaVA from base model...')\n model = LlavaLlamaForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, config=lora_cfg_pretrained, **kwargs)\n token_num, tokem_dim = model.lm_head.out_features, model.lm_head.in_features\n if model.lm_head.weight.shape[0] != token_num:\n model.lm_head.weight = torch.nn.Parameter(torch.empty(token_num, tokem_dim, device=model.device, dtype=model.dtype))\n model.model.embed_tokens.weight = torch.nn.Parameter(torch.empty(token_num, tokem_dim, device=model.device, dtype=model.dtype))\n\n print('Loading additional LLaVA weights...')\n if os.path.exists(os.path.join(model_path, 'non_lora_trainables.bin')):\n non_lora_trainables = torch.load(os.path.join(model_path, 'non_lora_trainables.bin'), map_location='cpu')\n else:\n # this is probably from HF Hub\n from huggingface_hub import hf_hub_download\n def load_from_hf(repo_id, filename, subfolder=None):\n cache_file = hf_hub_download(\n repo_id=repo_id,\n filename=filename,\n subfolder=subfolder)\n return torch.load(cache_file, map_location='cpu')\n non_lora_trainables = load_from_hf(model_path, 'non_lora_trainables.bin')\n non_lora_trainables = {(k[11:] if k.startswith('base_model.') else k): v for k, v in non_lora_trainables.items()}\n if any(k.startswith('model.model.') for k in non_lora_trainables):\n non_lora_trainables = {(k[6:] if k.startswith('model.') else k): v for k, v in non_lora_trainables.items()}\n model.load_state_dict(non_lora_trainables, strict=False)\n\n from peft import PeftModel\n print('Loading LoRA weights...')\n model = PeftModel.from_pretrained(model, model_path)\n print('Merging LoRA weights...')\n model = model.merge_and_unload()\n print('Model is loaded...')\n elif model_base is not None:\n # this may be mm projector only\n print('Loading LLaVA from base model...')\n if 'mpt' in model_name.lower():\n if not os.path.isfile(os.path.join(model_path, 'configuration_mpt.py')):\n shutil.copyfile(os.path.join(model_base, 'configuration_mpt.py'), os.path.join(model_path, 'configuration_mpt.py'))\n tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=True)\n cfg_pretrained = AutoConfig.from_pretrained(model_path, trust_remote_code=True)\n model = LlavaMPTForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, config=cfg_pretrained, **kwargs)\n else:\n tokenizer = AutoTokenizer.from_pretrained(model_base, 
use_fast=False)\n cfg_pretrained = AutoConfig.from_pretrained(model_path)\n model = LlavaLlamaForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, config=cfg_pretrained, **kwargs)\n\n mm_projector_weights = torch.load(os.path.join(model_path, 'mm_projector.bin'), map_location='cpu')\n mm_projector_weights = {k: v.to(torch.float16) for k, v in mm_projector_weights.items()}\n model.load_state_dict(mm_projector_weights, strict=False)\n else:\n if 'mpt' in model_name.lower():\n tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=True)\n model = LlavaMPTForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs)\n else:\n tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)\n model = LlavaLlamaForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs)\n else:\n # Load language model\n if model_base is not None:\n # PEFT model\n from peft import PeftModel\n tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)\n model = AutoModelForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, **kwargs)\n print(f\"Loading LoRA weights from {model_path}\")\n model = PeftModel.from_pretrained(model, model_path)\n print(f\"Merging weights\")\n model = model.merge_and_unload()\n print('Convert to FP16...')\n model.to(torch.float16)\n else:\n use_fast = False\n if 'mpt' in model_name.lower():\n tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=True)\n model = AutoModelForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, trust_remote_code=True, **kwargs)\n else:\n tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)\n model = AutoModelForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs)\n\n image_processor = None\n\n if 'llava' in model_name.lower():\n mm_use_im_start_end = getattr(model.config, \"mm_use_im_start_end\", False)\n mm_use_im_patch_token = getattr(model.config, \"mm_use_im_patch_token\", True)\n if mm_use_im_patch_token:\n tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)\n if mm_use_im_start_end:\n tokenizer.add_tokens([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True)\n model.resize_token_embeddings(len(tokenizer))\n\n vision_tower = model.get_vision_tower()\n if not vision_tower.is_loaded:\n vision_tower.load_model()\n vision_tower.to(device=device, dtype=torch.float16)\n image_processor = vision_tower.image_processor\n\n if hasattr(model.config, \"max_sequence_length\"):\n context_len = model.config.max_sequence_length\n else:\n context_len = 2048\n\n return tokenizer, model, image_processor, context_len"
},
{
"identifier": "disable_torch_init",
"path": "llava/utils.py",
"snippet": "def disable_torch_init():\n \"\"\"\n Disable the redundant torch default initialization to accelerate model creation.\n \"\"\"\n import torch\n setattr(torch.nn.Linear, \"reset_parameters\", lambda self: None)\n setattr(torch.nn.LayerNorm, \"reset_parameters\", lambda self: None)"
},
{
"identifier": "process_images",
"path": "llava/mm_utils.py",
"snippet": "def process_images(images, image_processor, model_cfg):\n image_aspect_ratio = getattr(model_cfg, \"image_aspect_ratio\", None)\n new_images = []\n if image_aspect_ratio == 'pad':\n for image in images:\n image = expand2square(image, tuple(int(x*255)\n for x in image_processor.image_mean))\n image = image_processor.preprocess(image, return_tensors='pt')[\n 'pixel_values'][0]\n new_images.append(image)\n else:\n return image_processor(images, return_tensors='pt')['pixel_values']\n if all(x.shape == new_images[0].shape for x in new_images):\n new_images = torch.stack(new_images, dim=0)\n return new_images"
},
{
"identifier": "tokenizer_image_token",
"path": "llava/mm_utils.py",
"snippet": "def tokenizer_image_token(prompt, tokenizer, image_token_index=IMAGE_TOKEN_INDEX, return_tensors=None):\n prompt_chunks = [\n tokenizer(chunk).input_ids for chunk in prompt.split('<image>')]\n\n def insert_separator(X, sep):\n return [ele for sublist in zip(X, [sep]*len(X)) for ele in sublist][:-1]\n\n input_ids = []\n offset = 0\n if len(prompt_chunks) > 0 and len(prompt_chunks[0]) > 0 and prompt_chunks[0][0] == tokenizer.bos_token_id:\n offset = 1\n input_ids.append(prompt_chunks[0][0])\n\n for x in insert_separator(prompt_chunks, [image_token_index] * (offset + 1)):\n input_ids.extend(x[offset:])\n\n if return_tensors is not None:\n if return_tensors == 'pt':\n return torch.tensor(input_ids, dtype=torch.long)\n raise ValueError(f'Unsupported tensor type: {return_tensors}')\n return input_ids"
},
{
"identifier": "get_model_name_from_path",
"path": "llava/mm_utils.py",
"snippet": "def get_model_name_from_path(model_path):\n model_path = model_path.strip(\"/\")\n model_paths = model_path.split(\"/\")\n if model_paths[-1].startswith('checkpoint-'):\n return model_paths[-2] + \"_\" + model_paths[-1]\n else:\n return model_paths[-1]"
},
{
"identifier": "KeywordsStoppingCriteria",
"path": "llava/mm_utils.py",
"snippet": "class KeywordsStoppingCriteria(StoppingCriteria):\n def __init__(self, keywords, tokenizer, input_ids):\n self.keywords = keywords\n self.keyword_ids = []\n self.max_keyword_len = 0\n for keyword in keywords:\n cur_keyword_ids = tokenizer(keyword).input_ids\n if len(cur_keyword_ids) > 1 and cur_keyword_ids[0] == tokenizer.bos_token_id:\n cur_keyword_ids = cur_keyword_ids[1:]\n if len(cur_keyword_ids) > self.max_keyword_len:\n self.max_keyword_len = len(cur_keyword_ids)\n self.keyword_ids.append(torch.tensor(cur_keyword_ids))\n self.tokenizer = tokenizer\n self.start_len = input_ids.shape[1]\n\n def call_for_batch(self, output_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:\n offset = min(output_ids.shape[1] -\n self.start_len, self.max_keyword_len)\n self.keyword_ids = [keyword_id.to(\n output_ids.device) for keyword_id in self.keyword_ids]\n for keyword_id in self.keyword_ids:\n if (output_ids[0, -keyword_id.shape[0]:] == keyword_id).all():\n return True\n outputs = self.tokenizer.batch_decode(\n output_ids[:, -offset:], skip_special_tokens=True)[0]\n for keyword in self.keywords:\n if keyword in outputs:\n return True\n return False\n\n def __call__(self, output_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:\n outputs = []\n for i in range(output_ids.shape[0]):\n outputs.append(self.call_for_batch(\n output_ids[i].unsqueeze(0), scores))\n return all(outputs)"
}
] | import argparse
import torch
import requests
import re
from llava.constants import (
IMAGE_TOKEN_INDEX,
DEFAULT_IMAGE_TOKEN,
DEFAULT_IM_START_TOKEN,
DEFAULT_IM_END_TOKEN,
IMAGE_PLACEHOLDER,
)
from llava.conversation import conv_templates, SeparatorStyle
from llava.model.builder import load_pretrained_model
from llava.utils import disable_torch_init
from llava.mm_utils import (
process_images,
tokenizer_image_token,
get_model_name_from_path,
KeywordsStoppingCriteria,
)
from PIL import Image
from PIL import Image
from io import BytesIO | 3,462 |
def image_parser(args):
out = args.image_file.split(args.sep)
return out
def load_image(image_file):
if image_file.startswith("http") or image_file.startswith("https"):
response = requests.get(image_file)
image = Image.open(BytesIO(response.content)).convert("RGB")
else:
image = Image.open(image_file).convert("RGB")
return image
def load_images(image_files):
out = []
for image_file in image_files:
image = load_image(image_file)
out.append(image)
return out
def eval_model(args):
# Model
disable_torch_init()
model_name = get_model_name_from_path(args.model_path)
tokenizer, model, image_processor, context_len = load_pretrained_model(
args.model_path, args.model_base, model_name
)
qs = args.query
|
def image_parser(args):
out = args.image_file.split(args.sep)
return out
def load_image(image_file):
if image_file.startswith("http") or image_file.startswith("https"):
response = requests.get(image_file)
image = Image.open(BytesIO(response.content)).convert("RGB")
else:
image = Image.open(image_file).convert("RGB")
return image
def load_images(image_files):
out = []
for image_file in image_files:
image = load_image(image_file)
out.append(image)
return out
def eval_model(args):
# Model
disable_torch_init()
model_name = get_model_name_from_path(args.model_path)
tokenizer, model, image_processor, context_len = load_pretrained_model(
args.model_path, args.model_base, model_name
)
qs = args.query | image_token_se = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN | 1 | 2023-11-07 13:06:02+00:00 | 4k |
opendilab/LLMRiddles | llmriddles/questions/level4.py | [
{
"identifier": "register_question",
"path": "llmriddles/questions/question.py",
"snippet": "def register_question(text: Union[Mapping[str, str], str],\n checkers: Union[Mapping[str, SingleLangCheckerTyping], MultiLangCheckerTyping],\n name=Union[Mapping[str, str], str],\n level: int = 1, default_lang='cn'):\n \n checker = checkers if isinstance(checkers, Checker) else Checker(checkers)\n \n if isinstance(text, str):\n texts = {default_lang: text}\n else:\n texts = text\n\n if isinstance(name, str):\n names = {default_lang: name}\n else:\n names = name\n\n _KNOWN_PROBLEMS.append(Question(texts, checker, names, level))"
},
{
"identifier": "Checker",
"path": "llmriddles/questions/question.py",
"snippet": "class Checker:\n\n def __init__(self, checkers, required_input_keys=None) -> None:\n self._origin_checkers = checkers\n if isinstance(checkers, collections.abc.Mapping):\n self.checker = self._integrated_checker\n else:\n self.checker = checkers\n \n if required_input_keys == None:\n required_input_keys = ['question_text', 'user_text', 'answer_text', 'lang']\n self.required_input_keys = required_input_keys\n\n def _integrated_checker(self, question_text: str, user_text: str, answer_text: str, lang: str):\n return self._origin_checkers[lang](question_text, user_text, answer_text)\n\n def __call__(self, inputs):\n return self.checker(*[inputs[key] for key in self.required_input_keys])"
},
{
"identifier": "count_words",
"path": "llmriddles/questions/utility.py",
"snippet": "def count_words(text: str, contain_punctuation: bool = False):\n chinese_words = []\n english_words = []\n other_words = []\n temp_english_words = []\n for char in text:\n if '\\u4e00' <= char <= '\\u9fff':\n chinese_words.append(char)\n if len(temp_english_words) > 0:\n english_words.append(''.join(temp_english_words))\n temp_english_words = []\n else:\n if char.isalpha():\n temp_english_words.append(char)\n else:\n if len(temp_english_words) > 0:\n english_words.append(''.join(temp_english_words))\n temp_english_words = []\n other_words.append(char)\n if contain_punctuation:\n return len(chinese_words) + len(english_words) + len(other_words)\n else:\n return len(chinese_words) + len(english_words)"
},
{
"identifier": "word_reverse_text",
"path": "llmriddles/questions/utility.py",
"snippet": "def word_reverse_text(input_text, lang='cn'):\n if lang == 'cn':\n user_text = input_text[::-1]\n else:\n words = input_text.split()\n words.reverse()\n user_text = ' '.join(words)\n return user_text"
},
{
"identifier": "check_if_is_number",
"path": "llmriddles/questions/math_tools.py",
"snippet": "def check_if_is_number(text: str):\n try:\n int(text)\n return True\n except ValueError:\n return False"
},
{
"identifier": "get_all_numbers",
"path": "llmriddles/questions/math_tools.py",
"snippet": "def get_all_numbers(text: str):\n return get_all_numbers_in_a_sentence(text) + get_all_numbers_in_a_sentence_with_comma(text)"
}
] | import re
from .question import register_question
from .question import Checker
from typing import Callable
from .utility import count_words, word_reverse_text
from .math_tools import check_if_is_number, get_all_numbers
| 1,743 |
CN_TEXT_1 = """
第四章第一题(循环往复),请构造一个问题A,它的回复是不同于A的B,然后以B提问能再次得到A。
请在下面的输入框内填写你构造并点击按钮提交。
"""
EN_TEXT_1 = """
For the first question in chapter 4, please make a question A whose answer is B that is different from A, and then ask B to get A again.
Please enter your query below and click the submit button
"""
def _checker_1(question_text: str, user_text: str, answer_text: str, lang: str, llm_callback: Callable[[str], str]):
answer_text = answer_text.strip()
user_text = user_text.strip()
second_answer_text = llm_callback(answer_text)
if second_answer_text != user_text:
return False, f"B产生的回答和原问题A不一致:\n A: {user_text}\n B: {answer_text}\n Answer from B: {second_answer_text}" if lang == 'cn' else f'The answer from B is not the same as the original question A:\n A: {user_text}\n B: {answer_text}\n Answer from B: {second_answer_text}'
return True, None
register_question(
{
'cn': CN_TEXT_1,
'en': EN_TEXT_1,
},
checkers=Checker(_checker_1, required_input_keys=['question_text', 'user_text', 'answer_text', 'lang', 'llm_callback']),
name={'cn': '4-1 循环往复', 'en': '4-1'},
level=4,
)
CN_TEXT_2 = """
第四章第二题(惜字如金),本题可能没有答案,你可以自由的先去做其他的题。请输入一个字的问题,使模型的回答在16个字以内。
请在下面的输入框内填写你构造并点击按钮提交。
"""
EN_TEXT_2 = """
For the second question in chapter 4, please enter a question of one word so that the model's answer is within 16 words (The Chinese version of this problem may not have a correct answer, but the English version might).
Please enter your query below and click the submit button
"""
def _checker_2(question_text: str, user_text: str, answer_text: str, lang: str):
answer_text = answer_text.strip()
user_text = user_text.strip()
if count_words(user_text) > 1:
return False, "用户的问题长度应该在一个字以内" if lang == 'cn' else 'Question should be within one word.'
if count_words(answer_text) > 16:
return False, "大语言模型的答案应该在16个字以内" if lang == 'cn' else 'Answer should be within 16 words.'
else:
return True, None
register_question(
{
'cn': CN_TEXT_2,
'en': EN_TEXT_2,
},
checkers=_checker_2,
name={'cn': '4-2 惜字如金', 'en': '4-2'},
level=4,
)
CN_TEXT_3 = """
第四章第三题(自然之密),请输入一个大于一的正整数作为问题,使回答里包含和它刚好相差1的数。
请在下面的输入框内填写你构造并点击按钮提交。
"""
EN_TEXT_3 = """
For the third question in chapter 4, please enter a positive integer greater than one as the question so that the answer contains a number that is exactly 1 different from it.
Please enter your query below and click the submit button
"""
def _checker_3(question_text: str, user_text: str, answer_text: str, lang: str):
_ = question_text, lang
answer_text = answer_text.strip()
user_text = user_text.strip()
|
CN_TEXT_1 = """
第四章第一题(循环往复),请构造一个问题A,它的回复是不同于A的B,然后以B提问能再次得到A。
请在下面的输入框内填写你构造并点击按钮提交。
"""
EN_TEXT_1 = """
For the first question in chapter 4, please make a question A whose answer is B that is different from A, and then ask B to get A again.
Please enter your query below and click the submit button
"""
def _checker_1(question_text: str, user_text: str, answer_text: str, lang: str, llm_callback: Callable[[str], str]):
answer_text = answer_text.strip()
user_text = user_text.strip()
second_answer_text = llm_callback(answer_text)
if second_answer_text != user_text:
return False, f"B产生的回答和原问题A不一致:\n A: {user_text}\n B: {answer_text}\n Answer from B: {second_answer_text}" if lang == 'cn' else f'The answer from B is not the same as the original question A:\n A: {user_text}\n B: {answer_text}\n Answer from B: {second_answer_text}'
return True, None
register_question(
{
'cn': CN_TEXT_1,
'en': EN_TEXT_1,
},
checkers=Checker(_checker_1, required_input_keys=['question_text', 'user_text', 'answer_text', 'lang', 'llm_callback']),
name={'cn': '4-1 循环往复', 'en': '4-1'},
level=4,
)
CN_TEXT_2 = """
第四章第二题(惜字如金),本题可能没有答案,你可以自由的先去做其他的题。请输入一个字的问题,使模型的回答在16个字以内。
请在下面的输入框内填写你构造并点击按钮提交。
"""
EN_TEXT_2 = """
For the second question in chapter 4, please enter a question of one word so that the model's answer is within 16 words (The Chinese version of this problem may not have a correct answer, but the English version might).
Please enter your query below and click the submit button
"""
def _checker_2(question_text: str, user_text: str, answer_text: str, lang: str):
answer_text = answer_text.strip()
user_text = user_text.strip()
if count_words(user_text) > 1:
return False, "用户的问题长度应该在一个字以内" if lang == 'cn' else 'Question should be within one word.'
if count_words(answer_text) > 16:
return False, "大语言模型的答案应该在16个字以内" if lang == 'cn' else 'Answer should be within 16 words.'
else:
return True, None
register_question(
{
'cn': CN_TEXT_2,
'en': EN_TEXT_2,
},
checkers=_checker_2,
name={'cn': '4-2 惜字如金', 'en': '4-2'},
level=4,
)
CN_TEXT_3 = """
第四章第三题(自然之密),请输入一个大于一的正整数作为问题,使回答里包含和它刚好相差1的数。
请在下面的输入框内填写你构造并点击按钮提交。
"""
EN_TEXT_3 = """
For the third question in chapter 4, please enter a positive integer greater than one as the question so that the answer contains a number that is exactly 1 different from it.
Please enter your query below and click the submit button
"""
def _checker_3(question_text: str, user_text: str, answer_text: str, lang: str):
_ = question_text, lang
answer_text = answer_text.strip()
user_text = user_text.strip()
| if not check_if_is_number(user_text):
| 4 | 2023-11-07 03:09:55+00:00 | 4k |
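Each row above pairs a cropped code prefix with the single source line that should come next, so a dump like this can be scored directly as a next-line completion benchmark. Below is a minimal sketch of such a scorer in Python; the JSONL layout and the two field names (`code_prefix`, `gold_next_line`) are illustrative assumptions about how the rows might be serialized, not the dump's confirmed schema.

```python
import json
from typing import Callable


def exact_match(pred: str, target: str) -> bool:
    # Whitespace-insensitive comparison of a predicted line vs. the gold next line.
    return pred.strip() == target.strip()


def evaluate(path: str, complete_fn: Callable[[str], str]) -> float:
    """Score a next-line completion function over a JSONL dump of rows.

    `complete_fn` maps a code prefix to one predicted line. The record
    keys used below are assumed for illustration, not confirmed.
    """
    hits = total = 0
    with open(path, encoding="utf-8") as f:
        for raw in f:
            record = json.loads(raw)
            prefix = record["code_prefix"]    # assumed key: the cropped code
            gold = record["gold_next_line"]   # assumed key: the expected next line
            hits += exact_match(complete_fn(prefix), gold)
            total += 1
    return hits / max(total, 1)
```

For example, `evaluate("dump.jsonl", my_model.complete)` would return the fraction of rows whose predicted line matches the gold next line exactly; looser metrics such as edit similarity are common alternatives when exact match is too strict.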