| repo_name (stringlengths 7–71) | file_path (stringlengths 5–118) | context (list) | import_statement (stringlengths 45–12.5k) | token_num (int64 641–99.4k) | cropped_code (stringlengths 44–17k) | all_code (stringlengths 43–754k) | next_line (stringlengths 2–330) | gold_snippet_index (int64 0–68) | created_at (stringlengths 25–25) | level (stringclasses, 9 values) |
|---|---|---|---|---|---|---|---|---|---|---|
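The columns above describe a repository-level next-line completion task: `context` holds cross-file snippets (`identifier`, `path`, `snippet`) retrieved for the target file, `import_statement` and `cropped_code` give the truncated in-file prefix, `next_line` is the ground-truth continuation, and `gold_snippet_index` appears to point at the context entry containing the relevant definition. The sketch below shows one way a row could be assembled into a prompt/target pair; the dataset's published name and loading path are not given in this preview, so the local JSONL load and the exact prompt layout are assumptions, not the dataset's official tooling.

```python
# Minimal sketch: turn one dataset row into a next-line completion prompt.
# Assumes the rows are available locally as JSONL records with the columns
# listed above; "rows.jsonl" is a hypothetical path, and the prompt layout
# (context blocks + imports + cropped code) is an inferred, not documented, format.
import json


def build_prompt(row: dict) -> tuple[str, str]:
    """Return (prompt, target) for next-line prediction from one dataset row."""
    # Each context entry carries a cross-file definition the target file imports.
    context_blocks = [
        f"# {c['path']} :: {c['identifier']}\n{c['snippet']}"
        for c in row["context"]
    ]
    # Prompt = retrieved context + the file's imports + the cropped code prefix;
    # the model is expected to emit row["next_line"] as its continuation.
    prompt = "\n\n".join(
        context_blocks + [row["import_statement"], row["cropped_code"]]
    )
    return prompt, row["next_line"]


if __name__ == "__main__":
    with open("rows.jsonl", encoding="utf-8") as f:
        for line in f:
            prompt, target = build_prompt(json.loads(line))
            print(f"{len(prompt)} chars of prompt -> {target[:60]!r}")
```

The `level` field (9 string classes; every row shown here reads `4k`) is presumably a context-length bucket derived from `token_num`, though the preview does not state this explicitly. Example rows follow.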
zkarpinski/codeinsight-sdk-python | codeinsight_sdk/client.py | [
{
"identifier": "ProjectHandler",
"path": "codeinsight_sdk/handlers.py",
"snippet": "class ProjectHandler(Handler):\n def __init__(self, client):\n super().__init__(client)\n self.cls = Project\n\n def create(self, name:str, description:str = None, folder:str = None,\n scanProfileName:str = None,\n owner:str = None,\n risk:str = None,\n folderId:int = None,\n customFields:List[dict] = None,\n ) -> int:\n \"\"\"\n Creates a project.\n\n Args:\n name (str): The name of the project.\n description (str, optional): The description of the project. Defaults to None.\n folder (str, optional): The folder of the project. Defaults to None.\n\n Returns:\n Project: The created project id.\n \"\"\"\n path = \"projects\"\n data = {\"name\": name,\n \"description\": description,\n \"folderName\": folder,\n \"scanProfileName\": scanProfileName,\n \"owner\": owner,\n \"risk\": risk,\n \"folderId\": folderId,\n \"customFields\": customFields}\n resp = self.client.request(\"POST\", url_part=path, body=data)\n try:\n project_id = resp.json()['data']['id']\n except KeyError:\n raise CodeInsightError(resp)\n return project_id\n\n\n #Note API endpoints switch between projects and project...\n def all(self) -> List[Project]:\n \"\"\"\n Retrieves all projects from the server.\n\n Returns:\n A list of Project objects representing all the projects.\n \"\"\"\n\n path = \"projects\"\n resp = self.client.request(\"GET\", url_part=path)\n projects = []\n for project_data in resp.json()['data']:\n projects.append(self.cls.from_dict(project_data))\n return projects\n \n def get(self, id:int) -> Project:\n \"\"\"\n Retrieves a project by its ID.\n\n Args:\n id (int): The ID of the project requested.\n\n Returns:\n Project: The retrieved project.\n \"\"\"\n path = f\"projects/{id}\"\n resp = self.client.request(\"GET\", url_part=path)\n project_data = resp.json()['data']\n return self.cls.from_dict(project_data)\n \n def get_id(self, project_name:str) -> int:\n \"\"\"\n Retrieves the ID of a project based on its name.\n\n Args:\n projectName (str): The name of the project.\n\n Returns:\n int: The ID of the project.\n \"\"\"\n path = \"project/id\"\n params = {\"projectName\": project_name}\n resp = self.client.request(\"GET\", url_part=path, params=params)\n try:\n project_id = resp.json()['Content: '] # Yes, the key is called 'Content: ' ...\n except KeyError:\n raise CodeInsightError(resp)\n return project_id\n\n def get_inventory_summary(self, project_id:int,\n vulnerabilitySummary : bool = False,\n cvssVersion: str = 'ANY',\n published: str = 'ALL',\n offset:int = 1,\n limit:int = 25) -> List[ProjectInventoryItem]:\n \"\"\"\n Retrieves the inventory summary for a specific project.\n\n Args:\n project_id (int): The ID of the project.\n vulnerabilitySummary (bool, optional): Flag to include vulnerability summary. Defaults to False.\n cvssVersion (str, optional): The CVSS version to filter vulnerabilities. Defaults to 'ANY'.\n published (str, optional): The publication status. Defaults to 'ALL'.\n offset (int, optional): The offset for pagination. Defaults to 1.\n limit (int, optional): The maximum number of items to return. 
Defaults to 25.\n\n Returns:\n List[ProjectInventoryItem]: A list of ProjectInventoryItem objects representing the inventory summary.\n \"\"\"\n path = f\"projects/{project_id}/inventorySummary\"\n params = {\"vulnerabilitySummary\": vulnerabilitySummary,\n \"cvssVersion\": cvssVersion,\n \"published\": published,\n \"offset\": offset,\n \"limit\": limit \n }\n resp = self.client.request(\"GET\", url_part=path, params=params)\n current_page = int(resp.headers['current-page'])\n number_of_pages = int(resp.headers['number-of-pages'])\n total_records = int(resp.headers['total-records'])\n inventory = []\n for inv_item in resp.json()['data']:\n inventory.append(ProjectInventoryItem.from_dict(inv_item))\n \n # Iterate through all the pages\n if number_of_pages > offset:\n params.update({\"offset\": offset+1})\n chunk = self.get_inventory_summary(project_id, **params)\n # Only append the inventory records\n inventory.extend(chunk)\n return inventory\n \n def get_inventory(self,project_id:int,\n skip_vulnerabilities: bool = False,\n published:bool = True,\n vendor:str = None,\n product:str = None,\n page_size: int = 100,\n page: int = 1,\n review_status: str = None,\n alerts: str = None,\n include_files: bool = True\n ) -> ProjectInventory:\n path = f\"project/inventory/{project_id}\"\n params = {\"skipVulnerabilities\": skip_vulnerabilities,\n \"published\": published,\n \"vendor\": vendor,\n \"product\": product,\n \"page\": page,\n \"pageSize\": page_size,\n \"reviewStatus\": review_status,\n \"alerts\": alerts,\n \"includeFiles\": include_files}\n\n resp = self.client.request(\"GET\", url_part=path, params=params)\n project_inventory = resp.json()\n project = ProjectInventory.from_dict(project_inventory)\n\n # Iterate through all the pages\n if int(resp.headers['number-of-pages']) > page:\n chunk = self.get_inventory(project_id, page=page+1)\n # Only append the inventory records\n project.inventoryItems.extend(chunk.inventoryItems)\n\n return project"
},
{
"identifier": "Handler",
"path": "codeinsight_sdk/handlers.py",
"snippet": "class Handler(abc.ABC):\n def __init__(self, client):\n self.client = client\n self.cls = None\n \n @staticmethod\n def create(client, cls):\n k = cls.__name__\n handlers = {\"Project\": ProjectHandler,\n \"Report\": ReportHandler\n }\n handler = handlers.get(k)\n if handler is None:\n raise ValueError(f\"Handler not found for class '{k}'\")\n return handler(client)\n \n @abc.abstractmethod\n def get(self):\n pass"
},
{
"identifier": "ReportHandler",
"path": "codeinsight_sdk/handlers.py",
"snippet": "class ReportHandler(Handler):\n \"\"\"\n A class that handles operations related to reports.\n\n Args:\n client (Client): The client object used for making API requests.\n\n Attributes:\n cls (Report): The class representing a report.\n\n Methods:\n get(id): Retrieves a report by its ID.\n all(): Retrieves all reports.\n\n \"\"\"\n\n def __init__(self, client):\n super().__init__(client)\n self.cls = Report\n\n def get(self, id:int):\n \"\"\"\n Retrieves a report by its ID.\n\n Args:\n id (int): The ID of the report to retrieve.\n\n Returns:\n Report: The report object.\n\n \"\"\"\n path = f\"reports/{id}\"\n resp = self.client.request(\"GET\", url_part=path)\n report_data = resp.json()['data']\n report = self.cls.from_dict(report_data)\n return report\n\n def all(self):\n \"\"\"\n Retrieves all reports.\n\n Returns:\n list: A list of report objects.\n\n \"\"\"\n path = \"reports\"\n resp = self.client.request(\"GET\", url_part=path)\n reports = []\n for report_data in resp.json()['data']:\n reports.append(self.cls.from_dict(report_data))\n return reports"
},
{
"identifier": "Project",
"path": "codeinsight_sdk/models.py",
"snippet": "class Project(DataClassJsonMixin):\n id: int\n name: str\n status: Optional[str] = None\n owner: Optional[str] = None\n description: Optional[str] = None\n dateCreated: Optional[str] = None\n projectPath: Optional[str] = None\n # TODO: Should this be a dictionary or another class? This structure is reused in a few APIs\n vulnerabilities: Optional[Dict[str, Dict]] = None"
},
{
"identifier": "ProjectInventory",
"path": "codeinsight_sdk/models.py",
"snippet": "class ProjectInventory():\n projectId: int\n inventoryItems: List[ProjectInventoryItem]"
},
{
"identifier": "Report",
"path": "codeinsight_sdk/models.py",
"snippet": "class Report(DataClassJsonMixin):\n id: int\n name: str\n path: str\n default: bool\n enabled: bool\n enableProjectPicker: bool\n order: int\n createdDateTime: str\n updatedDateTime: str"
},
{
"identifier": "CodeInsightError",
"path": "codeinsight_sdk/exceptions.py",
"snippet": "class CodeInsightError(GenericError):\n \"\"\"Error class for code insight API errors.\"\"\"\n def __init__(self, response: requests.Response):\n try:\n resp = response.json()\n self.code = response.status_code\n self.message = resp['Error: ']\n self.arguments = resp['Arguments: ']\n self.error = resp['Key: ']\n self.add_note(f\"Arguments: {self.arguments}\")\n super().__init__(\"Error: %s - %s\" % (self.code, self.message))\n\n except KeyError:\n raise ValueError(f\"Error parsing response: {resp}\")\n except json.decoder.JSONDecodeError:\n raise ValueError(f\"Error decoding response: {resp}\")"
}
] | import requests
import logging
from .handlers import ProjectHandler, Handler, ReportHandler
from .models import Project, ProjectInventory, Report
from .exceptions import CodeInsightError | 2,652 |
logger = logging.getLogger(__name__)
class CodeInsightClient:
def __init__(self,
base_url: str,
api_token: str,
timeout: int = 60,
verify_ssl: bool = True
):
self.base_url = base_url
self.api_url = f"{base_url}/codeinsight/api"
self.__api_token = api_token
self.__api_headers = {
'Content-Type': 'application/json',
"Authorization": "Bearer %s" % self.__api_token,
"User-Agent": "codeinsight_sdk_python",
}
self.__timeout = timeout
self.__verify_ssl = verify_ssl
def request(self, method, url_part: str, params: dict = None, body: any = None ):
url = f"{self.api_url}/{url_part}"
# Iterate over params and remove any that are None (Empty)
if(params):
for k, v in list(params.items()):
if v is None:
del params[k]
response = requests.request(method, url,
headers=self.__api_headers, params=params, json=body,
timeout=self.__timeout, verify=self.__verify_ssl)
if not response.ok:
logger.error(f"Error: {response.status_code} - {response.reason}", exc_info=True)
logger.error(response.text)
raise CodeInsightError(response)
return response
@property
|
logger = logging.getLogger(__name__)
class CodeInsightClient:
def __init__(self,
base_url: str,
api_token: str,
timeout: int = 60,
verify_ssl: bool = True
):
self.base_url = base_url
self.api_url = f"{base_url}/codeinsight/api"
self.__api_token = api_token
self.__api_headers = {
'Content-Type': 'application/json',
"Authorization": "Bearer %s" % self.__api_token,
"User-Agent": "codeinsight_sdk_python",
}
self.__timeout = timeout
self.__verify_ssl = verify_ssl
def request(self, method, url_part: str, params: dict = None, body: any = None ):
url = f"{self.api_url}/{url_part}"
# Iterate over params and remove any that are None (Empty)
if(params):
for k, v in list(params.items()):
if v is None:
del params[k]
response = requests.request(method, url,
headers=self.__api_headers, params=params, json=body,
timeout=self.__timeout, verify=self.__verify_ssl)
if not response.ok:
logger.error(f"Error: {response.status_code} - {response.reason}", exc_info=True)
logger.error(response.text)
raise CodeInsightError(response)
return response
@property | def projects(self) -> ProjectHandler: | 0 | 2023-12-29 00:49:12+00:00 | 4k |
daswer123/rvc-python | rvc_python/lib/infer_pack/modules.py | [
{
"identifier": "commons",
"path": "rvc_python/lib/infer_pack/commons.py",
"snippet": "def init_weights(m, mean=0.0, std=0.01):\ndef get_padding(kernel_size, dilation=1):\ndef convert_pad_shape(pad_shape):\ndef kl_divergence(m_p, logs_p, m_q, logs_q):\ndef rand_gumbel(shape):\ndef rand_gumbel_like(x):\ndef slice_segments(x, ids_str, segment_size=4):\ndef slice_segments2(x, ids_str, segment_size=4):\ndef rand_slice_segments(x, x_lengths=None, segment_size=4):\ndef get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4):\ndef add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):\ndef cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):\ndef subsequent_mask(length):\ndef fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):\ndef convert_pad_shape(pad_shape):\ndef shift_1d(x):\ndef sequence_mask(length, max_length=None):\ndef generate_path(duration, mask):\ndef clip_grad_value_(parameters, clip_value, norm_type=2):"
},
{
"identifier": "init_weights",
"path": "rvc_python/lib/infer_pack/commons.py",
"snippet": "def init_weights(m, mean=0.0, std=0.01):\n classname = m.__class__.__name__\n if classname.find(\"Conv\") != -1:\n m.weight.data.normal_(mean, std)"
},
{
"identifier": "get_padding",
"path": "rvc_python/lib/infer_pack/commons.py",
"snippet": "def get_padding(kernel_size, dilation=1):\n return int((kernel_size * dilation - dilation) / 2)"
},
{
"identifier": "piecewise_rational_quadratic_transform",
"path": "rvc_python/lib/infer_pack/transforms.py",
"snippet": "def piecewise_rational_quadratic_transform(\n inputs,\n unnormalized_widths,\n unnormalized_heights,\n unnormalized_derivatives,\n inverse=False,\n tails=None,\n tail_bound=1.0,\n min_bin_width=DEFAULT_MIN_BIN_WIDTH,\n min_bin_height=DEFAULT_MIN_BIN_HEIGHT,\n min_derivative=DEFAULT_MIN_DERIVATIVE,\n):\n if tails is None:\n spline_fn = rational_quadratic_spline\n spline_kwargs = {}\n else:\n spline_fn = unconstrained_rational_quadratic_spline\n spline_kwargs = {\"tails\": tails, \"tail_bound\": tail_bound}\n\n outputs, logabsdet = spline_fn(\n inputs=inputs,\n unnormalized_widths=unnormalized_widths,\n unnormalized_heights=unnormalized_heights,\n unnormalized_derivatives=unnormalized_derivatives,\n inverse=inverse,\n min_bin_width=min_bin_width,\n min_bin_height=min_bin_height,\n min_derivative=min_derivative,\n **spline_kwargs\n )\n return outputs, logabsdet"
}
] | import copy
import math
import numpy as np
import scipy
import torch
from torch import nn
from torch.nn import functional as F
from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
from torch.nn.utils import weight_norm, remove_weight_norm
from rvc_python.lib.infer_pack import commons
from rvc_python.lib.infer_pack.commons import init_weights, get_padding
from rvc_python.lib.infer_pack.transforms import piecewise_rational_quadratic_transform | 2,889 | class ElementwiseAffine(nn.Module):
def __init__(self, channels):
super().__init__()
self.channels = channels
self.m = nn.Parameter(torch.zeros(channels, 1))
self.logs = nn.Parameter(torch.zeros(channels, 1))
def forward(self, x, x_mask, reverse=False, **kwargs):
if not reverse:
y = self.m + torch.exp(self.logs) * x
y = y * x_mask
logdet = torch.sum(self.logs * x_mask, [1, 2])
return y, logdet
else:
x = (x - self.m) * torch.exp(-self.logs) * x_mask
return x
class ResidualCouplingLayer(nn.Module):
def __init__(
self,
channels,
hidden_channels,
kernel_size,
dilation_rate,
n_layers,
p_dropout=0,
gin_channels=0,
mean_only=False,
):
assert channels % 2 == 0, "channels should be divisible by 2"
super().__init__()
self.channels = channels
self.hidden_channels = hidden_channels
self.kernel_size = kernel_size
self.dilation_rate = dilation_rate
self.n_layers = n_layers
self.half_channels = channels // 2
self.mean_only = mean_only
self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
self.enc = WN(
hidden_channels,
kernel_size,
dilation_rate,
n_layers,
p_dropout=p_dropout,
gin_channels=gin_channels,
)
self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
self.post.weight.data.zero_()
self.post.bias.data.zero_()
def forward(self, x, x_mask, g=None, reverse=False):
x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
h = self.pre(x0) * x_mask
h = self.enc(h, x_mask, g=g)
stats = self.post(h) * x_mask
if not self.mean_only:
m, logs = torch.split(stats, [self.half_channels] * 2, 1)
else:
m = stats
logs = torch.zeros_like(m)
if not reverse:
x1 = m + x1 * torch.exp(logs) * x_mask
x = torch.cat([x0, x1], 1)
logdet = torch.sum(logs, [1, 2])
return x, logdet
else:
x1 = (x1 - m) * torch.exp(-logs) * x_mask
x = torch.cat([x0, x1], 1)
return x
def remove_weight_norm(self):
self.enc.remove_weight_norm()
class ConvFlow(nn.Module):
def __init__(
self,
in_channels,
filter_channels,
kernel_size,
n_layers,
num_bins=10,
tail_bound=5.0,
):
super().__init__()
self.in_channels = in_channels
self.filter_channels = filter_channels
self.kernel_size = kernel_size
self.n_layers = n_layers
self.num_bins = num_bins
self.tail_bound = tail_bound
self.half_channels = in_channels // 2
self.pre = nn.Conv1d(self.half_channels, filter_channels, 1)
self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.0)
self.proj = nn.Conv1d(
filter_channels, self.half_channels * (num_bins * 3 - 1), 1
)
self.proj.weight.data.zero_()
self.proj.bias.data.zero_()
def forward(self, x, x_mask, g=None, reverse=False):
x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
h = self.pre(x0)
h = self.convs(h, x_mask, g=g)
h = self.proj(h) * x_mask
b, c, t = x0.shape
h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?]
unnormalized_widths = h[..., : self.num_bins] / math.sqrt(self.filter_channels)
unnormalized_heights = h[..., self.num_bins : 2 * self.num_bins] / math.sqrt(
self.filter_channels
)
unnormalized_derivatives = h[..., 2 * self.num_bins :]
|
LRELU_SLOPE = 0.1
class LayerNorm(nn.Module):
def __init__(self, channels, eps=1e-5):
super().__init__()
self.channels = channels
self.eps = eps
self.gamma = nn.Parameter(torch.ones(channels))
self.beta = nn.Parameter(torch.zeros(channels))
def forward(self, x):
x = x.transpose(1, -1)
x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
return x.transpose(1, -1)
class ConvReluNorm(nn.Module):
def __init__(
self,
in_channels,
hidden_channels,
out_channels,
kernel_size,
n_layers,
p_dropout,
):
super().__init__()
self.in_channels = in_channels
self.hidden_channels = hidden_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.n_layers = n_layers
self.p_dropout = p_dropout
assert n_layers > 1, "Number of layers should be larger than 0."
self.conv_layers = nn.ModuleList()
self.norm_layers = nn.ModuleList()
self.conv_layers.append(
nn.Conv1d(
in_channels, hidden_channels, kernel_size, padding=kernel_size // 2
)
)
self.norm_layers.append(LayerNorm(hidden_channels))
self.relu_drop = nn.Sequential(nn.ReLU(), nn.Dropout(p_dropout))
for _ in range(n_layers - 1):
self.conv_layers.append(
nn.Conv1d(
hidden_channels,
hidden_channels,
kernel_size,
padding=kernel_size // 2,
)
)
self.norm_layers.append(LayerNorm(hidden_channels))
self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
self.proj.weight.data.zero_()
self.proj.bias.data.zero_()
def forward(self, x, x_mask):
x_org = x
for i in range(self.n_layers):
x = self.conv_layers[i](x * x_mask)
x = self.norm_layers[i](x)
x = self.relu_drop(x)
x = x_org + self.proj(x)
return x * x_mask
class DDSConv(nn.Module):
"""
Dialted and Depth-Separable Convolution
"""
def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0):
super().__init__()
self.channels = channels
self.kernel_size = kernel_size
self.n_layers = n_layers
self.p_dropout = p_dropout
self.drop = nn.Dropout(p_dropout)
self.convs_sep = nn.ModuleList()
self.convs_1x1 = nn.ModuleList()
self.norms_1 = nn.ModuleList()
self.norms_2 = nn.ModuleList()
for i in range(n_layers):
dilation = kernel_size**i
padding = (kernel_size * dilation - dilation) // 2
self.convs_sep.append(
nn.Conv1d(
channels,
channels,
kernel_size,
groups=channels,
dilation=dilation,
padding=padding,
)
)
self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
self.norms_1.append(LayerNorm(channels))
self.norms_2.append(LayerNorm(channels))
def forward(self, x, x_mask, g=None):
if g is not None:
x = x + g
for i in range(self.n_layers):
y = self.convs_sep[i](x * x_mask)
y = self.norms_1[i](y)
y = F.gelu(y)
y = self.convs_1x1[i](y)
y = self.norms_2[i](y)
y = F.gelu(y)
y = self.drop(y)
x = x + y
return x * x_mask
class WN(torch.nn.Module):
def __init__(
self,
hidden_channels,
kernel_size,
dilation_rate,
n_layers,
gin_channels=0,
p_dropout=0,
):
super(WN, self).__init__()
assert kernel_size % 2 == 1
self.hidden_channels = hidden_channels
self.kernel_size = (kernel_size,)
self.dilation_rate = dilation_rate
self.n_layers = n_layers
self.gin_channels = gin_channels
self.p_dropout = p_dropout
self.in_layers = torch.nn.ModuleList()
self.res_skip_layers = torch.nn.ModuleList()
self.drop = nn.Dropout(p_dropout)
if gin_channels != 0:
cond_layer = torch.nn.Conv1d(
gin_channels, 2 * hidden_channels * n_layers, 1
)
self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight")
for i in range(n_layers):
dilation = dilation_rate**i
padding = int((kernel_size * dilation - dilation) / 2)
in_layer = torch.nn.Conv1d(
hidden_channels,
2 * hidden_channels,
kernel_size,
dilation=dilation,
padding=padding,
)
in_layer = torch.nn.utils.weight_norm(in_layer, name="weight")
self.in_layers.append(in_layer)
# last one is not necessary
if i < n_layers - 1:
res_skip_channels = 2 * hidden_channels
else:
res_skip_channels = hidden_channels
res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight")
self.res_skip_layers.append(res_skip_layer)
def forward(self, x, x_mask, g=None, **kwargs):
output = torch.zeros_like(x)
n_channels_tensor = torch.IntTensor([self.hidden_channels])
if g is not None:
g = self.cond_layer(g)
for i in range(self.n_layers):
x_in = self.in_layers[i](x)
if g is not None:
cond_offset = i * 2 * self.hidden_channels
g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :]
else:
g_l = torch.zeros_like(x_in)
acts = commons.fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor)
acts = self.drop(acts)
res_skip_acts = self.res_skip_layers[i](acts)
if i < self.n_layers - 1:
res_acts = res_skip_acts[:, : self.hidden_channels, :]
x = (x + res_acts) * x_mask
output = output + res_skip_acts[:, self.hidden_channels :, :]
else:
output = output + res_skip_acts
return output * x_mask
def remove_weight_norm(self):
if self.gin_channels != 0:
torch.nn.utils.remove_weight_norm(self.cond_layer)
for l in self.in_layers:
torch.nn.utils.remove_weight_norm(l)
for l in self.res_skip_layers:
torch.nn.utils.remove_weight_norm(l)
class ResBlock1(torch.nn.Module):
def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
super(ResBlock1, self).__init__()
self.convs1 = nn.ModuleList(
[
weight_norm(
Conv1d(
channels,
channels,
kernel_size,
1,
dilation=dilation[0],
padding=get_padding(kernel_size, dilation[0]),
)
),
weight_norm(
Conv1d(
channels,
channels,
kernel_size,
1,
dilation=dilation[1],
padding=get_padding(kernel_size, dilation[1]),
)
),
weight_norm(
Conv1d(
channels,
channels,
kernel_size,
1,
dilation=dilation[2],
padding=get_padding(kernel_size, dilation[2]),
)
),
]
)
self.convs1.apply(init_weights)
self.convs2 = nn.ModuleList(
[
weight_norm(
Conv1d(
channels,
channels,
kernel_size,
1,
dilation=1,
padding=get_padding(kernel_size, 1),
)
),
weight_norm(
Conv1d(
channels,
channels,
kernel_size,
1,
dilation=1,
padding=get_padding(kernel_size, 1),
)
),
weight_norm(
Conv1d(
channels,
channels,
kernel_size,
1,
dilation=1,
padding=get_padding(kernel_size, 1),
)
),
]
)
self.convs2.apply(init_weights)
def forward(self, x, x_mask=None):
for c1, c2 in zip(self.convs1, self.convs2):
xt = F.leaky_relu(x, LRELU_SLOPE)
if x_mask is not None:
xt = xt * x_mask
xt = c1(xt)
xt = F.leaky_relu(xt, LRELU_SLOPE)
if x_mask is not None:
xt = xt * x_mask
xt = c2(xt)
x = xt + x
if x_mask is not None:
x = x * x_mask
return x
def remove_weight_norm(self):
for l in self.convs1:
remove_weight_norm(l)
for l in self.convs2:
remove_weight_norm(l)
class ResBlock2(torch.nn.Module):
def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
super(ResBlock2, self).__init__()
self.convs = nn.ModuleList(
[
weight_norm(
Conv1d(
channels,
channels,
kernel_size,
1,
dilation=dilation[0],
padding=get_padding(kernel_size, dilation[0]),
)
),
weight_norm(
Conv1d(
channels,
channels,
kernel_size,
1,
dilation=dilation[1],
padding=get_padding(kernel_size, dilation[1]),
)
),
]
)
self.convs.apply(init_weights)
def forward(self, x, x_mask=None):
for c in self.convs:
xt = F.leaky_relu(x, LRELU_SLOPE)
if x_mask is not None:
xt = xt * x_mask
xt = c(xt)
x = xt + x
if x_mask is not None:
x = x * x_mask
return x
def remove_weight_norm(self):
for l in self.convs:
remove_weight_norm(l)
class Log(nn.Module):
def forward(self, x, x_mask, reverse=False, **kwargs):
if not reverse:
y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
logdet = torch.sum(-y, [1, 2])
return y, logdet
else:
x = torch.exp(x) * x_mask
return x
class Flip(nn.Module):
def forward(self, x, *args, reverse=False, **kwargs):
x = torch.flip(x, [1])
if not reverse:
logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
return x, logdet
else:
return x
class ElementwiseAffine(nn.Module):
def __init__(self, channels):
super().__init__()
self.channels = channels
self.m = nn.Parameter(torch.zeros(channels, 1))
self.logs = nn.Parameter(torch.zeros(channels, 1))
def forward(self, x, x_mask, reverse=False, **kwargs):
if not reverse:
y = self.m + torch.exp(self.logs) * x
y = y * x_mask
logdet = torch.sum(self.logs * x_mask, [1, 2])
return y, logdet
else:
x = (x - self.m) * torch.exp(-self.logs) * x_mask
return x
class ResidualCouplingLayer(nn.Module):
def __init__(
self,
channels,
hidden_channels,
kernel_size,
dilation_rate,
n_layers,
p_dropout=0,
gin_channels=0,
mean_only=False,
):
assert channels % 2 == 0, "channels should be divisible by 2"
super().__init__()
self.channels = channels
self.hidden_channels = hidden_channels
self.kernel_size = kernel_size
self.dilation_rate = dilation_rate
self.n_layers = n_layers
self.half_channels = channels // 2
self.mean_only = mean_only
self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
self.enc = WN(
hidden_channels,
kernel_size,
dilation_rate,
n_layers,
p_dropout=p_dropout,
gin_channels=gin_channels,
)
self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
self.post.weight.data.zero_()
self.post.bias.data.zero_()
def forward(self, x, x_mask, g=None, reverse=False):
x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
h = self.pre(x0) * x_mask
h = self.enc(h, x_mask, g=g)
stats = self.post(h) * x_mask
if not self.mean_only:
m, logs = torch.split(stats, [self.half_channels] * 2, 1)
else:
m = stats
logs = torch.zeros_like(m)
if not reverse:
x1 = m + x1 * torch.exp(logs) * x_mask
x = torch.cat([x0, x1], 1)
logdet = torch.sum(logs, [1, 2])
return x, logdet
else:
x1 = (x1 - m) * torch.exp(-logs) * x_mask
x = torch.cat([x0, x1], 1)
return x
def remove_weight_norm(self):
self.enc.remove_weight_norm()
class ConvFlow(nn.Module):
def __init__(
self,
in_channels,
filter_channels,
kernel_size,
n_layers,
num_bins=10,
tail_bound=5.0,
):
super().__init__()
self.in_channels = in_channels
self.filter_channels = filter_channels
self.kernel_size = kernel_size
self.n_layers = n_layers
self.num_bins = num_bins
self.tail_bound = tail_bound
self.half_channels = in_channels // 2
self.pre = nn.Conv1d(self.half_channels, filter_channels, 1)
self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.0)
self.proj = nn.Conv1d(
filter_channels, self.half_channels * (num_bins * 3 - 1), 1
)
self.proj.weight.data.zero_()
self.proj.bias.data.zero_()
def forward(self, x, x_mask, g=None, reverse=False):
x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
h = self.pre(x0)
h = self.convs(h, x_mask, g=g)
h = self.proj(h) * x_mask
b, c, t = x0.shape
h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?]
unnormalized_widths = h[..., : self.num_bins] / math.sqrt(self.filter_channels)
unnormalized_heights = h[..., self.num_bins : 2 * self.num_bins] / math.sqrt(
self.filter_channels
)
unnormalized_derivatives = h[..., 2 * self.num_bins :]
| x1, logabsdet = piecewise_rational_quadratic_transform( | 3 | 2023-12-26 19:05:42+00:00 | 4k |
Eeems-Org/remarkable-update-fuse | remarkable_update_fuse/fuse.py | [
{
"identifier": "UpdateImage",
"path": "remarkable_update_fuse/image.py",
"snippet": "class UpdateImage(io.RawIOBase):\n _manifest = None\n _offset = -1\n _size = 0\n _pos = 0\n\n def __init__(self, update_file, cache_size=500, cache_ttl=60):\n self.update_file = update_file\n self.cache_size = cache_size\n self._cache = BlockCache(\n maxsize=cache_size * 1024 * 1024,\n ttl=cache_ttl,\n )\n with open(self.update_file, \"rb\") as f:\n magic = f.read(4)\n if magic != b\"CrAU\":\n raise UpdateImageException(\"Wrong header\")\n\n major = struct.unpack(\">Q\", f.read(8))[0]\n if major != 1:\n raise UpdateImageException(\"Unsupported version\")\n\n size = struct.unpack(\">Q\", f.read(8))[0]\n data = f.read(size)\n self._manifest = DeltaArchiveManifest.FromString(data)\n self._offset = f.tell()\n\n for blob, offset, length, f in self._blobs:\n self._size += length\n\n def verify(self, publickey):\n _publickey = load_pem_public_key(publickey)\n with open(self.update_file, \"rb\") as f:\n data = f.read(self._offset + self._manifest.signatures_offset)\n\n actual_hash = sha256(data).digest()\n signed_hash = _publickey.recover_data_from_signature(\n self.signature,\n PKCS1v15(),\n SHA256,\n )\n if actual_hash != signed_hash:\n raise UpdateImageSignatureException(\n \"Actual hash does not match signed hash\", signed_hash, actual_hash\n )\n\n @property\n def block_size(self):\n return self._manifest.block_size\n\n @property\n def signature(self):\n for signature in self._signatures:\n if signature.version == 2:\n return signature.data\n\n return None\n\n @property\n def _signatures(self):\n with open(self.update_file, \"rb\") as f:\n f.seek(self._offset + self._manifest.signatures_offset)\n for signature in Signatures.FromString(\n f.read(self._manifest.signatures_size)\n ).signatures:\n yield signature\n\n @property\n def _blobs(self):\n with open(self.update_file, \"rb\") as f:\n for blob in self._manifest.partition_operations:\n f.seek(self._offset + blob.data_offset)\n dst_offset = blob.dst_extents[0].start_block * self.block_size\n dst_length = blob.dst_extents[0].num_blocks * self.block_size\n if blob.type not in (0, 1):\n raise UpdateImageException(f\"Unsupported type {blob.type}\")\n\n yield blob, dst_offset, dst_length, f\n\n self.expire()\n\n def _read_blob(self, blob, blob_offset, blob_length, f):\n if blob_offset in self._cache:\n return self._cache[blob_offset]\n\n if blob.type not in (\n InstallOperation.Type.REPLACE,\n InstallOperation.Type.REPLACE_BZ,\n ):\n raise NotImplementedError(\n f\"Error: {InstallOperation.Type.keys()[blob.type]} has not been implemented yet\"\n )\n\n blob_data = f.read(blob.data_length)\n if sha256(blob_data).digest() != blob.data_sha256_hash:\n raise UpdateImageException(\"Error: Data has wrong sha256sum\")\n\n if blob.type == InstallOperation.Type.REPLACE_BZ:\n try:\n blob_data = bz2.decompress(blob_data)\n\n except ValueError as err:\n raise UpdateImageException(f\"Error: {err}\") from err\n\n if blob_length - len(blob_data) < 0:\n raise UpdateImageException(\n f\"Error: Bz2 compressed data was the wrong length {len(blob_data)}\"\n )\n\n try:\n self._cache[blob_offset] = blob_data\n except ValueError as err:\n if str(err) != \"value too large\":\n raise err\n\n return blob_data\n\n @property\n def cache(self):\n return self._cache\n\n @property\n def size(self):\n return self._size\n\n def expire(self):\n self._cache.expire()\n\n def writable(self):\n return False\n\n def seekable(self):\n return False\n\n def readable(self):\n return True\n\n def seek(self, offset, whence=os.SEEK_SET):\n if whence not in (os.SEEK_SET, os.SEEK_CUR, 
os.SEEK_END):\n raise OSError(\"Not supported whence\")\n if whence == os.SEEK_SET and offset < 0:\n raise ValueError(\"offset can't be negative\")\n if whence == os.SEEK_END and offset > 0:\n raise ValueError(\"offset can't be positive\")\n\n if whence == os.SEEK_SET:\n self._pos = min(max(offset, 0), self._size)\n elif whence == os.SEEK_CUR:\n self._pos = min(max(self._pos + offset, 0), self._size)\n elif whence == os.SEEK_END:\n self._pos = min(max(self._pos + offset + self._size, 0), self._size)\n return self._pos\n\n def tell(self):\n return self._pos\n\n def read(self, size=-1):\n res = self.peek(size)\n self.seek(len(res), whence=os.SEEK_CUR)\n return res\n\n def peek(self, size=0):\n offset = self._pos\n if offset >= self._size:\n return b\"\"\n\n if size <= 0 or offset + size > self._size:\n size = self._size - offset\n\n res = bytearray(size)\n for blob, blob_offset, blob_length, f in self._blobs:\n if offset < blob_offset:\n continue\n if offset >= blob_offset + blob_length:\n continue\n\n blob_data = self._read_blob(blob, blob_offset, blob_length, f)\n blob_start_offset = max(offset - blob_offset, 0)\n blob_end_offset = min(offset - blob_offset + size, blob_length)\n data = blob_data[blob_start_offset:blob_end_offset]\n\n assert blob_start_offset >= 0\n assert blob_end_offset <= blob_length\n assert blob_end_offset - blob_start_offset == len(data)\n\n start_offset = blob_offset + blob_start_offset - offset\n end_offset = blob_offset + blob_end_offset - offset\n res[start_offset:end_offset] = data\n\n assert start_offset >= 0\n assert start_offset < len(res)\n assert end_offset < blob_offset + blob_length\n assert end_offset - start_offset == len(data)\n assert end_offset <= len(res)\n assert res[start_offset:end_offset] == data\n\n assert len(res) == size\n return bytes(res)"
},
{
"identifier": "UpdateImageSignatureException",
"path": "remarkable_update_fuse/image.py",
"snippet": "class UpdateImageSignatureException(UpdateImageException):\n def __init__(self, message, signed_hash, actual_hash):\n super().__init__(self, message)\n self.signed_hash = signed_hash\n self.actual_hash = actual_hash"
},
{
"identifier": "KillableThread",
"path": "remarkable_update_fuse/threads.py",
"snippet": "class KillableThread(threading.Thread):\n def run(self):\n try:\n super().run()\n\n except SystemExit:\n pass\n\n def kill(self):\n if not self.is_alive():\n return\n\n res = ctypes.pythonapi.PyThreadState_SetAsyncExc(\n ctypes.c_ulong(self.ident),\n ctypes.py_object(SystemExit),\n )\n\n if res == 0:\n raise ValueError(f\"Invalid thread id: {self.ident}\")\n\n if res == 1:\n return\n\n # \"if it returns a number greater than one, you're in trouble,\n # and you should call it again with exc=NULL to revert the effect\"\n ctypes.pythonapi.PyThreadState_SetAsyncExc(\n ctypes.c_ulong(self.ident),\n None,\n )\n raise SystemError(\"PyThreadState_SetAsyncExc failed\")"
}
] | import errno
import os
import queue
import sys
import threading
import time
import warnings
import fuse
import ext4
from pathlib import PurePosixPath
from .image import UpdateImage
from .image import UpdateImageSignatureException
from .threads import KillableThread | 2,927 | f" {self.modifiers}",
" -o ",
]
)
+ ",\n ".join(self._str_core())
+ " >"
)
class FuseOptParse(fuse.FuseOptParse):
def __init__(self, *args, **kw):
fuse.FuseOptParse.__init__(self, *args, **kw)
def parse_args(self, args=None, values=None):
_opts, _args = fuse.FuseOptParse.parse_args(self, args, values)
if _args:
self.fuse_args.update_file = os.path.realpath(_args.pop())
return _opts, _args
class Stat(fuse.Stat):
def __init__(self):
self.st_mode = 0
self.st_ino = 0
self.st_dev = 0
self.st_nlink = 0
self.st_uid = 0
self.st_gid = 0
self.st_size = 0
self.st_atime = 0
self.st_mtime = 0
self.st_ctime = 0
class UpdateFS(fuse.Fuse):
version = "%prog " + fuse.__version__
fusage = "%prog update_file mountpoint [options]"
dash_s_do = "setsingle"
disable_path_cache = False
cache_debug = False
cache_size = 500
cache_ttl = 60
image = None
volume = None
inode_cache = {}
queue = None
exit_threads = False
def __init__(self, *args, **kw):
fuse.Fuse.__init__(
self,
*args,
fuse_args=FuseArgs(),
parser_class=FuseOptParse,
**kw,
)
self.parser.add_option(
mountopt="disable_path_cache",
action="store_true",
help="Disable path caching",
)
self.parser.add_option(
mountopt="cache_debug",
action="store_true",
help="Debug output for path caching",
)
self.parser.add_option(
mountopt="cache_size",
default=500,
type="int",
help="Size in MB of memory cache for speeding up filesytem access [default: %default]",
)
self.parser.add_option(
mountopt="cache_ttl",
default=60,
type="int",
help="Seconds before the memory cache will evict unused chunks [default: %default]",
)
@property
def update_file(self):
return self.fuse_args.update_file
@property
def mountpoint(self):
return self.fuse_args.mountpoint
def fuse_error(self, msg):
print(msg, file=sys.stderr)
self.fuse_args.setmod("showhelp")
fuse.Fuse.main(self, self.args)
sys.exit(1)
def main(self, args=None):
self.args = args
if self.fuse_args.getmod("showhelp"):
fuse.Fuse.main(self, args)
return
if self.update_file is None:
self.fuse_error("fuse: missing update_file parameter")
if not os.path.exists(self.update_file):
self.fuse_error(f"fuse: File does not exist {self.update_file}")
self.image = UpdateImage(
self.update_file,
cache_size=self.cache_size,
cache_ttl=self.cache_ttl,
)
self.volume = ext4.Volume(self.image, offset=0)
print("Verifying signature...")
try:
self.image.verify(
self.get_inode("/usr/share/update_engine/update-payload-key.pub.pem")
.open()
.read()
)
print("Signature verified")
|
fuse.fuse_python_api = (0, 2)
class ImageException(Exception):
pass
class FuseArgs(fuse.FuseArgs):
def __init__(self):
fuse.FuseArgs.__init__(self)
self.update_file = None
def __str__(self):
return (
"\n".join(
[
f"< {self.update_file} on {self.mountpoint}:",
f" {self.modifiers}",
" -o ",
]
)
+ ",\n ".join(self._str_core())
+ " >"
)
class FuseOptParse(fuse.FuseOptParse):
def __init__(self, *args, **kw):
fuse.FuseOptParse.__init__(self, *args, **kw)
def parse_args(self, args=None, values=None):
_opts, _args = fuse.FuseOptParse.parse_args(self, args, values)
if _args:
self.fuse_args.update_file = os.path.realpath(_args.pop())
return _opts, _args
class Stat(fuse.Stat):
def __init__(self):
self.st_mode = 0
self.st_ino = 0
self.st_dev = 0
self.st_nlink = 0
self.st_uid = 0
self.st_gid = 0
self.st_size = 0
self.st_atime = 0
self.st_mtime = 0
self.st_ctime = 0
class UpdateFS(fuse.Fuse):
version = "%prog " + fuse.__version__
fusage = "%prog update_file mountpoint [options]"
dash_s_do = "setsingle"
disable_path_cache = False
cache_debug = False
cache_size = 500
cache_ttl = 60
image = None
volume = None
inode_cache = {}
queue = None
exit_threads = False
def __init__(self, *args, **kw):
fuse.Fuse.__init__(
self,
*args,
fuse_args=FuseArgs(),
parser_class=FuseOptParse,
**kw,
)
self.parser.add_option(
mountopt="disable_path_cache",
action="store_true",
help="Disable path caching",
)
self.parser.add_option(
mountopt="cache_debug",
action="store_true",
help="Debug output for path caching",
)
self.parser.add_option(
mountopt="cache_size",
default=500,
type="int",
help="Size in MB of memory cache for speeding up filesytem access [default: %default]",
)
self.parser.add_option(
mountopt="cache_ttl",
default=60,
type="int",
help="Seconds before the memory cache will evict unused chunks [default: %default]",
)
@property
def update_file(self):
return self.fuse_args.update_file
@property
def mountpoint(self):
return self.fuse_args.mountpoint
def fuse_error(self, msg):
print(msg, file=sys.stderr)
self.fuse_args.setmod("showhelp")
fuse.Fuse.main(self, self.args)
sys.exit(1)
def main(self, args=None):
self.args = args
if self.fuse_args.getmod("showhelp"):
fuse.Fuse.main(self, args)
return
if self.update_file is None:
self.fuse_error("fuse: missing update_file parameter")
if not os.path.exists(self.update_file):
self.fuse_error(f"fuse: File does not exist {self.update_file}")
self.image = UpdateImage(
self.update_file,
cache_size=self.cache_size,
cache_ttl=self.cache_ttl,
)
self.volume = ext4.Volume(self.image, offset=0)
print("Verifying signature...")
try:
self.image.verify(
self.get_inode("/usr/share/update_engine/update-payload-key.pub.pem")
.open()
.read()
)
print("Signature verified") | except UpdateImageSignatureException: | 1 | 2023-12-28 06:13:21+00:00 | 4k |
run-llama/rags | core/param_cache.py | [
{
"identifier": "load_data",
"path": "core/utils.py",
"snippet": "def load_data(\n file_names: Optional[List[str]] = None,\n directory: Optional[str] = None,\n urls: Optional[List[str]] = None,\n) -> List[Document]:\n \"\"\"Load data.\"\"\"\n file_names = file_names or []\n directory = directory or \"\"\n urls = urls or []\n\n # get number depending on whether specified\n num_specified = sum(1 for v in [file_names, urls, directory] if v)\n\n if num_specified == 0:\n raise ValueError(\"Must specify either file_names or urls or directory.\")\n elif num_specified > 1:\n raise ValueError(\"Must specify only one of file_names or urls or directory.\")\n elif file_names:\n reader = SimpleDirectoryReader(input_files=file_names)\n docs = reader.load_data()\n elif directory:\n reader = SimpleDirectoryReader(input_dir=directory)\n docs = reader.load_data()\n elif urls:\n from llama_hub.web.simple_web.base import SimpleWebPageReader\n\n # use simple web page reader from llamahub\n loader = SimpleWebPageReader()\n docs = loader.load_data(urls=urls)\n else:\n raise ValueError(\"Must specify either file_names or urls or directory.\")\n\n return docs"
},
{
"identifier": "get_tool_objects",
"path": "core/utils.py",
"snippet": "def get_tool_objects(tool_names: List[str]) -> List:\n \"\"\"Get tool objects from tool names.\"\"\"\n # construct additional tools\n tool_objs = []\n for tool_name in tool_names:\n if tool_name == \"web_search\":\n # build web agent\n tool_objs.append(get_web_agent_tool())\n else:\n raise ValueError(f\"Tool {tool_name} not recognized.\")\n\n return tool_objs"
},
{
"identifier": "construct_agent",
"path": "core/utils.py",
"snippet": "def construct_agent(\n system_prompt: str,\n rag_params: RAGParams,\n docs: List[Document],\n vector_index: Optional[VectorStoreIndex] = None,\n additional_tools: Optional[List] = None,\n) -> Tuple[BaseChatEngine, Dict]:\n \"\"\"Construct agent from docs / parameters / indices.\"\"\"\n extra_info = {}\n additional_tools = additional_tools or []\n\n # first resolve llm and embedding model\n embed_model = resolve_embed_model(rag_params.embed_model)\n # llm = resolve_llm(rag_params.llm)\n # TODO: use OpenAI for now\n # llm = OpenAI(model=rag_params.llm)\n llm = _resolve_llm(rag_params.llm)\n\n # first let's index the data with the right parameters\n service_context = ServiceContext.from_defaults(\n chunk_size=rag_params.chunk_size,\n llm=llm,\n embed_model=embed_model,\n )\n\n if vector_index is None:\n vector_index = VectorStoreIndex.from_documents(\n docs, service_context=service_context\n )\n else:\n pass\n\n extra_info[\"vector_index\"] = vector_index\n\n vector_query_engine = vector_index.as_query_engine(\n similarity_top_k=rag_params.top_k\n )\n all_tools = []\n vector_tool = QueryEngineTool(\n query_engine=vector_query_engine,\n metadata=ToolMetadata(\n name=\"vector_tool\",\n description=(\"Use this tool to answer any user question over any data.\"),\n ),\n )\n all_tools.append(vector_tool)\n if rag_params.include_summarization:\n summary_index = SummaryIndex.from_documents(\n docs, service_context=service_context\n )\n summary_query_engine = summary_index.as_query_engine()\n summary_tool = QueryEngineTool(\n query_engine=summary_query_engine,\n metadata=ToolMetadata(\n name=\"summary_tool\",\n description=(\n \"Use this tool for any user questions that ask \"\n \"for a summarization of content\"\n ),\n ),\n )\n all_tools.append(summary_tool)\n\n # then we add tools\n all_tools.extend(additional_tools)\n\n # build agent\n if system_prompt is None:\n return \"System prompt not set yet. Please set system prompt first.\"\n\n agent = load_agent(\n all_tools,\n llm=llm,\n system_prompt=system_prompt,\n verbose=True,\n extra_kwargs={\"vector_index\": vector_index, \"rag_params\": rag_params},\n )\n return agent, extra_info"
},
{
"identifier": "RAGParams",
"path": "core/utils.py",
"snippet": "class RAGParams(BaseModel):\n \"\"\"RAG parameters.\n\n Parameters used to configure a RAG pipeline.\n\n \"\"\"\n\n include_summarization: bool = Field(\n default=False,\n description=(\n \"Whether to include summarization in the RAG pipeline. (only for GPT-4)\"\n ),\n )\n top_k: int = Field(\n default=2, description=\"Number of documents to retrieve from vector store.\"\n )\n chunk_size: int = Field(default=1024, description=\"Chunk size for vector store.\")\n embed_model: str = Field(\n default=\"default\", description=\"Embedding model to use (default is OpenAI)\"\n )\n llm: str = Field(\n default=\"gpt-4-1106-preview\", description=\"LLM to use for summarization.\"\n )"
},
{
"identifier": "construct_mm_agent",
"path": "core/utils.py",
"snippet": "def construct_mm_agent(\n system_prompt: str,\n rag_params: RAGParams,\n docs: List[Document],\n mm_vector_index: Optional[VectorStoreIndex] = None,\n additional_tools: Optional[List] = None,\n) -> Tuple[BaseChatEngine, Dict]:\n \"\"\"Construct agent from docs / parameters / indices.\n\n NOTE: system prompt isn't used right now\n\n \"\"\"\n extra_info = {}\n additional_tools = additional_tools or []\n\n # first resolve llm and embedding model\n embed_model = resolve_embed_model(rag_params.embed_model)\n # TODO: use OpenAI for now\n os.environ[\"OPENAI_API_KEY\"] = st.secrets.openai_key\n openai_mm_llm = OpenAIMultiModal(model=\"gpt-4-vision-preview\", max_new_tokens=1500)\n\n # first let's index the data with the right parameters\n service_context = ServiceContext.from_defaults(\n chunk_size=rag_params.chunk_size,\n embed_model=embed_model,\n )\n\n if mm_vector_index is None:\n mm_vector_index = MultiModalVectorStoreIndex.from_documents(\n docs, service_context=service_context\n )\n else:\n pass\n\n mm_retriever = mm_vector_index.as_retriever(similarity_top_k=rag_params.top_k)\n mm_query_engine = SimpleMultiModalQueryEngine(\n cast(MultiModalVectorIndexRetriever, mm_retriever),\n multi_modal_llm=openai_mm_llm,\n )\n\n extra_info[\"vector_index\"] = mm_vector_index\n\n # use condense + context chat engine\n agent = MultimodalChatEngine(mm_query_engine)\n\n return agent, extra_info"
}
] | from pydantic import BaseModel, Field
from llama_index import (
VectorStoreIndex,
StorageContext,
load_index_from_storage,
)
from typing import List, cast, Optional
from llama_index.chat_engine.types import BaseChatEngine
from pathlib import Path
from core.utils import (
load_data,
get_tool_objects,
construct_agent,
RAGParams,
construct_mm_agent,
)
from llama_index.indices.multi_modal.base import MultiModalVectorStoreIndex
import json
import uuid | 2,616 | """Param cache."""
class ParamCache(BaseModel):
"""Cache for RAG agent builder.
Created a wrapper class around a dict in case we wanted to more explicitly
type different items in the cache.
"""
# arbitrary types
class Config:
arbitrary_types_allowed = True
# system prompt
system_prompt: Optional[str] = Field(
default=None, description="System prompt for RAG agent."
)
# data
file_names: List[str] = Field(
default_factory=list, description="File names as data source (if specified)"
)
urls: List[str] = Field(
default_factory=list, description="URLs as data source (if specified)"
)
directory: Optional[str] = Field(
default=None, description="Directory as data source (if specified)"
)
docs: List = Field(default_factory=list, description="Documents for RAG agent.")
# tools
tools: List = Field(
default_factory=list, description="Additional tools for RAG agent (e.g. web)"
)
# RAG params
rag_params: RAGParams = Field(
default_factory=RAGParams, description="RAG parameters for RAG agent."
)
# agent params
builder_type: str = Field(
default="default", description="Builder type (default, multimodal)."
)
vector_index: Optional[VectorStoreIndex] = Field(
default=None, description="Vector index for RAG agent."
)
agent_id: str = Field(
default_factory=lambda: f"Agent_{str(uuid.uuid4())}",
description="Agent ID for RAG agent.",
)
agent: Optional[BaseChatEngine] = Field(default=None, description="RAG agent.")
def save_to_disk(self, save_dir: str) -> None:
"""Save cache to disk."""
# NOTE: more complex than just calling dict() because we want to
# only store serializable fields and be space-efficient
dict_to_serialize = {
"system_prompt": self.system_prompt,
"file_names": self.file_names,
"urls": self.urls,
"directory": self.directory,
# TODO: figure out tools
"tools": self.tools,
"rag_params": self.rag_params.dict(),
"builder_type": self.builder_type,
"agent_id": self.agent_id,
}
# store the vector store within the agent
if self.vector_index is None:
raise ValueError("Must specify vector index in order to save.")
self.vector_index.storage_context.persist(Path(save_dir) / "storage")
# if save_path directories don't exist, create it
if not Path(save_dir).exists():
Path(save_dir).mkdir(parents=True)
with open(Path(save_dir) / "cache.json", "w") as f:
json.dump(dict_to_serialize, f)
@classmethod
def load_from_disk(
cls,
save_dir: str,
) -> "ParamCache":
"""Load cache from disk."""
with open(Path(save_dir) / "cache.json", "r") as f:
cache_dict = json.load(f)
storage_context = StorageContext.from_defaults(
persist_dir=str(Path(save_dir) / "storage")
)
if cache_dict["builder_type"] == "multimodal":
vector_index: VectorStoreIndex = cast(
MultiModalVectorStoreIndex, load_index_from_storage(storage_context)
)
else:
vector_index = cast(
VectorStoreIndex, load_index_from_storage(storage_context)
)
# replace rag params with RAGParams object
cache_dict["rag_params"] = RAGParams(**cache_dict["rag_params"])
# add in the missing fields
# load docs
cache_dict["docs"] = load_data(
file_names=cache_dict["file_names"],
urls=cache_dict["urls"],
directory=cache_dict["directory"],
)
# load agent from index
| """Param cache."""
class ParamCache(BaseModel):
"""Cache for RAG agent builder.
Created a wrapper class around a dict in case we wanted to more explicitly
type different items in the cache.
"""
# arbitrary types
class Config:
arbitrary_types_allowed = True
# system prompt
system_prompt: Optional[str] = Field(
default=None, description="System prompt for RAG agent."
)
# data
file_names: List[str] = Field(
default_factory=list, description="File names as data source (if specified)"
)
urls: List[str] = Field(
default_factory=list, description="URLs as data source (if specified)"
)
directory: Optional[str] = Field(
default=None, description="Directory as data source (if specified)"
)
docs: List = Field(default_factory=list, description="Documents for RAG agent.")
# tools
tools: List = Field(
default_factory=list, description="Additional tools for RAG agent (e.g. web)"
)
# RAG params
rag_params: RAGParams = Field(
default_factory=RAGParams, description="RAG parameters for RAG agent."
)
# agent params
builder_type: str = Field(
default="default", description="Builder type (default, multimodal)."
)
vector_index: Optional[VectorStoreIndex] = Field(
default=None, description="Vector index for RAG agent."
)
agent_id: str = Field(
default_factory=lambda: f"Agent_{str(uuid.uuid4())}",
description="Agent ID for RAG agent.",
)
agent: Optional[BaseChatEngine] = Field(default=None, description="RAG agent.")
def save_to_disk(self, save_dir: str) -> None:
"""Save cache to disk."""
# NOTE: more complex than just calling dict() because we want to
# only store serializable fields and be space-efficient
dict_to_serialize = {
"system_prompt": self.system_prompt,
"file_names": self.file_names,
"urls": self.urls,
"directory": self.directory,
# TODO: figure out tools
"tools": self.tools,
"rag_params": self.rag_params.dict(),
"builder_type": self.builder_type,
"agent_id": self.agent_id,
}
# store the vector store within the agent
if self.vector_index is None:
raise ValueError("Must specify vector index in order to save.")
self.vector_index.storage_context.persist(Path(save_dir) / "storage")
# if save_path directories don't exist, create it
if not Path(save_dir).exists():
Path(save_dir).mkdir(parents=True)
with open(Path(save_dir) / "cache.json", "w") as f:
json.dump(dict_to_serialize, f)
@classmethod
def load_from_disk(
cls,
save_dir: str,
) -> "ParamCache":
"""Load cache from disk."""
with open(Path(save_dir) / "cache.json", "r") as f:
cache_dict = json.load(f)
storage_context = StorageContext.from_defaults(
persist_dir=str(Path(save_dir) / "storage")
)
if cache_dict["builder_type"] == "multimodal":
vector_index: VectorStoreIndex = cast(
MultiModalVectorStoreIndex, load_index_from_storage(storage_context)
)
else:
vector_index = cast(
VectorStoreIndex, load_index_from_storage(storage_context)
)
# replace rag params with RAGParams object
cache_dict["rag_params"] = RAGParams(**cache_dict["rag_params"])
# add in the missing fields
# load docs
cache_dict["docs"] = load_data(
file_names=cache_dict["file_names"],
urls=cache_dict["urls"],
directory=cache_dict["directory"],
)
# load agent from index | additional_tools = get_tool_objects(cache_dict["tools"]) | 1 | 2023-11-16 07:49:44+00:00 | 4k |
open-mmlab/Amphion | models/tts/naturalspeech2/prior_encoder.py | [
{
"identifier": "TransformerEncoder",
"path": "modules/naturalpseech2/transformers.py",
"snippet": "class TransformerEncoder(nn.Module):\n def __init__(\n self,\n enc_emb_tokens=None,\n encoder_layer=None,\n encoder_hidden=None,\n encoder_head=None,\n conv_filter_size=None,\n conv_kernel_size=None,\n encoder_dropout=None,\n use_cln=None,\n cfg=None,\n ):\n super().__init__()\n\n self.encoder_layer = (\n encoder_layer if encoder_layer is not None else cfg.encoder_layer\n )\n self.encoder_hidden = (\n encoder_hidden if encoder_hidden is not None else cfg.encoder_hidden\n )\n self.encoder_head = (\n encoder_head if encoder_head is not None else cfg.encoder_head\n )\n self.conv_filter_size = (\n conv_filter_size if conv_filter_size is not None else cfg.conv_filter_size\n )\n self.conv_kernel_size = (\n conv_kernel_size if conv_kernel_size is not None else cfg.conv_kernel_size\n )\n self.encoder_dropout = (\n encoder_dropout if encoder_dropout is not None else cfg.encoder_dropout\n )\n self.use_cln = use_cln if use_cln is not None else cfg.use_cln\n\n if enc_emb_tokens != None:\n self.use_enc_emb = True\n self.enc_emb_tokens = enc_emb_tokens\n else:\n self.use_enc_emb = False\n\n self.position_emb = PositionalEncoding(\n self.encoder_hidden, self.encoder_dropout\n )\n\n self.layers = nn.ModuleList([])\n self.layers.extend(\n [\n TransformerEncoderLayer(\n self.encoder_hidden,\n self.encoder_head,\n self.conv_filter_size,\n self.conv_kernel_size,\n self.encoder_dropout,\n self.use_cln,\n )\n for i in range(self.encoder_layer)\n ]\n )\n\n if self.use_cln:\n self.last_ln = StyleAdaptiveLayerNorm(self.encoder_hidden)\n else:\n self.last_ln = nn.LayerNorm(self.encoder_hidden)\n\n def forward(self, x, key_padding_mask, condition=None):\n if len(x.shape) == 2 and self.use_enc_emb:\n x = self.enc_emb_tokens(x)\n x = self.position_emb(x)\n else:\n x = self.position_emb(x) # (B, T, d)\n\n for layer in self.layers:\n x = layer(x, key_padding_mask, condition)\n\n if self.use_cln:\n x = self.last_ln(x, condition)\n else:\n x = self.last_ln(x)\n\n return x"
},
{
"identifier": "DurationPredictor",
"path": "modules/naturalpseech2/transformers.py",
"snippet": "class DurationPredictor(nn.Module):\n def __init__(self, cfg):\n super().__init__()\n self.cfg = cfg\n self.input_size = cfg.input_size\n self.filter_size = cfg.filter_size\n self.kernel_size = cfg.kernel_size\n self.conv_layers = cfg.conv_layers\n self.cross_attn_per_layer = cfg.cross_attn_per_layer\n self.attn_head = cfg.attn_head\n self.drop_out = cfg.drop_out\n\n self.conv = nn.ModuleList()\n self.cattn = nn.ModuleList()\n\n for idx in range(self.conv_layers):\n in_dim = self.input_size if idx == 0 else self.filter_size\n self.conv += [\n nn.Sequential(\n nn.Conv1d(\n in_dim,\n self.filter_size,\n self.kernel_size,\n padding=self.kernel_size // 2,\n ),\n nn.ReLU(),\n nn.LayerNorm(self.filter_size),\n nn.Dropout(self.drop_out),\n )\n ]\n if idx % self.cross_attn_per_layer == 0:\n self.cattn.append(\n torch.nn.Sequential(\n nn.MultiheadAttention(\n self.filter_size,\n self.attn_head,\n batch_first=True,\n kdim=self.filter_size,\n vdim=self.filter_size,\n ),\n nn.LayerNorm(self.filter_size),\n nn.Dropout(0.2),\n )\n )\n\n self.linear = nn.Linear(self.filter_size, 1)\n self.linear.weight.data.normal_(0.0, 0.02)\n\n def forward(self, x, mask, ref_emb, ref_mask):\n \"\"\"\n input:\n x: (B, N, d)\n mask: (B, N), mask is 0\n ref_emb: (B, d, T')\n ref_mask: (B, T'), mask is 0\n\n output:\n dur_pred: (B, N)\n dur_pred_log: (B, N)\n dur_pred_round: (B, N)\n \"\"\"\n\n input_ref_mask = ~(ref_mask.bool()) # (B, T')\n # print(input_ref_mask)\n\n x = x.transpose(1, -1) # (B, N, d) -> (B, d, N)\n\n for idx, (conv, act, ln, dropout) in enumerate(self.conv):\n res = x\n # print(torch.min(x), torch.max(x))\n if idx % self.cross_attn_per_layer == 0:\n attn_idx = idx // self.cross_attn_per_layer\n attn, attn_ln, attn_drop = self.cattn[attn_idx]\n\n attn_res = y_ = x.transpose(1, 2) # (B, d, N) -> (B, N, d)\n\n y_ = attn_ln(y_)\n # print(torch.min(y_), torch.min(y_))\n # print(torch.min(ref_emb), torch.max(ref_emb))\n y_, _ = attn(\n y_,\n ref_emb.transpose(1, 2),\n ref_emb.transpose(1, 2),\n key_padding_mask=input_ref_mask,\n )\n # y_, _ = attn(y_, ref_emb.transpose(1, 2), ref_emb.transpose(1, 2))\n # print(torch.min(y_), torch.min(y_))\n y_ = attn_drop(y_)\n y_ = (y_ + attn_res) / math.sqrt(2.0)\n\n x = y_.transpose(1, 2)\n\n x = conv(x)\n # print(torch.min(x), torch.max(x))\n x = act(x)\n x = ln(x.transpose(1, 2))\n # print(torch.min(x), torch.max(x))\n x = x.transpose(1, 2)\n\n x = dropout(x)\n\n if idx != 0:\n x += res\n\n if mask is not None:\n x = x * mask.to(x.dtype)[:, None, :]\n\n x = self.linear(x.transpose(1, 2))\n x = torch.squeeze(x, -1)\n\n dur_pred = x.exp() - 1\n dur_pred_round = torch.clamp(torch.round(x.exp() - 1), min=0).long()\n\n return {\n \"dur_pred_log\": x,\n \"dur_pred\": dur_pred,\n \"dur_pred_round\": dur_pred_round,\n }"
},
{
"identifier": "PitchPredictor",
"path": "modules/naturalpseech2/transformers.py",
"snippet": "class PitchPredictor(nn.Module):\n def __init__(self, cfg):\n super().__init__()\n self.cfg = cfg\n self.input_size = cfg.input_size\n self.filter_size = cfg.filter_size\n self.kernel_size = cfg.kernel_size\n self.conv_layers = cfg.conv_layers\n self.cross_attn_per_layer = cfg.cross_attn_per_layer\n self.attn_head = cfg.attn_head\n self.drop_out = cfg.drop_out\n\n self.conv = nn.ModuleList()\n self.cattn = nn.ModuleList()\n\n for idx in range(self.conv_layers):\n in_dim = self.input_size if idx == 0 else self.filter_size\n self.conv += [\n nn.Sequential(\n nn.Conv1d(\n in_dim,\n self.filter_size,\n self.kernel_size,\n padding=self.kernel_size // 2,\n ),\n nn.ReLU(),\n nn.LayerNorm(self.filter_size),\n nn.Dropout(self.drop_out),\n )\n ]\n if idx % self.cross_attn_per_layer == 0:\n self.cattn.append(\n torch.nn.Sequential(\n nn.MultiheadAttention(\n self.filter_size,\n self.attn_head,\n batch_first=True,\n kdim=self.filter_size,\n vdim=self.filter_size,\n ),\n nn.LayerNorm(self.filter_size),\n nn.Dropout(0.2),\n )\n )\n\n self.linear = nn.Linear(self.filter_size, 1)\n self.linear.weight.data.normal_(0.0, 0.02)\n\n def forward(self, x, mask, ref_emb, ref_mask):\n \"\"\"\n input:\n x: (B, N, d)\n mask: (B, N), mask is 0\n ref_emb: (B, d, T')\n ref_mask: (B, T'), mask is 0\n\n output:\n pitch_pred: (B, T)\n \"\"\"\n\n input_ref_mask = ~(ref_mask.bool()) # (B, T')\n\n x = x.transpose(1, -1) # (B, N, d) -> (B, d, N)\n\n for idx, (conv, act, ln, dropout) in enumerate(self.conv):\n res = x\n if idx % self.cross_attn_per_layer == 0:\n attn_idx = idx // self.cross_attn_per_layer\n attn, attn_ln, attn_drop = self.cattn[attn_idx]\n\n attn_res = y_ = x.transpose(1, 2) # (B, d, N) -> (B, N, d)\n\n y_ = attn_ln(y_)\n y_, _ = attn(\n y_,\n ref_emb.transpose(1, 2),\n ref_emb.transpose(1, 2),\n key_padding_mask=input_ref_mask,\n )\n # y_, _ = attn(y_, ref_emb.transpose(1, 2), ref_emb.transpose(1, 2))\n y_ = attn_drop(y_)\n y_ = (y_ + attn_res) / math.sqrt(2.0)\n\n x = y_.transpose(1, 2)\n\n x = conv(x)\n x = act(x)\n x = ln(x.transpose(1, 2))\n x = x.transpose(1, 2)\n\n x = dropout(x)\n\n if idx != 0:\n x += res\n\n x = self.linear(x.transpose(1, 2))\n x = torch.squeeze(x, -1)\n\n return x"
},
{
"identifier": "LengthRegulator",
"path": "modules/naturalpseech2/transformers.py",
"snippet": "class LengthRegulator(nn.Module):\n \"\"\"Length Regulator\"\"\"\n\n def __init__(self):\n super(LengthRegulator, self).__init__()\n\n def LR(self, x, duration, max_len):\n device = x.device\n output = list()\n mel_len = list()\n for batch, expand_target in zip(x, duration):\n expanded = self.expand(batch, expand_target)\n output.append(expanded)\n mel_len.append(expanded.shape[0])\n\n if max_len is not None:\n output = pad(output, max_len)\n else:\n output = pad(output)\n\n return output, torch.LongTensor(mel_len).to(device)\n\n def expand(self, batch, predicted):\n out = list()\n\n for i, vec in enumerate(batch):\n expand_size = predicted[i].item()\n out.append(vec.expand(max(int(expand_size), 0), -1))\n out = torch.cat(out, 0)\n\n return out\n\n def forward(self, x, duration, max_len):\n output, mel_len = self.LR(x, duration, max_len)\n return output, mel_len"
}
] | import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from modules.naturalpseech2.transformers import (
TransformerEncoder,
DurationPredictor,
PitchPredictor,
LengthRegulator,
) | 3,057 | # Copyright (c) 2023 Amphion.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
class PriorEncoder(nn.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.enc_emb_tokens = nn.Embedding(
cfg.vocab_size, cfg.encoder.encoder_hidden, padding_idx=0
)
self.enc_emb_tokens.weight.data.normal_(mean=0.0, std=1e-5)
self.encoder = TransformerEncoder(
enc_emb_tokens=self.enc_emb_tokens, cfg=cfg.encoder
)
self.duration_predictor = DurationPredictor(cfg.duration_predictor)
| # Copyright (c) 2023 Amphion.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
class PriorEncoder(nn.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.enc_emb_tokens = nn.Embedding(
cfg.vocab_size, cfg.encoder.encoder_hidden, padding_idx=0
)
self.enc_emb_tokens.weight.data.normal_(mean=0.0, std=1e-5)
self.encoder = TransformerEncoder(
enc_emb_tokens=self.enc_emb_tokens, cfg=cfg.encoder
)
self.duration_predictor = DurationPredictor(cfg.duration_predictor) | self.pitch_predictor = PitchPredictor(cfg.pitch_predictor) | 2 | 2023-11-15 09:19:27+00:00 | 4k |
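
A minimal, self-contained sketch of the length-regulation step these modules build toward: phoneme-level features are repeated according to per-phoneme durations to give frame-level features, as the LengthRegulator in this row does. All shapes and duration values below are illustrative, not taken from the repository.

import torch

def length_regulate(x, durations):
    # x: (B, N, d) phoneme-level features; durations: (B, N) integer frame counts per phoneme
    outputs, lengths = [], []
    for feats, dur in zip(x, durations):
        expanded = torch.repeat_interleave(feats, dur, dim=0)  # (sum(dur), d) frame-level features
        outputs.append(expanded)
        lengths.append(expanded.shape[0])
    padded = torch.zeros(len(outputs), max(lengths), x.shape[-1])
    for i, out in enumerate(outputs):
        padded[i, : out.shape[0]] = out                        # right-pad to the longest sequence
    return padded, torch.tensor(lengths)

phoneme_feats = torch.randn(2, 4, 8)                           # 2 sequences, 4 phonemes, 8-dim features
durations = torch.tensor([[1, 2, 0, 3], [2, 2, 2, 2]])
frames, mel_len = length_regulate(phoneme_feats, durations)
print(frames.shape, mel_len)                                   # torch.Size([2, 8, 8]) tensor([6, 8])
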
KwaiKEG/KwaiAgents | kwaiagents/agents/prompts.py | [
{
"identifier": "get_current_time_and_date",
"path": "kwaiagents/utils/date_utils.py",
"snippet": "def get_current_time_and_date(lang=\"en\"):\n solar = Solar.fromDate(datetime.now())\n lunar = solar.getLunar()\n if lang == \"zh\":\n rst = f'''\n当前阳历日期和时间: {str(datetime.now())}\n当前星期: 星期{str(solar.getWeekInChinese())}\n当前农历日期: {str(lunar.toString())}\n当前时辰: {str(lunar.getTimeZhi())}时\n'''.strip()\n else:\n rst = f'''\nCurrent Gregorian date and time: {str(datetime.now())}\nCurrent day of the week: 星期{str(solar.getWeekInChinese())}\nCurrent lunar date: {str(lunar.toString())}\nCurrent Chinese time unit: {str(lunar.getTimeZhi())}时\n'''.strip()\n return rst"
},
{
"identifier": "transform_to_openai_function",
"path": "kwaiagents/utils/function_utils.py",
"snippet": "def transform_to_openai_function(func):\n parsed = docstring_parser.parse(func.__doc__)\n\n # Extract descriptions, args, and returns\n description = parsed.short_description\n\n args = {}\n for param in parsed.params:\n args[param.arg_name] = {\n \"type\": param.type_name,\n \"description\": param.description\n }\n\n returns = {\n \"description\": parsed.returns.description if hasattr(parsed.returns, \"returns\") else \"\",\n \"type\": parsed.returns.type_name if hasattr(parsed.returns, \"type_name\") else \"\"\n }\n\n return {\n \"name\": func.name if hasattr(func, \"name\") else func.__name__,\n \"description\": description,\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": args\n },\n \"returns\": returns,\n \"required\": list(args.keys())\n }"
}
] | import json
from kwaiagents.utils.date_utils import get_current_time_and_date
from kwaiagents.utils.function_utils import transform_to_openai_function | 1,925 | planning_prompt_template = """
你是{agent_name},{agent_bio}
{agent_instructions}
当前阶段是任务规划阶段,你将给定目标或问题,你的决策将独立执行而不依赖于人类的帮助,请发挥LLM的优势并且追求高效的策略进行任务规划。
1.你有~4000字的短期记忆
2.不需要用户的帮助
3.规划的时候可以用参考工具中提到的工具
4.互联网搜索、信息聚合和鉴别真伪的能力
5.保持谦逊,对自己没把握的问题,尽可能调用command,但尽量少调用,不能重复调用
6.当你从自身知识或者历史记忆中能得出结论,请聪明且高效,完成任务并得出结论
7.经常建设性地自我批评整个行为大局,反思过去的决策和策略,以改进你的方法
8.你最多只能进行{max_iter_num}步思考,规划{max_iter_num}个任务,所以尽可能高效规划任务
9.你有反思能力,如果已完成的任务和结果暂不能得到回答问题所需信息或尚不能完成目标,应继续规划,但不能跟之前任务重复
{tool_specification}
{current_date_and_time}
{memory}
GOAL:{goal}
\n根据目标和已有任务,规划一个新Task(不能重复),你只能以以下json列表的格式生成Task
{{
"task_name": "任务描述",
"command":{{
"name":"command name",
"args":{{
"arg name":"value"
}}
}}
}}
确保Task可以被Python的json.loads解析
当已完成的Tasks已经能够帮助回答这个目标,则尽可能生成任务完成Task,否则生成一个其他Task。一个新Task:
""".strip()
planning_prompt_template_en = """
You are a {agent_name},{agent_bio}
{agent_instructions}
Currently, you are in the task planning phase, where you will be given specific goals or problems to address. \
Your decisions will be executed independently without relying on human assistance. \
Please utilize LLM's advantages and pursue efficient strategies for task planning.\
1. You have a short-term memory of approximately 4,000 characters.
2. You do not require assistance from users.
3. You can use the reference tools mentioned when planning.
4. You have the abilities to perform internet searches, aggregate information, and discern between genuine and fake information.
5. Remain humble and, if unsure about an issue, make use of commands when possible but minimize their usage and avoid repetition.
6. When drawing conclusions from your knowledge or historical memory, be clever and efficient in task completion and conclusion.
7. Regularly engage in constructive self-criticism to reflect on past decisions and strategies and improve your approach.
8. You can think and plan up to {max_iter_num} steps, so strive to plan tasks as efficiently as possible.
9. You have the capability for reflection; if a completed task and its results cannot provide the necessary information to answer a question or achieve a goal, continue planning but avoid repeating previous tasks.
{tool_specification}
{current_date_and_time}
{memory}
GOAL:{goal}
\nBased on the goal and existing tasks, plan a new Task (no repetitions), and you can only generate the Task in the following json list format:
{{
"task_name": "task description",
"command":{{
"name":"command name",
"args":{{
"arg name":"value"
}}
}}
}}
Ensure that the Task can be parsed by Python's json.loads function.
If the already completed Tasks are sufficient to answer the goal, then try to generate the Task to complete it as much as possible. Otherwise, create another Task.
A new Task:
""".strip()
conclusion_prompt_template = """
你是{agent_name},{agent_bio},{agent_instructions}
当前阶段是总结阶段,在前几次交互中,对于用户给定的目标和问题,你已经通过自己搜寻出了一定信息,你需要整合这些信息用中文给出最终的结论。
1. 搜寻的信息从很多工具中获取,会出现冗余
2. 当不同工具获取的信息冲突的时候,你应该遵循一定的优先级(Wiki > search)去解决冲突
{current_date_and_time}
{memory}
问题或目标:{goal}\n生成对用户有帮助的中文回答:
"""
conclusion_prompt_template_en = """
You are a {agent_name},{agent_bio},{agent_instructions}
The current stage is the concluding stage. In the previous interactions, \
you have already found some information by searching on your own for the user's given goals and problems. \
You need to integrate this information and provide the final conclusion in Chinese.
If there is information from Knowledge info, and the information can answer the question, \
you can use the Knowledge info information as much as possible to answer the question without using external tool results or creating your own content.
1. The information you search for comes from many sources and may be redundant.
2. When the information obtained from different tools conflicts, you should follow a certain priority (Knowledge info > Wiki > search) to resolve the conflict.
{current_date_and_time}
{memory}
Goal: {goal}
Generate helpful answers **in English** for users:
"""
def make_planning_prompt(agent_profile, goal, used_tools, memory, max_tokens_num, tokenizer, lang="en"):
tool_spec = make_tool_specification(used_tools, lang)
template = planning_prompt_template if lang == "zh" else planning_prompt_template_en
prompt = template.format(**{
"agent_name": agent_profile.name,
"agent_bio": agent_profile.bio,
"agent_instructions": agent_profile.instructions,
"max_iter_num": agent_profile.max_iter_num,
"tool_specification": tool_spec,
|
planning_prompt_template = """
你是{agent_name},{agent_bio}
{agent_instructions}
当前阶段是任务规划阶段,你将给定目标或问题,你的决策将独立执行而不依赖于人类的帮助,请发挥LLM的优势并且追求高效的策略进行任务规划。
1.你有~4000字的短期记忆
2.不需要用户的帮助
3.规划的时候可以用参考工具中提到的工具
4.互联网搜索、信息聚合和鉴别真伪的能力
5.保持谦逊,对自己没把握的问题,尽可能调用command,但尽量少调用,不能重复调用
6.当你从自身知识或者历史记忆中能得出结论,请聪明且高效,完成任务并得出结论
7.经常建设性地自我批评整个行为大局,反思过去的决策和策略,以改进你的方法
8.你最多只能进行{max_iter_num}步思考,规划{max_iter_num}个任务,所以尽可能高效规划任务
9.你有反思能力,如果已完成的任务和结果暂不能得到回答问题所需信息或尚不能完成目标,应继续规划,但不能跟之前任务重复
{tool_specification}
{current_date_and_time}
{memory}
GOAL:{goal}
\n根据目标和已有任务,规划一个新Task(不能重复),你只能以以下json列表的格式生成Task
{{
"task_name": "任务描述",
"command":{{
"name":"command name",
"args":{{
"arg name":"value"
}}
}}
}}
确保Task可以被Python的json.loads解析
当已完成的Tasks已经能够帮助回答这个目标,则尽可能生成任务完成Task,否则生成一个其他Task。一个新Task:
""".strip()
planning_prompt_template_en = """
You are a {agent_name},{agent_bio}
{agent_instructions}
Currently, you are in the task planning phase, where you will be given specific goals or problems to address. \
Your decisions will be executed independently without relying on human assistance. \
Please utilize LLM's advantages and pursue efficient strategies for task planning.\
1. You have a short-term memory of approximately 4,000 characters.
2. You do not require assistance from users.
3. You can use the reference tools mentioned when planning.
4. You have the abilities to perform internet searches, aggregate information, and discern between genuine and fake information.
5. Remain humble and, if unsure about an issue, make use of commands when possible but minimize their usage and avoid repetition.
6. When drawing conclusions from your knowledge or historical memory, be clever and efficient in task completion and conclusion.
7. Regularly engage in constructive self-criticism to reflect on past decisions and strategies and improve your approach.
8. You can think and plan up to {max_iter_num} steps, so strive to plan tasks as efficiently as possible.
9. You have the capability for reflection; if a completed task and its results cannot provide the necessary information to answer a question or achieve a goal, continue planning but avoid repeating previous tasks.
{tool_specification}
{current_date_and_time}
{memory}
GOAL:{goal}
\nBased on the goal and existing tasks, plan a new Task (no repetitions), and you can only generate the Task in the following json list format:
{{
"task_name": "task description",
"command":{{
"name":"command name",
"args":{{
"arg name":"value"
}}
}}
}}
Ensure that the Task can be parsed by Python's json.loads function.
If the already completed Tasks are sufficient to answer the goal, then try to generate the Task to complete it as much as possible. Otherwise, create another Task.
A new Task:
""".strip()
conclusion_prompt_template = """
你是{agent_name},{agent_bio},{agent_instructions}
当前阶段是总结阶段,在前几次交互中,对于用户给定的目标和问题,你已经通过自己搜寻出了一定信息,你需要整合这些信息用中文给出最终的结论。
1. 搜寻的信息从很多工具中获取,会出现冗余
2. 当不同工具获取的信息冲突的时候,你应该遵循一定的优先级(Wiki > search)去解决冲突
{current_date_and_time}
{memory}
问题或目标:{goal}\n生成对用户有帮助的中文回答:
"""
conclusion_prompt_template_en = """
You are a {agent_name},{agent_bio},{agent_instructions}
The current stage is the concluding stage. In the previous interactions, \
you have already found some information by searching on your own for the user's given goals and problems. \
You need to integrate this information and provide the final conclusion in Chinese.
If there is information from Knowledge info, and the information can answer the question, \
you can use the Knowledge info information as much as possible to answer the question without using external tool results or creating your own content.
1. The information you search for comes from many sources and may be redundant.
2. When the information obtained from different tools conflicts, you should follow a certain priority (Knowledge info > Wiki > search) to resolve the conflict.
{current_date_and_time}
{memory}
Goal: {goal}
Generate helpful answers **in English** for users:
"""
def make_planning_prompt(agent_profile, goal, used_tools, memory, max_tokens_num, tokenizer, lang="en"):
tool_spec = make_tool_specification(used_tools, lang)
template = planning_prompt_template if lang == "zh" else planning_prompt_template_en
prompt = template.format(**{
"agent_name": agent_profile.name,
"agent_bio": agent_profile.bio,
"agent_instructions": agent_profile.instructions,
"max_iter_num": agent_profile.max_iter_num,
"tool_specification": tool_spec, | "current_date_and_time": get_current_time_and_date(lang), | 0 | 2023-11-13 03:37:02+00:00 | 4k |
EnVision-Research/LucidDreamer | scene/gaussian_model.py | [
{
"identifier": "inverse_sigmoid",
"path": "utils/general_utils.py",
"snippet": "def inverse_sigmoid(x):\n return torch.log(x/(1-x))"
},
{
"identifier": "get_expon_lr_func",
"path": "utils/general_utils.py",
"snippet": "def get_expon_lr_func(\n lr_init, lr_final, lr_delay_steps=0, lr_delay_mult=1.0, max_steps=1000000\n):\n \"\"\"\n Copied from Plenoxels\n\n Continuous learning rate decay function. Adapted from JaxNeRF\n The returned rate is lr_init when step=0 and lr_final when step=max_steps, and\n is log-linearly interpolated elsewhere (equivalent to exponential decay).\n If lr_delay_steps>0 then the learning rate will be scaled by some smooth\n function of lr_delay_mult, such that the initial learning rate is\n lr_init*lr_delay_mult at the beginning of optimization but will be eased back\n to the normal learning rate when steps>lr_delay_steps.\n :param conf: config subtree 'lr' or similar\n :param max_steps: int, the number of steps during optimization.\n :return HoF which takes step as input\n \"\"\"\n\n def helper(step):\n if step < 0 or (lr_init == 0.0 and lr_final == 0.0):\n # Disable this parameter\n return 0.0\n if lr_delay_steps > 0:\n # A kind of reverse cosine decay.\n delay_rate = lr_delay_mult + (1 - lr_delay_mult) * np.sin(\n 0.5 * np.pi * np.clip(step / lr_delay_steps, 0, 1)\n )\n else:\n delay_rate = 1.0\n t = np.clip(step / max_steps, 0, 1)\n log_lerp = np.exp(np.log(lr_init) * (1 - t) + np.log(lr_final) * t)\n return delay_rate * log_lerp\n\n return helper"
},
{
"identifier": "build_rotation",
"path": "utils/general_utils.py",
"snippet": "def build_rotation(r):\n norm = torch.sqrt(r[:,0]*r[:,0] + r[:,1]*r[:,1] + r[:,2]*r[:,2] + r[:,3]*r[:,3])\n\n q = r / norm[:, None]\n\n R = torch.zeros((q.size(0), 3, 3), device='cuda')\n\n r = q[:, 0]\n x = q[:, 1]\n y = q[:, 2]\n z = q[:, 3]\n\n R[:, 0, 0] = 1 - 2 * (y*y + z*z)\n R[:, 0, 1] = 2 * (x*y - r*z)\n R[:, 0, 2] = 2 * (x*z + r*y)\n R[:, 1, 0] = 2 * (x*y + r*z)\n R[:, 1, 1] = 1 - 2 * (x*x + z*z)\n R[:, 1, 2] = 2 * (y*z - r*x)\n R[:, 2, 0] = 2 * (x*z - r*y)\n R[:, 2, 1] = 2 * (y*z + r*x)\n R[:, 2, 2] = 1 - 2 * (x*x + y*y)\n return R"
},
{
"identifier": "mkdir_p",
"path": "utils/system_utils.py",
"snippet": "def mkdir_p(folder_path):\n # Creates a directory. equivalent to using mkdir -p on the command line\n try:\n makedirs(folder_path)\n except OSError as exc: # Python >2.5\n if exc.errno == EEXIST and path.isdir(folder_path):\n pass\n else:\n raise"
},
{
"identifier": "RGB2SH",
"path": "utils/sh_utils.py",
"snippet": "def RGB2SH(rgb):\n return (rgb - 0.5) / C0"
},
{
"identifier": "SH2RGB",
"path": "utils/sh_utils.py",
"snippet": "def SH2RGB(sh):\n return sh * C0 + 0.5"
},
{
"identifier": "BasicPointCloud",
"path": "utils/graphics_utils.py",
"snippet": "class BasicPointCloud(NamedTuple):\n points : np.array\n colors : np.array\n normals : np.array"
},
{
"identifier": "strip_symmetric",
"path": "utils/general_utils.py",
"snippet": "def strip_symmetric(sym):\n return strip_lowerdiag(sym)"
},
{
"identifier": "build_scaling_rotation",
"path": "utils/general_utils.py",
"snippet": "def build_scaling_rotation(s, r):\n L = torch.zeros((s.shape[0], 3, 3), dtype=torch.float, device=\"cuda\")\n R = build_rotation(r)\n\n L[:,0,0] = s[:,0]\n L[:,1,1] = s[:,1]\n L[:,2,2] = s[:,2]\n\n L = R @ L\n return L"
}
] | import torch
import numpy as np
import os
from utils.general_utils import inverse_sigmoid, get_expon_lr_func, build_rotation
from torch import nn
from utils.system_utils import mkdir_p
from plyfile import PlyData, PlyElement
from utils.sh_utils import RGB2SH,SH2RGB
from simple_knn._C import distCUDA2
from utils.graphics_utils import BasicPointCloud
from utils.general_utils import strip_symmetric, build_scaling_rotation | 3,525 |
print("Number of points at initialisation : ", fused_point_cloud.shape[0])
dist2 = torch.clamp_min(distCUDA2(torch.from_numpy(np.asarray(pcd.points)).float().cuda()), 0.0000001)
scales = torch.log(torch.sqrt(dist2))[...,None].repeat(1, 3)
rots = torch.zeros((fused_point_cloud.shape[0], 4), device="cuda")
rots[:, 0] = 1
opacities = inverse_sigmoid(0.1 * torch.ones((fused_point_cloud.shape[0], 1), dtype=torch.float, device="cuda"))
self._xyz = nn.Parameter(fused_point_cloud.requires_grad_(True))
self._features_dc = nn.Parameter(features[:,:,0:1].transpose(1, 2).contiguous().requires_grad_(True))
self._features_rest = nn.Parameter(features[:,:,1:].transpose(1, 2).contiguous().requires_grad_(True))
self._scaling = nn.Parameter(scales.requires_grad_(True))
self._rotation = nn.Parameter(rots.requires_grad_(True))
self._opacity = nn.Parameter(opacities.requires_grad_(True))
self._background = nn.Parameter(torch.zeros((3,1,1), device="cuda").requires_grad_(True))
self.max_radii2D = torch.zeros((self.get_xyz.shape[0]), device="cuda")
def training_setup(self, training_args):
self.percent_dense = training_args.percent_dense
self.xyz_gradient_accum = torch.zeros((self.get_xyz.shape[0], 1), device="cuda")
self.denom = torch.zeros((self.get_xyz.shape[0], 1), device="cuda")
l = [
{'params': [self._xyz], 'lr': training_args.position_lr_init * self.spatial_lr_scale, "name": "xyz"},
{'params': [self._features_dc], 'lr': training_args.feature_lr, "name": "f_dc"},
{'params': [self._features_rest], 'lr': training_args.feature_lr / 20.0, "name": "f_rest"},
{'params': [self._opacity], 'lr': training_args.opacity_lr, "name": "opacity"},
{'params': [self._scaling], 'lr': training_args.scaling_lr, "name": "scaling"},
{'params': [self._rotation], 'lr': training_args.rotation_lr, "name": "rotation"},
{'params': [self._background], 'lr': training_args.feature_lr, "name": "background"},
]
self.optimizer = torch.optim.Adam(l, lr=0.0, eps=1e-15)
self.xyz_scheduler_args = get_expon_lr_func(lr_init=training_args.position_lr_init*self.spatial_lr_scale,
lr_final=training_args.position_lr_final*self.spatial_lr_scale,
lr_delay_mult=training_args.position_lr_delay_mult,
max_steps=training_args.iterations)
self.rotation_scheduler_args = get_expon_lr_func(lr_init=training_args.rotation_lr,
lr_final=training_args.rotation_lr_final,
lr_delay_mult=training_args.position_lr_delay_mult,
max_steps=training_args.iterations)
self.scaling_scheduler_args = get_expon_lr_func(lr_init=training_args.scaling_lr,
lr_final=training_args.scaling_lr_final,
lr_delay_mult=training_args.position_lr_delay_mult,
max_steps=training_args.iterations)
self.feature_scheduler_args = get_expon_lr_func(lr_init=training_args.feature_lr,
lr_final=training_args.feature_lr_final,
lr_delay_mult=training_args.position_lr_delay_mult,
max_steps=training_args.iterations)
def update_learning_rate(self, iteration):
''' Learning rate scheduling per step '''
for param_group in self.optimizer.param_groups:
if param_group["name"] == "xyz":
lr = self.xyz_scheduler_args(iteration)
param_group['lr'] = lr
return lr
def update_feature_learning_rate(self, iteration):
''' Learning rate scheduling per step '''
for param_group in self.optimizer.param_groups:
if param_group["name"] == "f_dc":
lr = self.feature_scheduler_args(iteration)
param_group['lr'] = lr
return lr
def update_rotation_learning_rate(self, iteration):
''' Learning rate scheduling per step '''
for param_group in self.optimizer.param_groups:
if param_group["name"] == "rotation":
lr = self.rotation_scheduler_args(iteration)
param_group['lr'] = lr
return lr
def update_scaling_learning_rate(self, iteration):
''' Learning rate scheduling per step '''
for param_group in self.optimizer.param_groups:
if param_group["name"] == "scaling":
lr = self.scaling_scheduler_args(iteration)
param_group['lr'] = lr
return lr
def construct_list_of_attributes(self):
l = ['x', 'y', 'z', 'nx', 'ny', 'nz']
# All channels except the 3 DC
for i in range(self._features_dc.shape[1]*self._features_dc.shape[2]):
l.append('f_dc_{}'.format(i))
for i in range(self._features_rest.shape[1]*self._features_rest.shape[2]):
l.append('f_rest_{}'.format(i))
l.append('opacity')
for i in range(self._scaling.shape[1]):
l.append('scale_{}'.format(i))
for i in range(self._rotation.shape[1]):
l.append('rot_{}'.format(i))
return l
def save_ply(self, path):
mkdir_p(os.path.dirname(path))
xyz = self._xyz.detach().cpu().numpy()
normals = np.zeros_like(xyz)
f_dc = self._features_dc.detach().transpose(1, 2).flatten(start_dim=1).contiguous().cpu().numpy()
f_rest = self._features_rest.detach().transpose(1, 2).flatten(start_dim=1).contiguous().cpu().numpy()
opacities = self._opacity.detach().cpu().numpy()
scale = self._scaling.detach().cpu().numpy()
rotation = self._rotation.detach().cpu().numpy()
dtype_full = [(attribute, 'f4') for attribute in self.construct_list_of_attributes()]
elements = np.empty(xyz.shape[0], dtype=dtype_full)
attributes = np.concatenate((xyz, normals, f_dc, f_rest, opacities, scale, rotation), axis=1)
elements[:] = list(map(tuple, attributes))
el = PlyElement.describe(elements, 'vertex')
PlyData([el]).write(path)
| #
# Copyright (C) 2023, Inria
# GRAPHDECO research group, https://team.inria.fr/graphdeco
# All rights reserved.
#
# This software is free for non-commercial, research and evaluation use
# under the terms of the LICENSE.md file.
#
# For inquiries contact [email protected]
#
# from .resnet import *
class GaussianModel:
def setup_functions(self):
def build_covariance_from_scaling_rotation(scaling, scaling_modifier, rotation):
L = build_scaling_rotation(scaling_modifier * scaling, rotation)
actual_covariance = L @ L.transpose(1, 2)
symm = strip_symmetric(actual_covariance)
return symm
self.scaling_activation = torch.exp
self.scaling_inverse_activation = torch.log
self.covariance_activation = build_covariance_from_scaling_rotation
self.opacity_activation = torch.sigmoid
self.inverse_opacity_activation = inverse_sigmoid
self.rotation_activation = torch.nn.functional.normalize
def __init__(self, sh_degree : int):
self.active_sh_degree = 0
self.max_sh_degree = sh_degree
self._xyz = torch.empty(0)
self._features_dc = torch.empty(0)
self._features_rest = torch.empty(0)
self._scaling = torch.empty(0)
self._rotation = torch.empty(0)
self._opacity = torch.empty(0)
self._background = torch.empty(0)
self.max_radii2D = torch.empty(0)
self.xyz_gradient_accum = torch.empty(0)
self.denom = torch.empty(0)
self.optimizer = None
self.percent_dense = 0
self.spatial_lr_scale = 0
self.setup_functions()
def capture(self):
return (
self.active_sh_degree,
self._xyz,
self._features_dc,
self._features_rest,
self._scaling,
self._rotation,
self._opacity,
self.max_radii2D,
self.xyz_gradient_accum,
self.denom,
self.optimizer.state_dict(),
self.spatial_lr_scale,
)
def restore(self, model_args, training_args):
(self.active_sh_degree,
self._xyz,
self._features_dc,
self._features_rest,
self._scaling,
self._rotation,
self._opacity,
self.max_radii2D,
xyz_gradient_accum,
denom,
opt_dict,
self.spatial_lr_scale) = model_args
self.training_setup(training_args)
self.xyz_gradient_accum = xyz_gradient_accum
self.denom = denom
self.optimizer.load_state_dict(opt_dict)
@property
def get_scaling(self):
return self.scaling_activation(self._scaling)
@property
def get_rotation(self):
return self.rotation_activation(self._rotation)
@property
def get_xyz(self):
return self._xyz
@property
def get_background(self):
return torch.sigmoid(self._background)
@property
def get_features(self):
features_dc = self._features_dc
features_rest = self._features_rest
return torch.cat((features_dc, features_rest), dim=1)
@property
def get_opacity(self):
return self.opacity_activation(self._opacity)
def get_covariance(self, scaling_modifier = 1):
return self.covariance_activation(self.get_scaling, scaling_modifier, self._rotation)
def oneupSHdegree(self):
if self.active_sh_degree < self.max_sh_degree:
self.active_sh_degree += 1
def create_from_pcd(self, pcd : BasicPointCloud, spatial_lr_scale : float):
self.spatial_lr_scale = spatial_lr_scale
fused_point_cloud = torch.tensor(np.asarray(pcd.points)).float().cuda()
fused_color = RGB2SH(torch.tensor(np.asarray(pcd.colors))).float().cuda() #RGB2SH(
features = torch.zeros((fused_color.shape[0], 3, (self.max_sh_degree + 1) ** 2)).float().cuda()
features[:, :3, 0 ] = fused_color
features[:, 3:, 1:] = 0.0
print("Number of points at initialisation : ", fused_point_cloud.shape[0])
dist2 = torch.clamp_min(distCUDA2(torch.from_numpy(np.asarray(pcd.points)).float().cuda()), 0.0000001)
scales = torch.log(torch.sqrt(dist2))[...,None].repeat(1, 3)
rots = torch.zeros((fused_point_cloud.shape[0], 4), device="cuda")
rots[:, 0] = 1
opacities = inverse_sigmoid(0.1 * torch.ones((fused_point_cloud.shape[0], 1), dtype=torch.float, device="cuda"))
self._xyz = nn.Parameter(fused_point_cloud.requires_grad_(True))
self._features_dc = nn.Parameter(features[:,:,0:1].transpose(1, 2).contiguous().requires_grad_(True))
self._features_rest = nn.Parameter(features[:,:,1:].transpose(1, 2).contiguous().requires_grad_(True))
self._scaling = nn.Parameter(scales.requires_grad_(True))
self._rotation = nn.Parameter(rots.requires_grad_(True))
self._opacity = nn.Parameter(opacities.requires_grad_(True))
self._background = nn.Parameter(torch.zeros((3,1,1), device="cuda").requires_grad_(True))
self.max_radii2D = torch.zeros((self.get_xyz.shape[0]), device="cuda")
def training_setup(self, training_args):
self.percent_dense = training_args.percent_dense
self.xyz_gradient_accum = torch.zeros((self.get_xyz.shape[0], 1), device="cuda")
self.denom = torch.zeros((self.get_xyz.shape[0], 1), device="cuda")
l = [
{'params': [self._xyz], 'lr': training_args.position_lr_init * self.spatial_lr_scale, "name": "xyz"},
{'params': [self._features_dc], 'lr': training_args.feature_lr, "name": "f_dc"},
{'params': [self._features_rest], 'lr': training_args.feature_lr / 20.0, "name": "f_rest"},
{'params': [self._opacity], 'lr': training_args.opacity_lr, "name": "opacity"},
{'params': [self._scaling], 'lr': training_args.scaling_lr, "name": "scaling"},
{'params': [self._rotation], 'lr': training_args.rotation_lr, "name": "rotation"},
{'params': [self._background], 'lr': training_args.feature_lr, "name": "background"},
]
self.optimizer = torch.optim.Adam(l, lr=0.0, eps=1e-15)
self.xyz_scheduler_args = get_expon_lr_func(lr_init=training_args.position_lr_init*self.spatial_lr_scale,
lr_final=training_args.position_lr_final*self.spatial_lr_scale,
lr_delay_mult=training_args.position_lr_delay_mult,
max_steps=training_args.iterations)
self.rotation_scheduler_args = get_expon_lr_func(lr_init=training_args.rotation_lr,
lr_final=training_args.rotation_lr_final,
lr_delay_mult=training_args.position_lr_delay_mult,
max_steps=training_args.iterations)
self.scaling_scheduler_args = get_expon_lr_func(lr_init=training_args.scaling_lr,
lr_final=training_args.scaling_lr_final,
lr_delay_mult=training_args.position_lr_delay_mult,
max_steps=training_args.iterations)
self.feature_scheduler_args = get_expon_lr_func(lr_init=training_args.feature_lr,
lr_final=training_args.feature_lr_final,
lr_delay_mult=training_args.position_lr_delay_mult,
max_steps=training_args.iterations)
def update_learning_rate(self, iteration):
''' Learning rate scheduling per step '''
for param_group in self.optimizer.param_groups:
if param_group["name"] == "xyz":
lr = self.xyz_scheduler_args(iteration)
param_group['lr'] = lr
return lr
def update_feature_learning_rate(self, iteration):
''' Learning rate scheduling per step '''
for param_group in self.optimizer.param_groups:
if param_group["name"] == "f_dc":
lr = self.feature_scheduler_args(iteration)
param_group['lr'] = lr
return lr
def update_rotation_learning_rate(self, iteration):
''' Learning rate scheduling per step '''
for param_group in self.optimizer.param_groups:
if param_group["name"] == "rotation":
lr = self.rotation_scheduler_args(iteration)
param_group['lr'] = lr
return lr
def update_scaling_learning_rate(self, iteration):
''' Learning rate scheduling per step '''
for param_group in self.optimizer.param_groups:
if param_group["name"] == "scaling":
lr = self.scaling_scheduler_args(iteration)
param_group['lr'] = lr
return lr
def construct_list_of_attributes(self):
l = ['x', 'y', 'z', 'nx', 'ny', 'nz']
# All channels except the 3 DC
for i in range(self._features_dc.shape[1]*self._features_dc.shape[2]):
l.append('f_dc_{}'.format(i))
for i in range(self._features_rest.shape[1]*self._features_rest.shape[2]):
l.append('f_rest_{}'.format(i))
l.append('opacity')
for i in range(self._scaling.shape[1]):
l.append('scale_{}'.format(i))
for i in range(self._rotation.shape[1]):
l.append('rot_{}'.format(i))
return l
def save_ply(self, path):
mkdir_p(os.path.dirname(path))
xyz = self._xyz.detach().cpu().numpy()
normals = np.zeros_like(xyz)
f_dc = self._features_dc.detach().transpose(1, 2).flatten(start_dim=1).contiguous().cpu().numpy()
f_rest = self._features_rest.detach().transpose(1, 2).flatten(start_dim=1).contiguous().cpu().numpy()
opacities = self._opacity.detach().cpu().numpy()
scale = self._scaling.detach().cpu().numpy()
rotation = self._rotation.detach().cpu().numpy()
dtype_full = [(attribute, 'f4') for attribute in self.construct_list_of_attributes()]
elements = np.empty(xyz.shape[0], dtype=dtype_full)
attributes = np.concatenate((xyz, normals, f_dc, f_rest, opacities, scale, rotation), axis=1)
elements[:] = list(map(tuple, attributes))
el = PlyElement.describe(elements, 'vertex')
PlyData([el]).write(path) | np.savetxt(os.path.join(os.path.split(path)[0],"point_cloud_rgb.txt"),np.concatenate((xyz, SH2RGB(f_dc)), axis=1)) | 5 | 2023-11-18 08:05:50+00:00 | 4k |
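
The covariance activation in this row composes a quaternion rotation R with a diagonal scaling S and keeps the symmetric product R S (R S)^T. A small torch-only check of that construction follows; the quaternion and scale values are arbitrary examples chosen only for illustration.

import torch

def quaternion_to_rotation(q):
    # Same (r, x, y, z) convention as build_rotation in the snippet above, for a single quaternion.
    q = q / q.norm()
    r, x, y, z = q.unbind()
    row0 = torch.stack([1 - 2 * (y * y + z * z), 2 * (x * y - r * z), 2 * (x * z + r * y)])
    row1 = torch.stack([2 * (x * y + r * z), 1 - 2 * (x * x + z * z), 2 * (y * z - r * x)])
    row2 = torch.stack([2 * (x * z - r * y), 2 * (y * z + r * x), 1 - 2 * (x * x + y * y)])
    return torch.stack([row0, row1, row2])

q = torch.tensor([0.9, 0.1, 0.3, 0.2])          # unnormalized quaternion (r, x, y, z)
s = torch.tensor([0.5, 1.0, 2.0])               # per-axis scales, e.g. after the exp activation
R = quaternion_to_rotation(q)
L = R @ torch.diag(s)
cov = L @ L.T                                    # 3x3 covariance of a single Gaussian
print(torch.allclose(cov, cov.T))                # True: symmetric by construction
print(torch.linalg.eigvalsh(cov))                # eigenvalues are the squared scales, all positive
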
VRSEN/agency-swarm | agency_swarm/tools/browsing/SelectDropdown.py | [
{
"identifier": "BaseTool",
"path": "agency_swarm/tools/base_tool.py",
"snippet": "class BaseTool(OpenAISchema, ABC):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n # # Exclude 'run' method from Pydantic model fields\n # self.model_fields.pop(\"run\", None)\n\n @abstractmethod\n def run(self, **kwargs):\n pass"
},
{
"identifier": "get_b64_screenshot",
"path": "agency_swarm/tools/browsing/util/get_b64_screenshot.py",
"snippet": "def get_b64_screenshot(wd, element=None):\n # Create a temporary file name but don't open it\n _, tmpfile_name = tempfile.mkstemp(suffix='.png')\n\n try:\n if element:\n element.screenshot(tmpfile_name)\n else:\n wd.get_screenshot_as_file(tmpfile_name)\n\n with open(tmpfile_name, 'rb') as f:\n screenshot = f.read()\n screenshot_b64 = base64.b64encode(screenshot).decode()\n\n return screenshot_b64\n finally:\n # Clean up the temporary file\n os.remove(tmpfile_name)"
},
{
"identifier": "get_web_driver",
"path": "agency_swarm/tools/browsing/util/selenium.py",
"snippet": "def get_web_driver():\n try:\n from selenium import webdriver\n from selenium.webdriver.chrome.service import Service as ChromeService\n except ImportError:\n print(\"Selenium not installed. Please install it with pip install selenium\")\n raise ImportError\n\n try:\n from webdriver_manager.chrome import ChromeDriverManager\n except ImportError:\n print(\"webdriver_manager not installed. Please install it with pip install webdriver-manager\")\n raise ImportError\n\n try:\n from selenium_stealth import stealth\n except ImportError:\n print(\"selenium_stealth not installed. Please install it with pip install selenium-stealth\")\n raise ImportError\n\n global wd\n\n if wd:\n return wd\n\n global selenium_config\n chrome_profile_path = selenium_config.get(\"chrome_profile_path\", None)\n profile_directory = None\n user_data_dir = None\n if isinstance(chrome_profile_path, str) and os.path.exists(chrome_profile_path):\n profile_directory = os.path.split(chrome_profile_path)[-1].strip(\"\\\\\").rstrip(\"/\")\n user_data_dir = os.path.split(chrome_profile_path)[0].strip(\"\\\\\").rstrip(\"/\")\n print(f\"Using Chrome profile: {profile_directory}\")\n print(f\"Using Chrome user data dir: {user_data_dir}\")\n print(f\"Using Chrome profile path: {chrome_profile_path}\")\n\n chrome_options = webdriver.ChromeOptions()\n # Removed headless and other options for debugging purposes\n\n chrome_driver_path = ChromeDriverManager().install()\n\n if selenium_config.get(\"headless\", False):\n chrome_options.add_argument('--headless')\n chrome_options.add_argument(\"--window-size=960,1080\")\n chrome_options.add_argument('--no-sandbox')\n chrome_options.add_argument('--disable-dev-shm-usage')\n chrome_options.add_argument(\"--disable-extensions\")\n chrome_options.add_argument(\"--disable-popup-blocking\")\n chrome_options.add_argument(\"--disable-web-security\")\n chrome_options.add_argument(\"--allow-running-insecure-content\")\n chrome_options.add_argument(\"--disable-gpu\")\n chrome_options.add_experimental_option(\"excludeSwitches\", [\"enable-automation\"])\n chrome_options.add_experimental_option('useAutomationExtension', False)\n if user_data_dir and profile_directory:\n chrome_options.add_argument(f\"user-data-dir={user_data_dir}\")\n chrome_options.add_argument(f\"profile-directory={profile_directory}\")\n\n try:\n wd = webdriver.Chrome(service=ChromeService(chrome_driver_path), options=chrome_options)\n print(\"WebDriver initialized successfully.\")\n # Print the actual profile path being used\n if wd.capabilities['chrome']['userDataDir']:\n print(f\"Profile path in use: {wd.capabilities['chrome']['userDataDir']}\")\n except Exception as e:\n print(f\"Error initializing WebDriver: {e}\")\n raise\n\n stealth(\n wd,\n languages=[\"en-US\", \"en\"],\n vendor=\"Google Inc.\",\n platform=\"Win32\",\n webgl_vendor=\"Intel Inc.\",\n renderer=\"Intel Iris OpenGL Engine\",\n fix_hairline=True,\n )\n\n # wd.set_window_size(960, 1080)\n wd.implicitly_wait(3)\n\n return wd"
},
{
"identifier": "set_web_driver",
"path": "agency_swarm/tools/browsing/util/selenium.py",
"snippet": "def set_web_driver(new_wd):\n global wd\n wd = remove_highlight_and_labels(wd)\n wd = new_wd"
},
{
"identifier": "highlight_elements_with_labels",
"path": "agency_swarm/tools/browsing/util/highlights.py",
"snippet": "def highlight_elements_with_labels(driver, selector):\n \"\"\"\n This function highlights clickable elements like buttons, links, and certain divs and spans\n that match the given CSS selector on the webpage with a red border and ensures that labels are visible and positioned\n correctly within the viewport.\n\n :param driver: Instance of Selenium WebDriver.\n :param selector: CSS selector for the elements to be highlighted.\n \"\"\"\n script = f\"\"\"\n // Helper function to check if an element is visible\n function isElementVisible(element) {{\n var rect = element.getBoundingClientRect();\n if (rect.width <= 0 || rect.height <= 0 || \n rect.top >= (window.innerHeight || document.documentElement.clientHeight) || \n rect.bottom <= 0 || \n rect.left >= (window.innerWidth || document.documentElement.clientWidth) || \n rect.right <= 0) {{\n return false;\n }}\n // Check if any parent element is hidden, which would hide this element as well\n var parent = element;\n while (parent) {{\n var style = window.getComputedStyle(parent);\n if (style.display === 'none' || style.visibility === 'hidden') {{\n return false;\n }}\n parent = parent.parentElement;\n }}\n return true;\n }}\n\n // Remove previous labels and styles if they exist\n document.querySelectorAll('.highlight-label').forEach(function(label) {{\n label.remove();\n }});\n document.querySelectorAll('.highlighted-element').forEach(function(element) {{\n element.classList.remove('highlighted-element');\n element.removeAttribute('data-highlighted');\n }});\n\n // Inject custom style for highlighting elements\n var styleElement = document.getElementById('highlight-style');\n if (!styleElement) {{\n styleElement = document.createElement('style');\n styleElement.id = 'highlight-style';\n document.head.appendChild(styleElement);\n }}\n styleElement.textContent = `\n .highlighted-element {{ \n border: 2px solid red !important; \n position: relative; \n box-sizing: border-box; \n }}\n .highlight-label {{ \n position: absolute; \n z-index: 2147483647; \n background: yellow; \n color: black; \n font-size: 25px; \n padding: 3px 5px; \n border: 1px solid black; \n border-radius: 3px; \n white-space: nowrap; \n box-shadow: 0px 0px 2px #000; \n top: -25px; \n left: 0; \n display: none;\n }}\n `;\n\n // Function to create and append a label to the body\n function createAndAdjustLabel(element, index) {{\n if (!isElementVisible(element)) return;\n\n element.classList.add('highlighted-element');\n var label = document.createElement('div');\n label.className = 'highlight-label';\n label.textContent = index.toString();\n label.style.display = 'block'; // Make the label visible\n\n // Calculate label position\n var rect = element.getBoundingClientRect();\n var top = rect.top + window.scrollY - 25; // Position label above the element\n var left = rect.left + window.scrollX;\n\n label.style.top = top + 'px';\n label.style.left = left + 'px';\n\n document.body.appendChild(label); // Append the label to the body\n }}\n\n // Select all clickable elements and apply the styles\n var allElements = document.querySelectorAll('{selector}');\n var index = 1;\n allElements.forEach(function(element) {{\n // Check if the element is not already highlighted and is visible\n if (!element.dataset.highlighted && isElementVisible(element)) {{\n element.dataset.highlighted = 'true';\n createAndAdjustLabel(element, index++);\n }}\n }});\n \"\"\"\n\n driver.execute_script(script)\n\n return driver"
},
{
"identifier": "get_openai_client",
"path": "agency_swarm/util/oai.py",
"snippet": "def get_openai_client():\n global client\n with client_lock:\n if client is None:\n # Check if the API key is set\n api_key = openai.api_key or os.getenv('OPENAI_API_KEY')\n if api_key is None:\n raise ValueError(\"OpenAI API key is not set. Please set it using set_openai_key.\")\n client = instructor.patch(openai.OpenAI(api_key=api_key,\n max_retries=5))\n return client"
}
] | import json
from pydantic import Field
from selenium.webdriver.common.by import By
from selenium.webdriver.support.select import Select
from agency_swarm.tools import BaseTool
from agency_swarm.tools.browsing.util import get_b64_screenshot
from agency_swarm.tools.browsing.util import get_web_driver, set_web_driver
from agency_swarm.tools.browsing.util.highlights import highlight_elements_with_labels
from agency_swarm.util import get_openai_client | 2,335 |
class SelectDropdown(BaseTool):
"""
This tool selects an option in a dropdown on the current web page based on the description of that element and which option to select.
"""
description: str = Field(
..., description="Description of which option to select and for which dropdown on the page, clearly stated in natural langauge.",
examples=["Select Germany option in the 'Country' dropdown."]
)
def run(self):
wd = get_web_driver()
client = get_openai_client()
wd = highlight_elements_with_labels(wd, 'select')
|
class SelectDropdown(BaseTool):
"""
This tool selects an option in a dropdown on the current web page based on the description of that element and which option to select.
"""
description: str = Field(
..., description="Description of which option to select and for which dropdown on the page, clearly stated in natural langauge.",
examples=["Select Germany option in the 'Country' dropdown."]
)
def run(self):
wd = get_web_driver()
client = get_openai_client()
wd = highlight_elements_with_labels(wd, 'select')
| screenshot = get_b64_screenshot(wd) | 1 | 2023-11-16 02:29:26+00:00 | 4k |
resemble-ai/resemble-enhance | resemble_enhance/enhancer/lcfm/lcfm.py | [
{
"identifier": "CFM",
"path": "resemble_enhance/enhancer/lcfm/cfm.py",
"snippet": "class CFM(nn.Module):\n \"\"\"\n This mixin is for general diffusion models.\n\n ψ0 stands for the gaussian noise, and ψ1 is the data point.\n\n Here we follow the CFM style:\n The generation process (reverse process) is from t=0 to t=1.\n The forward process is from t=1 to t=0.\n \"\"\"\n\n cond_dim: int\n output_dim: int\n time_emb_dim: int = 128\n viz_name: str = \"cfm\"\n solver_nfe: int = 32\n solver_method: str = \"midpoint\"\n time_mapping_divisor: int = 4\n\n def __post_init__(self):\n super().__init__()\n self.solver = Solver(\n viz_name=self.viz_name,\n viz_every=1,\n nfe=self.solver_nfe,\n method=self.solver_method,\n time_mapping_divisor=self.time_mapping_divisor,\n )\n self.emb = SinusodialTimeEmbedding(self.time_emb_dim)\n self.net = WN(\n input_dim=self.output_dim,\n output_dim=self.output_dim,\n local_dim=self.cond_dim,\n global_dim=self.time_emb_dim,\n )\n\n def _perturb(self, ψ1: Tensor, t: Tensor | None = None):\n \"\"\"\n Perturb ψ1 to ψt.\n \"\"\"\n raise NotImplementedError\n\n def _sample_ψ0(self, x: Tensor):\n \"\"\"\n Args:\n x: (b c t), which implies the shape of ψ0\n \"\"\"\n shape = list(x.shape)\n shape[1] = self.output_dim\n if self.training:\n g = None\n else:\n g = torch.Generator(device=x.device)\n g.manual_seed(0) # deterministic sampling during eval\n ψ0 = torch.randn(shape, device=x.device, dtype=x.dtype, generator=g)\n return ψ0\n\n @property\n def sigma(self):\n return 1e-4\n\n def _to_ψt(self, *, ψ1: Tensor, ψ0: Tensor, t: Tensor):\n \"\"\"\n Eq (22)\n \"\"\"\n while t.dim() < ψ1.dim():\n t = t.unsqueeze(-1)\n μ = t * ψ1 + (1 - t) * ψ0\n return μ + torch.randn_like(μ) * self.sigma\n\n def _to_u(self, *, ψ1, ψ0: Tensor):\n \"\"\"\n Eq (21)\n \"\"\"\n return ψ1 - ψ0\n\n def _to_v(self, *, ψt, x, t: float | Tensor):\n \"\"\"\n Args:\n ψt: (b c t)\n x: (b c t)\n t: (b)\n Returns:\n v: (b c t)\n \"\"\"\n if isinstance(t, (float, int)):\n t = torch.full(ψt.shape[:1], t).to(ψt)\n t = t.clamp(0, 1) # [0, 1)\n g = self.emb(t) # (b d)\n v = self.net(ψt, l=x, g=g)\n return v\n\n def compute_losses(self, x, y, ψ0) -> dict:\n \"\"\"\n Args:\n x: (b c t)\n y: (b c t)\n Returns:\n losses: dict\n \"\"\"\n t = torch.rand(len(x), device=x.device, dtype=x.dtype)\n t = self.solver.time_mapping(t)\n\n if ψ0 is None:\n ψ0 = self._sample_ψ0(x)\n\n ψt = self._to_ψt(ψ1=y, t=t, ψ0=ψ0)\n\n v = self._to_v(ψt=ψt, t=t, x=x)\n u = self._to_u(ψ1=y, ψ0=ψ0)\n\n losses = dict(l1=F.l1_loss(v, u))\n\n return losses\n\n @torch.inference_mode()\n def sample(self, x, ψ0=None, t0=0.0):\n \"\"\"\n Args:\n x: (b c t)\n Returns:\n y: (b ... t)\n \"\"\"\n if ψ0 is None:\n ψ0 = self._sample_ψ0(x)\n f = lambda t, ψt, dt: self._to_v(ψt=ψt, t=t, x=x)\n ψ1 = self.solver(f=f, ψ0=ψ0, t0=t0)\n return ψ1\n\n def forward(self, x: Tensor, y: Tensor | None = None, ψ0: Tensor | None = None, t0=0.0):\n if y is None:\n y = self.sample(x, ψ0=ψ0, t0=t0)\n else:\n self.losses = self.compute_losses(x, y, ψ0=ψ0)\n return y"
},
{
"identifier": "IRMAE",
"path": "resemble_enhance/enhancer/lcfm/irmae.py",
"snippet": "class IRMAE(nn.Module):\n def __init__(\n self,\n input_dim,\n output_dim,\n latent_dim,\n hidden_dim=1024,\n num_irms=4,\n ):\n \"\"\"\n Args:\n input_dim: input dimension\n output_dim: output dimension\n latent_dim: latent dimension\n hidden_dim: hidden layer dimension\n num_irm_matrics: number of implicit rank minimization matrices\n norm: normalization layer\n \"\"\"\n self.input_dim = input_dim\n super().__init__()\n\n self.encoder = nn.Sequential(\n nn.Conv1d(input_dim, hidden_dim, 3, padding=\"same\"),\n *[ResBlock(hidden_dim) for _ in range(4)],\n # Try to obtain compact representation (https://proceedings.neurips.cc/paper/2020/file/a9078e8653368c9c291ae2f8b74012e7-Paper.pdf)\n *[nn.Conv1d(hidden_dim if i == 0 else latent_dim, latent_dim, 1, bias=False) for i in range(num_irms)],\n nn.Tanh(),\n )\n\n self.decoder = nn.Sequential(\n nn.Conv1d(latent_dim, hidden_dim, 3, padding=\"same\"),\n *[ResBlock(hidden_dim) for _ in range(4)],\n nn.Conv1d(hidden_dim, output_dim, 1),\n )\n\n self.head = nn.Sequential(\n nn.Conv1d(output_dim, hidden_dim, 3, padding=\"same\"),\n nn.GELU(),\n nn.Conv1d(hidden_dim, input_dim, 1),\n )\n\n self.estimator = Normalizer()\n\n def encode(self, x):\n \"\"\"\n Args:\n x: (b c t) tensor\n \"\"\"\n z = self.encoder(x) # (b c t)\n _ = self.estimator(z) # Estimate the glboal mean and std of z\n self.stats = {}\n self.stats[\"z_mean\"] = z.mean().item()\n self.stats[\"z_std\"] = z.std().item()\n self.stats[\"z_abs_68\"] = z.abs().quantile(0.6827).item()\n self.stats[\"z_abs_95\"] = z.abs().quantile(0.9545).item()\n self.stats[\"z_abs_99\"] = z.abs().quantile(0.9973).item()\n return z\n\n def decode(self, z):\n \"\"\"\n Args:\n z: (b c t) tensor\n \"\"\"\n return self.decoder(z)\n\n def forward(self, x, skip_decoding=False):\n \"\"\"\n Args:\n x: (b c t) tensor\n skip_decoding: if True, skip the decoding step\n \"\"\"\n z = self.encode(x) # q(z|x)\n\n if skip_decoding:\n # This speeds up the training in cfm only mode\n decoded = None\n else:\n decoded = self.decode(z) # p(x|z)\n predicted = self.head(decoded)\n self.losses = dict(mse=F.mse_loss(predicted, x))\n\n return IRMAEOutput(latent=z, decoded=decoded)"
},
{
"identifier": "IRMAEOutput",
"path": "resemble_enhance/enhancer/lcfm/irmae.py",
"snippet": "class IRMAEOutput:\n latent: Tensor # latent vector\n decoded: Tensor | None # decoder output, include extra dim"
}
] | import logging
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from enum import Enum
from torch import Tensor, nn
from .cfm import CFM
from .irmae import IRMAE, IRMAEOutput
from ...utils.train_loop import TrainLoop | 2,245 |
logger = logging.getLogger(__name__)
def freeze_(module):
for p in module.parameters():
p.requires_grad_(False)
class LCFM(nn.Module):
class Mode(Enum):
AE = "ae"
|
logger = logging.getLogger(__name__)
def freeze_(module):
for p in module.parameters():
p.requires_grad_(False)
class LCFM(nn.Module):
class Mode(Enum):
AE = "ae" | CFM = "cfm" | 0 | 2023-11-15 08:15:51+00:00 | 4k |
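
The CFM mixin in this row interpolates ψt = t*ψ1 + (1-t)*ψ0 plus a small σ-scaled noise and regresses the velocity u = ψ1 - ψ0; those two equations are easy to reproduce in isolation. Batch size, channel count and σ below are arbitrary stand-ins.

import torch

sigma = 1e-4
psi0 = torch.randn(2, 4, 10)                 # noise endpoint, shaped (b, c, t)
psi1 = torch.randn(2, 4, 10)                 # data endpoint
t = torch.rand(2).view(-1, 1, 1)             # one sampled time per batch element

psi_t = t * psi1 + (1 - t) * psi0 + sigma * torch.randn_like(psi0)   # Eq (22) in the snippet
u = psi1 - psi0                                                      # Eq (21), the regression target
print(psi_t.shape, u.shape)                  # both torch.Size([2, 4, 10])
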
PKU-YuanGroup/Chat-UniVi | visualization.py | [
{
"identifier": "CLIPVisionTower",
"path": "ChatUniVi/model/multimodal_encoder/clip_encoder.py",
"snippet": "class CLIPVisionTower(nn.Module):\n def __init__(self, vision_tower, args=None, delay_load=False):\n super().__init__()\n\n self.is_loaded = False\n\n self.vision_tower_name = vision_tower\n if args is None:\n self.select_layer = -2\n self.select_feature = 'patch'\n else:\n self.select_layer = args.mm_vision_select_layer\n self.select_feature = getattr(args, 'mm_vision_select_feature', 'patch')\n\n if not delay_load:\n self.load_model()\n else:\n self.cfg_only = CLIPVisionConfig.from_pretrained(self.vision_tower_name)\n\n def load_model(self):\n self.image_processor = CLIPImageProcessor.from_pretrained(self.vision_tower_name)\n self.image_eval_processor = CLIPImageProcessor.from_pretrained(self.vision_tower_name)\n self.vision_tower = CLIPVisionModel.from_pretrained(self.vision_tower_name)\n self.vision_tower.requires_grad_(False)\n\n self.is_loaded = True\n\n def feature_select(self, image_forward_outs, select_feature='patch'):\n image_features = image_forward_outs.hidden_states[self.select_layer]\n if select_feature == 'patch':\n image_features = image_features[:, 1:]\n elif select_feature == 'cls_patch':\n image_features = image_features\n else:\n raise ValueError(f'Unexpected select feature: {self.select_feature}')\n return image_features\n\n @torch.no_grad()\n def forward(self, images, select_feature='patch'):\n if type(images) is list:\n image_features = []\n for image in images:\n image_forward_out = self.vision_tower(image.to(device=self.device, dtype=self.dtype).unsqueeze(0), output_hidden_states=True)\n image_feature = self.feature_select(image_forward_out, select_feature).to(image.dtype)\n image_features.append(image_feature)\n else:\n image_forward_outs = self.vision_tower(images.to(device=self.device, dtype=self.dtype), output_hidden_states=True)\n image_features = self.feature_select(image_forward_outs, select_feature).to(images.dtype)\n\n return image_features\n\n @property\n def dummy_feature(self):\n return torch.zeros(1, self.hidden_size, device=self.device, dtype=self.dtype)\n\n @property\n def dtype(self):\n return self.vision_tower.dtype\n\n @property\n def device(self):\n return self.vision_tower.device\n\n @property\n def config(self):\n if self.is_loaded:\n return self.vision_tower.config\n else:\n return self.cfg_only\n\n @property\n def hidden_size(self):\n return self.config.hidden_size\n\n @property\n def num_patches(self):\n return (self.config.image_size // self.config.patch_size) ** 2"
},
{
"identifier": "CTM",
"path": "ChatUniVi/model/cluster.py",
"snippet": "class CTM(nn.Module):\n def __init__(self, sample_ratio, embed_dim, dim_out, k=5):\n super().__init__()\n self.sample_ratio = sample_ratio\n self.dim_out = dim_out\n self.k = k\n\n def forward(self, token_dict, sample_ratio=None):\n x = token_dict[\"x\"]\n B, N, C = x.shape\n\n token_weight = x.new_ones(B, N)\n\n if token_dict[\"mask\"] is not None:\n token_weight.masked_fill_((1 - token_dict[\"mask\"]).to(torch.bool), float(\"-inf\"))\n token_weight = token_weight.unsqueeze(2)\n token_dict['x'] = x\n\n if sample_ratio is not None:\n cluster_num = max(math.ceil(N * sample_ratio), 1)\n elif self.sample_ratio > 1:\n cluster_num = max(math.ceil(self.sample_ratio), 1)\n else:\n cluster_num = max(math.ceil(N * self.sample_ratio), 1)\n\n k = min(3, max(cluster_num//2, 1)) if self.k > cluster_num else self.k\n idx_cluster, cluster_num = cluster_dpc_knn(\n token_dict, cluster_num, k, token_mask=token_dict[\"mask\"])\n\n down_dict = merge_tokens(token_dict, idx_cluster, cluster_num, token_weight)\n return down_dict, token_dict"
},
{
"identifier": "TCBlock",
"path": "ChatUniVi/model/cluster.py",
"snippet": "class TCBlock(nn.Module):\n def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,\n drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, sr_ratio=1, use_sr_layer=False):\n super().__init__()\n self.apply(self._init_weights)\n\n def _init_weights(self, m):\n if isinstance(m, nn.Linear):\n trunc_normal_(m.weight, std=.02)\n if isinstance(m, nn.Linear) and m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.LayerNorm):\n nn.init.constant_(m.bias, 0)\n nn.init.constant_(m.weight, 1.0)\n elif isinstance(m, nn.Conv2d):\n fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n fan_out //= m.groups\n m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))\n if m.bias is not None:\n m.bias.data.zero_()\n\n def forward(self, inputs):\n if isinstance(inputs, tuple) or isinstance(inputs, list):\n q_dict, kv_dict = inputs\n else:\n q_dict, kv_dict = inputs, None\n\n x = q_dict['x']\n return q_dict"
}
] | import numpy as np
import math
import os
import torch
from PIL import Image
from ChatUniVi.model.multimodal_encoder.clip_encoder import CLIPVisionTower
from ChatUniVi.model.cluster import CTM, TCBlock | 2,188 |
def split(image, patch_size=14, idx=None):
img = np.asarray(image, dtype=np.uint8).copy()
h, w, _ = img.shape
horizontal_lines = [i for i in range(patch_size, h, patch_size)]
vertical_lines = [i for i in range(patch_size, w, patch_size)]
for i in horizontal_lines:
for j in range(w):
img[i, j, :] = 0
for j in vertical_lines:
for i in range(h):
img[i, j, :] = 0
image = Image.fromarray(img, 'RGB')
return image
def merge(image, token_dict, patch_size=14, alpha=0.2, line_color=np.array([200, 200, 200])):
img = np.asarray(image, dtype=np.uint8).copy()
h, w, _ = img.shape
patch_num_h, patch_num_w = w // patch_size, w // patch_size
color_map = {}
idx = token_dict["idx_token"].tolist()[0]
for id, i in enumerate(idx):
color_map[i] = color_map[i] if i in color_map else {"id": [], "color": []}
color_map[i]["id"].append(id)
for _h in range(patch_size):
for _w in range(patch_size):
color_map[i]["color"].append(img[_h + patch_size * math.floor(id / patch_num_w),
_w + patch_size * (id % patch_num_h)])
for i in color_map:
color_map[i]["color"] = np.mean(np.stack(color_map[i]["color"], axis=0), axis=0)
for id in color_map[i]["id"]:
for _h in range(patch_size):
for _w in range(patch_size):
color = img[_h + patch_size * math.floor(id / patch_num_w), _w + patch_size * (
id % patch_num_h)] * alpha + color_map[i]["color"] * (1 - alpha)
img[_h + patch_size * math.floor(id / patch_num_w), _w + patch_size * (id % patch_num_h)] = color
for id, i in enumerate(idx):
if math.floor(id / patch_num_w) > 0:
if idx[id - patch_num_w] != i:
for _w in range(patch_size * (id % patch_num_h), patch_size * (id % patch_num_h + 1)):
img[patch_size * math.floor(id / patch_num_w), _w, :] = line_color
if (id % patch_num_h) > 0:
if idx[id - 1] != i:
for _h in range(patch_size * math.floor(id / patch_num_w), patch_size * (math.floor(id / patch_num_w) + 1)):
img[_h, patch_size * (id % patch_num_h), :] = line_color
image = Image.fromarray(img, 'RGB')
return image
if __name__ == '__main__':
image_path = "figures/COCO_val2014_000000214293.jpg"
clip_vit_14_path = ${openai_clip_path}
output_file = "figures"
if not os.path.exists(output_file):
os.makedirs(output_file)
vision_tower = CLIPVisionTower(clip_vit_14_path)
image = Image.open(os.path.join(image_path)).resize((224, 224))
|
def split(image, patch_size=14, idx=None):
img = np.asarray(image, dtype=np.uint8).copy()
h, w, _ = img.shape
horizontal_lines = [i for i in range(patch_size, h, patch_size)]
vertical_lines = [i for i in range(patch_size, w, patch_size)]
for i in horizontal_lines:
for j in range(w):
img[i, j, :] = 0
for j in vertical_lines:
for i in range(h):
img[i, j, :] = 0
image = Image.fromarray(img, 'RGB')
return image
def merge(image, token_dict, patch_size=14, alpha=0.2, line_color=np.array([200, 200, 200])):
img = np.asarray(image, dtype=np.uint8).copy()
h, w, _ = img.shape
patch_num_h, patch_num_w = w // patch_size, w // patch_size
color_map = {}
idx = token_dict["idx_token"].tolist()[0]
for id, i in enumerate(idx):
color_map[i] = color_map[i] if i in color_map else {"id": [], "color": []}
color_map[i]["id"].append(id)
for _h in range(patch_size):
for _w in range(patch_size):
color_map[i]["color"].append(img[_h + patch_size * math.floor(id / patch_num_w),
_w + patch_size * (id % patch_num_h)])
for i in color_map:
color_map[i]["color"] = np.mean(np.stack(color_map[i]["color"], axis=0), axis=0)
for id in color_map[i]["id"]:
for _h in range(patch_size):
for _w in range(patch_size):
color = img[_h + patch_size * math.floor(id / patch_num_w), _w + patch_size * (
id % patch_num_h)] * alpha + color_map[i]["color"] * (1 - alpha)
img[_h + patch_size * math.floor(id / patch_num_w), _w + patch_size * (id % patch_num_h)] = color
for id, i in enumerate(idx):
if math.floor(id / patch_num_w) > 0:
if idx[id - patch_num_w] != i:
for _w in range(patch_size * (id % patch_num_h), patch_size * (id % patch_num_h + 1)):
img[patch_size * math.floor(id / patch_num_w), _w, :] = line_color
if (id % patch_num_h) > 0:
if idx[id - 1] != i:
for _h in range(patch_size * math.floor(id / patch_num_w), patch_size * (math.floor(id / patch_num_w) + 1)):
img[_h, patch_size * (id % patch_num_h), :] = line_color
image = Image.fromarray(img, 'RGB')
return image
if __name__ == '__main__':
image_path = "figures/COCO_val2014_000000214293.jpg"
clip_vit_14_path = ${openai_clip_path}
output_file = "figures"
if not os.path.exists(output_file):
os.makedirs(output_file)
vision_tower = CLIPVisionTower(clip_vit_14_path)
image = Image.open(os.path.join(image_path)).resize((224, 224))
| ctm0 = CTM(sample_ratio=64, embed_dim=1024, dim_out=1024, k=32) | 1 | 2023-11-13 11:52:56+00:00 | 4k |
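The merge() routine in the record above recovers a patch's pixel block from its flat index via floor(id / patch_num_w) for the row and id % patch_num_h for the column. A minimal standalone sketch of that index arithmetic (not part of the record; the 224x224 image size and 14-pixel patches are assumptions taken from the record's __main__ block):

import math

def patch_index_to_box(idx, patch_size=14, image_size=224):
    # Number of patches per row for a square image (16 for 224 / 14).
    patches_per_row = image_size // patch_size
    row = math.floor(idx / patches_per_row)  # same arithmetic as merge()
    col = idx % patches_per_row
    # Pixel bounding box (top, left, bottom, right) covered by this patch.
    top, left = row * patch_size, col * patch_size
    return top, left, top + patch_size, left + patch_size

# Quick check on the assumed 16x16 patch grid.
assert patch_index_to_box(0) == (0, 0, 14, 14)
assert patch_index_to_box(17) == (14, 14, 28, 28)  # second row, second column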
tatsu-lab/gpt_paper_assistant | filter_papers.py | [
{
"identifier": "Paper",
"path": "arxiv_scraper.py",
"snippet": "class Paper:\n # paper class should track the list of authors, paper title, abstract, arxiv id\n authors: List[str]\n title: str\n abstract: str\n arxiv_id: str\n\n # add a hash function using arxiv_id\n def __hash__(self):\n return hash(self.arxiv_id)"
},
{
"identifier": "EnhancedJSONEncoder",
"path": "arxiv_scraper.py",
"snippet": "class EnhancedJSONEncoder(json.JSONEncoder):\n def default(self, o):\n if dataclasses.is_dataclass(o):\n return dataclasses.asdict(o)\n return super().default(o)"
}
] | import configparser
import dataclasses
import json
import os
import re
import retry
from collections import defaultdict
from typing import List
from openai import OpenAI
from tqdm import tqdm
from arxiv_scraper import Paper
from arxiv_scraper import EnhancedJSONEncoder | 2,395 | + "Abstract: "
+ paper_entry.abstract[:4000]
)
return new_str
def batched(items, batch_size):
# takes a list and returns a list of list with batch_size
return [items[i : i + batch_size] for i in range(0, len(items), batch_size)]
def filter_papers_by_title(
papers: List[Paper], base_prompt: str, criterion: str
) -> List[Paper]:
filter_postfix = "Please identify any papers that you are absolutely sure your friend will not enjoy, formatted as a list of arxiv ids like [ID1, ID2, ID3..]"
batches_of_papers = batched(papers, 20)
final_list = []
for batch in batches_of_papers:
papers_string = "".join([paper_to_titles(paper) for paper in batch])
full_prompt = (
base_prompt + "\n " + criterion + "\n" + papers_string + filter_postfix
)
completion = call_chatgpt(full_prompt, "gpt-4")
cost = calc_price("gpt-4", completion.usage)
out_text = completion.choices[0].message.content
try:
filtered_set = set(json.loads(out_text))
for paper in batch:
if paper.arxiv_id not in filtered_set:
final_list.append(paper)
except Exception as ex:
print("Exception happened " + str(ex))
print("Failed to parse LM output as list " + out_text)
print(completion)
continue
return final_list, cost
def paper_to_titles(paper_entry: Paper) -> str:
return "ArXiv ID: " + paper_entry.arxiv_id + " Title: " + paper_entry.title + "\n"
def run_on_batch(
paper_batch, base_prompt, criterion, postfix_prompt, openai_client, config
):
batch_str = [paper_to_string(paper) for paper in paper_batch]
full_prompt = "\n".join(
[
base_prompt,
criterion + "\n",
"\n\n".join(batch_str) + "\n",
postfix_prompt,
]
)
json_dicts, cost = run_and_parse_chatgpt(full_prompt, openai_client, config)
return json_dicts, cost
def filter_by_gpt(
all_authors, papers, config, openai_client, all_papers, selected_papers, sort_dict
):
# deal with config parsing
with open("configs/base_prompt.txt", "r") as f:
base_prompt = f.read()
with open("configs/paper_topics.txt", "r") as f:
criterion = f.read()
with open("configs/postfix_prompt.txt", "r") as f:
postfix_prompt = f.read()
all_cost = 0
if config["SELECTION"].getboolean("run_openai"):
# filter first by hindex of authors to reduce costs.
paper_list = filter_papers_by_hindex(all_authors, papers, config)
if config["OUTPUT"].getboolean("debug_messages"):
print(str(len(paper_list)) + " papers after hindex filtering")
cost = 0
# paper_list, cost = filter_papers_by_title(paper_list, base_prompt, criterion)
if config["OUTPUT"].getboolean("debug_messages"):
print(
str(len(paper_list))
+ " papers after title filtering with cost of $"
+ str(cost)
)
all_cost += cost
# batch the remaining papers and invoke GPT
batch_of_papers = batched(paper_list, int(config["SELECTION"]["batch_size"]))
scored_batches = []
for batch in tqdm(batch_of_papers):
scored_in_batch = []
json_dicts, cost = run_on_batch(
batch, base_prompt, criterion, postfix_prompt, openai_client, config
)
all_cost += cost
for jdict in json_dicts:
if (
int(jdict["RELEVANCE"])
>= int(config["FILTERING"]["relevance_cutoff"])
and jdict["NOVELTY"] >= int(config["FILTERING"]["novelty_cutoff"])
and jdict["ARXIVID"] in all_papers
):
selected_papers[jdict["ARXIVID"]] = {
**dataclasses.asdict(all_papers[jdict["ARXIVID"]]),
**jdict,
}
## take the max of author match and gpt score
sort_dict[jdict["ARXIVID"]] = max(
jdict["RELEVANCE"] + jdict["NOVELTY"],
sort_dict.get(jdict["ARXIVID"], 0),
)
scored_in_batch.append(
{
**dataclasses.asdict(all_papers[jdict["ARXIVID"]]),
**jdict,
}
)
scored_batches.append(scored_in_batch)
if config["OUTPUT"].getboolean("dump_debug_file"):
with open(
config["OUTPUT"]["output_path"] + "gpt_paper_batches.debug.json", "w"
) as outfile:
|
def filter_by_author(all_authors, papers, author_targets, config):
# filter and parse the papers
selected_papers = {} # pass to output
all_papers = {} # dict for later filtering
sort_dict = {} # dict storing key and score
# author based selection
for paper in papers:
all_papers[paper.arxiv_id] = paper
if config["FILTERING"].getboolean("author_match"):
for author in paper.authors:
if author in all_authors:
for alias in all_authors[author]:
if alias["authorId"] in author_targets:
selected_papers[paper.arxiv_id] = {
**dataclasses.asdict(paper),
**{"COMMENT": "Author match"},
}
sort_dict[paper.arxiv_id] = float(
config["SELECTION"]["author_match_score"]
)
break
return selected_papers, all_papers, sort_dict
def filter_papers_by_hindex(all_authors, papers, config):
# filters papers by checking to see if there's at least one author with > hcutoff hindex
paper_list = []
for paper in papers:
max_h = 0
for author in paper.authors:
if author in all_authors:
max_h = max(
max_h, max([alias["hIndex"] for alias in all_authors[author]])
)
if max_h >= float(config["FILTERING"]["hcutoff"]):
paper_list.append(paper)
return paper_list
def calc_price(model, usage):
if model == "gpt-4-1106-preview":
return (0.01 * usage.prompt_tokens + 0.03 * usage.completion_tokens) / 1000.0
if model == "gpt-4":
return (0.03 * usage.prompt_tokens + 0.06 * usage.completion_tokens) / 1000.0
if (model == "gpt-3.5-turbo") or (model == "gpt-3.5-turbo-1106"):
return (0.0015 * usage.prompt_tokens + 0.002 * usage.completion_tokens) / 1000.0
@retry.retry(tries=3, delay=2)
def call_chatgpt(full_prompt, openai_client, model, num_samples):
return openai_client.chat.completions.create(
model=model,
messages=[{"role": "user", "content": full_prompt}],
temperature=0.0,
n=int(num_samples),
seed=0,
)
def run_and_parse_chatgpt(full_prompt, openai_client, config):
# just runs the chatgpt prompt, tries to parse the resulting JSON
completion = call_chatgpt(
full_prompt,
openai_client,
config["SELECTION"]["model"],
config["FILTERING"]["num_samples"],
)
json_dicts = defaultdict(list)
for choice in completion.choices:
out_text = choice.message.content
out_text = re.sub("```jsonl\n", "", out_text)
out_text = re.sub("```", "", out_text)
out_text = re.sub(r"\n+", "\n", out_text)
out_text = re.sub("},", "}", out_text).strip()
# split out_text line by line and parse each as a json.
for line in out_text.split("\n"):
# try catch block to attempt to parse json
try:
loaded_output = json.loads(line)
json_dicts[loaded_output["ARXIVID"]].append(loaded_output)
except Exception as ex:
if config["OUTPUT"].getboolean("debug_messages"):
print("Exception happened " + str(ex))
print("Failed to parse LM output as json")
print(out_text)
print("RAW output")
print(completion.choices[0].message.content)
continue
all_dict = []
for id, json_list in json_dicts.items():
rel_score = sum([float(jdict["RELEVANCE"]) for jdict in json_list]) / float(
len(json_list)
)
nov_score = sum([float(jdict["NOVELTY"]) for jdict in json_list]) / float(
len(json_list)
)
new_dict = {
"ARXIVID": json_list[0]["ARXIVID"],
"COMMENT": json_list[0]["COMMENT"],
"RELEVANCE": rel_score,
"NOVELTY": nov_score,
}
all_dict.append(new_dict)
return all_dict, calc_price(config["SELECTION"]["model"], completion.usage)
def paper_to_string(paper_entry: Paper) -> str:
# renders each paper into a string to be processed by GPT
new_str = (
"ArXiv ID: "
+ paper_entry.arxiv_id
+ "\n"
+ "Title: "
+ paper_entry.title
+ "\n"
+ "Authors: "
+ " and ".join(paper_entry.authors)
+ "\n"
+ "Abstract: "
+ paper_entry.abstract[:4000]
)
return new_str
def batched(items, batch_size):
# takes a list and returns a list of list with batch_size
return [items[i : i + batch_size] for i in range(0, len(items), batch_size)]
def filter_papers_by_title(
papers: List[Paper], base_prompt: str, criterion: str
) -> List[Paper]:
filter_postfix = "Please identify any papers that you are absolutely sure your friend will not enjoy, formatted as a list of arxiv ids like [ID1, ID2, ID3..]"
batches_of_papers = batched(papers, 20)
final_list = []
for batch in batches_of_papers:
papers_string = "".join([paper_to_titles(paper) for paper in batch])
full_prompt = (
base_prompt + "\n " + criterion + "\n" + papers_string + filter_postfix
)
completion = call_chatgpt(full_prompt, "gpt-4")
cost = calc_price("gpt-4", completion.usage)
out_text = completion.choices[0].message.content
try:
filtered_set = set(json.loads(out_text))
for paper in batch:
if paper.arxiv_id not in filtered_set:
final_list.append(paper)
except Exception as ex:
print("Exception happened " + str(ex))
print("Failed to parse LM output as list " + out_text)
print(completion)
continue
return final_list, cost
def paper_to_titles(paper_entry: Paper) -> str:
return "ArXiv ID: " + paper_entry.arxiv_id + " Title: " + paper_entry.title + "\n"
def run_on_batch(
paper_batch, base_prompt, criterion, postfix_prompt, openai_client, config
):
batch_str = [paper_to_string(paper) for paper in paper_batch]
full_prompt = "\n".join(
[
base_prompt,
criterion + "\n",
"\n\n".join(batch_str) + "\n",
postfix_prompt,
]
)
json_dicts, cost = run_and_parse_chatgpt(full_prompt, openai_client, config)
return json_dicts, cost
def filter_by_gpt(
all_authors, papers, config, openai_client, all_papers, selected_papers, sort_dict
):
# deal with config parsing
with open("configs/base_prompt.txt", "r") as f:
base_prompt = f.read()
with open("configs/paper_topics.txt", "r") as f:
criterion = f.read()
with open("configs/postfix_prompt.txt", "r") as f:
postfix_prompt = f.read()
all_cost = 0
if config["SELECTION"].getboolean("run_openai"):
# filter first by hindex of authors to reduce costs.
paper_list = filter_papers_by_hindex(all_authors, papers, config)
if config["OUTPUT"].getboolean("debug_messages"):
print(str(len(paper_list)) + " papers after hindex filtering")
cost = 0
# paper_list, cost = filter_papers_by_title(paper_list, base_prompt, criterion)
if config["OUTPUT"].getboolean("debug_messages"):
print(
str(len(paper_list))
+ " papers after title filtering with cost of $"
+ str(cost)
)
all_cost += cost
# batch the remaining papers and invoke GPT
batch_of_papers = batched(paper_list, int(config["SELECTION"]["batch_size"]))
scored_batches = []
for batch in tqdm(batch_of_papers):
scored_in_batch = []
json_dicts, cost = run_on_batch(
batch, base_prompt, criterion, postfix_prompt, openai_client, config
)
all_cost += cost
for jdict in json_dicts:
if (
int(jdict["RELEVANCE"])
>= int(config["FILTERING"]["relevance_cutoff"])
and jdict["NOVELTY"] >= int(config["FILTERING"]["novelty_cutoff"])
and jdict["ARXIVID"] in all_papers
):
selected_papers[jdict["ARXIVID"]] = {
**dataclasses.asdict(all_papers[jdict["ARXIVID"]]),
**jdict,
}
## take the max of author match and gpt score
sort_dict[jdict["ARXIVID"]] = max(
jdict["RELEVANCE"] + jdict["NOVELTY"],
sort_dict.get(jdict["ARXIVID"], 0),
)
scored_in_batch.append(
{
**dataclasses.asdict(all_papers[jdict["ARXIVID"]]),
**jdict,
}
)
scored_batches.append(scored_in_batch)
if config["OUTPUT"].getboolean("dump_debug_file"):
with open(
config["OUTPUT"]["output_path"] + "gpt_paper_batches.debug.json", "w"
) as outfile: | json.dump(scored_batches, outfile, cls=EnhancedJSONEncoder, indent=4) | 1 | 2023-11-13 15:19:38+00:00 | 4k |
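calc_price() in the record above charges per 1,000 tokens with model-specific prompt/completion rates, and batched() simply chunks the paper list. A small worked sketch of both (the token counts are made-up numbers, and the gpt-4 rates are copied from the record itself rather than verified against current pricing):

# Hypothetical usage: 2,000 prompt tokens and 500 completion tokens on gpt-4.
prompt_tokens, completion_tokens = 2000, 500

# gpt-4 rates from calc_price(): $0.03 per 1K prompt tokens, $0.06 per 1K completion tokens.
cost = (0.03 * prompt_tokens + 0.06 * completion_tokens) / 1000.0
print(f"estimated cost: ${cost:.2f}")  # 0.03*2000 + 0.06*500 = 90 -> $0.09

# batched() chunks a list into fixed-size batches, last batch possibly shorter.
items = list(range(7))
batches = [items[i:i + 3] for i in range(0, len(items), 3)]
assert batches == [[0, 1, 2], [3, 4, 5], [6]]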
BobaZooba/xllm | tests/unit/collators/test_completion.py | [
{
"identifier": "enums",
"path": "src/xllm/enums.py",
"snippet": "class General:\nclass Transformers:\nclass Registry:\nclass Datasets:\nclass Collators:\nclass Trainers:\nclass Experiments:\nclass EnvironmentVariables:\nclass LogLevel:"
},
{
"identifier": "CompletionCollator",
"path": "src/xllm/collators/completion.py",
"snippet": "class CompletionCollator(BaseCollator):\n \"\"\"\n `CompletionCollator` is a specialized collator class extending `BaseCollator`. It is designed to prepare\n data specifically for text completion tasks, such as language model fine-tuning and sentence completion,\n where a model generates text based on a given prompt.\n\n This class takes care of tokenizing the text input, generating the necessary attention masks, and creating\n targets for the language model, ensuring that the data is presented in a way that is consistent with the\n expectations of the model during training.\n\n This collator is needed for cases when we want to calculate the loss only for the last text in the list.\n For example, these are instances of interacting with an assistant. We don't want to train the model on\n how the user speaks. We don't need the model to be able to imitate the user, so we construct the dataset\n in such a way that at the end of the list of texts (dialogue), there is a phrase by the assistant.\n Essentially, we will be training the model to generate these completions by the assistant.\n\n Key features and methods provided by `CompletionCollator`:\n\n - `__init__`: Initializes a new `CompletionCollator` with a tokenizer, maximum sequence length, and optional\n special markers for the distinction between text prompt and completion.\n\n - `parse_sample`:\n Tokenizes individual text parts, differentiating between the prompt and completion sections,\n and prepares the input and target tokens.\n\n - `parse_batch`: Aggregates multiple samples into a single batch, padding the sequences to the same length and\n generating attention masks.\n\n Attributes:\n - `prefix_end` (`Optional[str]`): A special marker used to identify the end of the prefix or prompt in the text.\n If provided, the tokens following this marker are treated as the completion section for which the model\n generates predictions during training.\n\n - Other attributes inherited from `BaseCollator`, like `tokenizer`, `max_length`, and `separator`.\n\n By providing a structured way to prepare batches of data specifically for completion tasks, the `CompletionCollator`\n facilitates efficient training workflows and ensures the data adheres to the format required for\n effective fine-tuning of language models.\n\n The `CompletionCollator` should be employed when building training loops for models that generate completions\n to given text inputs, as it automates much of the otherwise manual batch preparation processes.\n \"\"\"\n\n def __init__(\n self,\n tokenizer: PreTrainedTokenizer,\n max_length: int,\n prefix_end: Optional[str] = None,\n separator: str = \"\\n\",\n ):\n super().__init__(tokenizer=tokenizer, max_length=max_length, separator=separator)\n\n self.prefix_end = prefix_end\n\n def parse_sample(self, sample: List[str]) -> Tuple[List[int], List[int]]:\n \"\"\"\n Tokenizes a single text sample and prepares individual input and target sequences for text completion tasks.\n\n This method takes a list of text parts, representing the segments of a single raw text sample,\n and processes them to generate sequences of token IDs suitable for model training. It identifies\n a prefix within the text (if specified) and ensures that targets are generated only for the completion section.\n\n Args:\n sample (`List[str]`):\n A list of strings where each element is a part of the text to be tokenized. 
The last text part\n is considered the completion target when a `prefix_end` is provided.\n\n Returns:\n `Tuple[List[int], List[int]]`: A tuple containing two lists:\n - The first list represents the `input_ids`, token IDs for the model's input sequence.\n - The second list represents the `targets`, corresponding token IDs for labels used during\n loss computation.\n\n The `parse_sample` method performs the following steps for the given text sample:\n\n - Appends a separator to each text part and tokenizes them, generating a list of token indices.\n - Constructs a mask to identify the target tokens within the completion section of the text.\n - Aligns the token indices and the mask to create `input_ids` and `targets`. Padding tokens are used\n for non-target positions in the `targets` list.\n\n The separation between the prefix and completion sections is defined by `self.prefix_end`.\n When this marker is present in the last text part, it distinguishes the section of the text where model\n predictions should align with the provided text (targets for the model).\n\n This method is an integral part of the `CompletionCollator` class, enabling fine-grained control over\n the tokenization and preparation of each text sample prior to batch collation.\n \"\"\"\n\n text_parts = [text + self.separator for text in sample]\n tokenized = self.tokenizer(text_parts)[enums.Transformers.input_ids]\n\n token_indices = list()\n mask = list()\n\n if self.prefix_end and self.prefix_end in text_parts[-1]:\n prefix = text_parts[-1][: text_parts[-1].index(self.prefix_end) + len(self.prefix_end)]\n n_prefix = len(self.tokenizer(prefix)[enums.Transformers.input_ids]) - 1\n else:\n n_prefix = 0\n\n for n_sample, text_indices in enumerate(tokenized):\n if n_sample > 0:\n text_indices = text_indices[1:]\n if n_sample == len(tokenized) - 1:\n sample_mask = [0] * n_prefix + [1] * (len(text_indices) - n_prefix)\n else:\n sample_mask = [0] * len(text_indices)\n\n token_indices.extend(text_indices)\n mask.extend(sample_mask)\n\n input_ids = token_indices[:-1]\n\n targets = list()\n\n for token_index, flag in zip(token_indices[1:], mask[1:]):\n if flag:\n targets.append(token_index)\n else:\n targets.append(self.tokenizer.pad_token_id)\n\n return input_ids, targets\n\n def parse_batch(self, raw_batch: List[RawSample]) -> Batch:\n \"\"\"\n Processes a batch of raw samples and converts them into the format expected by language models\n for completion tasks.\n\n This method is an implementation of the `parse_batch` abstract method from `BaseCollator`.\n It is responsible for tokenizing the text parts within each raw sample, preparing the tokens\n as model inputs (`input_ids`), and generating corresponding targets.\n\n Args:\n raw_batch (`List[RawSample]`):\n A list of dictionaries, each representing a raw text sample. 
Each sample is expected to contain\n a key-value pair, where the key is specified by `enums.General.text_parts` and the value is a list\n of text parts to be tokenized and processed.\n\n Returns:\n `Batch`: A dictionary that contains three keys (`input_ids`, `attention_mask`, `labels`) with their\n respective tensors, ready for use as input to a language model.\n - `input_ids` and `attention_mask` are used by the model to compute the outputs.\n - `labels` are used to calculate the loss during training.\n\n The `parse_batch` method performs several steps for each raw sample in the batch:\n\n - Splits the sample into text parts and tokenizes them.\n - Determines the prefix length to differentiate between prefix and completion parts according\n to `self.prefix_end`.\n - Constructs the `input_ids` and `labels` by including tokens from the text and assigning padding\n token IDs where necessary.\n - Generates the `attention_mask` to inform the model which tokens to pay attention to during training.\n - Pads the sequences in the batch to match the length of the longest sequence, ensuring batch\n uniformity in size.\n\n This collation logic is essential for text completion tasks, where models typically predict the\n continuation of a given text prompt. By handling the tokenization and the setup of inputs and targets,\n the `CompletionCollator` enables seamless integration with the model's expected input format.\n \"\"\"\n\n input_ids = list()\n targets = list()\n attention_masks = list()\n\n batch_max_length = 0\n\n for sample in raw_batch:\n text_parts = sample[enums.General.text_parts]\n\n if isinstance(text_parts, list):\n sample_input_ids, sample_targets = self.parse_sample(sample=[str(item) for item in text_parts])\n sample_length = len(sample_input_ids)\n\n input_ids.append(sample_input_ids)\n targets.append(sample_targets)\n\n if sample_length > batch_max_length:\n batch_max_length = sample_length\n\n attention_masks.append([1] * sample_length)\n\n for n_sample in range(len(input_ids)):\n pad_sequence = [self.tokenizer.pad_token_id] * (batch_max_length - len(input_ids[n_sample]))\n if pad_sequence:\n additional_attention_mask = [0] * len(pad_sequence)\n if self.tokenizer.padding_side == \"left\":\n input_ids[n_sample] = pad_sequence + input_ids[n_sample]\n targets[n_sample] = pad_sequence + targets[n_sample]\n attention_masks[n_sample] = additional_attention_mask + attention_masks[n_sample]\n else:\n input_ids[n_sample] += pad_sequence\n targets[n_sample] += pad_sequence\n attention_masks[n_sample] += additional_attention_mask\n\n batch = {\n enums.Transformers.input_ids: torch.tensor(input_ids),\n enums.Transformers.attention_mask: torch.tensor(attention_masks),\n enums.Transformers.labels: torch.tensor(targets),\n }\n\n return batch"
},
{
"identifier": "DATA",
"path": "tests/helpers/dummy_data.py",
"snippet": "DATA = [\n {\n enums.General.text_parts: [\n \"Person 1: Hello\",\n \"Person 2: It's me\",\n \"Person 1: I was wondering\",\n ]\n },\n {\n enums.General.text_parts: [\n \"You are a sith lord\",\n \"Kenobi: Hello there\",\n \"General Grievous: General Kenobi\",\n ]\n },\n]"
}
] | from typing import Optional
from torch import Tensor
from transformers import PreTrainedTokenizer
from src.xllm import enums
from src.xllm.collators.completion import CompletionCollator
from tests.helpers.dummy_data import DATA
import pytest | 2,698 | # Copyright 2023 Boris Zubarev. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
@pytest.mark.parametrize("prefix_end", [None, ":"])
def test_completion_collator(llama_tokenizer: PreTrainedTokenizer, prefix_end: Optional[str]):
collator = CompletionCollator(tokenizer=llama_tokenizer, max_length=128, prefix_end=prefix_end)
batch = collator(DATA)
for _key, value in batch.items():
assert isinstance(value, Tensor)
| # Copyright 2023 Boris Zubarev. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
@pytest.mark.parametrize("prefix_end", [None, ":"])
def test_completion_collator(llama_tokenizer: PreTrainedTokenizer, prefix_end: Optional[str]):
collator = CompletionCollator(tokenizer=llama_tokenizer, max_length=128, prefix_end=prefix_end)
batch = collator(DATA)
for _key, value in batch.items():
assert isinstance(value, Tensor)
| condition_result = (batch[enums.Transformers.labels][:, :2] == llama_tokenizer.pad_token_id).unique() | 0 | 2023-11-10 17:55:03+00:00 | 4k |
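The CompletionCollator snippet in this record trains only on the completion part of the last text: targets are the next-token labels, with every position inside the prefix replaced by the pad id. A stripped-down sketch of that masking on plain integer lists (no tokenizer; pad id 0 is an assumption for illustration):

def build_targets(token_indices, n_prefix, pad_token_id=0):
    # Inputs are all tokens except the last; targets are the shifted next-token labels.
    input_ids = token_indices[:-1]
    targets = []
    for position, token_index in enumerate(token_indices[1:], start=1):
        # Positions inside the prefix are not trained on -> pad id, as in the collator.
        targets.append(token_index if position >= n_prefix else pad_token_id)
    return input_ids, targets

# Toy sequence: 3 prefix tokens (the prompt) followed by 3 completion tokens.
tokens = [11, 12, 13, 21, 22, 23]
input_ids, targets = build_targets(tokens, n_prefix=3)
assert input_ids == [11, 12, 13, 21, 22]
assert targets == [0, 0, 21, 22, 23]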
banodoco/Steerable-Motion | imports/AdvancedControlNet/weight_nodes.py | [
{
"identifier": "TimestepKeyframeImport",
"path": "imports/AdvancedControlNet/control.py",
"snippet": "class TimestepKeyframeImport:\n def __init__(self,\n start_percent: float = 0.0,\n strength: float = 1.0,\n interpolation: str = StrengthInterpolationImport.NONE,\n control_weights: ControlWeightsImport = None,\n latent_keyframes: LatentKeyframeGroupImport = None,\n null_latent_kf_strength: float = 0.0,\n inherit_missing: bool = True,\n guarantee_usage: bool = True,\n mask_hint_orig: Tensor = None) -> None:\n self.start_percent = start_percent\n self.start_t = 999999999.9\n self.strength = strength\n self.interpolation = interpolation\n self.control_weights = control_weights\n self.latent_keyframes = latent_keyframes\n self.null_latent_kf_strength = null_latent_kf_strength\n self.inherit_missing = inherit_missing\n self.guarantee_usage = guarantee_usage\n self.mask_hint_orig = mask_hint_orig\n\n def has_control_weights(self):\n return self.control_weights is not None\n \n def has_latent_keyframes(self):\n return self.latent_keyframes is not None\n \n def has_mask_hint(self):\n return self.mask_hint_orig is not None\n \n \n @classmethod\n def default(cls) -> 'TimestepKeyframeImport':\n return cls(0.0)"
},
{
"identifier": "TimestepKeyframeGroupImport",
"path": "imports/AdvancedControlNet/control.py",
"snippet": "class TimestepKeyframeGroupImport:\n def __init__(self) -> None:\n self.keyframes: list[TimestepKeyframeImport] = []\n self.keyframes.append(TimestepKeyframeImport.default())\n\n def add(self, keyframe: TimestepKeyframeImport) -> None:\n added = False\n # replace existing keyframe if same start_percent\n for i in range(len(self.keyframes)):\n if self.keyframes[i].start_percent == keyframe.start_percent:\n self.keyframes[i] = keyframe\n added = True\n break\n if not added:\n self.keyframes.append(keyframe)\n self.keyframes.sort(key=lambda k: k.start_percent)\n\n def get_index(self, index: int) -> Union[TimestepKeyframeImport, None]:\n try:\n return self.keyframes[index]\n except IndexError:\n return None\n \n def has_index(self, index: int) -> int:\n return index >=0 and index < len(self.keyframes)\n\n def __getitem__(self, index) -> TimestepKeyframeImport:\n return self.keyframes[index]\n \n def __len__(self) -> int:\n return len(self.keyframes)\n\n def is_empty(self) -> bool:\n return len(self.keyframes) == 0\n \n def clone(self) -> 'TimestepKeyframeGroupImport':\n cloned = TimestepKeyframeGroupImport()\n for tk in self.keyframes:\n cloned.add(tk)\n return cloned\n \n @classmethod\n def default(cls, keyframe: TimestepKeyframeImport) -> 'TimestepKeyframeGroupImport':\n group = cls()\n group.keyframes[0] = keyframe\n return group"
},
{
"identifier": "ControlWeightsImport",
"path": "imports/AdvancedControlNet/control.py",
"snippet": "class ControlWeightsImport:\n def __init__(self, weight_type: str, base_multiplier: float=1.0, flip_weights: bool=False, weights: list[float]=None, weight_mask: Tensor=None):\n self.weight_type = weight_type\n self.base_multiplier = base_multiplier\n self.flip_weights = flip_weights\n self.weights = weights\n if self.weights is not None and self.flip_weights:\n self.weights.reverse()\n self.weight_mask = weight_mask\n\n def get(self, idx: int) -> Union[float, Tensor]:\n # if weights is not none, return index\n if self.weights is not None:\n return self.weights[idx]\n return 1.0\n\n @classmethod\n def default(cls):\n return cls(ControlWeightTypeImport.DEFAULT)\n\n @classmethod\n def universal(cls, base_multiplier: float, flip_weights: bool=False):\n return cls(ControlWeightTypeImport.UNIVERSAL, base_multiplier=base_multiplier, flip_weights=flip_weights)\n \n @classmethod\n def universal_mask(cls, weight_mask: Tensor):\n return cls(ControlWeightTypeImport.UNIVERSAL, weight_mask=weight_mask)\n\n @classmethod\n def t2iadapter(cls, weights: list[float]=None, flip_weights: bool=False):\n if weights is None:\n weights = [1.0]*12\n return cls(ControlWeightTypeImport.T2IADAPTER, weights=weights,flip_weights=flip_weights)\n\n @classmethod\n def controlnet(cls, weights: list[float]=None, flip_weights: bool=False):\n if weights is None:\n weights = [1.0]*13\n return cls(ControlWeightTypeImport.CONTROLNET, weights=weights, flip_weights=flip_weights)\n \n @classmethod\n def controllora(cls, weights: list[float]=None, flip_weights: bool=False):\n if weights is None:\n weights = [1.0]*10\n return cls(ControlWeightTypeImport.CONTROLLORA, weights=weights, flip_weights=flip_weights)\n \n @classmethod\n def controllllite(cls, weights: list[float]=None, flip_weights: bool=False):\n if weights is None:\n # TODO: make this have a real value\n weights = [1.0]*200\n return cls(ControlWeightTypeImport.CONTROLLLLITE, weights=weights, flip_weights=flip_weights)"
},
{
"identifier": "get_properly_arranged_t2i_weights",
"path": "imports/AdvancedControlNet/control.py",
"snippet": "def get_properly_arranged_t2i_weights(initial_weights: list[float]):\n new_weights = []\n new_weights.extend([initial_weights[0]]*3)\n new_weights.extend([initial_weights[1]]*3)\n new_weights.extend([initial_weights[2]]*3)\n new_weights.extend([initial_weights[3]]*3)\n return new_weights"
},
{
"identifier": "linear_conversion",
"path": "imports/AdvancedControlNet/control.py",
"snippet": "def linear_conversion(x, x_min=0.0, x_max=1.0, new_min=0.0, new_max=1.0):\n return (((x - x_min)/(x_max - x_min)) * (new_max - new_min)) + new_min"
},
{
"identifier": "logger",
"path": "imports/AdvancedControlNet/logger.py",
"snippet": "class ColoredFormatter(logging.Formatter):\n COLORS = {\n \"DEBUG\": \"\\033[0;36m\", # CYAN\n \"INFO\": \"\\033[0;32m\", # GREEN\n \"WARNING\": \"\\033[0;33m\", # YELLOW\n \"ERROR\": \"\\033[0;31m\", # RED\n \"CRITICAL\": \"\\033[0;37;41m\", # WHITE ON RED\n \"RESET\": \"\\033[0m\", # RESET COLOR\n }\n def format(self, record):"
}
] | from torch import Tensor
from .control import TimestepKeyframeImport, TimestepKeyframeGroupImport, ControlWeightsImport, get_properly_arranged_t2i_weights, linear_conversion
from .logger import logger
import torch | 1,814 |
WEIGHTS_RETURN_NAMES = ("CN_WEIGHTS", "TK_SHORTCUT")
class DefaultWeightsImport:
@classmethod
def INPUT_TYPES(s):
return {
}
RETURN_TYPES = ("CONTROL_NET_WEIGHTS", "TIMESTEP_KEYFRAME",)
RETURN_NAMES = WEIGHTS_RETURN_NAMES
FUNCTION = "load_weights"
CATEGORY = "Adv-ControlNet 🛂🅐🅒🅝/weights"
def load_weights(self):
weights = ControlWeightsImport.default()
|
WEIGHTS_RETURN_NAMES = ("CN_WEIGHTS", "TK_SHORTCUT")
class DefaultWeightsImport:
@classmethod
def INPUT_TYPES(s):
return {
}
RETURN_TYPES = ("CONTROL_NET_WEIGHTS", "TIMESTEP_KEYFRAME",)
RETURN_NAMES = WEIGHTS_RETURN_NAMES
FUNCTION = "load_weights"
CATEGORY = "Adv-ControlNet 🛂🅐🅒🅝/weights"
def load_weights(self):
weights = ControlWeightsImport.default() | return (weights, TimestepKeyframeGroupImport.default(TimestepKeyframeImport(control_weights=weights))) | 1 | 2023-11-11 01:26:26+00:00 | 4k |
x0rzavi/github-readme-terminal | gifos/gifos.py | [
{
"identifier": "ConvertAnsiEscape",
"path": "gifos/utils/convert_ansi_escape.py",
"snippet": "class ConvertAnsiEscape:\n \"\"\"A class for converting ANSI escape codes to color values.\"\"\"\n\n __color_scheme = gifos_settings.get(\"general\", {}).get(\"color_scheme\")\n\n @staticmethod\n def __get_color(color_dict, color_name, def_color):\n \"\"\"Get the color value from the color dictionary.\n\n This method takes a color dictionary, a color name, and a default color as\n input. If the color dictionary is indeed a dictionary and contains the color\n name, it returns the corresponding color value. Otherwise, it returns the\n default color.\n\n :param color_dict: The color dictionary to get the color value from.\n :type color_dict: dict\n :param color_name: The name of the color to get.\n :type color_name: str\n :param def_color: The default color to return if the color name is not in the\n color dictionary.\n :type def_color: str\n :return: The color value corresponding to the color name if it's in the color\n dictionary, otherwise the default color.\n :rtype: str\n \"\"\"\n return (\n color_dict.get(color_name, def_color)\n if isinstance(color_dict, dict)\n else def_color\n )\n\n # fmt: off\n ANSI_ESCAPE_MAP_TXT_COLOR = {\n # normal color mode\n \"30\": __get_color(ansi_escape_colors.get(__color_scheme, {}).get(\"normal_colors\"), \"black\", \"#232526\"),\n \"31\": __get_color(ansi_escape_colors.get(__color_scheme, {}).get(\"normal_colors\"), \"red\", \"#df5b61\"),\n \"32\": __get_color(ansi_escape_colors.get(__color_scheme, {}).get(\"normal_colors\"), \"green\", \"#78b892\"),\n \"33\": __get_color(ansi_escape_colors.get(__color_scheme, {}).get(\"normal_colors\"), \"yellow\", \"#de8f78\"),\n \"34\": __get_color(ansi_escape_colors.get(__color_scheme, {}).get(\"normal_colors\"), \"blue\", \"#6791c9\"),\n \"35\": __get_color(ansi_escape_colors.get(__color_scheme, {}).get(\"normal_colors\"), \"magenta\", \"#bc83e3\"),\n \"36\": __get_color(ansi_escape_colors.get(__color_scheme, {}).get(\"normal_colors\"), \"cyan\", \"#67afc1\"),\n \"37\": __get_color(ansi_escape_colors.get(__color_scheme, {}).get(\"normal_colors\"), \"white\", \"#e4e6e7\"),\n \"39\": __get_color(ansi_escape_colors.get(__color_scheme, {}).get(\"default_colors\"), \"fg\", \"#edeff0\"),\n # bright color mode\n \"90\": __get_color(ansi_escape_colors.get(__color_scheme, {}).get(\"bright_colors\"), \"black\", \"#2c2e2f\"),\n \"91\": __get_color(ansi_escape_colors.get(__color_scheme, {}).get(\"bright_colors\"), \"red\", \"#e8646a\"),\n \"92\": __get_color(ansi_escape_colors.get(__color_scheme, {}).get(\"bright_colors\"), \"green\", \"#81c19b\"),\n \"93\": __get_color(ansi_escape_colors.get(__color_scheme, {}).get(\"bright_colors\"), \"yellow\", \"#e79881\"),\n \"94\": __get_color(ansi_escape_colors.get(__color_scheme, {}).get(\"bright_colors\"), \"blue\", \"#709ad2\"),\n \"95\": __get_color(ansi_escape_colors.get(__color_scheme, {}).get(\"bright_colors\"), \"magenta\", \"#c58cec\"),\n \"96\": __get_color(ansi_escape_colors.get(__color_scheme, {}).get(\"bright_colors\"), \"cyan\", \"#70b8ca\"),\n \"97\": __get_color(ansi_escape_colors.get(__color_scheme, {}).get(\"bright_colors\"), \"white\", \"#f2f4f5\"),\n } \n\n ANSI_ESCAPE_MAP_BG_COLOR = {\n # normal color mode\n \"40\": __get_color(ansi_escape_colors.get(__color_scheme, {}).get(\"normal_colors\"), \"black\", \"#232526\"),\n \"41\": __get_color(ansi_escape_colors.get(__color_scheme, {}).get(\"normal_colors\"), \"red\", \"#df5b61\"),\n \"42\": __get_color(ansi_escape_colors.get(__color_scheme, {}).get(\"normal_colors\"), \"green\", \"#78b892\"),\n \"43\": 
__get_color(ansi_escape_colors.get(__color_scheme, {}).get(\"normal_colors\"), \"yellow\", \"#de8f78\"),\n \"44\": __get_color(ansi_escape_colors.get(__color_scheme, {}).get(\"normal_colors\"), \"blue\", \"#6791c9\"),\n \"45\": __get_color(ansi_escape_colors.get(__color_scheme, {}).get(\"normal_colors\"), \"magenta\", \"#bc83e3\"),\n \"46\": __get_color(ansi_escape_colors.get(__color_scheme, {}).get(\"normal_colors\"), \"cyan\", \"#67afc1\"),\n \"47\": __get_color(ansi_escape_colors.get(__color_scheme, {}).get(\"normal_colors\"), \"white\", \"#e4e6e7\"),\n \"49\": __get_color(ansi_escape_colors.get(__color_scheme, {}).get(\"default_colors\"), \"bg\", \"#0c0e0f\"),\n # bright color mode\n \"100\": __get_color(ansi_escape_colors.get(__color_scheme, {}).get(\"bright_colors\"), \"black\", \"#2c2e2f\"),\n \"101\": __get_color(ansi_escape_colors.get(__color_scheme, {}).get(\"bright_colors\"), \"red\", \"#e8646a\"),\n \"102\": __get_color(ansi_escape_colors.get(__color_scheme, {}).get(\"bright_colors\"), \"green\", \"#81c19b\"),\n \"103\": __get_color(ansi_escape_colors.get(__color_scheme, {}).get(\"bright_colors\"), \"yellow\", \"#e79881\"),\n \"104\": __get_color(ansi_escape_colors.get(__color_scheme, {}).get(\"bright_colors\"), \"blue\", \"#709ad2\"),\n \"105\": __get_color(ansi_escape_colors.get(__color_scheme, {}).get(\"bright_colors\"), \"magenta\", \"#c58cec\"),\n \"106\": __get_color(ansi_escape_colors.get(__color_scheme, {}).get(\"bright_colors\"), \"cyan\", \"#70b8ca\"),\n \"107\": __get_color(ansi_escape_colors.get(__color_scheme, {}).get(\"bright_colors\"), \"white\", \"#f2f4f5\"),\n }\n # fmt: on\n\n @classmethod\n def convert(cls, escape_code) -> AnsiEscape:\n \"\"\"Convert an ANSI escape code to a color value.\n\n This method takes an ANSI escape code as input and returns an `AnsiEscape`\n object containing the corresponding color value and operation (text color or\n background color). The method uses two dictionaries `ANSI_ESCAPE_MAP_TXT_COLOR`\n and `ANSI_ESCAPE_MAP_BG_COLOR` to map ANSI escape codes to color values for text\n and background colors respectively.\n\n :param escape_code: The ANSI escape code to convert.\n :type escape_code: str\n :return: An `AnsiEscape` object containing the color value and operation if the\n escape code is found in the dictionaries, otherwise None.\n :rtype: AnsiEscape or None\n \"\"\"\n txt_color = cls.ANSI_ESCAPE_MAP_TXT_COLOR.get(escape_code)\n if txt_color:\n return AnsiEscape(data=txt_color, oper=\"txt_color\")\n\n bg_color = cls.ANSI_ESCAPE_MAP_BG_COLOR.get(escape_code)\n if bg_color:\n return AnsiEscape(data=bg_color, oper=\"bg_color\")\n\n return None"
},
{
"identifier": "gifos_settings",
"path": "gifos/utils/load_config.py",
"snippet": "def load_toml(file_name: str) -> dict:\n def __update_config_with_env_vars(config, prefix=\"GIFOS\"):"
}
] | import os
import random
import re
import sys
from math import ceil
from pathlib import Path
from shutil import rmtree
from icecream import ic
from PIL import Image, ImageDraw, ImageFont
from gifos.utils.convert_ansi_escape import ConvertAnsiEscape
from gifos.utils.load_config import gifos_settings | 2,826 | # TODO:
# [] Documentation
# [] proper file paths
# [] incremental text effect
# [] Better implementations for non monospace fonts
# [] Support all ANSI escape sequence forms
# [] Optimization + better code quality
# [] Test cases
# [] GIF maker implementation
# [] Scriptable input file
frame_base_name = gifos_settings.get("files", {}).get("frame_base_name") or "frame_"
frame_folder_name = gifos_settings.get("files", {}).get("frame_folder_name") or "./frames"
output_gif_name = gifos_settings.get("files", {}).get("output_gif_name") or "output"
try:
os.remove(output_gif_name + ".gif")
except Exception:
pass
rmtree(frame_folder_name, ignore_errors=True)
os.mkdir(frame_folder_name)
font_path = Path(__file__).parent / "fonts"
class Terminal:
"""A class to represent a terminal.
This class represents a terminal with a specified width, height, padding, and font.
Attributes:
width: The width of the terminal.
height: The height of the terminal.
xpad: The horizontal padding of the terminal.
ypad: The vertical padding of the terminal.
font_file: The file path of the font to use for the terminal. Defaults to "gohufont-uni-14.pil".
font_size: The size of the font to use for the terminal. Defaults to 16.
line_spacing: The line spacing to use for the terminal. Defaults to 4.
curr_row: The current row of the cursor in terminal.
curr_col: The current column of the cursor in terminal.
num_rows: The number of rows in the terminal.
num_cols: The number of columns in the terminal.
image_col: The column number of the last image pasted in the terminal.
Methods:
set_txt_color: Set the text color to be used.
set_bg_color: Set the background color to be used.
set_font: Set the font to be used.
toggle_show_cursor: Toggle the visibility of the cursor.
toggle_blink_cursor: Toggle the blinking of the cursor.
save_frame: Save the current frame of the terminal.
clear_frame: Clear the current frame of the terminal.
clone_frame: Clone the current frame of the terminal.
cursor_to_box: Move the cursor to a specified box (coordinate) in the terminal.
gen_text: Generate text on the terminal.
gen_typing_text: Generate text on the terminal as if it is being typed.
set_prompt: Set the prompt text to be used.
gen_prompt: Generate the prompt text on the terminal.
scroll_up: Scroll up the terminal.
delete_row: Delete a row in the terminal.
paste_image: Paste an image on the terminal.
set_fps: Set the FPS of the GIF to be generated.
gen_gif: Generate the GIF from the frames.
"""
def __init__(
self,
width: int,
height: int,
xpad: int,
ypad: int,
font_file: str = f"{font_path}/gohufont-uni-14.pil",
font_size: int = 16,
line_spacing: int = 4,
) -> None:
"""Initialize a Terminal object.
:param width: The width of the terminal.
:type width: int
:param height: The height of the terminal.
:type height: int
:param xpad: The horizontal padding of the terminal.
:type xpad: int
:param ypad: The vertical padding of the terminal.
:type ypad: int
:param font_file: The file path of the font to use for the terminal.
:type font_file: str, optional
:param font_size: The size of the font to use for the terminal. Defaults to 16.
:type font_size: int, optional
:param line_spacing: The line spacing to use for the terminal. Defaults to 4.
:type line_spacing: int, optional
"""
ic.configureOutput(includeContext=True)
self.__width = width
self.__height = height
self.__xpad = xpad
self.__ypad = ypad
self.__font_file = font_file
self.__font_size = font_size
self.__debug = gifos_settings.get("general", {}).get("debug") or False
if not self.__debug:
ic.disable()
| # TODO:
# [] Documentation
# [] proper file paths
# [] incremental text effect
# [] Better implementations for non monospace fonts
# [] Support all ANSI escape sequence forms
# [] Optimization + better code quality
# [] Test cases
# [] GIF maker implementation
# [] Scriptable input file
frame_base_name = gifos_settings.get("files", {}).get("frame_base_name") or "frame_"
frame_folder_name = gifos_settings.get("files", {}).get("frame_folder_name") or "./frames"
output_gif_name = gifos_settings.get("files", {}).get("output_gif_name") or "output"
try:
os.remove(output_gif_name + ".gif")
except Exception:
pass
rmtree(frame_folder_name, ignore_errors=True)
os.mkdir(frame_folder_name)
font_path = Path(__file__).parent / "fonts"
class Terminal:
"""A class to represent a terminal.
This class represents a terminal with a specified width, height, padding, and font.
Attributes:
width: The width of the terminal.
height: The height of the terminal.
xpad: The horizontal padding of the terminal.
ypad: The vertical padding of the terminal.
font_file: The file path of the font to use for the terminal. Defaults to "gohufont-uni-14.pil".
font_size: The size of the font to use for the terminal. Defaults to 16.
line_spacing: The line spacing to use for the terminal. Defaults to 4.
curr_row: The current row of the cursor in terminal.
curr_col: The current column of the cursor in terminal.
num_rows: The number of rows in the terminal.
num_cols: The number of columns in the terminal.
image_col: The column number of the last image pasted in the terminal.
Methods:
set_txt_color: Set the text color to be used.
set_bg_color: Set the background color to be used.
set_font: Set the font to be used.
toggle_show_cursor: Toggle the visibility of the cursor.
toggle_blink_cursor: Toggle the blinking of the cursor.
save_frame: Save the current frame of the terminal.
clear_frame: Clear the current frame of the terminal.
clone_frame: Clone the current frame of the terminal.
cursor_to_box: Move the cursor to a specified box (coordinate) in the terminal.
gen_text: Generate text on the terminal.
gen_typing_text: Generate text on the terminal as if it is being typed.
set_prompt: Set the prompt text to be used.
gen_prompt: Generate the prompt text on the terminal.
scroll_up: Scroll up the terminal.
delete_row: Delete a row in the terminal.
paste_image: Paste an image on the terminal.
set_fps: Set the FPS of the GIF to be generated.
gen_gif: Generate the GIF from the frames.
"""
def __init__(
self,
width: int,
height: int,
xpad: int,
ypad: int,
font_file: str = f"{font_path}/gohufont-uni-14.pil",
font_size: int = 16,
line_spacing: int = 4,
) -> None:
"""Initialize a Terminal object.
:param width: The width of the terminal.
:type width: int
:param height: The height of the terminal.
:type height: int
:param xpad: The horizontal padding of the terminal.
:type xpad: int
:param ypad: The vertical padding of the terminal.
:type ypad: int
:param font_file: The file path of the font to use for the terminal.
:type font_file: str, optional
:param font_size: The size of the font to use for the terminal. Defaults to 16.
:type font_size: int, optional
:param line_spacing: The line spacing to use for the terminal. Defaults to 4.
:type line_spacing: int, optional
"""
ic.configureOutput(includeContext=True)
self.__width = width
self.__height = height
self.__xpad = xpad
self.__ypad = ypad
self.__font_file = font_file
self.__font_size = font_size
self.__debug = gifos_settings.get("general", {}).get("debug") or False
if not self.__debug:
ic.disable()
| self.__txt_color = self.__def_txt_color = ConvertAnsiEscape.convert("39").data | 0 | 2023-11-17 06:21:18+00:00 | 4k |
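ConvertAnsiEscape in the context above is essentially a dictionary lookup from SGR codes to hex colors, split into text (30-37, 39, 90-97) and background (40-49, 100-107) ranges. A reduced sketch of that lookup with a local stand-in for the AnsiEscape result type; the two colors are the defaults quoted in the snippet ("31" red, "49" default background), not a full palette:

from typing import NamedTuple, Optional

class AnsiEscape(NamedTuple):
    data: str   # hex color
    oper: str   # "txt_color" or "bg_color"

TXT = {"31": "#df5b61"}   # tiny subset of ANSI_ESCAPE_MAP_TXT_COLOR
BG = {"49": "#0c0e0f"}    # tiny subset of ANSI_ESCAPE_MAP_BG_COLOR

def convert(code: str) -> Optional[AnsiEscape]:
    if code in TXT:
        return AnsiEscape(TXT[code], "txt_color")
    if code in BG:
        return AnsiEscape(BG[code], "bg_color")
    return None  # unknown escape code, mirroring the snippet's behaviour

assert convert("31") == AnsiEscape("#df5b61", "txt_color")
assert convert("49").oper == "bg_color"
assert convert("123") is None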
Zaloog/kanban-python | src/kanban_python/interface.py | [
{
"identifier": "cfg",
"path": "src/kanban_python/config.py",
"snippet": "class KanbanConfig:\n def __init__(self, path=CONFIG_FILE_PATH) -> None:\n def __repr__(self) -> str:\n def save(self):\n def config(self) -> configparser.ConfigParser:\n def active_board(self) -> str:\n def active_board(self, new_board):\n def kanban_boards(self) -> list:\n def kanban_boards_dict(self) -> dict:\n def kanban_boards_dict(self, board_name: str) -> dict:\n def active_board_path(self) -> str:\n def show_footer(self):\n def show_footer(self, visible):\n def col_min_width(self) -> int:\n def col_min_width(self, new_width: int) -> None:\n def kanban_columns_dict(self) -> dict:\n def kanban_columns_dict(self, updated_dict) -> dict:\n def vis_cols(self) -> list:\n def done_limit(self) -> int:\n def done_limit(self, new_limit: int) -> None:\n def scanned_files(self) -> list:\n def scanned_files(self, new_files_to_scan: str) -> None:\n def scanned_patterns(self) -> list:\n def scanned_patterns(self, new_patterns_to_scan: str) -> None:\ndef create_init_config(conf_path=CONFIG_PATH, data_path=DATA_PATH):\ndef delete_current_folder_board_from_config(\n cfg=cfg, curr_path: str = str(Path.cwd())\n) -> None:\ndef check_if_board_name_exists_in_config(boardname: str, cfg=cfg) -> bool:\ndef check_if_current_active_board_in_board_list(cfg=cfg) -> bool:\ndef delete_board_from_config(board_name, cfg=cfg) -> None:\ndef check_config_exists(path=CONFIG_FILE_PATH) -> bool:\ndef get_json_path(boardname: str):"
},
{
"identifier": "BOARD_CAPTION_STRING",
"path": "src/kanban_python/constants.py",
"snippet": "BOARD_CAPTION_STRING = \"Tasks have the following Structure:\\\n [[cyan]ID[/]] ([orange3]TAG[/]) [white]Task Title[/] |[red]Days Left[/]|\""
},
{
"identifier": "COLOR_DICT",
"path": "src/kanban_python/constants.py",
"snippet": "COLOR_DICT = {\n \"Ready\": \"[red]Ready[/]\",\n \"Doing\": \"[yellow]Doing[/]\",\n \"Done\": \"[green]Done[/]\",\n \"Deleted\": \"[deep_pink4]Deleted[/]\",\n \"Archived\": \"[dark_goldenrod]Archived[/]\",\n}"
},
{
"identifier": "CONFIG_FILE_PATH",
"path": "src/kanban_python/constants.py",
"snippet": "CONFIG_FILE_PATH = CONFIG_PATH / CONFIG_FILE_NAME"
},
{
"identifier": "FOOTER",
"path": "src/kanban_python/constants.py",
"snippet": "FOOTER = [FOOTER_FIRST, FOOTER_LAST]"
},
{
"identifier": "REPORT_COLORS",
"path": "src/kanban_python/constants.py",
"snippet": "REPORT_COLORS = [\"#161b22\", \"#0e4429\", \"#006d32\", \"#26a641\", \"#39d353\"]"
},
{
"identifier": "calculate_days_left_till_due",
"path": "src/kanban_python/utils.py",
"snippet": "def get_motivational_quote() -> str:\ndef current_time_to_str() -> str:\ndef calculate_time_delta_str(start_time_str: str, end_time_str: str) -> float:\ndef create_status_dict_for_rows(data: dict, vis_cols: list) -> dict:\ndef check_if_done_col_leq_X(cfg, data: dict) -> bool:\ndef check_if_there_are_visible_tasks_in_board(data: dict, vis_cols: list) -> bool:\ndef move_first_done_task_to_archive(data: dict):\ndef delete_json_file(db_path: str) -> None:\ndef check_board_name_valid(boardname: str):\ndef scan_files(path=Path.cwd(), endings: list = [\".py\"]):\n def recursive_search(path, file_list: list, progress):\ndef scan_for_todos(\n file_paths: list, rel_path=Path.cwd(), patterns: list = [\"#TODO\", \"# TODO\"]\n) -> list:\ndef split_todo_in_tag_and_title(todo: str, patterns: list):\ndef get_tag_id_choices(data_dict: dict, vis_cols: list) -> list:\ndef check_scanner_files_valid(files: str) -> bool:\ndef check_scanner_patterns_valid(patterns: str) -> bool:\ndef get_iso_calender_info(date_str: str):\ndef create_dict_for_report_view(completed_tasks: list):\ndef create_color_mapping(amount_list: list, max_val: int):\ndef create_report_document(boards_dict: dict):\ndef check_due_date_format(date_str: str) -> bool:\ndef due_date_datetime_to_date(date_datetime: str) -> str:\ndef due_date_date_to_datetime(date_str: str) -> str:\ndef calculate_days_left_till_due(due_date: str):"
}
] | import calendar
from datetime import datetime
from itertools import zip_longest
from rich.prompt import Confirm, IntPrompt, Prompt
from rich.table import Table
from .config import cfg
from .constants import (
BOARD_CAPTION_STRING,
COLOR_DICT,
CONFIG_FILE_PATH,
FOOTER,
REPORT_COLORS,
)
from .utils import (
calculate_days_left_till_due,
calculate_time_delta_str,
check_due_date_format,
console,
create_color_mapping,
create_dict_for_report_view,
create_status_dict_for_rows,
current_time_to_str,
due_date_date_to_datetime,
due_date_datetime_to_date,
) | 1,978 |
# Board
#####################################################################################
def create_table(data: dict) -> Table:
status_dict = create_status_dict_for_rows(data=data, vis_cols=cfg.vis_cols)
table_name = cfg.active_board
table = Table(
title=f"[blue]Active Board: {table_name}[/]",
highlight=True,
show_header=True,
show_footer=True if cfg.show_footer == "True" else False,
caption=BOARD_CAPTION_STRING,
)
for i, category in enumerate([COLOR_DICT.get(col, col) for col in cfg.vis_cols]):
table.add_column(
header=category + f"\t({len(status_dict[cfg.vis_cols[i]])} Task/s)",
header_style="bold",
justify="left",
overflow="fold",
footer=FOOTER[0]
if i == 0
else FOOTER[1]
if i == len(cfg.vis_cols) - 1
else "",
min_width=cfg.col_min_width,
)
for row_tasks in zip_longest(*status_dict.values()):
table.add_row(*row_tasks)
return table
# Board Action selection
def input_ask_for_action():
console.print(
"[yellow]Whats up!?[/], how can I help you being productive today :rocket:?"
)
console.print(
"\t[1] :clipboard: [green]Create new Task[/]"
+ 2 * "\t"
+ "[2] :clockwise_vertical_arrows: [bold cornflower_blue]Update/Check Task[/]"
)
console.print(
"\t[3] :bookmark_tabs: [bold yellow]Change Kanban Board[/]"
+ "\t"
+ "[4] :magnifying_glass_tilted_left: [bold blue]Show Task Details[/]"
)
console.print(
"\t[5] :cross_mark: [red]Delete Kanban Board[/]"
+ "\t"
+ "[6] :hammer_and_wrench: [grey69]Show Current Settings[/]"
)
action = IntPrompt.ask(
prompt="Choose wisely :books:",
choices=[
"1",
"2",
"3",
"4",
"5",
"6",
],
show_choices=False,
)
return action
# Action 1: New Task
def input_create_new_task() -> dict:
title = Prompt.ask(
prompt="[1/5] Add Task Title",
)
description = Prompt.ask(
prompt="[2/5] Add Task Description",
show_default=True,
default="",
)
tag = Prompt.ask(
prompt="[3/5] Add a Tag",
show_default=True,
default="ETC",
)
while True:
due_date = Prompt.ask(
prompt="[4/5] Add a Due Date (YYYY-MM-DD)",
show_default=True,
default="",
)
if not due_date or check_due_date_format(date_str=due_date):
break
else:
console.print(
f":warning: '{due_date}' has [red]not[/] "
+ "the right format YYYY-MM-DD"
)
console.print(f"\t[1] {COLOR_DICT['Ready']}")
console.print(f"\t[2] {COLOR_DICT['Doing']}")
status = IntPrompt.ask(
prompt="[5/5] Status of Task",
show_choices=False,
choices=["1", "2"],
show_default=True,
default="1",
)
new_task = {
"Title": title,
"Description": description,
"Status": "Ready" if str(status) == "1" else "Doing",
"Tag": tag.upper(),
|
# Board
#####################################################################################
def create_table(data: dict) -> Table:
status_dict = create_status_dict_for_rows(data=data, vis_cols=cfg.vis_cols)
table_name = cfg.active_board
table = Table(
title=f"[blue]Active Board: {table_name}[/]",
highlight=True,
show_header=True,
show_footer=True if cfg.show_footer == "True" else False,
caption=BOARD_CAPTION_STRING,
)
for i, category in enumerate([COLOR_DICT.get(col, col) for col in cfg.vis_cols]):
table.add_column(
header=category + f"\t({len(status_dict[cfg.vis_cols[i]])} Task/s)",
header_style="bold",
justify="left",
overflow="fold",
footer=FOOTER[0]
if i == 0
else FOOTER[1]
if i == len(cfg.vis_cols) - 1
else "",
min_width=cfg.col_min_width,
)
for row_tasks in zip_longest(*status_dict.values()):
table.add_row(*row_tasks)
return table
# Board Action selection
def input_ask_for_action():
console.print(
"[yellow]Whats up!?[/], how can I help you being productive today :rocket:?"
)
console.print(
"\t[1] :clipboard: [green]Create new Task[/]"
+ 2 * "\t"
+ "[2] :clockwise_vertical_arrows: [bold cornflower_blue]Update/Check Task[/]"
)
console.print(
"\t[3] :bookmark_tabs: [bold yellow]Change Kanban Board[/]"
+ "\t"
+ "[4] :magnifying_glass_tilted_left: [bold blue]Show Task Details[/]"
)
console.print(
"\t[5] :cross_mark: [red]Delete Kanban Board[/]"
+ "\t"
+ "[6] :hammer_and_wrench: [grey69]Show Current Settings[/]"
)
action = IntPrompt.ask(
prompt="Choose wisely :books:",
choices=[
"1",
"2",
"3",
"4",
"5",
"6",
],
show_choices=False,
)
return action
# Action 1: New Task
def input_create_new_task() -> dict:
title = Prompt.ask(
prompt="[1/5] Add Task Title",
)
description = Prompt.ask(
prompt="[2/5] Add Task Description",
show_default=True,
default="",
)
tag = Prompt.ask(
prompt="[3/5] Add a Tag",
show_default=True,
default="ETC",
)
while True:
due_date = Prompt.ask(
prompt="[4/5] Add a Due Date (YYYY-MM-DD)",
show_default=True,
default="",
)
if not due_date or check_due_date_format(date_str=due_date):
break
else:
console.print(
f":warning: '{due_date}' has [red]not[/] "
+ "the right format YYYY-MM-DD"
)
console.print(f"\t[1] {COLOR_DICT['Ready']}")
console.print(f"\t[2] {COLOR_DICT['Doing']}")
status = IntPrompt.ask(
prompt="[5/5] Status of Task",
show_choices=False,
choices=["1", "2"],
show_default=True,
default="1",
)
new_task = {
"Title": title,
"Description": description,
"Status": "Ready" if str(status) == "1" else "Doing",
"Tag": tag.upper(), | "Creation_Date": current_time_to_str(), | 6 | 2023-11-11 14:43:55+00:00 | 4k |
AMAAI-Lab/mustango | audioldm/latent_diffusion/ddpm.py | [
{
"identifier": "exists",
"path": "audioldm/utils.py",
"snippet": "def exists(x):\n return x is not None"
},
{
"identifier": "default",
"path": "audioldm/utils.py",
"snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d"
},
{
"identifier": "count_params",
"path": "audioldm/utils.py",
"snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params * 1.e-6:.2f} M params.\")\n return total_params"
},
{
"identifier": "instantiate_from_config",
"path": "audioldm/utils.py",
"snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == \"__is_first_stage__\":\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))"
},
{
"identifier": "LitEma",
"path": "audioldm/latent_diffusion/ema.py",
"snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError(\"Decay must be between 0 and 1\")\n\n self.m_name2s_name = {}\n self.register_buffer(\"decay\", torch.tensor(decay, dtype=torch.float32))\n self.register_buffer(\n \"num_updates\",\n torch.tensor(0, dtype=torch.int)\n if use_num_upates\n else torch.tensor(-1, dtype=torch.int),\n )\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n # remove as '.'-character is not allowed in buffers\n s_name = name.replace(\".\", \"\")\n self.m_name2s_name.update({name: s_name})\n self.register_buffer(s_name, p.clone().detach().data)\n\n self.collected_params = []\n\n def forward(self, model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(\n one_minus_decay * (shadow_params[sname] - m_param[key])\n )\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)"
},
{
"identifier": "make_beta_schedule",
"path": "audioldm/latent_diffusion/util.py",
"snippet": "def make_beta_schedule(\n schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3\n):\n if schedule == \"linear\":\n betas = (\n torch.linspace(\n linear_start**0.5, linear_end**0.5, n_timestep, dtype=torch.float64\n )\n ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(\n linear_start, linear_end, n_timestep, dtype=torch.float64\n )\n elif schedule == \"sqrt\":\n betas = (\n torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n ** 0.5\n )\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()"
},
{
"identifier": "extract_into_tensor",
"path": "audioldm/latent_diffusion/util.py",
"snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t).contiguous()\n return out.reshape(b, *((1,) * (len(x_shape) - 1))).contiguous()"
},
{
"identifier": "noise_like",
"path": "audioldm/latent_diffusion/util.py",
"snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(\n shape[0], *((1,) * (len(shape) - 1))\n )\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()"
}
] | import sys
import os
import torch
import torch.nn as nn
import numpy as np
import soundfile as sf
import os
from contextlib import contextmanager
from functools import partial
from tqdm import tqdm
from audioldm.utils import exists, default, count_params, instantiate_from_config
from audioldm.latent_diffusion.ema import LitEma
from audioldm.latent_diffusion.util import (
make_beta_schedule,
extract_into_tensor,
noise_like,
) | 3,317 | log_every_t=100,
clip_denoised=True,
linear_start=1e-4,
linear_end=2e-2,
cosine_s=8e-3,
given_betas=None,
original_elbo_weight=0.0,
v_posterior=0.0, # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
l_simple_weight=1.0,
conditioning_key=None,
parameterization="eps", # all assuming fixed variance schedules
scheduler_config=None,
use_positional_encodings=False,
learn_logvar=False,
logvar_init=0.0,
):
super().__init__()
assert parameterization in [
"eps",
"x0",
], 'currently only supporting "eps" and "x0"'
self.parameterization = parameterization
self.state = None
# print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode")
self.cond_stage_model = None
self.clip_denoised = clip_denoised
self.log_every_t = log_every_t
self.first_stage_key = first_stage_key
self.latent_t_size = latent_t_size
self.latent_f_size = latent_f_size
self.channels = channels
self.use_positional_encodings = use_positional_encodings
self.model = DiffusionWrapper(unet_config, conditioning_key)
count_params(self.model, verbose=True)
self.use_ema = use_ema
if self.use_ema:
self.model_ema = LitEma(self.model)
# print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
self.use_scheduler = scheduler_config is not None
if self.use_scheduler:
self.scheduler_config = scheduler_config
self.v_posterior = v_posterior
self.original_elbo_weight = original_elbo_weight
self.l_simple_weight = l_simple_weight
if monitor is not None:
self.monitor = monitor
self.register_schedule(
given_betas=given_betas,
beta_schedule=beta_schedule,
timesteps=timesteps,
linear_start=linear_start,
linear_end=linear_end,
cosine_s=cosine_s,
)
self.loss_type = loss_type
self.learn_logvar = learn_logvar
self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,))
if self.learn_logvar:
self.logvar = nn.Parameter(self.logvar, requires_grad=True)
else:
self.logvar = nn.Parameter(self.logvar, requires_grad=False)
self.logger_save_dir = None
self.logger_project = None
self.logger_version = None
self.label_indices_total = None
# To avoid the system being unable to find a metric value for the checkpoint
self.metrics_buffer = {
"val/kullback_leibler_divergence_sigmoid": 15.0,
"val/kullback_leibler_divergence_softmax": 10.0,
"val/psnr": 0.0,
"val/ssim": 0.0,
"val/inception_score_mean": 1.0,
"val/inception_score_std": 0.0,
"val/kernel_inception_distance_mean": 0.0,
"val/kernel_inception_distance_std": 0.0,
"val/frechet_inception_distance": 133.0,
"val/frechet_audio_distance": 32.0,
}
self.initial_learning_rate = None
def get_log_dir(self):
if (
self.logger_save_dir is None
and self.logger_project is None
and self.logger_version is None
):
return os.path.join(
self.logger.save_dir, self.logger._project, self.logger.version
)
else:
return os.path.join(
self.logger_save_dir, self.logger_project, self.logger_version
)
def set_log_dir(self, save_dir, project, version):
self.logger_save_dir = save_dir
self.logger_project = project
self.logger_version = version
def register_schedule(
self,
given_betas=None,
beta_schedule="linear",
timesteps=1000,
linear_start=1e-4,
linear_end=2e-2,
cosine_s=8e-3,
):
if exists(given_betas):
betas = given_betas
else:
| """
wild mixture of
https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py
https://github.com/CompVis/taming-transformers
-- merci
"""
__conditioning_keys__ = {"concat": "c_concat", "crossattn": "c_crossattn", "adm": "y"}
def disabled_train(self, mode=True):
"""Overwrite model.train with this function to make sure train/eval mode
does not change anymore."""
return self
def uniform_on_device(r1, r2, shape, device):
return (r1 - r2) * torch.rand(*shape, device=device) + r2
class DiffusionWrapper(nn.Module):
def __init__(self, diff_model_config, conditioning_key):
super().__init__()
self.diffusion_model = instantiate_from_config(diff_model_config)
self.conditioning_key = conditioning_key
assert self.conditioning_key in [
None,
"concat",
"crossattn",
"hybrid",
"adm",
"film",
]
def forward(
self, x, t, c_concat: list = None, c_crossattn: list = None, c_film: list = None
):
x = x.contiguous()
t = t.contiguous()
if self.conditioning_key is None:
out = self.diffusion_model(x, t)
elif self.conditioning_key == "concat":
xc = torch.cat([x] + c_concat, dim=1)
out = self.diffusion_model(xc, t)
elif self.conditioning_key == "crossattn":
cc = torch.cat(c_crossattn, 1)
out = self.diffusion_model(x, t, context=cc)
elif self.conditioning_key == "hybrid":
xc = torch.cat([x] + c_concat, dim=1)
cc = torch.cat(c_crossattn, 1)
out = self.diffusion_model(xc, t, context=cc)
elif (
self.conditioning_key == "film"
): # The condition is assumed to be a global token, which will pass through a linear layer and be added to the time embedding for the FILM
cc = c_film[0].squeeze(1) # only has one token
out = self.diffusion_model(x, t, y=cc)
elif self.conditioning_key == "adm":
cc = c_crossattn[0]
out = self.diffusion_model(x, t, y=cc)
else:
raise NotImplementedError()
return out
class DDPM(nn.Module):
# classic DDPM with Gaussian diffusion, in image space
def __init__(
self,
unet_config,
timesteps=1000,
beta_schedule="linear",
loss_type="l2",
ckpt_path=None,
ignore_keys=[],
load_only_unet=False,
monitor="val/loss",
use_ema=True,
first_stage_key="image",
latent_t_size=256,
latent_f_size=16,
channels=3,
log_every_t=100,
clip_denoised=True,
linear_start=1e-4,
linear_end=2e-2,
cosine_s=8e-3,
given_betas=None,
original_elbo_weight=0.0,
v_posterior=0.0, # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
l_simple_weight=1.0,
conditioning_key=None,
parameterization="eps", # all assuming fixed variance schedules
scheduler_config=None,
use_positional_encodings=False,
learn_logvar=False,
logvar_init=0.0,
):
super().__init__()
assert parameterization in [
"eps",
"x0",
], 'currently only supporting "eps" and "x0"'
self.parameterization = parameterization
self.state = None
# print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode")
self.cond_stage_model = None
self.clip_denoised = clip_denoised
self.log_every_t = log_every_t
self.first_stage_key = first_stage_key
self.latent_t_size = latent_t_size
self.latent_f_size = latent_f_size
self.channels = channels
self.use_positional_encodings = use_positional_encodings
self.model = DiffusionWrapper(unet_config, conditioning_key)
count_params(self.model, verbose=True)
self.use_ema = use_ema
if self.use_ema:
self.model_ema = LitEma(self.model)
# print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
self.use_scheduler = scheduler_config is not None
if self.use_scheduler:
self.scheduler_config = scheduler_config
self.v_posterior = v_posterior
self.original_elbo_weight = original_elbo_weight
self.l_simple_weight = l_simple_weight
if monitor is not None:
self.monitor = monitor
self.register_schedule(
given_betas=given_betas,
beta_schedule=beta_schedule,
timesteps=timesteps,
linear_start=linear_start,
linear_end=linear_end,
cosine_s=cosine_s,
)
self.loss_type = loss_type
self.learn_logvar = learn_logvar
self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,))
if self.learn_logvar:
self.logvar = nn.Parameter(self.logvar, requires_grad=True)
else:
self.logvar = nn.Parameter(self.logvar, requires_grad=False)
self.logger_save_dir = None
self.logger_project = None
self.logger_version = None
self.label_indices_total = None
# To avoid the system being unable to find a metric value for the checkpoint
self.metrics_buffer = {
"val/kullback_leibler_divergence_sigmoid": 15.0,
"val/kullback_leibler_divergence_softmax": 10.0,
"val/psnr": 0.0,
"val/ssim": 0.0,
"val/inception_score_mean": 1.0,
"val/inception_score_std": 0.0,
"val/kernel_inception_distance_mean": 0.0,
"val/kernel_inception_distance_std": 0.0,
"val/frechet_inception_distance": 133.0,
"val/frechet_audio_distance": 32.0,
}
self.initial_learning_rate = None
def get_log_dir(self):
if (
self.logger_save_dir is None
and self.logger_project is None
and self.logger_version is None
):
return os.path.join(
self.logger.save_dir, self.logger._project, self.logger.version
)
else:
return os.path.join(
self.logger_save_dir, self.logger_project, self.logger_version
)
def set_log_dir(self, save_dir, project, version):
self.logger_save_dir = save_dir
self.logger_project = project
self.logger_version = version
def register_schedule(
self,
given_betas=None,
beta_schedule="linear",
timesteps=1000,
linear_start=1e-4,
linear_end=2e-2,
cosine_s=8e-3,
):
if exists(given_betas):
betas = given_betas
else: | betas = make_beta_schedule( | 5 | 2023-11-14 23:29:31+00:00 | 4k |
lxmusics/lx-music-api-server-python | main.py | [
{
"identifier": "config",
"path": "common/config.py",
"snippet": "def get_data_connection():\ndef get_cache_connection():\ndef handle_default_config():\ndef load_data():\ndef save_data(config_data):\ndef getCache(module, key):\ndef updateCache(module, key, data):\ndef resetRequestTime(ip):\ndef updateRequestTime(ip):\ndef getRequestTime(ip):\ndef read_data(key):\ndef write_data(key, value):\ndef push_to_list(key, obj):\ndef write_config(key, value):\ndef read_default_config(key):\ndef _read_config(key):\ndef read_config(key):\ndef write_data(key, value):\ndef initConfig():\ndef ban_ip(ip_addr, ban_time=-1):\ndef check_ip_banned(ip_addr):\nclass ConfigReadException(Exception):\nclass ConfigReadException(Exception):"
},
{
"identifier": "lxsecurity",
"path": "common/lxsecurity.py",
"snippet": "def checklxmheader(lxm, url):"
},
{
"identifier": "log",
"path": "common/log.py",
"snippet": "def log(self, message, allow_hidden=True):\n if self.module_name == \"flask\" and \"\\n\" in message:\n if message.startswith(\"Error\"):\n return self._logger.error(message)\n for m in message.split(\"\\n\"):\n if \"WARNING\" in m:\n self._logger.warning(m)\n else:\n self._logger.info(m)\n return\n if len(str(message)) > log_length_limit and allow_hidden:\n message = str(message)[:log_length_limit] + \" ...\"\n self._logger.info(message)"
},
{
"identifier": "Httpx",
"path": "common/Httpx.py",
"snippet": "def is_valid_utf8(text):\ndef is_plain_text(text):\ndef convert_dict_to_form_string(dic):\ndef log_plaintext(text):\ndef request(url, options = {}):\n def _json():\ndef checkcn():\n def __init__(self, status, content, headers):\n def json(self):\nasync def convert_to_requests_response(aiohttp_response):\nasync def AsyncRequest(url, options = {}):\nclass ClientResponse:"
},
{
"identifier": "variable",
"path": "common/variable.py",
"snippet": "def _read_config_file():\ndef _read_config(key):"
},
{
"identifier": "scheduler",
"path": "common/scheduler.py",
"snippet": "class taskWrapper:\n def __init__(self, name, function, interval = 86400, args = {}, latest_execute = 0):\n def check_available(self):\n async def run(self):\ndef append(name, task, interval = 86400, args = {}):\nasync def thread_runner():\nasync def run():"
},
{
"identifier": "lx_script",
"path": "common/lx_script.py",
"snippet": "async def get_response(retry = 0):\nasync def get_script():\nasync def generate_script_response(request):"
}
] | import sys
import ujson as json
import threading
import traceback
import modules
import asyncio
import aiohttp
import time
import concurrent
from common import config
from common import lxsecurity
from common import log
from common import Httpx
from common import variable
from common import scheduler
from common import lx_script
from aiohttp.web import Response | 2,009 |
if ((sys.version_info.major == 3 and sys.version_info.minor < 6) or sys.version_info.major == 2):
print('Python版本过低,请使用Python 3.6+ ')
sys.exit(1)
def handleResult(dic, status = 200):
return Response(body = json.dumps(dic, indent=2, ensure_ascii=False), content_type='application/json', status = status)
logger = log.log("main")
aiologger = log.log('aiohttp_web')
stopEvent = None
if (sys.version_info.minor < 8 and sys.version_info.major == 3):
logger.warning('您使用的Python版本已经停止更新,不建议继续使用')
stopEvent = concurrent.futures._base.CancelledError
else:
stopEvent = asyncio.exceptions.CancelledError
def start_checkcn_thread():
threading.Thread(target=Httpx.checkcn).start()
# check request info before start
async def handle_before_request(app, handler):
async def handle_request(request):
try:
# nginx proxy header
if (request.headers.get("X-Real-IP")):
request.remote_addr = request.headers.get("X-Real-IP")
else:
request.remote_addr = request.remote
# check ip
if (config.check_ip_banned(request.remote_addr)):
return handleResult({"code": 1, "msg": "您的IP已被封禁", "data": None}, 403)
# check global rate limit
if (
(time.time() - config.getRequestTime('global'))
<
(config.read_config("security.rate_limit.global"))
):
return handleResult({"code": 5, "msg": "全局限速", "data": None}, 429)
if (
(time.time() - config.getRequestTime(request.remote_addr))
<
(config.read_config("security.rate_limit.ip"))
):
return handleResult({"code": 5, "msg": "IP限速", "data": None}, 429)
# update request time
config.updateRequestTime('global')
config.updateRequestTime(request.remote_addr)
# check host
if (config.read_config("security.allowed_host.enable")):
if request.host.split(":")[0] not in config.read_config("security.allowed_host.list"):
if config.read_config("security.allowed_host.blacklist.enable"):
config.ban_ip(request.remote_addr, int(config.read_config("security.allowed_host.blacklist.length")))
return handleResult({'code': 6, 'msg': '未找到您所请求的资源', 'data': None}, 404)
resp = await handler(request)
if (isinstance(resp, str)):
resp = Response(body = resp, content_type='text/plain', status = 200)
elif (isinstance(resp, dict)):
resp = handleResult(resp)
elif (not isinstance(resp, Response)):
resp = Response(body = str(resp), content_type='text/plain', status = 200)
aiologger.info(f'{request.remote_addr} - {request.method} "{request.path}", {resp.status}')
return resp
except:
logger.error(traceback.format_exc())
return {"code": 4, "msg": "内部服务器错误", "data": None}
return handle_request
async def main(request):
return handleResult({"code": 0, "msg": "success", "data": None})
async def handle(request):
method = request.match_info.get('method')
source = request.match_info.get('source')
songId = request.match_info.get('songId')
quality = request.match_info.get('quality')
if (config.read_config("security.key.enable") and request.host.split(':')[0] not in config.read_config('security.whitelist_host')):
if (request.headers.get("X-Request-Key")) != config.read_config("security.key.value"):
if (config.read_config("security.key.ban")):
config.ban_ip(request.remote_addr)
return handleResult({"code": 1, "msg": "key验证失败", "data": None}, 403)
if (config.read_config('security.check_lxm.enable') and request.host.split(':')[0] not in config.read_config('security.whitelist_host')):
lxm = request.headers.get('lxm')
if (not lxsecurity.checklxmheader(lxm, request.url)):
if (config.read_config('security.lxm_ban.enable')):
config.ban_ip(request.remote_addr)
return handleResult({"code": 1, "msg": "lxm请求头验证失败", "data": None}, 403)
try:
query = dict(request.query)
if (method in dir(modules) and query == {}):
return handleResult(await getattr(modules, method)(source, songId, quality))
elif ((method + '_with_query') in dir(modules) and query != {}):
return handleResult(await getattr(modules, method + '_with_query')(source, songId, quality, query))
else:
if (query == {}):
return handleResult(await modules.other(method, source, songId, quality))
else:
return handleResult(await modules.other_with_query(method, source, songId, quality, query))
except:
logger.error(traceback.format_exc())
return handleResult({'code': 4, 'msg': '内部服务器错误', 'data': None}, 500)
async def handle_404(request):
return handleResult({'code': 6, 'msg': '未找到您所请求的资源', 'data': None}, 404)
app = aiohttp.web.Application(middlewares=[handle_before_request])
# mainpage
app.router.add_get('/', main)
# api
app.router.add_get('/{method}/{source}/{songId}/{quality}', handle)
app.router.add_get('/{method}/{source}/{songId}', handle)
if (config.read_config('common.allow_download_script')):
| #!/usr/bin/env python3
# ----------------------------------------
# - mode: python -
# - author: helloplhm-qwq -
# - name: main.py -
# - project: lx-music-api-server -
# - license: MIT -
# ----------------------------------------
# This file is part of the "lx-music-api-server" project.
if ((sys.version_info.major == 3 and sys.version_info.minor < 6) or sys.version_info.major == 2):
print('Python版本过低,请使用Python 3.6+ ')
sys.exit(1)
def handleResult(dic, status = 200):
return Response(body = json.dumps(dic, indent=2, ensure_ascii=False), content_type='application/json', status = status)
logger = log.log("main")
aiologger = log.log('aiohttp_web')
stopEvent = None
if (sys.version_info.minor < 8 and sys.version_info.major == 3):
logger.warning('您使用的Python版本已经停止更新,不建议继续使用')
stopEvent = concurrent.futures._base.CancelledError
else:
stopEvent = asyncio.exceptions.CancelledError
def start_checkcn_thread():
threading.Thread(target=Httpx.checkcn).start()
# check request info before start
async def handle_before_request(app, handler):
async def handle_request(request):
try:
# nginx proxy header
if (request.headers.get("X-Real-IP")):
request.remote_addr = request.headers.get("X-Real-IP")
else:
request.remote_addr = request.remote
# check ip
if (config.check_ip_banned(request.remote_addr)):
return handleResult({"code": 1, "msg": "您的IP已被封禁", "data": None}, 403)
# check global rate limit
if (
(time.time() - config.getRequestTime('global'))
<
(config.read_config("security.rate_limit.global"))
):
return handleResult({"code": 5, "msg": "全局限速", "data": None}, 429)
if (
(time.time() - config.getRequestTime(request.remote_addr))
<
(config.read_config("security.rate_limit.ip"))
):
return handleResult({"code": 5, "msg": "IP限速", "data": None}, 429)
# update request time
config.updateRequestTime('global')
config.updateRequestTime(request.remote_addr)
# check host
if (config.read_config("security.allowed_host.enable")):
if request.host.split(":")[0] not in config.read_config("security.allowed_host.list"):
if config.read_config("security.allowed_host.blacklist.enable"):
config.ban_ip(request.remote_addr, int(config.read_config("security.allowed_host.blacklist.length")))
return handleResult({'code': 6, 'msg': '未找到您所请求的资源', 'data': None}, 404)
resp = await handler(request)
if (isinstance(resp, str)):
resp = Response(body = resp, content_type='text/plain', status = 200)
elif (isinstance(resp, dict)):
resp = handleResult(resp)
elif (not isinstance(resp, Response)):
resp = Response(body = str(resp), content_type='text/plain', status = 200)
aiologger.info(f'{request.remote_addr} - {request.method} "{request.path}", {resp.status}')
return resp
except:
logger.error(traceback.format_exc())
return {"code": 4, "msg": "内部服务器错误", "data": None}
return handle_request
async def main(request):
return handleResult({"code": 0, "msg": "success", "data": None})
async def handle(request):
method = request.match_info.get('method')
source = request.match_info.get('source')
songId = request.match_info.get('songId')
quality = request.match_info.get('quality')
if (config.read_config("security.key.enable") and request.host.split(':')[0] not in config.read_config('security.whitelist_host')):
if (request.headers.get("X-Request-Key")) != config.read_config("security.key.value"):
if (config.read_config("security.key.ban")):
config.ban_ip(request.remote_addr)
return handleResult({"code": 1, "msg": "key验证失败", "data": None}, 403)
if (config.read_config('security.check_lxm.enable') and request.host.split(':')[0] not in config.read_config('security.whitelist_host')):
lxm = request.headers.get('lxm')
if (not lxsecurity.checklxmheader(lxm, request.url)):
if (config.read_config('security.lxm_ban.enable')):
config.ban_ip(request.remote_addr)
return handleResult({"code": 1, "msg": "lxm请求头验证失败", "data": None}, 403)
try:
query = dict(request.query)
if (method in dir(modules) and query == {}):
return handleResult(await getattr(modules, method)(source, songId, quality))
elif ((method + '_with_query') in dir(modules) and query != {}):
return handleResult(await getattr(modules, method + '_with_query')(source, songId, quality, query))
else:
if (query == {}):
return handleResult(await modules.other(method, source, songId, quality))
else:
return handleResult(await modules.other_with_query(method, source, songId, quality, query))
except:
logger.error(traceback.format_exc())
return handleResult({'code': 4, 'msg': '内部服务器错误', 'data': None}, 500)
async def handle_404(request):
return handleResult({'code': 6, 'msg': '未找到您所请求的资源', 'data': None}, 404)
app = aiohttp.web.Application(middlewares=[handle_before_request])
# mainpage
app.router.add_get('/', main)
# api
app.router.add_get('/{method}/{source}/{songId}/{quality}', handle)
app.router.add_get('/{method}/{source}/{songId}', handle)
if (config.read_config('common.allow_download_script')): | app.router.add_get('/script', lx_script.generate_script_response) | 6 | 2023-11-10 13:16:30+00:00 | 4k |
ai-forever/Kandinsky-3 | kandinsky3/model/unet.py | [
{
"identifier": "Identity",
"path": "kandinsky3/model/nn.py",
"snippet": "class Identity(nn.Module):\n def __init__(self, *args, **kwargs):\n super().__init__()\n\n @staticmethod\n def forward(x, *args, **kwargs):\n return x"
},
{
"identifier": "Attention",
"path": "kandinsky3/model/nn.py",
"snippet": "class Attention(nn.Module):\n\n def __init__(self, in_channels, out_channels, context_dim, head_dim=64):\n super().__init__()\n assert out_channels % head_dim == 0\n self.num_heads = out_channels // head_dim\n self.scale = head_dim ** -0.5\n\n self.to_query = nn.Linear(in_channels, out_channels, bias=False)\n self.to_key = nn.Linear(context_dim, out_channels, bias=False)\n self.to_value = nn.Linear(context_dim, out_channels, bias=False)\n\n self.output_layer = nn.Linear(out_channels, out_channels, bias=False)\n\n def forward(self, x, context, context_mask=None):\n query = rearrange(self.to_query(x), 'b n (h d) -> b h n d', h=self.num_heads)\n key = rearrange(self.to_key(context), 'b n (h d) -> b h n d', h=self.num_heads)\n value = rearrange(self.to_value(context), 'b n (h d) -> b h n d', h=self.num_heads)\n\n attention_matrix = einsum('b h i d, b h j d -> b h i j', query, key) * self.scale\n if exist(context_mask):\n max_neg_value = -torch.finfo(attention_matrix.dtype).max\n context_mask = rearrange(context_mask, 'b j -> b 1 1 j')\n attention_matrix = attention_matrix.masked_fill(~context_mask, max_neg_value)\n attention_matrix = attention_matrix.softmax(dim=-1)\n\n out = einsum('b h i j, b h j d -> b h i d', attention_matrix, value)\n out = rearrange(out, 'b h n d -> b n (h d)')\n out = self.output_layer(out)\n return out"
},
{
"identifier": "SinusoidalPosEmb",
"path": "kandinsky3/model/nn.py",
"snippet": "class SinusoidalPosEmb(nn.Module):\n\n def __init__(self, dim):\n super().__init__()\n self.dim = dim\n\n def forward(self, x):\n half_dim = self.dim // 2\n emb = math.log(10000) / (half_dim - 1)\n emb = torch.exp(torch.arange(half_dim, device=x.device) * -emb)\n emb = rearrange(x, 'i -> i 1') * rearrange(emb, 'j -> 1 j')\n return torch.cat((emb.sin(), emb.cos()), dim=-1)"
},
{
"identifier": "ConditionalGroupNorm",
"path": "kandinsky3/model/nn.py",
"snippet": "class ConditionalGroupNorm(nn.Module):\n\n def __init__(self, groups, normalized_shape, context_dim):\n super().__init__()\n self.norm = nn.GroupNorm(groups, normalized_shape, affine=False)\n self.context_mlp = nn.Sequential(\n nn.SiLU(),\n nn.Linear(context_dim, 2 * normalized_shape)\n )\n self.context_mlp[1].weight.data.zero_()\n self.context_mlp[1].bias.data.zero_()\n\n def forward(self, x, context):\n context = self.context_mlp(context)\n ndims = ' 1' * len(x.shape[2:])\n context = rearrange(context, f'b c -> b c{ndims}')\n\n scale, shift = context.chunk(2, dim=1)\n x = self.norm(x) * (scale + 1.) + shift\n return x"
},
{
"identifier": "exist",
"path": "kandinsky3/model/utils.py",
"snippet": "def exist(item):\n return item is not None"
},
{
"identifier": "set_default_item",
"path": "kandinsky3/model/utils.py",
"snippet": "def set_default_item(condition, item_1, item_2=None):\n if condition:\n return item_1\n else:\n return item_2"
},
{
"identifier": "set_default_layer",
"path": "kandinsky3/model/utils.py",
"snippet": "def set_default_layer(condition, layer_1, args_1=[], kwargs_1={}, layer_2=Identity, args_2=[], kwargs_2={}):\n if condition:\n return layer_1(*args_1, **kwargs_1)\n else:\n return layer_2(*args_2, **kwargs_2)"
}
] | import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange
from .nn import Identity, Attention, SinusoidalPosEmb, ConditionalGroupNorm
from .utils import exist, set_default_item, set_default_layer | 1,757 |
class Block(nn.Module):
def __init__(self, in_channels, out_channels, time_embed_dim, kernel_size=3, norm_groups=32, up_resolution=None):
super().__init__()
self.group_norm = ConditionalGroupNorm(norm_groups, in_channels, time_embed_dim)
self.activation = nn.SiLU()
self.up_sample = set_default_layer(
exist(up_resolution) and up_resolution,
nn.ConvTranspose2d, (in_channels, in_channels), {'kernel_size': 2, 'stride': 2}
)
padding = set_default_item(kernel_size == 1, 0, 1)
self.projection = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, padding=padding)
self.down_sample = set_default_layer(
exist(up_resolution) and not up_resolution,
nn.Conv2d, (out_channels, out_channels), {'kernel_size': 2, 'stride': 2}
)
def forward(self, x, time_embed):
x = self.group_norm(x, time_embed)
x = self.activation(x)
x = self.up_sample(x)
x = self.projection(x)
x = self.down_sample(x)
return x
class ResNetBlock(nn.Module):
def __init__(
self, in_channels, out_channels, time_embed_dim, norm_groups=32, compression_ratio=2, up_resolutions=4*[None]
):
super().__init__()
kernel_sizes = [1, 3, 3, 1]
hidden_channel = max(in_channels, out_channels) // compression_ratio
hidden_channels = [(in_channels, hidden_channel)] + [(hidden_channel, hidden_channel)] * 2 + [(hidden_channel, out_channels)]
self.resnet_blocks = nn.ModuleList([
Block(in_channel, out_channel, time_embed_dim, kernel_size, norm_groups, up_resolution)
for (in_channel, out_channel), kernel_size, up_resolution in zip(hidden_channels, kernel_sizes, up_resolutions)
])
self.shortcut_up_sample = set_default_layer(
True in up_resolutions,
nn.ConvTranspose2d, (in_channels, in_channels), {'kernel_size': 2, 'stride': 2}
)
self.shortcut_projection = set_default_layer(
in_channels != out_channels,
nn.Conv2d, (in_channels, out_channels), {'kernel_size': 1}
)
self.shortcut_down_sample = set_default_layer(
False in up_resolutions,
nn.Conv2d, (out_channels, out_channels), {'kernel_size': 2, 'stride': 2}
)
def forward(self, x, time_embed):
out = x
for resnet_block in self.resnet_blocks:
out = resnet_block(out, time_embed)
x = self.shortcut_up_sample(x)
x = self.shortcut_projection(x)
x = self.shortcut_down_sample(x)
x = x + out
return x
class AttentionPolling(nn.Module):
def __init__(self, num_channels, context_dim, head_dim=64):
super().__init__()
|
class Block(nn.Module):
def __init__(self, in_channels, out_channels, time_embed_dim, kernel_size=3, norm_groups=32, up_resolution=None):
super().__init__()
self.group_norm = ConditionalGroupNorm(norm_groups, in_channels, time_embed_dim)
self.activation = nn.SiLU()
self.up_sample = set_default_layer(
exist(up_resolution) and up_resolution,
nn.ConvTranspose2d, (in_channels, in_channels), {'kernel_size': 2, 'stride': 2}
)
padding = set_default_item(kernel_size == 1, 0, 1)
self.projection = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, padding=padding)
self.down_sample = set_default_layer(
exist(up_resolution) and not up_resolution,
nn.Conv2d, (out_channels, out_channels), {'kernel_size': 2, 'stride': 2}
)
def forward(self, x, time_embed):
x = self.group_norm(x, time_embed)
x = self.activation(x)
x = self.up_sample(x)
x = self.projection(x)
x = self.down_sample(x)
return x
class ResNetBlock(nn.Module):
def __init__(
self, in_channels, out_channels, time_embed_dim, norm_groups=32, compression_ratio=2, up_resolutions=4*[None]
):
super().__init__()
kernel_sizes = [1, 3, 3, 1]
hidden_channel = max(in_channels, out_channels) // compression_ratio
hidden_channels = [(in_channels, hidden_channel)] + [(hidden_channel, hidden_channel)] * 2 + [(hidden_channel, out_channels)]
self.resnet_blocks = nn.ModuleList([
Block(in_channel, out_channel, time_embed_dim, kernel_size, norm_groups, up_resolution)
for (in_channel, out_channel), kernel_size, up_resolution in zip(hidden_channels, kernel_sizes, up_resolutions)
])
self.shortcut_up_sample = set_default_layer(
True in up_resolutions,
nn.ConvTranspose2d, (in_channels, in_channels), {'kernel_size': 2, 'stride': 2}
)
self.shortcut_projection = set_default_layer(
in_channels != out_channels,
nn.Conv2d, (in_channels, out_channels), {'kernel_size': 1}
)
self.shortcut_down_sample = set_default_layer(
False in up_resolutions,
nn.Conv2d, (out_channels, out_channels), {'kernel_size': 2, 'stride': 2}
)
def forward(self, x, time_embed):
out = x
for resnet_block in self.resnet_blocks:
out = resnet_block(out, time_embed)
x = self.shortcut_up_sample(x)
x = self.shortcut_projection(x)
x = self.shortcut_down_sample(x)
x = x + out
return x
class AttentionPolling(nn.Module):
def __init__(self, num_channels, context_dim, head_dim=64):
super().__init__() | self.attention = Attention(context_dim, num_channels, context_dim, head_dim) | 1 | 2023-11-13 10:16:04+00:00 | 4k |
spfrommer/torchexplorer | torchexplorer/render/layout.py | [
{
"identifier": "utils",
"path": "torchexplorer/utils.py",
"snippet": "def iter_not_none(iterable: Iterable[Any]) -> Iterator[Any]:\ndef enum_not_none(iterable: Iterable[Any]) -> Iterator[tuple[int, Any]]:\ndef interleave(l1: list[Any], l2: list[Any]) -> list[Any]:\ndef list_add(l1: list[float], l2: list[float]) -> list[float]:"
},
{
"identifier": "core",
"path": "torchexplorer/core.py",
"snippet": "class SizeTracker:\nclass ModuleInvocationHistograms:\nclass ModuleSharedHistograms:\nclass ExplorerMetadata:\nclass ModuleInvocationStructure():\nclass DummyAttachModule(nn.Module):\n def __init__(\n self,\n module: nn.Module,\n invocation_id: InvocationId,\n structure_id: int,\n input_n: int,\n output_n: int\n ):\n def module_metadata(self) -> ExplorerMetadata:\n def get_inner_structure(\n self, module: nn.Module, invocation_id: int\n ) -> Optional['ModuleInvocationStructure']:\n def get_inner_structure_from_memory_id(\n self, memory_id: int\n ) -> Optional['ModuleInvocationStructure']:\n def get_inner_structure_from_id(\n self, structure_id: int\n ) -> Optional['ModuleInvocationStructure']:\n def _inner_filter(self, test_fn: Callable) -> Optional['ModuleInvocationStructure']:\n def str_impl(self) -> str:\n def __init__(self):\n def forward(self, x):"
},
{
"identifier": "Tooltip",
"path": "torchexplorer/components/tooltip.py",
"snippet": "class Tooltip:\n \"\"\"The tooltip that pops up next to a Module.\"\"\"\n\n def __init__(self, title: str, keys: list[str], vals: list[str]):\n self.title = title\n self.keys = keys\n self.vals = vals\n \n @classmethod\n def create_io(cls, tracker: SizeTracker) -> 'Tooltip':\n name = tracker.type.split('.')[-1]\n keys, vals = ['size'], [str(tracker.size).replace('None', dash)]\n return Tooltip(name, keys, vals)\n \n @classmethod\n def create_moduleinvocation(\n cls, module: Module, parent_module: Module, invocation_id: InvocationId\n ) -> 'Tooltip':\n\n name_in_parent = cls._get_name_in_parent(module, parent_module)\n\n io_shape_keys, io_shape_vals = cls._get_io_shape_keyvals(module, invocation_id)\n extra_repr_keys, extra_repr_vals = cls._get_extra_repr_keyvals(module)\n\n keys = io_shape_keys + extra_repr_keys\n vals = io_shape_vals + extra_repr_vals\n\n assert len(keys) == len(vals)\n\n return Tooltip(name_in_parent, keys, vals)\n \n @classmethod\n def create_attach(cls, module: Module) -> 'Tooltip':\n return cls.create_io(module.torchexplorer_metadata.input_sizes[0][0])\n \n @classmethod\n def _get_name_in_parent(cls, module: Module, parent_module: Module) -> str:\n name_in_parent = ''\n for name, m in parent_module.named_children():\n if m == module:\n name_in_parent = name\n break\n \n if isinstance(m, ModuleList):\n for i, mm in enumerate(m):\n if mm == module:\n name_in_parent = f'{name}[{i}]'\n break\n \n if isinstance(m, ModuleDict):\n for k, mm in m.items():\n if mm == module:\n name_in_parent = f'{name}[{k}]'\n break\n \n return name_in_parent\n\n @classmethod\n def _get_io_shape_keyvals(\n cls, module: Module, invocation_id: InvocationId\n ) -> tuple[list[str], list[str]]:\n\n metadata = module.torchexplorer_metadata \n\n keys, vals = [], []\n\n one_input = len(metadata.input_sizes[invocation_id]) == 1\n for i, input_tracker in enumerate(metadata.input_sizes[invocation_id]):\n keys.append('in_size' if one_input else f'in{i}_size')\n vals.append(str(input_tracker.size).replace('None', dash))\n \n one_output = len(metadata.output_sizes[invocation_id]) == 1\n for i, output_tracker in enumerate(metadata.output_sizes[invocation_id]):\n keys.append('out_size' if one_output else f'out{i}_size')\n vals.append(str(output_tracker.size).replace('None', dash))\n\n return keys, vals\n \n @classmethod\n def _get_extra_repr_keyvals(cls, module: Module) -> tuple[list[str], list[str]]:\n try:\n keys, vals = [], []\n extra_rep = module.extra_repr()\n pairs = re.split(r',\\s*(?![^()]*\\))(?![^[]]*\\])', extra_rep)\n for pair in pairs:\n if pair == '':\n continue\n k, v = pair.split('=') if ('=' in pair) else (dash, pair)\n keys.append(k.strip())\n vals.append(v.strip())\n except Exception:\n keys, vals = [], []\n \n return keys, vals"
},
{
"identifier": "ModuleInvocationHistograms",
"path": "torchexplorer/core.py",
"snippet": "class ModuleInvocationHistograms:\n \"\"\"The histograms associated to a particular InvocationId on a module.\"\"\"\n input_hists: list[IncrementalHistogram] = field(default_factory=lambda: [])\n output_hists: list[IncrementalHistogram] = field(default_factory=lambda: [])"
},
{
"identifier": "ModuleInvocationStructure",
"path": "torchexplorer/core.py",
"snippet": "class ModuleInvocationStructure():\n \"\"\"The parsed structure of a module invocation.\n\n There can be multiple of these for a particular module if that module's forward\n method is invoked multiple times on the forwards pass of a parent.\"\"\"\n\n def __init__(\n self,\n module: nn.Module,\n invocation_id: InvocationId,\n structure_id: int,\n input_n: int,\n output_n: int\n ):\n\n self.module = module\n self.invocation_id = invocation_id\n # A unique id for this structure, to enable caching of graphviz calls\n self.structure_id = structure_id\n\n # Nodes are either 'Input x'/'Output x' strings or ModuleInvocationStructures\n self.inner_graph = nx.DiGraph()\n\n for i in range(input_n):\n name = f'Input {i}'\n self.inner_graph.add_node(name, memory_id=None, label=name)\n \n for i in range(output_n):\n name = f'Output {i}'\n self.inner_graph.add_node(name, memory_id=None, label=name)\n\n self.upstreams_fetched = False\n\n self.graphviz_json_cache: Optional[dict] = None\n\n def module_metadata(self) -> ExplorerMetadata:\n return self.module.torchexplorer_metadata\n\n def get_inner_structure(\n self, module: nn.Module, invocation_id: int\n ) -> Optional['ModuleInvocationStructure']:\n\n return self._inner_filter(\n lambda node: node.module == module and node.invocation_id == invocation_id\n )\n\n def get_inner_structure_from_memory_id(\n self, memory_id: int\n ) -> Optional['ModuleInvocationStructure']:\n\n return self._inner_filter(lambda node: id(node) == memory_id)\n\n def get_inner_structure_from_id(\n self, structure_id: int\n ) -> Optional['ModuleInvocationStructure']:\n\n return self._inner_filter(lambda node: node.structure_id == structure_id)\n\n def _inner_filter(self, test_fn: Callable) -> Optional['ModuleInvocationStructure']:\n for node in self.inner_graph.nodes:\n if isinstance(node, ModuleInvocationStructure):\n if test_fn(node):\n return node\n\n return None\n\n # NOTE: Overriding __str__ breaks the graphviz rendering...\n def str_impl(self) -> str:\n return f'{self.module.__class__.__name__}, Invocation {self.invocation_id}'"
},
{
"identifier": "is_input_node",
"path": "torchexplorer/structure/structure.py",
"snippet": "def is_input_node(node) -> bool:\n if not isinstance(node, str):\n return False\n return bool(re.match(r'Input \\d+', node)) or (node=='Input')"
},
{
"identifier": "is_io_node",
"path": "torchexplorer/structure/structure.py",
"snippet": "def is_io_node(node) -> bool:\n return is_input_node(node) or is_output_node(node)"
},
{
"identifier": "EdgeLayout",
"path": "torchexplorer/render/structs.py",
"snippet": "class EdgeLayout:\n path_points: list[list[float]]\n arrowhead_points: list[list[float]]\n downstream_input_index: Optional[int]\n upstream_output_index: Optional[int]"
},
{
"identifier": "TooltipLayout",
"path": "torchexplorer/render/structs.py",
"snippet": "class TooltipLayout:\n tooltip: Tooltip\n\n # Coordinates in parent of the layout this tooltip belongs to\n bottom_left_corner: list[float] = field(default_factory=lambda: [0, 0]) \n top_right_corner: list[float] = field(default_factory=lambda: [0, 0])"
},
{
"identifier": "NodeLayout",
"path": "torchexplorer/render/structs.py",
"snippet": "class NodeLayout:\n display_name: Optional[str] = None\n tooltip: Optional[TooltipLayout] = None\n\n invocation_hists: Optional[ModuleInvocationHistograms] = None\n invocation_grad_hists: Optional[ModuleInvocationHistograms] = None\n shared_hists: Optional[ModuleSharedHistograms] = None\n\n # Coordinates in parent layout\n bottom_left_corner: list[float] = field(default_factory=lambda: [0, 0]) \n top_right_corner: list[float] = field(default_factory=lambda: [0, 0]) \n\n # Inner graph data\n inner_graph_layouts: list['NodeLayout'] = (\n field(default_factory=lambda: [])\n )\n inner_graph_edges: list[EdgeLayout] = field(default_factory=lambda: [])\n\n # Data added in the _process_graph function, after everything has been layed out\n # These ids are not related to the structure_id of the ModuleInvocationStructure\n id: Optional[int] = None\n parent_id: Optional[int] = None\n # Parent stack includes current layout (this goes into the parents view in vega)\n parent_stack: Optional[list[tuple[str, int]]] = None\n child_ids: Optional[list[int]] = None"
}
] | import copy
import html
import json
import string
import numpy as np
import networkx as nx
from typing import Optional, Union
from subprocess import Popen, PIPE
from torchexplorer import utils
from torchexplorer import core
from torchexplorer.components.tooltip import Tooltip
from torchexplorer.core import ModuleInvocationHistograms, ModuleInvocationStructure
from torchexplorer.structure.structure import is_input_node, is_io_node
from torchexplorer.render.structs import (
EdgeLayout, TooltipLayout, NodeLayout
) | 2,536 | from __future__ import annotations
def layout(
structure: ModuleInvocationStructure, cache: Optional[dict] = None
| from __future__ import annotations
def layout(
structure: ModuleInvocationStructure, cache: Optional[dict] = None | ) -> tuple[NodeLayout, dict]: | 9 | 2023-11-13 05:56:04+00:00 | 4k |
namin/llm-verified-with-monte-carlo-tree-search | run_ppo_block.py | [
{
"identifier": "Node",
"path": "montecarlo/node.py",
"snippet": "class Node:\n def __init__(self, state):\n self.state = state\n self.win_value = 0\n self.policy_value = None\n self.visits = 0\n self.parent = None\n self.children = []\n self.expanded = False\n self.player_number = None\n self.discovery_factor = 0.35\n\n def update_win_value(self, value):\n self.win_value += value\n self.visits += 1\n\n if self.parent:\n self.parent.update_win_value(value)\n\n def update_policy_value(self, value):\n self.policy_value = value\n\n def add_child(self, child):\n self.children.append(child)\n child.parent = self\n\n def add_children(self, children):\n for child in children:\n self.add_child(child)\n\n def get_preferred_child(self, root_node):\n best_children = []\n best_score = float(\"-inf\")\n\n for child in self.children:\n score = child.get_score(root_node)\n\n if score > best_score:\n best_score = score\n best_children = [child]\n elif score == best_score:\n best_children.append(child)\n\n return random.choice(best_children)\n\n def get_score(self, root_node):\n discovery_operand = (\n self.discovery_factor\n * (self.policy_value or 1)\n * sqrt(log(self.parent.visits) / (self.visits or 1))\n )\n\n win_multiplier = (\n 1 if self.parent.player_number == root_node.player_number else -1\n )\n win_operand = win_multiplier * self.win_value / (self.visits or 1)\n\n self.score = win_operand + discovery_operand\n\n return self.score\n\n def is_scorable(self):\n return self.visits or self.policy_value != None\n \n def print_node(self, f, i, root, st):\n escape = lambda x : json.dumps(x).strip('\"')\n if self.parent is None:\n f.write((' ' * i) + st + \" [label=\\\"\" + escape(self.state) + \"\\\",shape=box]\\n\")\n else:\n diff = '\\n'.join([x for x in self.state.split(\"\\n\") if x not in self.parent.state.split(\"\\n\")])\n f.write((' ' * i) + st + \" [label=\\\"\" + escape(diff) + \"\\\",shape=box]\\n\")\n\n num = 0\n for child in self.children:\n new_st = st + \"_\" + str(num)\n child.print_node(f, i + 2, root, new_st)\n f.write(' ' * i + st + \" -- \" + new_st + \"\\n\")\n num = num + 1"
},
{
"identifier": "MonteCarlo",
"path": "montecarlo/montecarlo.py",
"snippet": "class MonteCarlo:\n def __init__(self, root_node):\n self.root_node = root_node\n self.solution = None\n self.child_finder = None\n self.node_evaluator = lambda child, montecarlo: None\n self.stats_expansion_count = 0\n self.stats_failed_expansion_count = 0\n\n def make_choice(self):\n best_children = []\n most_visits = float(\"-inf\")\n\n for child in self.root_node.children:\n if child.visits > most_visits:\n most_visits = child.visits\n best_children = [child]\n elif child.visits == most_visits:\n best_children.append(child)\n\n return random.choice(best_children)\n\n def make_exploratory_choice(self):\n children_visits = map(lambda child: child.visits, self.root_node.children)\n children_visit_probabilities = [\n visit / self.root_node.visits for visit in children_visits\n ]\n random_probability = random.uniform(0, 1)\n probabilities_already_counted = 0.0\n\n for i, probability in enumerate(children_visit_probabilities):\n if probabilities_already_counted + probability >= random_probability:\n return self.root_node.children[i]\n\n probabilities_already_counted += probability\n\n def simulate(self, expansion_count=1):\n i = 0\n while expansion_count is None or i < expansion_count:\n i += 1\n\n if self.solution is not None:\n return\n\n current_node = self.root_node\n\n while current_node.expanded:\n current_node = current_node.get_preferred_child(self.root_node)\n\n self.expand(current_node)\n\n def expand(self, node):\n self.stats_expansion_count += 1\n self.child_finder(node, self)\n\n for child in node.children:\n child_win_value = self.node_evaluator(child, self)\n\n if child_win_value != None:\n child.update_win_value(child_win_value)\n\n if not child.is_scorable():\n self.random_rollout(child)\n child.children = []\n\n if len(node.children):\n node.expanded = True\n else:\n self.stats_failed_expansion_count += 1\n\n def random_rollout(self, node):\n self.child_finder(node, self)\n child = random.choice(node.children)\n node.children = []\n node.add_child(child)\n child_win_value = self.node_evaluator(child, self)\n\n if child_win_value != None:\n node.update_win_value(child_win_value)\n else:\n self.random_rollout(child)\n\n def print_tree(self, f):\n f.write(\"graph\\n{\\n\")\n self.root_node.print_node(f, 0, self.root_node, \"a\")\n f.write(\"}\\n\")"
},
{
"identifier": "score_func",
"path": "lang.py",
"snippet": "def can_be_solution(msg: str, min_lines: int, check_func=None) -> bool:\ndef find_largest_new_block(old_text: str, text: str) -> str:\ndef find_largest_new_block_code(old_code: str, code: str) -> str:"
},
{
"identifier": "prompt",
"path": "prompts.py",
"snippet": "NO_CHECK_PROOF = lambda v: True\n CHECK_PROOF = lambda v: proof_marker in v\n CHECK_PROOF2 = lambda v: v.count(proof_marker) >= 2\n CHECK_PROOF = NO_CHECK_PROOF\nNO_CHECK_CHEAT = lambda v: False\n CHECK_CHEAT = lambda v: cheat_marker in v\n CHECK_CHEAT = NO_CHECK_CHEAT\nEXTRA_CONSTANT_FOLDING = \" and performs all additions by constants\"\nEXTRA_CONSTANT_FOLDING = \"\"\ndef remove_hints2(prompt):\ndef remove_hints3(prompt):\ndef remove_hints(prompt):"
},
{
"identifier": "limit_depth",
"path": "common.py",
"snippet": "def count_depth(node, f=lambda x: x):\ndef limit_depth(node, f=lambda x: x):"
},
{
"identifier": "args",
"path": "cmdline.py",
"snippet": "class CommonArguments:\ndef get_args():"
}
] | import ppo
import torch
from montecarlo.node import Node
from montecarlo.montecarlo import MonteCarlo
from lang import score_func, can_be_solution, find_largest_new_block
from prompts import prompt, expansion_count, min_lines, check_func
from common import limit_depth, max_completion_depth
from cmdline import args | 1,991 |
n_iter = args.n_iter
# n_iter = 10
class GenNode:
def __init__(self, text, gens):
self.text = text
self.gens = gens
def reinforce(gens, reward):
rewards = [torch.tensor(reward)]
for query_tensors, response_tensors in gens:
ppo.trainer_step(query_tensors, response_tensors, rewards)
def generate_complete(old_text, montecarlo, gens, current_completion_depth=1):
if current_completion_depth >= max_completion_depth:
return None
(text, gen) = ppo.generate(old_text)
score = score_func(text)
if score is None or score < 0:
code = find_largest_new_block(old_text, text)
print("Found code block:", code)
if code is not None:
text = text[0 : text.index("```")] + "```\n" + code # hack
score = 1.0
# fallthrough
else:
if score is None:
gens.append(gen)
return generate_complete(
text, montecarlo, gens, current_completion_depth + 1
)
else:
reinforce([gen], score)
return None
else:
gens.append(gen)
reinforce(gens, score)
node = Node(GenNode(text, gens))
|
n_iter = args.n_iter
# n_iter = 10
class GenNode:
def __init__(self, text, gens):
self.text = text
self.gens = gens
def reinforce(gens, reward):
rewards = [torch.tensor(reward)]
for query_tensors, response_tensors in gens:
ppo.trainer_step(query_tensors, response_tensors, rewards)
def generate_complete(old_text, montecarlo, gens, current_completion_depth=1):
if current_completion_depth >= max_completion_depth:
return None
(text, gen) = ppo.generate(old_text)
score = score_func(text)
if score is None or score < 0:
code = find_largest_new_block(old_text, text)
print("Found code block:", code)
if code is not None:
text = text[0 : text.index("```")] + "```\n" + code # hack
score = 1.0
# fallthrough
else:
if score is None:
gens.append(gen)
return generate_complete(
text, montecarlo, gens, current_completion_depth + 1
)
else:
reinforce([gen], score)
return None
else:
gens.append(gen)
reinforce(gens, score)
node = Node(GenNode(text, gens)) | if can_be_solution(text, min_lines, check_func): | 3 | 2023-11-11 19:56:04+00:00 | 4k |
BraveGroup/Drive-WM | src/diffusers/utils/testing_utils.py | [
{
"identifier": "BACKENDS_MAPPING",
"path": "src/diffusers/utils/import_utils.py",
"snippet": "BACKENDS_MAPPING = OrderedDict(\n [\n (\"bs4\", (is_bs4_available, BS4_IMPORT_ERROR)),\n (\"flax\", (is_flax_available, FLAX_IMPORT_ERROR)),\n (\"inflect\", (is_inflect_available, INFLECT_IMPORT_ERROR)),\n (\"onnx\", (is_onnx_available, ONNX_IMPORT_ERROR)),\n (\"opencv\", (is_opencv_available, OPENCV_IMPORT_ERROR)),\n (\"scipy\", (is_scipy_available, SCIPY_IMPORT_ERROR)),\n (\"torch\", (is_torch_available, PYTORCH_IMPORT_ERROR)),\n (\"transformers\", (is_transformers_available, TRANSFORMERS_IMPORT_ERROR)),\n (\"unidecode\", (is_unidecode_available, UNIDECODE_IMPORT_ERROR)),\n (\"librosa\", (is_librosa_available, LIBROSA_IMPORT_ERROR)),\n (\"k_diffusion\", (is_k_diffusion_available, K_DIFFUSION_IMPORT_ERROR)),\n (\"note_seq\", (is_note_seq_available, NOTE_SEQ_IMPORT_ERROR)),\n (\"wandb\", (is_wandb_available, WANDB_IMPORT_ERROR)),\n (\"omegaconf\", (is_omegaconf_available, OMEGACONF_IMPORT_ERROR)),\n (\"tensorboard\", (is_tensorboard_available, TENSORBOARD_IMPORT_ERROR)),\n (\"compel\", (is_compel_available, COMPEL_IMPORT_ERROR)),\n (\"ftfy\", (is_ftfy_available, FTFY_IMPORT_ERROR)),\n (\"torchsde\", (is_torchsde_available, TORCHSDE_IMPORT_ERROR)),\n (\"invisible_watermark\", (is_invisible_watermark_available, INVISIBLE_WATERMARK_IMPORT_ERROR)),\n ]\n)"
},
{
"identifier": "is_compel_available",
"path": "src/diffusers/utils/import_utils.py",
"snippet": "def is_compel_available():\n return _compel_available"
},
{
"identifier": "is_flax_available",
"path": "src/diffusers/utils/import_utils.py",
"snippet": "def is_flax_available():\n return _flax_available"
},
{
"identifier": "is_note_seq_available",
"path": "src/diffusers/utils/import_utils.py",
"snippet": "def is_note_seq_available():\n return _note_seq_available"
},
{
"identifier": "is_onnx_available",
"path": "src/diffusers/utils/import_utils.py",
"snippet": "def is_onnx_available():\n return _onnx_available"
},
{
"identifier": "is_opencv_available",
"path": "src/diffusers/utils/import_utils.py",
"snippet": "def is_opencv_available():\n return _opencv_available"
},
{
"identifier": "is_peft_available",
"path": "src/diffusers/utils/import_utils.py",
"snippet": "def is_peft_available():\n return _peft_available"
},
{
"identifier": "is_torch_available",
"path": "src/diffusers/utils/import_utils.py",
"snippet": "def is_torch_available():\n return _torch_available"
},
{
"identifier": "is_torch_version",
"path": "src/diffusers/utils/import_utils.py",
"snippet": "def is_torch_version(operation: str, version: str):\n \"\"\"\n Args:\n Compares the current PyTorch version to a given reference with an operation.\n operation (`str`):\n A string representation of an operator, such as `\">\"` or `\"<=\"`\n version (`str`):\n A string version of PyTorch\n \"\"\"\n return compare_versions(parse(_torch_version), operation, version)"
},
{
"identifier": "is_torchsde_available",
"path": "src/diffusers/utils/import_utils.py",
"snippet": "def is_torchsde_available():\n return _torchsde_available"
},
{
"identifier": "is_transformers_available",
"path": "src/diffusers/utils/import_utils.py",
"snippet": "def is_transformers_available():\n return _transformers_available"
},
{
"identifier": "get_logger",
"path": "src/diffusers/utils/logging.py",
"snippet": "def get_logger(name: Optional[str] = None) -> logging.Logger:\n \"\"\"\n Return a logger with the specified name.\n\n This function is not supposed to be directly accessed unless you are writing a custom diffusers module.\n \"\"\"\n\n if name is None:\n name = _get_library_name()\n\n _configure_library_root_logger()\n return logging.getLogger(name)"
}
] | import functools
import importlib
import inspect
import io
import logging
import multiprocessing
import os
import random
import re
import struct
import sys
import tempfile
import time
import unittest
import urllib.parse
import numpy as np
import PIL.Image
import PIL.ImageOps
import requests
import torch
import cv2
from contextlib import contextmanager
from distutils.util import strtobool
from io import BytesIO, StringIO
from pathlib import Path
from typing import List, Optional, Union
from numpy.linalg import norm
from packaging import version
from .import_utils import (
BACKENDS_MAPPING,
is_compel_available,
is_flax_available,
is_note_seq_available,
is_onnx_available,
is_opencv_available,
is_peft_available,
is_torch_available,
is_torch_version,
is_torchsde_available,
is_transformers_available,
)
from .logging import get_logger
from _pytest.config import create_terminal_writer | 2,548 |
tensor_str = str(tensor.detach().cpu().flatten().to(torch.float32)).replace("\n", "")
# format is usually:
# expected_slice = np.array([-0.5713, -0.3018, -0.9814, 0.04663, -0.879, 0.76, -1.734, 0.1044, 1.161])
output_str = tensor_str.replace("tensor", f"{expected_tensor_name} = np.array")
test_file, test_class, test_fn = test_name.split("::")
test_fn = test_fn.split()[0]
with open(filename, "a") as f:
print(";".join([test_file, test_class, test_fn, output_str]), file=f)
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
        joined after the `tests` dir if the former is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
while not tests_dir.endswith("tests"):
tests_dir = os.path.dirname(tests_dir)
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_nightly_tests = parse_flag_from_env("RUN_NIGHTLY", default=False)
def floats_tensor(shape, scale=1.0, rng=None, name=None):
"""Creates a random float32 tensor"""
if rng is None:
rng = global_rng
total_dims = 1
for dim in shape:
total_dims *= dim
values = []
for _ in range(total_dims):
values.append(rng.random() * scale)
return torch.tensor(data=values, dtype=torch.float).view(shape).contiguous()
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
def nightly(test_case):
"""
Decorator marking a test that runs nightly in the diffusers CI.
Slow tests are skipped by default. Set the RUN_NIGHTLY environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_nightly_tests, "test is nightly")(test_case)
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch. These tests are skipped when PyTorch isn't installed.
"""
return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)
def require_torch_2(test_case):
"""
Decorator marking a test that requires PyTorch 2. These tests are skipped when it isn't installed.
"""
return unittest.skipUnless(is_torch_available() and is_torch_version(">=", "2.0.0"), "test requires PyTorch 2")(
test_case
)
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
return unittest.skipUnless(is_torch_available() and torch_device == "cuda", "test requires PyTorch+CUDA")(
test_case
)
def skip_mps(test_case):
"""Decorator marking a test to skip if torch_device is 'mps'"""
return unittest.skipUnless(torch_device != "mps", "test requires non 'mps' device")(test_case)
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed
"""
|
global_rng = random.Random()
logger = get_logger(__name__)
_required_peft_version = is_peft_available() and version.parse(
version.parse(importlib.metadata.version("peft")).base_version
) > version.parse("0.5")
_required_transformers_version = is_transformers_available() and version.parse(
version.parse(importlib.metadata.version("transformers")).base_version
) > version.parse("4.33")
USE_PEFT_BACKEND = _required_peft_version and _required_transformers_version
if is_torch_available():
if "DIFFUSERS_TEST_DEVICE" in os.environ:
torch_device = os.environ["DIFFUSERS_TEST_DEVICE"]
try:
# try creating device to see if provided device is valid
_ = torch.device(torch_device)
except RuntimeError as e:
raise RuntimeError(
f"Unknown testing device specified by environment variable `DIFFUSERS_TEST_DEVICE`: {torch_device}"
) from e
logger.info(f"torch_device overrode to {torch_device}")
else:
torch_device = "cuda" if torch.cuda.is_available() else "cpu"
is_torch_higher_equal_than_1_12 = version.parse(
version.parse(torch.__version__).base_version
) >= version.parse("1.12")
if is_torch_higher_equal_than_1_12:
# Some builds of torch 1.12 don't have the mps backend registered. See #892 for more details
mps_backend_registered = hasattr(torch.backends, "mps")
torch_device = "mps" if (mps_backend_registered and torch.backends.mps.is_available()) else torch_device
def torch_all_close(a, b, *args, **kwargs):
if not is_torch_available():
raise ValueError("PyTorch needs to be installed to use this function.")
if not torch.allclose(a, b, *args, **kwargs):
assert False, f"Max diff is absolute {(a - b).abs().max()}. Diff tensor is {(a - b).abs()}."
return True
def numpy_cosine_similarity_distance(a, b):
similarity = np.dot(a, b) / (norm(a) * norm(b))
distance = 1.0 - similarity.mean()
return distance
def print_tensor_test(tensor, filename="test_corrections.txt", expected_tensor_name="expected_slice"):
test_name = os.environ.get("PYTEST_CURRENT_TEST")
if not torch.is_tensor(tensor):
tensor = torch.from_numpy(tensor)
tensor_str = str(tensor.detach().cpu().flatten().to(torch.float32)).replace("\n", "")
# format is usually:
# expected_slice = np.array([-0.5713, -0.3018, -0.9814, 0.04663, -0.879, 0.76, -1.734, 0.1044, 1.161])
output_str = tensor_str.replace("tensor", f"{expected_tensor_name} = np.array")
test_file, test_class, test_fn = test_name.split("::")
test_fn = test_fn.split()[0]
with open(filename, "a") as f:
print(";".join([test_file, test_class, test_fn, output_str]), file=f)
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
        joined after the `tests` dir if the former is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
while not tests_dir.endswith("tests"):
tests_dir = os.path.dirname(tests_dir)
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_nightly_tests = parse_flag_from_env("RUN_NIGHTLY", default=False)
def floats_tensor(shape, scale=1.0, rng=None, name=None):
"""Creates a random float32 tensor"""
if rng is None:
rng = global_rng
total_dims = 1
for dim in shape:
total_dims *= dim
values = []
for _ in range(total_dims):
values.append(rng.random() * scale)
return torch.tensor(data=values, dtype=torch.float).view(shape).contiguous()
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
def nightly(test_case):
"""
Decorator marking a test that runs nightly in the diffusers CI.
Slow tests are skipped by default. Set the RUN_NIGHTLY environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_nightly_tests, "test is nightly")(test_case)
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch. These tests are skipped when PyTorch isn't installed.
"""
return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)
def require_torch_2(test_case):
"""
Decorator marking a test that requires PyTorch 2. These tests are skipped when it isn't installed.
"""
return unittest.skipUnless(is_torch_available() and is_torch_version(">=", "2.0.0"), "test requires PyTorch 2")(
test_case
)
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
return unittest.skipUnless(is_torch_available() and torch_device == "cuda", "test requires PyTorch+CUDA")(
test_case
)
def skip_mps(test_case):
"""Decorator marking a test to skip if torch_device is 'mps'"""
return unittest.skipUnless(torch_device != "mps", "test requires non 'mps' device")(test_case)
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed
""" | return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case) | 2 | 2023-11-18 01:40:55+00:00 | 4k |
basnijholt/unidep | unidep/_dependencies_parsing.py | [
{
"identifier": "Platform",
"path": "unidep/platform_definitions.py",
"snippet": "VALID_SELECTORS = get_args(Selector)\nPEP508_MARKERS = {\n \"linux-64\": \"sys_platform == 'linux' and platform_machine == 'x86_64'\",\n \"linux-aarch64\": \"sys_platform == 'linux' and platform_machine == 'aarch64'\",\n \"linux-ppc64le\": \"sys_platform == 'linux' and platform_machine == 'ppc64le'\",\n \"osx-64\": \"sys_platform == 'darwin' and platform_machine == 'x86_64'\",\n \"osx-arm64\": \"sys_platform == 'darwin' and platform_machine == 'arm64'\",\n \"win-64\": \"sys_platform == 'win32' and platform_machine == 'AMD64'\",\n (\"linux-64\", \"linux-aarch64\", \"linux-ppc64le\"): \"sys_platform == 'linux'\",\n (\"osx-64\", \"osx-arm64\"): \"sys_platform == 'darwin'\",\n (\n \"linux-64\",\n \"linux-aarch64\",\n \"linux-ppc64le\",\n \"osx-64\",\n \"osx-arm64\",\n ): \"sys_platform == 'linux' or sys_platform == 'darwin'\",\n}\nPLATFORM_SELECTOR_MAP: dict[Platform, list[Selector]] = {\n \"linux-64\": [\"linux64\", \"unix\", \"linux\"],\n \"linux-aarch64\": [\"aarch64\", \"unix\", \"linux\"],\n \"linux-ppc64le\": [\"ppc64le\", \"unix\", \"linux\"],\n # \"osx64\" is a selector unique to conda-build referring to\n # platforms on macOS and the Python architecture is x86-64\n \"osx-64\": [\"osx64\", \"osx\", \"macos\", \"unix\"],\n \"osx-arm64\": [\"arm64\", \"osx\", \"macos\", \"unix\"],\n \"win-64\": [\"win64\", \"win\"],\n}\nPLATFORM_SELECTOR_MAP_REVERSE: dict[Selector, set[Platform]] = {}\ndef validate_selector(selector: Selector) -> None:\ndef platforms_from_selector(selector: str) -> list[Platform]:\n def platforms(self) -> list[Platform] | None:\n def pprint(self) -> str:\n def name_with_pin(self, *, is_pip: bool = False) -> str:\nclass Spec(NamedTuple):"
},
{
"identifier": "dependencies_filename",
"path": "unidep/utils.py",
"snippet": "def dependencies_filename(folder_or_path: str | Path) -> Path:\n \"\"\"Get the path to `requirements.yaml` or `pyproject.toml` file.\"\"\"\n path = Path(folder_or_path)\n if path.is_dir():\n fname_yaml = path / \"requirements.yaml\"\n if fname_yaml.exists():\n return fname_yaml\n fname_toml = path / \"pyproject.toml\"\n if fname_toml.exists() and unidep_configured_in_toml(fname_toml):\n return fname_toml\n msg = (\n f\"File `{fname_yaml}` or `{fname_toml}` (with unidep configuration)\"\n f\" not found in `{folder_or_path}`.\"\n )\n raise FileNotFoundError(msg)\n if not path.exists():\n msg = f\"File `{path}` not found.\"\n raise FileNotFoundError(msg)\n return path"
},
{
"identifier": "is_pip_installable",
"path": "unidep/utils.py",
"snippet": "def is_pip_installable(folder: str | Path) -> bool: # pragma: no cover\n \"\"\"Determine if the project is pip installable.\n\n Checks for existence of setup.py or [build-system] in pyproject.toml.\n \"\"\"\n path = Path(folder)\n if (path / \"setup.py\").exists():\n return True\n\n # When toml makes it into the standard library, we can use that instead\n # For now this is good enough, except it doesn't handle the case where\n # [build-system] is inside of a multi-line literal string.\n pyproject_path = path / \"pyproject.toml\"\n if pyproject_path.exists():\n with pyproject_path.open(\"r\") as file:\n for line in file:\n if line.strip().startswith(\"[build-system]\"):\n return True\n return False"
},
{
"identifier": "parse_package_str",
"path": "unidep/utils.py",
"snippet": "def parse_package_str(package_str: str) -> ParsedPackageStr:\n \"\"\"Splits a string into package name, version pinning, and platform selector.\"\"\"\n # Regex to match package name, version pinning, and optionally platform selector\n name_pattern = r\"[a-zA-Z0-9_-]+\"\n version_pin_pattern = r\".*?\"\n selector_pattern = r\"[a-z0-9\\s]+\"\n pattern = rf\"({name_pattern})\\s*({version_pin_pattern})?(:({selector_pattern}))?$\"\n match = re.match(pattern, package_str)\n\n if match:\n package_name = match.group(1).strip()\n version_pin = match.group(2).strip() if match.group(2) else None\n selector = match.group(4).strip() if match.group(4) else None\n\n if selector is not None:\n for s in selector.split():\n validate_selector(cast(Selector, s))\n\n return ParsedPackageStr(\n package_name,\n version_pin,\n selector,\n )\n\n msg = f\"Invalid package string: '{package_str}'\"\n raise ValueError(msg)"
},
{
"identifier": "selector_from_comment",
"path": "unidep/utils.py",
"snippet": "def selector_from_comment(comment: str) -> str | None:\n \"\"\"Extract a valid selector from a comment.\"\"\"\n multiple_brackets_pat = re.compile(r\"#.*\\].*\\[\") # Detects multiple brackets\n if multiple_brackets_pat.search(comment):\n msg = f\"Multiple bracketed selectors found in comment: '{comment}'\"\n raise ValueError(msg)\n\n sel_pat = re.compile(r\"#\\s*\\[([^\\[\\]]+)\\]\")\n m = sel_pat.search(comment)\n if not m:\n return None\n selectors = m.group(1).strip().split()\n for s in selectors:\n validate_selector(cast(Selector, s))\n return \" \".join(selectors)"
},
{
"identifier": "unidep_configured_in_toml",
"path": "unidep/utils.py",
"snippet": "def unidep_configured_in_toml(path: Path) -> bool:\n \"\"\"Check if dependencies are specified in pyproject.toml.\n\n If a TOML parser is not available it finds `[tool.unidep]` in `pyproject.toml`.\n \"\"\"\n if HAS_TOML:\n with path.open(\"rb\") as f:\n data = tomllib.load(f)\n return bool(data.get(\"tool\", {}).get(\"unidep\", {}))\n # TODO[Bas]: will fail if defining dict in # noqa: TD004, TD003, FIX002\n # pyproject.toml directly e.g., it contains:\n # `tool = {unidep = {dependencies = ...}}`\n return any( # pragma: no cover\n line.lstrip().startswith(\"[tool.unidep\")\n for line in path.read_text().splitlines()\n )"
},
{
"identifier": "warn",
"path": "unidep/utils.py",
"snippet": "def warn(\n message: str | Warning,\n category: type[Warning] = UserWarning,\n stacklevel: int = 1,\n) -> None:\n \"\"\"Emit a warning with a custom format specific to this package.\"\"\"\n original_format = warnings.formatwarning\n warnings.formatwarning = _simple_warning_format\n try:\n warnings.warn(message, category, stacklevel=stacklevel + 1)\n finally:\n warnings.formatwarning = original_format"
}
] | import hashlib
import os
import sys
import tomllib
import tomli as tomllib
import tomli_w
from collections import defaultdict
from pathlib import Path
from typing import TYPE_CHECKING, Any, NamedTuple
from ruamel.yaml import YAML
from ruamel.yaml.comments import CommentedMap, CommentedSeq
from unidep.platform_definitions import Platform, Spec, platforms_from_selector
from unidep.utils import (
dependencies_filename,
is_pip_installable,
parse_package_str,
selector_from_comment,
unidep_configured_in_toml,
warn,
)
from typing import Literal
from typing_extensions import Literal | 3,492 |
def _parse_dependency(
dependency: str,
dependencies: CommentedMap,
index_or_key: int | str,
which: Literal["conda", "pip", "both"],
identifier: int,
ignore_pins: list[str],
overwrite_pins: dict[str, str | None],
skip_dependencies: list[str],
) -> list[Spec]:
name, pin, selector = parse_package_str(dependency)
if name in ignore_pins:
pin = None
if name in skip_dependencies:
return []
if name in overwrite_pins:
pin = overwrite_pins[name]
comment = (
_extract_first_comment(dependencies, index_or_key)
if isinstance(dependencies, (CommentedMap, CommentedSeq))
else None
)
if comment and selector is None:
selector = selector_from_comment(comment)
identifier_hash = _identifier(identifier, selector)
if which == "both":
return [
Spec(name, "conda", pin, identifier_hash, selector),
Spec(name, "pip", pin, identifier_hash, selector),
]
return [Spec(name, which, pin, identifier_hash, selector)]
class ParsedRequirements(NamedTuple):
"""Requirements with comments."""
channels: list[str]
platforms: list[Platform]
requirements: dict[str, list[Spec]]
class Requirements(NamedTuple):
"""Requirements as CommentedSeq."""
# mypy doesn't support CommentedSeq[str], so we use list[str] instead.
channels: list[str] # actually a CommentedSeq[str]
conda: list[str] # actually a CommentedSeq[str]
pip: list[str] # actually a CommentedSeq[str]
def _parse_overwrite_pins(overwrite_pins: list[str]) -> dict[str, str | None]:
"""Parse overwrite pins."""
result = {}
for overwrite_pin in overwrite_pins:
pkg = parse_package_str(overwrite_pin)
result[pkg.name] = pkg.pin
return result
def _load(p: Path, yaml: YAML) -> dict[str, Any]:
if p.suffix == ".toml":
if not HAS_TOML: # pragma: no cover
msg = (
"❌ No toml support found in your Python installation."
" If you are using unidep from `pyproject.toml` and this"
" error occurs during installation, make sure you add"
'\n\n[build-system]\nrequires = [..., "unidep[toml]"]\n\n'
" Otherwise, please install it with `pip install tomli`."
)
raise ImportError(msg)
with p.open("rb") as f:
return tomllib.load(f)["tool"]["unidep"]
with p.open() as f:
return yaml.load(f)
def _get_local_dependencies(data: dict[str, Any]) -> list[str]:
"""Get `local_dependencies` from a `requirements.yaml` or `pyproject.toml` file."""
if "local_dependencies" in data:
return data["local_dependencies"]
if "includes" in data:
warn(
"⚠️ You are using `includes` in `requirements.yaml` or `pyproject.toml`"
" `[unidep.tool]` which is deprecated since 0.42.0 and has been renamed to"
" `local_dependencies`.",
category=DeprecationWarning,
stacklevel=2,
)
return data["includes"]
return []
def parse_requirements( # noqa: PLR0912
*paths: Path,
ignore_pins: list[str] | None = None,
overwrite_pins: list[str] | None = None,
skip_dependencies: list[str] | None = None,
verbose: bool = False,
) -> ParsedRequirements:
"""Parse a list of `requirements.yaml` or `pyproject.toml` files."""
ignore_pins = ignore_pins or []
skip_dependencies = skip_dependencies or []
overwrite_pins_map = _parse_overwrite_pins(overwrite_pins or [])
requirements: dict[str, list[Spec]] = defaultdict(list)
channels: set[str] = set()
platforms: set[Platform] = set()
datas = []
seen: set[Path] = set()
yaml = YAML(typ="rt")
for p in paths:
if verbose:
print(f"📄 Parsing `{p}`")
data = _load(p, yaml)
datas.append(data)
seen.add(p.resolve())
# Handle "local_dependencies" (or old name "includes", changed in 0.42.0)
for include in _get_local_dependencies(data):
try:
| """unidep - Unified Conda and Pip requirements management.
This module provides parsing of `requirements.yaml` and `pyproject.toml` files.
"""
from __future__ import annotations
if TYPE_CHECKING:
if sys.version_info >= (3, 8):
else: # pragma: no cover
try: # pragma: no cover
if sys.version_info >= (3, 11):
else:
HAS_TOML = True
except ImportError: # pragma: no cover
HAS_TOML = False
def find_requirements_files(
base_dir: str | Path = ".",
depth: int = 1,
*,
verbose: bool = False,
) -> list[Path]:
"""Scan a directory for `requirements.yaml` and `pyproject.toml` files."""
base_path = Path(base_dir)
found_files = []
# Define a helper function to recursively scan directories
def _scan_dir(path: Path, current_depth: int) -> None:
if verbose:
print(f"🔍 Scanning in `{path}` at depth {current_depth}")
if current_depth > depth:
return
for child in path.iterdir():
if child.is_dir():
_scan_dir(child, current_depth + 1)
elif child.name == "requirements.yaml":
found_files.append(child)
if verbose:
print(f'🔍 Found `"requirements.yaml"` at `{child}`')
elif child.name == "pyproject.toml" and unidep_configured_in_toml(child):
if verbose:
print(f'🔍 Found `"pyproject.toml"` with dependencies at `{child}`')
found_files.append(child)
_scan_dir(base_path, 0)
return sorted(found_files)
def _extract_first_comment(
commented_map: CommentedMap,
index_or_key: int | str,
) -> str | None:
"""Extract the first comment from a CommentedMap."""
comments = commented_map.ca.items.get(index_or_key, None)
if comments is None:
return None
comment_strings = next(
c.value.split("\n")[0].rstrip().lstrip() for c in comments if c is not None
)
if not comment_strings:
# empty string
return None
return "".join(comment_strings)
def _identifier(identifier: int, selector: str | None) -> str:
"""Return a unique identifier based on the comment."""
platforms = None if selector is None else tuple(platforms_from_selector(selector))
data_str = f"{identifier}-{platforms}"
# Hash using SHA256 and take the first 8 characters for a shorter hash
return hashlib.sha256(data_str.encode()).hexdigest()[:8]
def _parse_dependency(
dependency: str,
dependencies: CommentedMap,
index_or_key: int | str,
which: Literal["conda", "pip", "both"],
identifier: int,
ignore_pins: list[str],
overwrite_pins: dict[str, str | None],
skip_dependencies: list[str],
) -> list[Spec]:
name, pin, selector = parse_package_str(dependency)
if name in ignore_pins:
pin = None
if name in skip_dependencies:
return []
if name in overwrite_pins:
pin = overwrite_pins[name]
comment = (
_extract_first_comment(dependencies, index_or_key)
if isinstance(dependencies, (CommentedMap, CommentedSeq))
else None
)
if comment and selector is None:
selector = selector_from_comment(comment)
identifier_hash = _identifier(identifier, selector)
if which == "both":
return [
Spec(name, "conda", pin, identifier_hash, selector),
Spec(name, "pip", pin, identifier_hash, selector),
]
return [Spec(name, which, pin, identifier_hash, selector)]
class ParsedRequirements(NamedTuple):
"""Requirements with comments."""
channels: list[str]
platforms: list[Platform]
requirements: dict[str, list[Spec]]
class Requirements(NamedTuple):
"""Requirements as CommentedSeq."""
# mypy doesn't support CommentedSeq[str], so we use list[str] instead.
channels: list[str] # actually a CommentedSeq[str]
conda: list[str] # actually a CommentedSeq[str]
pip: list[str] # actually a CommentedSeq[str]
def _parse_overwrite_pins(overwrite_pins: list[str]) -> dict[str, str | None]:
"""Parse overwrite pins."""
result = {}
for overwrite_pin in overwrite_pins:
pkg = parse_package_str(overwrite_pin)
result[pkg.name] = pkg.pin
return result
def _load(p: Path, yaml: YAML) -> dict[str, Any]:
if p.suffix == ".toml":
if not HAS_TOML: # pragma: no cover
msg = (
"❌ No toml support found in your Python installation."
" If you are using unidep from `pyproject.toml` and this"
" error occurs during installation, make sure you add"
'\n\n[build-system]\nrequires = [..., "unidep[toml]"]\n\n'
" Otherwise, please install it with `pip install tomli`."
)
raise ImportError(msg)
with p.open("rb") as f:
return tomllib.load(f)["tool"]["unidep"]
with p.open() as f:
return yaml.load(f)
def _get_local_dependencies(data: dict[str, Any]) -> list[str]:
"""Get `local_dependencies` from a `requirements.yaml` or `pyproject.toml` file."""
if "local_dependencies" in data:
return data["local_dependencies"]
if "includes" in data:
warn(
"⚠️ You are using `includes` in `requirements.yaml` or `pyproject.toml`"
" `[unidep.tool]` which is deprecated since 0.42.0 and has been renamed to"
" `local_dependencies`.",
category=DeprecationWarning,
stacklevel=2,
)
return data["includes"]
return []
def parse_requirements( # noqa: PLR0912
*paths: Path,
ignore_pins: list[str] | None = None,
overwrite_pins: list[str] | None = None,
skip_dependencies: list[str] | None = None,
verbose: bool = False,
) -> ParsedRequirements:
"""Parse a list of `requirements.yaml` or `pyproject.toml` files."""
ignore_pins = ignore_pins or []
skip_dependencies = skip_dependencies or []
overwrite_pins_map = _parse_overwrite_pins(overwrite_pins or [])
requirements: dict[str, list[Spec]] = defaultdict(list)
channels: set[str] = set()
platforms: set[Platform] = set()
datas = []
seen: set[Path] = set()
yaml = YAML(typ="rt")
for p in paths:
if verbose:
print(f"📄 Parsing `{p}`")
data = _load(p, yaml)
datas.append(data)
seen.add(p.resolve())
# Handle "local_dependencies" (or old name "includes", changed in 0.42.0)
for include in _get_local_dependencies(data):
try: | requirements_path = dependencies_filename(p.parent / include).resolve() | 1 | 2023-11-16 04:23:01+00:00 | 4k |
BAAI-DCAI/SegVol | network/model.py | [
{
"identifier": "select_points",
"path": "utils/monai_inferers_utils.py",
"snippet": "def select_points(preds, num_positive_extra=4, num_negative_extra=0, fix_extra_point_num=None):\n spacial_dim = 3\n points = torch.zeros((0, 3))\n labels = torch.zeros((0))\n pos_thred = 0.9\n neg_thred = 0.1\n \n # get pos/net indices\n positive_indices = torch.nonzero(preds > pos_thred, as_tuple=True) # ([pos x], [pos y], [pos z])\n negative_indices = torch.nonzero(preds < neg_thred, as_tuple=True)\n\n ones_idx = (preds > pos_thred).nonzero(as_tuple=True)\n if all(tmp.nelement() == 0 for tmp in ones_idx):\n # all neg\n num_positive_extra = 0\n selected_positive_point = torch.tensor([-1,-1,-1]).unsqueeze(dim=0)\n points = torch.cat((points, selected_positive_point), dim=0)\n labels = torch.cat((labels, torch.tensor([-1]).reshape(1)))\n else:\n # random select a pos point\n random_idx = torch.randint(len(positive_indices[0]), (1,))\n selected_positive_point = torch.tensor([positive_indices[i][random_idx] for i in range(spacial_dim)]).unsqueeze(dim=0)\n points = torch.cat((points, selected_positive_point), dim=0)\n labels = torch.cat((labels, torch.ones((1))))\n\n if num_positive_extra > 0:\n pos_idx_list = torch.randperm(len(positive_indices[0]))[:num_positive_extra]\n extra_positive_points = []\n for pos_idx in pos_idx_list:\n extra_positive_points.append([positive_indices[i][pos_idx] for i in range(spacial_dim)])\n extra_positive_points = torch.tensor(extra_positive_points).reshape(-1, 3)\n points = torch.cat((points, extra_positive_points), dim=0)\n labels = torch.cat((labels, torch.ones((extra_positive_points.shape[0]))))\n\n if num_negative_extra > 0:\n neg_idx_list = torch.randperm(len(negative_indices[0]))[:num_negative_extra]\n extra_negative_points = []\n for neg_idx in neg_idx_list:\n extra_negative_points.append([negative_indices[i][neg_idx] for i in range(spacial_dim)])\n extra_negative_points = torch.tensor(extra_negative_points).reshape(-1, 3)\n points = torch.cat((points, extra_negative_points), dim=0)\n labels = torch.cat((labels, torch.zeros((extra_negative_points.shape[0]))))\n # print('extra_negative_points ', extra_negative_points, extra_negative_points.shape)\n # print('==> points ', points.shape, labels)\n \n if fix_extra_point_num is None:\n left_point_num = num_positive_extra + num_negative_extra + 1 - labels.shape[0]\n else:\n left_point_num = fix_extra_point_num + 1 - labels.shape[0]\n\n for _ in range(left_point_num):\n ignore_point = torch.tensor([-1,-1,-1]).unsqueeze(dim=0)\n points = torch.cat((points, ignore_point), dim=0)\n labels = torch.cat((labels, torch.tensor([-1]).reshape(1)))\n\n return (points, labels)"
},
{
"identifier": "generate_box",
"path": "utils/monai_inferers_utils.py",
"snippet": "def generate_box(pred_pre, bbox_shift=None):\n meaning_post_label = pred_pre # [h, w, d]\n ones_idx = (meaning_post_label > 0).nonzero(as_tuple=True)\n if all(tensor.nelement() == 0 for tensor in ones_idx):\n bboxes = torch.tensor([-1,-1,-1,-1,-1,-1])\n # print(bboxes, bboxes.shape)\n return bboxes\n min_coords = [dim.min() for dim in ones_idx] # [x_min, y_min, z_min]\n max_coords = [dim.max() for dim in ones_idx] # [x_max, y_max, z_max]\n\n\n if bbox_shift is None:\n corner_min = []\n corner_max = []\n shape = meaning_post_label.shape\n for coor in min_coords:\n coor_ = max(0, coor)\n corner_min.append(coor_)\n for idx, coor in enumerate(max_coords):\n coor_ = min(shape[idx], coor)\n corner_max.append(coor_)\n corner_min = torch.tensor(corner_min)\n corner_max = torch.tensor(corner_max)\n return torch.cat((corner_min, corner_max), dim=0)\n else:\n # add perturbation to bounding box coordinates\n corner_min = []\n corner_max = []\n shape = meaning_post_label.shape\n for coor in min_coords:\n coor_ = max(0, coor + random.randint(-bbox_shift, bbox_shift))\n corner_min.append(coor_)\n for idx, coor in enumerate(max_coords):\n coor_ = min(shape[idx], coor + random.randint(-bbox_shift, bbox_shift))\n corner_max.append(coor_)\n corner_min = torch.tensor(corner_min)\n corner_max = torch.tensor(corner_max)\n return torch.cat((corner_min, corner_max), dim=0)"
},
{
"identifier": "BCELoss",
"path": "utils/loss.py",
"snippet": "class BCELoss(nn.Module):\r\n def __init__(self):\r\n super(BCELoss, self).__init__()\r\n self.criterion = nn.BCEWithLogitsLoss()\r\n\r\n def forward(self, predict, target):\r\n assert predict.shape == target.shape, 'predict & target shape do not match\\n' + str(predict.shape) + '\\n' + str(target.shape)\r\n target_ = target.clone()\r\n target_[target == -1] = 0\r\n\r\n ce_loss = self.criterion(predict, target_)\r\n\r\n return ce_loss\r"
},
{
"identifier": "BinaryDiceLoss",
"path": "utils/loss.py",
"snippet": "class BinaryDiceLoss(nn.Module):\r\n def __init__(self, smooth=1, p=2, reduction='mean'):\r\n super(BinaryDiceLoss, self).__init__()\r\n self.smooth = smooth\r\n self.p = p\r\n self.reduction = reduction\r\n\r\n def forward(self, predict, target):\r\n predict = torch.sigmoid(predict)\r\n target_ = target.clone()\r\n target_[target == -1] = 0\r\n assert predict.shape[0] == target.shape[0], \"predict & target batch size don't match\\n\" + str(predict.shape) + '\\n' + str(target.shape[0])\r\n predict = predict.contiguous().view(predict.shape[0], -1)\r\n target_ = target_.contiguous().view(target_.shape[0], -1)\r\n\r\n num = torch.sum(torch.mul(predict, target_), dim=1)\r\n den = torch.sum(predict, dim=1) + torch.sum(target_, dim=1) + self.smooth\r\n\r\n dice_score = 2*num / den\r\n dice_loss = 1 - dice_score\r\n\r\n # dice_loss_avg = dice_loss[target[:,0]!=-1].sum() / dice_loss[target[:,0]!=-1].shape[0]\r\n dice_loss_avg = dice_loss.sum() / dice_loss.shape[0]\r\n\r\n return dice_loss_avg\r"
}
] | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import random
from transformers import AutoTokenizer, CLIPTextModel, CLIPTextConfig
from utils.monai_inferers_utils import select_points, generate_box
from utils.loss import BCELoss, BinaryDiceLoss
from torch.cuda.amp import autocast | 1,893 |
#%% set up model
class SegVol(nn.Module):
def __init__(self,
image_encoder,
mask_decoder,
prompt_encoder,
clip_ckpt,
roi_size,
patch_size,
test_mode=False,
):
super().__init__()
self.image_encoder = image_encoder
self.mask_decoder = mask_decoder
self.prompt_encoder = prompt_encoder
self.text_encoder = TextEncoder(clip_ckpt)
self.feat_shape = np.array(roi_size)/np.array(patch_size)
self.test_mode = test_mode
self.dice_loss = BinaryDiceLoss().cuda()
|
#%% set up model
class SegVol(nn.Module):
def __init__(self,
image_encoder,
mask_decoder,
prompt_encoder,
clip_ckpt,
roi_size,
patch_size,
test_mode=False,
):
super().__init__()
self.image_encoder = image_encoder
self.mask_decoder = mask_decoder
self.prompt_encoder = prompt_encoder
self.text_encoder = TextEncoder(clip_ckpt)
self.feat_shape = np.array(roi_size)/np.array(patch_size)
self.test_mode = test_mode
self.dice_loss = BinaryDiceLoss().cuda() | self.bce_loss = BCELoss().cuda() | 2 | 2023-11-10 08:25:37+00:00 | 4k |
xk-huang/segment-caption-anything | scripts/tools/build_annotation_db.py | [
{
"identifier": "Arguments",
"path": "src/arguments.py",
"snippet": "class Arguments:\n defaults: List[Any] = field(default_factory=lambda: defaults)\n\n training: SCASeq2SeqTrainingArguments = field(default_factory=lambda: SCASeq2SeqTrainingArguments(output_dir=\"?\"))\n\n # NOTE(xiaoke): to only maintain one sort of data config, we use soft links to link the data config to the train/eval config separately.\n # NOTE(xiaoke): Should be Union[List[DataArguments], DataArguments], while OmegaConf doesn't support Union. So use str to compose the configs dynamically.\n # NOTE(xiaoke): So we cannot override the args in the config file, since it will be converted to str.\n train_data: List[str] = field(default_factory=list)\n train_data_interleave_probabilities: Optional[List[float]] = field(default=None)\n train_data_overrides: List[str] = field(\n default_factory=list,\n metadata={\"help\": \"overrides for train data. \\\"train_data_overrides='[data.with_image\\=False]'\\\"\"},\n )\n\n eval_data: List[str] = field(default_factory=list)\n eval_data_overrides: List[str] = field(\n default_factory=list,\n metadata={\"help\": \"overrides for eval data. \\\"eval_data_overrides='[data.with_image\\=False]'\\\"\"},\n )\n\n model: ModelArguments = field(default_factory=ModelArguments)\n wandb: WandbArguments = field(default_factory=WandbArguments)\n\n data_transforms: Optional[DataTransformsArguments] = field(default=None)"
},
{
"identifier": "global_setup",
"path": "src/arguments.py",
"snippet": "def global_setup(\n args: DictConfig,\n) -> Tuple[Arguments, SCASeq2SeqTrainingArguments, ModelArguments]:\n \"\"\"Global setup of arguments.\"\"\"\n if args.training.output_log_dir is not None:\n output_log_dir = args.training.output_log_dir\n if not osp.exists(output_log_dir):\n os.makedirs(output_log_dir)\n # NOTE: this is a dirty hack to enable logging to a different directory\n # by default in Hydra, logging.root.handlers contains two handler: stream & file\n # NOTE: mainly used in amulet\n for handler in logging.root.handlers:\n if isinstance(handler, logging.FileHandler):\n file_path = handler.baseFilename\n file_name = osp.basename(file_path)\n external_file_path = osp.join(output_log_dir, file_name)\n logging.root.addHandler(logging.FileHandler(external_file_path))\n logger.info(f\"Add external file handler to {external_file_path}\")\n break\n\n hostname = socket.gethostname()\n logger.info(f\"Running on {hostname}\")\n\n # Convert args to the actual dataclass object, to enable methods. Need to\n # delete _n_gpu, a property that TrainingArgs init doesn't expect.\n del args.training._n_gpu\n # Dirty hack: only run post init when we're ready to convert to TrainingArgs\n args.training._run_post_init = True\n # NOTE: otherwise, do_eval will be set to True in TrainingArguments.__post_init__\n if args.training.do_eval == False and args.training.do_train == False:\n args.training.evaluation_strategy = \"no\"\n args.training.load_best_model_at_end = False\n\n training_args = OmegaConf.to_object(args.training)\n model_args = OmegaConf.to_object(args.model)\n\n if (\n isinstance(model_args, (SCAModelArguments, SCADirectDecodingModelArguments))\n and args.model.model_name_or_path is None\n ):\n # NOTE: we need to set the default value of `model_name_or_path` to None\n # otherwise, it will be set to `base_sca` by default\n raise ValueError(f\"{type(model_args)} is not supported in model cfg name.\")\n\n log_level = training_args.get_process_log_level()\n logger.setLevel(log_level)\n datasets.utils.logging.set_verbosity(log_level)\n transformers.utils.logging.set_verbosity(log_level)\n transformers.utils.logging.enable_default_handler()\n transformers.utils.logging.enable_explicit_format()\n\n # Log on each process the small summary:\n logger.warning(\n f\"Process rank: {training_args.local_rank}, device: {training_args.device},\"\n f\" log_level: {log_level} n_gpu: {training_args.n_gpu}\"\n f\" distributed training: {bool(training_args.local_rank != -1)}, 16-bits\"\n f\" training: {training_args.fp16}, bf16 training: {training_args.bf16}\"\n )\n logger.debug(f\"Training/evaluation parameters {training_args}\")\n\n return args, training_args, model_args"
},
{
"identifier": "prepare_datasets",
"path": "src/train.py",
"snippet": "def prepare_datasets(args):\n train_data = []\n for train_data_config_name in args.train_data:\n cfg = hydra.compose(config_name=f\"data/{train_data_config_name}\", overrides=args.train_data_overrides)\n train_data.append(cfg.data)\n args.train_data = train_data\n\n # NOTE(xiaoke): We should only inference one eval dataset\n if len(args.eval_data) > 1:\n logger.warning(f\"We should only inference one dataset, got {args.eval_data}\")\n eval_data = []\n for eval_data_config_name in args.eval_data:\n cfg = hydra.compose(config_name=f\"data/{eval_data_config_name}\", overrides=args.eval_data_overrides)\n eval_data.append(cfg.data)\n\n train_dataset = []\n for i, each_train_data in enumerate(train_data):\n # NOTE: add data `split` to each dataset\n each_train_data.split = \"train\"\n\n _train_dataset = instantiate(each_train_data)\n train_dataset.append(_train_dataset)\n logger.info(f\"Train Dataset [{i}]: {each_train_data}\\n{_train_dataset}\")\n\n eval_dataset = {}\n for i, each_eval_data in enumerate(eval_data):\n # NOTE: add data `split` to each dataset\n # NOTE: visual genome has validation set, but we use test set for evaluation\n if \"visual_genome.py\" in each_eval_data.path and getattr(each_eval_data, \"use_densecap_splits\", None) is True:\n logger.info(\"Using densecap splits in Visual Genome, using test split to eval\")\n each_eval_data.split = \"test\"\n\n # NOTE: refcoco has validation set, but we use test set for evaluation\n elif \"refcoco.py\" in each_eval_data.path:\n if each_eval_data.name.startswith(\"refcoco-\") or each_eval_data.name.startswith(\"refcoco+-\"):\n if each_eval_data.split is None or each_eval_data.split == \"train\":\n raise ValueError(f\"refcoco{{,+}} must have split for eval. got {each_eval_data.split}\")\n logger.info(f\"Using refcoco{{,+}}: {each_eval_data.split} split to eval\")\n elif each_eval_data.name.startswith(\"refcocog\"):\n logger.info(\"Using refcocog val split to eval\")\n each_eval_data.split = \"validation\"\n elif each_eval_data.name.startswith(\"refclef\"):\n logger.info(\"Using refclef val split to eval\")\n each_eval_data.split = \"validation\"\n\n # NOTE: coco has validation set, but it does not have test set.\n elif \"coco_instance.py\" in each_eval_data.path or \"coco_instance-local.py\" in each_eval_data.path:\n logger.info(\"Using coco val split to eval\")\n each_eval_data.split = \"validation\"\n\n elif \"objects365-local.py\" in each_eval_data.path:\n logger.info(\"Using objects365 (in fact, it is COCO) val split to eval\")\n each_eval_data.split = \"validation\"\n\n elif \"v3det-local.py\" in each_eval_data.path:\n logger.info(\"Using v3det (in fact, it is COCO) val split to eval\")\n each_eval_data.split = \"validation\"\n\n elif \"sbu-pseudo_region-local.py\" in each_eval_data.path or \"sbu-pseudo_region.py\" in each_eval_data.path:\n logger.info(\"Using sbu to eval, but it does not have test split, so we use train split\")\n each_eval_data.split = \"train\"\n\n elif \"coco_caption-pseudo_region.py\" in each_eval_data.path:\n logger.info(\"Using coco_caption (in fact, it is COCO) val split to eval\")\n each_eval_data.split = \"validation\"\n\n elif (\n \"visual_genome-densecap-local.py\" in each_eval_data.path\n or \"visual_genome-grit-local.py\" in each_eval_data.path\n ):\n logger.info(f\"Using visual_genome (They are my custom splits for GRiT and Densecap) test split to eval\")\n each_eval_data.split = \"test\"\n else:\n raise ValueError(\n f\"Unknown dataset {each_eval_data.path}, we cannot determine the split 
for it. Please edit `src/train.py:prepare_datasets` to add the split for it.\"\n )\n\n _eval_dataset = instantiate(each_eval_data)\n eval_dataset_name = _get_data_name(each_eval_data)\n eval_dataset[eval_dataset_name] = _eval_dataset\n logger.info(f\"Eval Dataset [{i}]: {each_eval_data}\\n{_eval_dataset}\")\n args.eval_data = eval_data # NOTE: overwrite previous eval_data\n\n if args.train_data_interleave_probabilities is not None and len(train_dataset) != len(\n args.train_data_interleave_probabilities\n ):\n raise ValueError(\n f\"train_data_interleave_probabilities must have the same length as train_data, got {len(train_dataset)} and {len(args.train_data_interleave_probabilities)}\"\n )\n # NOTE(xiaoke): Expected a list of Dataset objects or a list of IterableDataset objects.\n if len(train_dataset) > 0:\n if args.train_data_interleave_probabilities is None:\n logger.warning(\n \"train_data_interleave_probabilities is not provided, \"\n \"the resulting dataset will have max_length_datasets*nb_dataset samples. \"\n \"As we use `all_exhausted` stopping strategy which is a oversampling strategy.\"\n )\n else:\n if sum(args.train_data_interleave_probabilities) != 1.0:\n logger.info(f\"Normalize train_data_interleave_probabilities to sum to 1.0\")\n args.train_data_interleave_probabilities = [\n each_prob / sum(args.train_data_interleave_probabilities)\n for each_prob in args.train_data_interleave_probabilities\n ]\n logger.info(f\"train_data_interleave_probabilities: {args.train_data_interleave_probabilities}\")\n # NOTE(xiaoke): Accourding to `datasets/src/datasets/arrow_dataset.py:_interleave_map_style_datasets:6079` and\n # `Breadcrumbsdatasets/src/datasets/iterable_dataset.py:_interleave_iterable_datasets:2293`\n train_dataset = interleave_datasets(\n train_dataset,\n probabilities=args.train_data_interleave_probabilities,\n seed=args.training.seed,\n stopping_strategy=\"all_exhausted\",\n )\n else:\n train_dataset = None\n\n logger.info(f\"Train Dataset: {train_dataset}\")\n logger.info(f\"Eval Dataset: {eval_dataset}\")\n return train_dataset, eval_dataset"
}
] | import sys
import base64
import io
import json
import logging
import os
import os.path as osp
import datasets
import hydra
import numpy as np
import tqdm
import pycocotools.mask
import logging
import torch
import sqlite3
import json
from hydra.core.hydra_config import HydraConfig
from hydra.core.utils import configure_log
from omegaconf import DictConfig, OmegaConf
from PIL import Image
from utils.git_utils import TSVWriter
from src.arguments import Arguments, global_setup
from hydra.utils import instantiate
from transformers import set_seed, AutoTokenizer
from datasets import interleave_datasets, concatenate_datasets
from src.train import prepare_datasets
from torch.utils.data import IterableDataset, DataLoader
from itertools import islice | 2,852 | # TODO: extract images from refcoco series
sys.path.append(".")
logger = logging.getLogger(__name__)
@hydra.main(version_base="1.3", config_path="../../src/conf", config_name="conf")
def main(args: Arguments):
logger.warning(f"Turn no_cuda = True.")
args.training.no_cuda = True
# NOTE: ddp is initialized in _setup_devices class in `transformers/training_args.py`
| # TODO: extract images from refcoco series
sys.path.append(".")
logger = logging.getLogger(__name__)
@hydra.main(version_base="1.3", config_path="../../src/conf", config_name="conf")
def main(args: Arguments):
logger.warning(f"Turn no_cuda = True.")
args.training.no_cuda = True
# NOTE: ddp is initialized in _setup_devices class in `transformers/training_args.py` | args, training_args, _ = global_setup(args) | 1 | 2023-11-17 14:10:41+00:00 | 4k |
theroyallab/tabbyAPI | OAI/utils_oai.py | [
{
"identifier": "ChatCompletionMessage",
"path": "OAI/types/chat_completion.py",
"snippet": "class ChatCompletionMessage(BaseModel):\n role: Optional[str] = None\n content: Optional[str] = None"
},
{
"identifier": "ChatCompletionRespChoice",
"path": "OAI/types/chat_completion.py",
"snippet": "class ChatCompletionRespChoice(BaseModel):\n # Index is 0 since we aren't using multiple choices\n index: int = 0\n finish_reason: str\n message: ChatCompletionMessage"
},
{
"identifier": "ChatCompletionStreamChunk",
"path": "OAI/types/chat_completion.py",
"snippet": "class ChatCompletionStreamChunk(BaseModel):\n id: str = Field(default_factory=lambda: f\"chatcmpl-{uuid4().hex}\")\n choices: List[ChatCompletionStreamChoice]\n created: int = Field(default_factory=lambda: int(time()))\n model: str\n object: str = \"chat.completion.chunk\""
},
{
"identifier": "ChatCompletionResponse",
"path": "OAI/types/chat_completion.py",
"snippet": "class ChatCompletionResponse(BaseModel):\n id: str = Field(default_factory=lambda: f\"chatcmpl-{uuid4().hex}\")\n choices: List[ChatCompletionRespChoice]\n created: int = Field(default_factory=lambda: int(time()))\n model: str\n object: str = \"chat.completion\"\n usage: Optional[UsageStats] = None"
},
{
"identifier": "ChatCompletionStreamChoice",
"path": "OAI/types/chat_completion.py",
"snippet": "class ChatCompletionStreamChoice(BaseModel):\n # Index is 0 since we aren't using multiple choices\n index: int = 0\n finish_reason: Optional[str]\n delta: Union[ChatCompletionMessage, dict] = {}"
},
{
"identifier": "CompletionResponse",
"path": "OAI/types/completion.py",
"snippet": "class CompletionResponse(BaseModel):\n \"\"\"Represents a completion response.\"\"\"\n\n id: str = Field(default_factory=lambda: f\"cmpl-{uuid4().hex}\")\n choices: List[CompletionRespChoice]\n created: int = Field(default_factory=lambda: int(time()))\n model: str\n object: str = \"text_completion\"\n usage: Optional[UsageStats] = None"
},
{
"identifier": "CompletionRespChoice",
"path": "OAI/types/completion.py",
"snippet": "class CompletionRespChoice(BaseModel):\n \"\"\"Represents a single choice in a completion response.\"\"\"\n\n # Index is 0 since we aren't using multiple choices\n index: int = 0\n finish_reason: str\n logprobs: Optional[LogProbs] = None\n text: str"
},
{
"identifier": "UsageStats",
"path": "OAI/types/common.py",
"snippet": "class UsageStats(BaseModel):\n \"\"\"Represents usage stats.\"\"\"\n\n prompt_tokens: int\n completion_tokens: int\n total_tokens: int"
},
{
"identifier": "LoraList",
"path": "OAI/types/lora.py",
"snippet": "class LoraList(BaseModel):\n \"\"\"Represents a list of Lora cards.\"\"\"\n\n object: str = \"list\"\n data: List[LoraCard] = Field(default_factory=list)"
},
{
"identifier": "LoraCard",
"path": "OAI/types/lora.py",
"snippet": "class LoraCard(BaseModel):\n \"\"\"Represents a single Lora card.\"\"\"\n\n id: str = \"test\"\n object: str = \"lora\"\n created: int = Field(default_factory=lambda: int(time()))\n owned_by: str = \"tabbyAPI\"\n scaling: Optional[float] = None"
},
{
"identifier": "ModelList",
"path": "OAI/types/model.py",
"snippet": "class ModelList(BaseModel):\n \"\"\"Represents a list of model cards.\"\"\"\n\n object: str = \"list\"\n data: List[ModelCard] = Field(default_factory=list)"
},
{
"identifier": "ModelCard",
"path": "OAI/types/model.py",
"snippet": "class ModelCard(BaseModel):\n \"\"\"Represents a single model card.\"\"\"\n\n id: str = \"test\"\n object: str = \"model\"\n created: int = Field(default_factory=lambda: int(time()))\n owned_by: str = \"tabbyAPI\"\n logging: Optional[LogPreferences] = None\n parameters: Optional[ModelCardParameters] = None"
},
{
"identifier": "unwrap",
"path": "utils.py",
"snippet": "def unwrap(wrapped, default=None):\n \"\"\"Unwrap function for Optionals.\"\"\"\n if wrapped is None:\n return default\n\n return wrapped"
}
] | import pathlib
from typing import Optional
from OAI.types.chat_completion import (
ChatCompletionMessage,
ChatCompletionRespChoice,
ChatCompletionStreamChunk,
ChatCompletionResponse,
ChatCompletionStreamChoice,
)
from OAI.types.completion import CompletionResponse, CompletionRespChoice
from OAI.types.common import UsageStats
from OAI.types.lora import LoraList, LoraCard
from OAI.types.model import ModelList, ModelCard
from utils import unwrap | 1,604 | """ Utility functions for the OpenAI server. """
def create_completion_response(
text: str,
prompt_tokens: int,
completion_tokens: int,
model_name: Optional[str],
):
"""Create a completion response from the provided text."""
choice = CompletionRespChoice(finish_reason="Generated", text=text)
response = CompletionResponse(
choices=[choice],
model=unwrap(model_name, ""),
usage=UsageStats(
prompt_tokens=prompt_tokens,
completion_tokens=completion_tokens,
total_tokens=prompt_tokens + completion_tokens,
),
)
return response
def create_chat_completion_response(
text: str,
prompt_tokens: int,
completion_tokens: int,
model_name: Optional[str],
):
"""Create a chat completion response from the provided text."""
message = ChatCompletionMessage(role="assistant", content=text)
choice = ChatCompletionRespChoice(finish_reason="Generated", message=message)
response = ChatCompletionResponse(
choices=[choice],
model=unwrap(model_name, ""),
usage=UsageStats(
prompt_tokens=prompt_tokens,
completion_tokens=completion_tokens,
total_tokens=prompt_tokens + completion_tokens,
),
)
return response
def create_chat_completion_stream_chunk(
const_id: str,
text: Optional[str] = None,
model_name: Optional[str] = None,
finish_reason: Optional[str] = None,
):
"""Create a chat completion stream chunk from the provided text."""
if finish_reason:
message = {}
else:
message = ChatCompletionMessage(role="assistant", content=text)
# The finish reason can be None
choice = ChatCompletionStreamChoice(finish_reason=finish_reason, delta=message)
chunk = ChatCompletionStreamChunk(
id=const_id, choices=[choice], model=unwrap(model_name, "")
)
return chunk
def get_model_list(model_path: pathlib.Path, draft_model_path: Optional[str] = None):
"""Get the list of models from the provided path."""
# Convert the provided draft model path to a pathlib path for
# equality comparisons
if draft_model_path:
draft_model_path = pathlib.Path(draft_model_path).resolve()
model_card_list = ModelList()
for path in model_path.iterdir():
# Don't include the draft models path
if path.is_dir() and path != draft_model_path:
model_card = ModelCard(id=path.name)
model_card_list.data.append(model_card) # pylint: disable=no-member
return model_card_list
def get_lora_list(lora_path: pathlib.Path):
"""Get the list of Lora cards from the provided path."""
lora_list = LoraList()
for path in lora_path.iterdir():
if path.is_dir():
| """ Utility functions for the OpenAI server. """
def create_completion_response(
text: str,
prompt_tokens: int,
completion_tokens: int,
model_name: Optional[str],
):
"""Create a completion response from the provided text."""
choice = CompletionRespChoice(finish_reason="Generated", text=text)
response = CompletionResponse(
choices=[choice],
model=unwrap(model_name, ""),
usage=UsageStats(
prompt_tokens=prompt_tokens,
completion_tokens=completion_tokens,
total_tokens=prompt_tokens + completion_tokens,
),
)
return response
def create_chat_completion_response(
text: str,
prompt_tokens: int,
completion_tokens: int,
model_name: Optional[str],
):
"""Create a chat completion response from the provided text."""
message = ChatCompletionMessage(role="assistant", content=text)
choice = ChatCompletionRespChoice(finish_reason="Generated", message=message)
response = ChatCompletionResponse(
choices=[choice],
model=unwrap(model_name, ""),
usage=UsageStats(
prompt_tokens=prompt_tokens,
completion_tokens=completion_tokens,
total_tokens=prompt_tokens + completion_tokens,
),
)
return response
def create_chat_completion_stream_chunk(
const_id: str,
text: Optional[str] = None,
model_name: Optional[str] = None,
finish_reason: Optional[str] = None,
):
"""Create a chat completion stream chunk from the provided text."""
if finish_reason:
message = {}
else:
message = ChatCompletionMessage(role="assistant", content=text)
# The finish reason can be None
choice = ChatCompletionStreamChoice(finish_reason=finish_reason, delta=message)
chunk = ChatCompletionStreamChunk(
id=const_id, choices=[choice], model=unwrap(model_name, "")
)
return chunk
def get_model_list(model_path: pathlib.Path, draft_model_path: Optional[str] = None):
"""Get the list of models from the provided path."""
# Convert the provided draft model path to a pathlib path for
# equality comparisons
if draft_model_path:
draft_model_path = pathlib.Path(draft_model_path).resolve()
model_card_list = ModelList()
for path in model_path.iterdir():
# Don't include the draft models path
if path.is_dir() and path != draft_model_path:
model_card = ModelCard(id=path.name)
model_card_list.data.append(model_card) # pylint: disable=no-member
return model_card_list
def get_lora_list(lora_path: pathlib.Path):
"""Get the list of Lora cards from the provided path."""
lora_list = LoraList()
for path in lora_path.iterdir():
if path.is_dir(): | lora_card = LoraCard(id=path.name) | 9 | 2023-11-10 05:54:02+00:00 | 4k |
zorazrw/filco | get_inputs.py | [
{
"identifier": "has_answer",
"path": "eval.py",
"snippet": "def has_answer(text: str, answers: list[str]) -> float:\n \"\"\"Check if text contains any of the answers.\"\"\"\n return float(any([(ans.lower() in text.lower()) for ans in answers]))"
},
{
"identifier": "load_dataset",
"path": "utils.py",
"snippet": "def load_dataset(path: str) -> list[dict]:\n \"\"\"Load dataset from JSON or JSONL file.\"\"\"\n if path.endswith(\".json\"):\n return json.load(open(path, \"r\"))\n elif path.endswith(\".jsonl\"):\n return [json.loads(line.strip()) for line in open(path, \"r\")]\n else:\n extension = path.split(\".\")[-1]\n raise ValueError(f\"File extension [{extension}] not valid.\")"
},
{
"identifier": "write_dataset",
"path": "utils.py",
"snippet": "def write_dataset(path: str, dataset: list[dict]):\n \"\"\"Write dataset to JSON or JSONL file.\"\"\"\n if path.endswith(\".json\"):\n json.dump(dataset, open(path, \"w\"))\n elif path.endswith(\".jsonl\"):\n with open(path, \"w\") as fw:\n for res_dict in dataset:\n fw.write(json.dumps(res_dict) + \"\\n\")\n else:\n extension = path.split(\".\")[-1]\n raise ValueError(f\"File extension [{extension}] not valid.\")"
}
] | import argparse
from eval import has_answer
from utils import load_dataset, write_dataset | 2,291 |
# ICT Example Creation Functions
def get_ict_io(
example: dict,
in_context_examples: list[dict],
input_list: list[str],
output_list: list[str],
no_prefix: bool = False,
filter_criteria: str = "strinc",
n_contexts: int = 1,
num_sents: int = None,
threshold: float = None,
question_prefix: str = "question",
answer_prefix: str = "answer",
context_prefix: str = "context",
) -> tuple[str, str]:
"""Get input and output texts with in-context examples."""
ict_io_list = []
for example in in_context_examples:
itext, otext = get_example_io(
example,
input_list,
output_list,
n_contexts=n_contexts,
num_sents=num_sents,
threshold=threshold,
filter_criteria=filter_criteria,
question_prefix=question_prefix,
answer_prefix=answer_prefix,
context_prefix=context_prefix,
)
ict_io_list.append("\n".join([itext, otext]))
input_text, output_text = get_example_io(
example,
input_list,
output_list,
n_contexts=n_contexts,
num_sents=num_sents,
threshold=threshold,
filter_criteria=filter_criteria,
question_prefix=question_prefix,
answer_prefix=answer_prefix,
context_prefix=context_prefix,
)
if no_prefix:
prefix = ""
else:
input_text_list = []
for ii in input_list:
if (ii == "filtered") or (ii == "passage"):
input_text_list.append(context_prefix)
elif ii == "question":
input_text_list.append(question_prefix)
else:
input_text_list.append(ii)
output_text_list = []
for oo in output_list:
if oo == "filtered":
output_text_list.append(
f"most helpful sentence in the {context_prefix}"
)
elif oo == "answer":
if answer_prefix == "response":
output_text_list.append("response to the query")
elif answer_prefix == "judgement":
output_text_list.append("judgement to the claim")
else:
output_text_list.append("answer to the question")
if len(output_text_list) == 1:
prefix = f"Given the {input_text_list}, predict the {output_text_list[0]}."
else:
prefix = (
f"Given the {input_text_list}, "
f"predict the {output_text_list[0]} first, "
f"then predict the {output_text_list[1]}."
)
if question_prefix == "claim" and answer_prefix == "judgement":
prefix += (
"('SUPPORTS' or 'REFUTES')\n"
"If the 'context' does not provide enough information "
"to judge the claim, use your own knowledge instead."
)
full_input_text = "\n\n".join([prefix] + ict_io_list + [input_text])
return full_input_text.strip(), output_text.strip()
def main():
"""Run the main data processing function."""
dataset = load_dataset(args.dataset_path)
N = len(dataset)
def get_examples(index: int, n_examples: int) -> list[int]:
"""Get indices of in-context examples."""
indices = [(index - i - 1) % N for i in range(n_examples)]
return [dataset[i] for i in indices]
procset = []
for idx, ex in enumerate(dataset):
input_text, output_text = get_ict_io(
example=ex,
in_context_examples=get_examples(idx, args.n_examples),
input_list=args.input_list,
output_list=args.output_list,
no_prefix=args.no_prefix,
filter_criteria=args.filter_criteria,
n_contexts=args.n_contexts,
num_sents=args.num_sents,
threshold=args.threshold,
question_prefix=args.question_prefix,
answer_prefix=args.answer_prefix,
context_prefix=args.context_prefix,
)
procset.append({"input": input_text, "output": output_text})
| """Create I/O to Evaluate/Train Models.
Default I/O for Context Filtering: [i] question context [o] sent
Default I/O for Output Generation: [i] sent question [o] answer
"""
# Individual Components
QUESTION_PREFIX = "question"
ANSWER_PREFIX = "answer"
CONTEXT_PREFIX = "context"
prefix_format = "{}: {}"
def get_question(
example: dict,
question_prefix: str = QUESTION_PREFIX,
add_prefix: bool = True,
) -> str:
"""Get the question from the example."""
question = example["question"]
if add_prefix:
question = prefix_format.format(question_prefix, question)
return question
def get_context(
example: dict,
n_contexts: int = 1,
context_prefix: str = CONTEXT_PREFIX,
add_prefix: bool = True,
) -> str:
"""Get the context from the example."""
context_list = [ctx["text"] for ctx in example["ctxs"][:n_contexts]]
context = '\n'.join(context_list)
if add_prefix:
context = prefix_format.format(context_prefix, context)
return context
def get_sent(
example: dict,
n_contexts: int = 1,
criteria: str = "strinc",
num_sents: int = None,
threshold: float = None,
) -> str:
"""Get the best sentence from contexts."""
sentences = []
if threshold is None:
threshold = 0.0
for idx in range(n_contexts):
if criteria == "strinc":
for sent_dict in example["ctxs"][idx]["sentences"]:
if sent_dict[criteria] is True:
sentences.append(sent_dict["text"])
# break
else:
if num_sents is None:
num_sents = len(example["ctxs"][idx]["sentences"])
ctx_sents = sorted(
example["ctxs"][idx]["sentences"],
key=lambda x: -x[criteria]
)
sentences.extend([
s["text"] for s in ctx_sents[: num_sents]
if s[criteria] >= threshold
])
sent_text = " ".join(sentences)
return sent_text
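# Illustrative sketch (editor-added, not part of the original module): a minimal,
# hypothetical example dict in the shape the helpers above expect -- a "question",
# a list of "answers", and retrieved "ctxs" whose "sentences" carry a boolean
# "strinc" flag marking whether the sentence contains an answer string.
#
# >>> ex = {
# ...     "question": "Who wrote Hamlet?",
# ...     "answers": ["William Shakespeare"],
# ...     "ctxs": [{"text": "Hamlet was written by William Shakespeare.",
# ...               "sentences": [{"text": "Hamlet was written by William Shakespeare.",
# ...                              "strinc": True}]}],
# ... }
# >>> get_sent(ex, n_contexts=1, criteria="strinc")
# 'Hamlet was written by William Shakespeare.'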
def get_answer(
example: dict,
answer_prefix: str = ANSWER_PREFIX,
find_best: bool = True,
n_contexts: int = 1,
add_prefix: bool = True,
) -> str:
    """Find the reference answer that is most likely contained in the context.

    Uses the top-1 retrieved context by default; falls back to the first
    reference answer when none of the checked contexts contains one.
    """
if find_best:
for idx in range(n_contexts):
context = example["ctxs"][idx]["text"].lower()
answer_exists = [
has_answer(context, [ans.lower()]) for ans in example["answers"]
]
if any(answer_exists):
answer_text = example["answers"][answer_exists.index(True)]
break
else:
answer_text = example["answers"][0]
else:
answer_text = example["answers"][0]
if add_prefix:
answer_text = prefix_format.format(answer_prefix, answer_text)
return answer_text
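# With the hypothetical `ex` dict sketched above, and assuming `eval.has_answer`
# reports that the lowercased answer occurs in the lowercased context, the call
# below would return the prefixed reference answer (editor illustration only):
#
# >>> get_answer(ex)
# 'answer: William Shakespeare'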
# Example Creation Functions
def get_example_io(
example: dict,
input_list: list[str],
output_list: list[str],
n_contexts: int = 1,
num_sents: int = None,
threshold: float = None,
filter_criteria: str = "strinc",
question_prefix: str = "question",
answer_prefix: str = "answer",
context_prefix: str = "context",
) -> tuple[str, str]:
"""Get input and output texts for the given example."""
input_text_list, output_text_list = [], []
for inp in input_list:
if inp == "question":
input_text_list.append(
get_question(example, question_prefix=question_prefix)
)
elif inp == "passage":
input_text_list.append(get_context(example, n_contexts, context_prefix=context_prefix))
elif inp == "filtered":
sent = get_sent(
example=example,
n_contexts=n_contexts,
criteria=filter_criteria,
num_sents=num_sents,
threshold=threshold,
)
if not sent.strip():
sent = get_context(example, context_prefix=context_prefix)
else:
sent = prefix_format.format(CONTEXT_PREFIX, sent)
input_text_list.append(sent)
else:
raise ValueError(f"Invalid input type {inp}")
input_text = "\n".join(input_text_list)
for out in output_list:
if out == "answer":
output_text_list.append(
get_answer(
example,
answer_prefix=answer_prefix,
n_contexts=n_contexts,
)
)
elif out == "filtered":
output_text_list.append(
get_sent(
example=example,
n_contexts=n_contexts,
criteria=filter_criteria,
num_sents=num_sents,
threshold=threshold,
)
)
else:
raise ValueError(f"Invalid output type {out}")
output_text = "\n".join(output_text_list)
return input_text, output_text
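# Continuing the hypothetical `ex` above, a sketch of how get_example_io composes
# a filtered-sentence input with a question, matching the module docstring's
# "[i] sent question [o] answer" setting (editor illustration, assuming the same
# `has_answer` behaviour noted for get_answer):
#
# >>> itext, otext = get_example_io(ex, input_list=["filtered", "question"],
# ...                               output_list=["answer"])
# >>> print(itext)
# context: Hamlet was written by William Shakespeare.
# question: Who wrote Hamlet?
# >>> otext
# 'answer: William Shakespeare'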
# ICT Example Creation Functions
def get_ict_io(
example: dict,
in_context_examples: list[dict],
input_list: list[str],
output_list: list[str],
no_prefix: bool = False,
filter_criteria: str = "strinc",
n_contexts: int = 1,
num_sents: int = None,
threshold: float = None,
question_prefix: str = "question",
answer_prefix: str = "answer",
context_prefix: str = "context",
) -> tuple[str, str]:
"""Get input and output texts with in-context examples."""
ict_io_list = []
    # use a distinct loop variable so the target `example` argument is not shadowed
    for ict_example in in_context_examples:
        itext, otext = get_example_io(
            ict_example,
input_list,
output_list,
n_contexts=n_contexts,
num_sents=num_sents,
threshold=threshold,
filter_criteria=filter_criteria,
question_prefix=question_prefix,
answer_prefix=answer_prefix,
context_prefix=context_prefix,
)
ict_io_list.append("\n".join([itext, otext]))
input_text, output_text = get_example_io(
example,
input_list,
output_list,
n_contexts=n_contexts,
num_sents=num_sents,
threshold=threshold,
filter_criteria=filter_criteria,
question_prefix=question_prefix,
answer_prefix=answer_prefix,
context_prefix=context_prefix,
)
if no_prefix:
prefix = ""
else:
input_text_list = []
for ii in input_list:
if (ii == "filtered") or (ii == "passage"):
input_text_list.append(context_prefix)
elif ii == "question":
input_text_list.append(question_prefix)
else:
input_text_list.append(ii)
output_text_list = []
for oo in output_list:
if oo == "filtered":
output_text_list.append(
f"most helpful sentence in the {context_prefix}"
)
elif oo == "answer":
if answer_prefix == "response":
output_text_list.append("response to the query")
elif answer_prefix == "judgement":
output_text_list.append("judgement to the claim")
else:
output_text_list.append("answer to the question")
if len(output_text_list) == 1:
prefix = f"Given the {input_text_list}, predict the {output_text_list[0]}."
else:
prefix = (
f"Given the {input_text_list}, "
f"predict the {output_text_list[0]} first, "
f"then predict the {output_text_list[1]}."
)
if question_prefix == "claim" and answer_prefix == "judgement":
prefix += (
"('SUPPORTS' or 'REFUTES')\n"
"If the 'context' does not provide enough information "
"to judge the claim, use your own knowledge instead."
)
full_input_text = "\n\n".join([prefix] + ict_io_list + [input_text])
return full_input_text.strip(), output_text.strip()
def main():
"""Run the main data processing function."""
dataset = load_dataset(args.dataset_path)
N = len(dataset)
    def get_examples(index: int, n_examples: int) -> list[dict]:
        """Get the in-context examples that precede the given index, wrapping around."""
indices = [(index - i - 1) % N for i in range(n_examples)]
return [dataset[i] for i in indices]
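    # Wrap-around selection, illustrated (editor note): with N == 5 examples,
    # get_examples(index=0, n_examples=2) builds indices [4, 3], i.e. the two
    # records "before" the first one, so every example still gets in-context
    # demonstrations at the start of the dataset.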
procset = []
for idx, ex in enumerate(dataset):
input_text, output_text = get_ict_io(
example=ex,
in_context_examples=get_examples(idx, args.n_examples),
input_list=args.input_list,
output_list=args.output_list,
no_prefix=args.no_prefix,
filter_criteria=args.filter_criteria,
n_contexts=args.n_contexts,
num_sents=args.num_sents,
threshold=args.threshold,
question_prefix=args.question_prefix,
answer_prefix=args.answer_prefix,
context_prefix=args.context_prefix,
)
procset.append({"input": input_text, "output": output_text})
| write_dataset(args.output_path, procset) | 2 | 2023-11-14 21:18:30+00:00 | 4k |
ShipBit/wingman-ai | gui/sections/context_runner.py | [
{
"identifier": "Icon",
"path": "gui/components/icon.py",
"snippet": "class Icon(ctk.CTkImage):\n def __init__(self, icon: str, size: int | tuple[int, int]=50, themed=True):\n if isinstance(size, int):\n size = (size, size)\n\n icon_dir = path.join(path.abspath(path.dirname(__file__)), \"..\", \"..\", \"assets\", \"icons\")\n super().__init__(light_image=Image.open(path.join(icon_dir, f\"{icon}{'_light' if themed else ''}.png\")),\n dark_image=Image.open(path.join(icon_dir, f\"{icon}{'_dark' if themed else ''}.png\")),\n size=size)"
},
{
"identifier": "WingmenList",
"path": "gui/components/wingmen_list.py",
"snippet": "class WingmenList(ctk.CTkFrame):\n def __init__(self, master, wingmen, broken=False, **kwargs):\n super().__init__(master, **kwargs)\n\n self.grid_columnconfigure(2, weight=1)\n self.grid_rowconfigure(0, weight=1)\n\n count = len(wingmen)\n self.wingmen_list = []\n\n if count:\n if broken:\n intro = f\"Looks like {count} of your Wingmen {'is' if count == 1 else 'are'} not operational:\"\n else:\n intro = f\"You currently have {count} Wingm{'a' if count == 1 else 'e'}n registered\"\n self.header_label = ctk.CTkLabel(\n self, text=\"Push-to-talk Key\", text_color=(\"gray40\", \"gray55\")\n )\n self.header_label.grid(row=1, column=0, padx=10)\n self.header_label2 = ctk.CTkLabel(\n self, text=\"Wingman to activate\", text_color=(\"gray40\", \"gray55\")\n )\n self.header_label2.grid(row=1, column=2, padx=10, sticky=\"w\")\n self.intro_label = ctk.CTkLabel(self, text=intro)\n self.intro_label.grid(row=0, column=0, columnspan=4, padx=10, pady=10)\n\n\n for i, wingman in enumerate(wingmen):\n row_index = i + 2\n\n key = wingman[\"name\"] if broken else wingman.get_record_key()\n key_label = ctk.CTkLabel(self, text=key)\n key_label.grid(row=row_index, column=0, padx=10)\n\n delimiter_label = ctk.CTkLabel(self, text=\"〉\")\n delimiter_label.grid(row=row_index, column=1, padx=10)\n\n value = wingman[\"error\"] if broken else wingman.name\n value_label = ctk.CTkLabel(self, text=value)\n value_label.grid(row=row_index, column=2, padx=10, sticky=\"w\")\n self.wingmen_list.append([key_label, delimiter_label, value_label])\n else:\n if not broken:\n self.warning_label = ctk.CTkLabel(self, text=\"⚠️ WARNING ⚠️\")\n self.warning_label.grid(row=0, column=1, padx=10, pady=10)\n self.warning_msg = ctk.CTkLabel(\n self,\n text=\"Seems like you don't have any functional wingmen registered.\",\n )\n self.warning_msg.grid(row=1, column=1, padx=10, pady=0)"
},
{
"identifier": "Printr",
"path": "services/printr.py",
"snippet": "class Printr(object):\n _instance = None\n\n LILA = \"\\033[95m\"\n BLUE = \"\\033[94m\"\n CYAN = \"\\033[96m\"\n GREEN = \"\\033[92m\"\n YELLOW = \"\\033[93m\"\n RED = \"\\033[91m\"\n CLEAR = \"\\033[0m\"\n BOLD = \"\\033[1m\"\n FAINT = \"\\033[2m\"\n NORMAL_WEIGHT = \"\\033[22m\"\n UNDERLINE = \"\\033[4m\"\n END_UNDERLINE = \"\\033[24m\"\n OVERLINE = \"\\033[53m\"\n END_OVERLINE = \"\\033[55m\"\n FRAMED = \"\\033[51m\"\n ENCIRCLED = \"\\033[52m\"\n DELETE_LINE = \"\\033[2K\\033[1G\"\n PREVIOUS_LINE = \"\\033[2F\"\n\n tags = [\n # {\"tagName\": \"bold\", \"font\": \"TkTextFont bold\"},\n {\"tagName\": \"info\", \"foreground\": \"#6699ff\"},\n {\"tagName\": \"warn\", \"foreground\": \"orange\"},\n {\"tagName\": \"err\", \"foreground\": \"red\"},\n\n {\"tagName\": \"green\", \"foreground\": \"#33cc33\"},\n {\"tagName\": \"blue\", \"foreground\": \"#6699ff\"},\n {\"tagName\": \"violet\", \"foreground\": \"#aa33dd\"},\n {\"tagName\": \"grey\", \"foreground\": \"grey\"}\n ]\n\n CHANNEL = Literal[\"main\", \"error\", \"warning\", \"info\"]\n OUTPUT_TYPES = None | ctk.StringVar | ctk.CTkTextbox\n\n _message_stacks: dict[CHANNEL, list] = dict(\n main=[],\n error=[],\n warning=[],\n info=[]\n )\n\n # NOTE this is a singleton class\n def __new__(cls):\n if cls._instance is None:\n cls._instance = super(Printr, cls).__new__(cls)\n\n cls.out: dict[Printr.CHANNEL, Printr.OUTPUT_TYPES ] = dict(\n main=None,\n error=None,\n warning=None,\n info=None\n )\n return cls._instance\n\n\n def set_output(self, output_channel: CHANNEL, output_element: OUTPUT_TYPES):\n if isinstance(output_element, ctk.CTkTextbox):\n for tag in self.tags:\n output_element.tag_config(**tag)\n\n self.out[output_channel] = output_element\n\n msg_stack = self._message_stacks.get(output_channel, [])\n if len(msg_stack) > 0:\n msg = \"\\n\".join(msg_stack)\n self.print(msg, output_channel)\n # TODO: clear stack?\n for _ in range(len(msg_stack)):\n msg_stack.pop()\n\n\n\n def print(self, text, output_channel: CHANNEL = \"main\", tags=None, wait_for_gui=False, console_only=False):\n channel = self.out.get(output_channel, None)\n if channel and not console_only:\n if isinstance(channel, ctk.CTkTextbox):\n channel.configure(state=\"normal\")\n channel.insert(\"end\", f\"{text}\\n\", tags=tags)\n channel.see(\"end\")\n channel.configure(state=\"disabled\")\n else:\n # output type -> StringVar\n channel.set(text)\n elif wait_for_gui and not console_only:\n # message should only be shown in GUI\n # so add it to the queue to wait for GUI initialization\n self._message_stacks.get(output_channel, []).append(text)\n else:\n # no special output type -> terminal output\n print(text)\n\n\n def print_err(self, text, wait_for_gui=True):\n self.print(text, output_channel=\"error\", wait_for_gui=wait_for_gui)\n\n def print_warn(self, text, wait_for_gui=True):\n self.print(text, output_channel=\"warning\", wait_for_gui=wait_for_gui)\n\n def print_info(self, text, wait_for_gui=True):\n self.print(text, output_channel=\"info\", wait_for_gui=wait_for_gui)\n\n\n @staticmethod\n def clr(text, color_format):\n return f\"{color_format}{text}{Printr.CLEAR}\"\n\n @staticmethod\n def clr_print(text, color_format):\n print(Printr.clr(text, color_format))\n\n @staticmethod\n def sys_print(text, headline=\"\", color=RED, first_message=True):\n if first_message:\n print(\"\")\n if headline.strip():\n print(\n Printr.clr(f\"{Printr.BOLD}{headline}{Printr.NORMAL_WEIGHT}\", color)\n )\n else:\n print(Printr.PREVIOUS_LINE)\n print(Printr.clr(f\"⎢ {text}\", 
color))\n print(\"\")\n\n @staticmethod\n def err_print(text, first_message=True):\n Printr.sys_print(text, \"Something went wrong!\", first_message=first_message)\n\n @staticmethod\n def warn_print(text, first_message=True):\n Printr.sys_print(text, \"Please note:\", Printr.YELLOW, first_message)\n\n @staticmethod\n def info_print(text, first_message=True):\n Printr.sys_print(text, \"\", Printr.BLUE, first_message)\n\n @staticmethod\n def hl_print(text, first_message=True):\n Printr.sys_print(text, \"\", Printr.CYAN, first_message)\n\n @staticmethod\n def override_print(text):\n print(f\"{Printr.DELETE_LINE}{text}\")\n\n @staticmethod\n def box_start():\n print(\n f\"{Printr.CYAN}⎡{Printr.OVERLINE}⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊{Printr.END_OVERLINE}⎤\"\n )\n print(f\"⎢{Printr.CLEAR}\")\n\n @staticmethod\n def box_end():\n print(f\"{Printr.CYAN}⎢\")\n print(\n f\"⎣{Printr.UNDERLINE}⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊{Printr.END_UNDERLINE}⎦{Printr.CLEAR}\"\n )\n\n @staticmethod\n def box_print(text):\n print(f\"{Printr.CYAN}⎜{Printr.CLEAR} {text}\")"
}
] | import customtkinter as ctk
from gui.components.icon import Icon
from gui.components.wingmen_list import WingmenList
from services.printr import Printr | 3,145 |
printr = Printr()
class ContextRunner(ctk.CTkFrame):
def __init__(self, master, context="", **kwargs):
super().__init__(master, **kwargs)
self.core = master.core
self.core.load_context(context)
self.status_var = ctk.StringVar(self, "Inactive", "status")
tower = self.core.tower
auto_run = self.core.config_manager.gui_config.get("auto-run", "off") == "on"
self.grid_columnconfigure(0, weight=1)
self.grid_rowconfigure(3, weight=1)
context_title = (
context.title().replace("_", " ").strip() if context else "Default"
)
self.title = ctk.CTkLabel(
self,
text=context_title,
font=("TkHeadingFont", 20, "bold"),
text_color="#EB154D",
)
self.title.grid(row=0, column=0, padx=20, pady=10, sticky="w")
# TODO: Make this a component
self.status = ctk.CTkLabel(
self,
textvariable=self.status_var,
anchor="w",
fg_color=("grey70", "grey30"),
corner_radius=10,
width=65,
pady=3,
)
self.status.grid(row=0, column=0, padx=20, pady=10, sticky="e")
self.status_icon_active = Icon("state_active", 16, False)
self.status_icon_inactive = Icon("state_inactive", 16, False)
self.status_led = ctk.CTkLabel(
self, image=self.status_icon_inactive, text="", fg_color="transparent"
)
self.status_led.grid(row=0, column=0, padx=95, pady=10, sticky="e")
wingmen = []
if tower:
wingmen = tower.get_wingmen()
|
printr = Printr()
class ContextRunner(ctk.CTkFrame):
def __init__(self, master, context="", **kwargs):
super().__init__(master, **kwargs)
self.core = master.core
self.core.load_context(context)
self.status_var = ctk.StringVar(self, "Inactive", "status")
tower = self.core.tower
auto_run = self.core.config_manager.gui_config.get("auto-run", "off") == "on"
self.grid_columnconfigure(0, weight=1)
self.grid_rowconfigure(3, weight=1)
context_title = (
context.title().replace("_", " ").strip() if context else "Default"
)
self.title = ctk.CTkLabel(
self,
text=context_title,
font=("TkHeadingFont", 20, "bold"),
text_color="#EB154D",
)
self.title.grid(row=0, column=0, padx=20, pady=10, sticky="w")
# TODO: Make this a component
self.status = ctk.CTkLabel(
self,
textvariable=self.status_var,
anchor="w",
fg_color=("grey70", "grey30"),
corner_radius=10,
width=65,
pady=3,
)
self.status.grid(row=0, column=0, padx=20, pady=10, sticky="e")
self.status_icon_active = Icon("state_active", 16, False)
self.status_icon_inactive = Icon("state_inactive", 16, False)
self.status_led = ctk.CTkLabel(
self, image=self.status_icon_inactive, text="", fg_color="transparent"
)
self.status_led.grid(row=0, column=0, padx=95, pady=10, sticky="e")
wingmen = []
if tower:
wingmen = tower.get_wingmen() | self.wingmen_list = WingmenList(self, wingmen=wingmen) | 1 | 2023-11-15 09:36:06+00:00 | 4k |
OliverMao/FlaskAutoApiBuilder | Faab/FaabFunction.py | [
{
"identifier": "login_required",
"path": "Faab/FaabJWT.py",
"snippet": "def login_required(f):\n \"\"\"\n 使用functools模块的wraps装饰内部函数\n \"\"\"\n\n @functools.wraps(f)\n def wrapper(*args, **kwargs):\n try:\n if g.username == -1:\n # print('error1')\n return {'message': 'token已失效'}, 401\n elif g.username == -2:\n # print('error2')\n return {'message': 'token认证失败'}, 401\n elif g.username == -3:\n # print('error3')\n return {'message': '非法的token'}, 401\n else:\n return f(*args, **kwargs)\n except Exception as e:\n print(e)\n return {'message': '请先登录认证.'}, 401\n\n '第2种方法,在返回内部函数之前,先修改wrapper的name属性'\n # wrapper.__name__ = f.__name__\n return wrapper"
},
{
"identifier": "db",
"path": "Faab/extensions.py",
"snippet": ""
}
] | import json
import pandas as pd
import io
from functools import wraps
from flasgger import swag_from
from flask import request, g, send_file
from sqlalchemy import and_
from sqlalchemy.orm import class_mapper
from flask_sqlalchemy import SQLAlchemy
from Faab.FaabJWT import login_required
from Faab.extensions import db | 2,096 | return wrapper
# noinspection ALL
def check_request_turn(func):
# noinspection PyTypeChecker
@wraps(func)
def wrapper(self, *args, **kwargs):
form = request.json
need_update = form.get('need_update')
condition = form.get('condition')
for key, value in condition.items():
if key == "_Own":
continue
exists = self.check_parameter_exists(key)
if not exists:
return {'error': 'a参数错误', 'code': 11}
for key, value in need_update.items():
exists = self.check_parameter_exists(key)
if not exists:
return {'error': 'b参数错误', 'code': 10}
# noinspection PyCallingNonCallable
return func(self, *args, **kwargs)
return wrapper
def check_parameter_exists(self, parameter):
mapper = class_mapper(self.model)
return hasattr(mapper.column_attrs, parameter)
@swag_from('swag_config/get.yml')
def get(self):
params = dict(request.args)
_Not_Filter = False
if '_Not_Filter' in params:
_Not_Filter = json.loads(params.pop('_Not_Filter'))
if '_Own' in params:
name = params.get('_Own')
params[name] = g.username
params.pop('_Own')
if '_Pagination' not in params:
if '_Desc' not in params:
query = self.model.query.filter_by(is_delete=0)
else:
query = self.model.query.filter_by(is_delete=0).order_by(self.model.id.desc())
params.pop('_Desc')
filters = []
if '_Search' in params:
key = params.pop('_Search')
value = params.pop('_Search_value')
filters.append(getattr(self.model, key).like('%' + value + '%'))
for key, value in params.items():
filters.append(getattr(self.model, key) == value)
if _Not_Filter != False:
filters.append(getattr(self.model, _Not_Filter['key']) != _Not_Filter['value'])
if filters:
query = query.filter(and_(*filters))
lists = query.all()
return self.list_to_return(lists)
else:
params.pop('_Pagination')
page = int(params.pop('page'))
per_page = int(params.pop('per_page'))
if '_Desc' not in params:
query = self.model.query.filter_by(is_delete=0)
else:
query = self.model.query.filter_by(is_delete=0).order_by(self.model.id.desc())
params.pop('_Desc')
filters = []
if '_Search' in params:
key = params.pop('_Search')
value = params.pop('_Search_value')
filters.append(getattr(self.model, key).like('%' + value + '%'))
for key, value in params.items():
filters.append(getattr(self.model, key) == value)
if _Not_Filter != False:
filters.append(getattr(self.model, _Not_Filter['key']) != _Not_Filter['value'])
if filters:
query = query.filter(and_(*filters))
lists = query.paginate(page=page, per_page=per_page, error_out=False)
items = lists.items
has_next = lists.has_next
has_prev = lists.has_prev
total = lists.total
pages = lists.pages
return {'items': self.list_to_return(items), 'has_next': has_next, 'has_prev': has_prev, 'total': total,
'pages': pages}
@swag_from('swag_config/get_one.yml')
def get_one(self):
params = dict(request.args)
if '_Own' in params:
name = params.get('_Own')
params[name] = g.username
params.pop('_Own')
filters = []
query = self.model.query.filter_by(is_delete=0)
for key, value in params.items():
filters.append(getattr(self.model, key) == value)
if filters:
query = query.filter(and_(*filters))
item = query.first()
return self.one_to_return(item)
@swag_from('swag_config/post.yml')
def post(self):
sets = request.json
new_item = self.model()
for key, value in sets.items():
setattr(new_item, key, value)
try:
|
# ......
class AutoUrl:
def __init__(self, add_url_list):
for i in add_url_list:
AutoDB(i["model"], i["bp"], i["url_prefix"])
class AutoDB:
model = {}
bp = object
url_name = ""
def __init__(self, model, bp, url_name):
self.model = model
self.bp = bp
self.url_name = url_name
self.bp.add_url_rule('/' + url_name + '/get', endpoint=bp.name + url_name + 'get',
view_func=self.get,
methods=['GET'])
self.bp.add_url_rule('/' + url_name + '/get_one', endpoint=bp.name + url_name + 'get_one',
view_func=self.get_one,
methods=['GET'])
self.bp.add_url_rule('/' + url_name + '/post', endpoint=bp.name + url_name + 'post',
view_func=self.post,
methods=['POST'])
self.bp.add_url_rule('/' + url_name + '/delete/<int:one_or_list>/<int:true_del_or_false_del>',
endpoint=bp.name + url_name + 'delete', view_func=self.delete,
methods=['POST'])
self.bp.add_url_rule('/' + url_name + '/put', endpoint=bp.name + url_name + 'put',
view_func=self.put,
methods=['POST'])
self.bp.add_url_rule('/' + url_name + '/export', endpoint=bp.name + url_name + 'export',
view_func=self.export,
methods=['POST'])
def list_to_return(self, get_list):
"""
        FuncName: convert a query result list into an HTTP response payload
        Parameter: the list of query results
        Return: the HTTP response value
"""
result = []
for item in get_list:
data = {}
for col in class_mapper(self.model).mapped_table.c:
value = str(getattr(item, col.name))
if value != 'None':
data[col.name] = value
else:
continue
result.append(data)
return result
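    # Illustrative shape of the value returned above (editor-added sketch): for a
    # hypothetical model with columns `id` and `name`, a row (1, "demo") becomes
    # {"id": "1", "name": "demo"} -- every column value is stringified, and any
    # column whose value is None is dropped from the dict.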
def one_to_return(self, info):
"""
        FuncName: convert a single query result into an HTTP response payload
        Parameter: the query result
        Return: the HTTP response value
"""
data = {}
if info:
for col in class_mapper(self.model).mapped_table.c:
value = str(getattr(info, col.name))
if value != 'None':
data[col.name] = value
else:
continue
return data
else:
return {}
# noinspection ALL
def check_request_delete(func):
# noinspection PyTypeChecker
@wraps(func)
def wrapper(self, *args, **kwargs):
params = request.json
for key, value in params.items():
exists = self.check_parameter_exists(key)
if not exists:
return {'error': '参数错误', 'code': 0}
# noinspection PyCallingNonCallable
return func(self, *args, **kwargs)
return wrapper
# noinspection ALL
def check_request_export(func):
# noinspection PyTypeChecker
@wraps(func)
def wrapper(self, *args, **kwargs):
form = request.json
need_export = form.get('need_export')
condition = form.get('condition')
for key, value in condition.items():
if key == "_Own" or key == "_Price":
continue
exists = self.check_parameter_exists(key)
if not exists:
return {'error': 'a参数错误', 'code': 11}
for key, value in need_export.items():
exists = self.check_parameter_exists(key)
if not exists:
return {'error': 'b参数错误', 'code': 10}
# noinspection PyCallingNonCallable
return func(self, *args, **kwargs)
return wrapper
# noinspection ALL
def check_request_turn(func):
# noinspection PyTypeChecker
@wraps(func)
def wrapper(self, *args, **kwargs):
form = request.json
need_update = form.get('need_update')
condition = form.get('condition')
for key, value in condition.items():
if key == "_Own":
continue
exists = self.check_parameter_exists(key)
if not exists:
return {'error': 'a参数错误', 'code': 11}
for key, value in need_update.items():
exists = self.check_parameter_exists(key)
if not exists:
return {'error': 'b参数错误', 'code': 10}
# noinspection PyCallingNonCallable
return func(self, *args, **kwargs)
return wrapper
def check_parameter_exists(self, parameter):
mapper = class_mapper(self.model)
return hasattr(mapper.column_attrs, parameter)
@swag_from('swag_config/get.yml')
def get(self):
params = dict(request.args)
_Not_Filter = False
if '_Not_Filter' in params:
_Not_Filter = json.loads(params.pop('_Not_Filter'))
if '_Own' in params:
name = params.get('_Own')
params[name] = g.username
params.pop('_Own')
if '_Pagination' not in params:
if '_Desc' not in params:
query = self.model.query.filter_by(is_delete=0)
else:
query = self.model.query.filter_by(is_delete=0).order_by(self.model.id.desc())
params.pop('_Desc')
filters = []
if '_Search' in params:
key = params.pop('_Search')
value = params.pop('_Search_value')
filters.append(getattr(self.model, key).like('%' + value + '%'))
for key, value in params.items():
filters.append(getattr(self.model, key) == value)
if _Not_Filter != False:
filters.append(getattr(self.model, _Not_Filter['key']) != _Not_Filter['value'])
if filters:
query = query.filter(and_(*filters))
lists = query.all()
return self.list_to_return(lists)
else:
params.pop('_Pagination')
page = int(params.pop('page'))
per_page = int(params.pop('per_page'))
if '_Desc' not in params:
query = self.model.query.filter_by(is_delete=0)
else:
query = self.model.query.filter_by(is_delete=0).order_by(self.model.id.desc())
params.pop('_Desc')
filters = []
if '_Search' in params:
key = params.pop('_Search')
value = params.pop('_Search_value')
filters.append(getattr(self.model, key).like('%' + value + '%'))
for key, value in params.items():
filters.append(getattr(self.model, key) == value)
if _Not_Filter != False:
filters.append(getattr(self.model, _Not_Filter['key']) != _Not_Filter['value'])
if filters:
query = query.filter(and_(*filters))
lists = query.paginate(page=page, per_page=per_page, error_out=False)
items = lists.items
has_next = lists.has_next
has_prev = lists.has_prev
total = lists.total
pages = lists.pages
return {'items': self.list_to_return(items), 'has_next': has_next, 'has_prev': has_prev, 'total': total,
'pages': pages}
@swag_from('swag_config/get_one.yml')
def get_one(self):
params = dict(request.args)
if '_Own' in params:
name = params.get('_Own')
params[name] = g.username
params.pop('_Own')
filters = []
query = self.model.query.filter_by(is_delete=0)
for key, value in params.items():
filters.append(getattr(self.model, key) == value)
if filters:
query = query.filter(and_(*filters))
item = query.first()
return self.one_to_return(item)
@swag_from('swag_config/post.yml')
def post(self):
sets = request.json
new_item = self.model()
for key, value in sets.items():
setattr(new_item, key, value)
try: | db.session.add(new_item) | 1 | 2023-11-10 09:25:44+00:00 | 4k |
mattyamonaca/LCM_i2i_PoC | config.py | [
{
"identifier": "get_pipe",
"path": "lcm.py",
"snippet": "def get_pipe(config):\n vae_model_path = config.vae_model_path.get()\n vae_model_path = vae_model_path.replace(\"\\\\\", \"/\")\n LoRA_model_path = config.LoRA_model_path.get()\n LoRA_model_path = LoRA_model_path.replace(\"\\\\\", \"/\")\n\n if config.vae_model_path.get() != \"\":\n pipe = AutoPipelineForImage2Image.from_pretrained(\n config.generation_model_name.get(), torch_dtype=torch.float16, use_safetensors=True,\n vae = AutoencoderKL.from_single_file(vae_model_path, torch_dtype=torch.float16)\n ).to(\"cuda\")\n else:\n pipe = AutoPipelineForImage2Image.from_pretrained(\n config.generation_model_name.get(), torch_dtype=torch.float16, use_safetensors=True,\n ).to(\"cuda\")\n\n if config.LoRA_model_path.get() != \"\":\n pipe.load_lora_weights(LoRA_model_path, adapter_name=\"LoRA\")\n pipe.load_lora_weights(config.lcm_model_name.get(), adapter_name=\"lcm\")\n pipe.set_adapters([\"LoRA\", \"lcm\"], adapter_weights=[config.LoRAstrength_value, 1.0])\n pipe.fuse_lora()\n else:\n pipe.load_lora_weights(config.lcm_model_name.get(), adapter_name=\"lcm\")\n\n pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)\n\n if pipe.safety_checker is not None:\n pipe.safety_checker = lambda images, **kwargs: (images, [False])\n\n return pipe"
},
{
"identifier": "LCM_run",
"path": "lcm.py",
"snippet": "def LCM_run(config, pipe):\n global img # imgをグローバル変数として使用します\n\n window_name = \"Window Capture\"\n cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)\n cv2.setWindowProperty(window_name, cv2.WND_PROP_TOPMOST, 1)\n config.running = True\n listener = keyboard.Listener(on_press=on_press)\n listener.start() # キーボードリスナーを開始\n\n try:\n while config.running:\n screenshot = config.screen_capture.capture()\n input_img_np = np.array(screenshot)\n\n generator = torch.Generator(\"cuda\").manual_seed(2500)\n input_img = Image.fromarray(input_img_np)\n\n img_pil = pipe(\n strength=config.strength_value,\n prompt=config.prompt.get(),\n image=input_img,\n num_inference_steps=config.num_inference_steps_value,\n guidance_scale=1,\n generator=generator\n ).images[0]\n\n img = np.array(img_pil)\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n cv2.imshow(window_name, img)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n finally:\n cv2.destroyAllWindows()\n listener.stop() # キーボードリスナーを停止する\n listener.join() # リスナーの完全な停止を待つ# キーボードリスナーを停止"
},
{
"identifier": "ScreenCapture",
"path": "capture.py",
"snippet": "class ScreenCapture:\n\n def __init__(self):\n self.root = tk.Tk()\n self.root.attributes('-fullscreen', True)\n self.root.attributes('-topmost', True)\n self.root.attributes('-alpha', 0.5)\n self.root.configure(bg='white')\n self.root.bind('<Escape>', lambda e: self.root.quit())\n\n self.start_position = None\n self.selection = None\n\n def on_click(self, x, y, button, pressed):\n if pressed:\n self.start_position = (x, y)\n self.selection = tk.Canvas(self.root, cursor=\"cross\", bg='black')\n self.selection.place(x=x, y=y, width=1, height=1)\n else:\n self.root.quit()\n\n def on_drag(self, x, y):\n if self.start_position:\n self.selection.place_configure(width=max(1, x - self.start_position[0]), height=max(1, y - self.start_position[1]))\n\n\n def capture(self):\n screenshot = ImageGrab.grab(bbox=(self.x0, self.y0, self.x1, self.y1))\n #screenshot = ImageGrab.grab(bbox=(self.start_position[0], self.start_position[1], self.end_position[0], self.end_position[1]))\n return screenshot\n\n\n def listen(self):\n # マウスリスナーの開始\n listener = mouse.Listener(on_click=self.on_click, on_move=self.on_drag)\n listener.start()\n\n self.root.mainloop()\n listener.stop()\n self.x0, self.y0 = self.start_position\n self.x1, self.y1 = self.selection.winfo_x() + self.selection.winfo_width(), self.selection.winfo_y() + self.selection.winfo_height()"
}
] | from diffusers.utils import load_image
from tkinter import ttk
from lcm import get_pipe, LCM_run
from capture import ScreenCapture
import tkinter as tk
import threading | 2,270 |
class ConfigWindow:
def __init__(self):
master = tk.Tk()
self.run_thread = None
self.running = False
self.master = master
master.title("Configuration")
        master.geometry("400x500") # set the window size
        master.attributes("-topmost", True) # keep the window always on top
style = ttk.Style()
style.configure("TLabel", font=("Arial", 12))
style.configure("TEntry", padding=5)
style.configure("TButton", padding=5, font=("Arial", 10))
        # LCM model name
ttk.Label(master, text="LCMモデル名").grid(row=0, column=0, padx=10, pady=10, sticky="w")
self.lcm_model_name = tk.StringVar(value="latent-consistency/lcm-lora-sdv1-5")
ttk.Entry(master, textvariable=self.lcm_model_name, width=30).grid(row=0, column=1, padx=10, pady=10)
        # generation model name
ttk.Label(master, text="生成モデル名").grid(row=1, column=0, padx=10, pady=10, sticky="w")
self.generation_model_name = tk.StringVar(value="852wa/SDHK")
ttk.Entry(master, textvariable=self.generation_model_name, width=30).grid(row=1, column=1, padx=10, pady=10)
        # VAE model path
ttk.Label(master, text="vaeモデルパス").grid(row=2, column=0, padx=10, pady=10, sticky="w")
self.vae_model_path = tk.StringVar()
ttk.Entry(master, textvariable=self.vae_model_path, width=30).grid(row=2, column=1, padx=10, pady=10)
        # LoRA model path
ttk.Label(master, text="LoRAモデルパス").grid(row=3, column=0, padx=10, pady=10, sticky="w")
self.LoRA_model_path = tk.StringVar()
ttk.Entry(master, textvariable=self.LoRA_model_path, width=30).grid(row=3, column=1, padx=10, pady=10)
# LoRAstrength
ttk.Label(master, text="LoRAstrength").grid(row=4, column=0, padx=10, pady=10, sticky="w")
self.LoRAstrength = tk.StringVar(value=1.0)
ttk.Entry(master, textvariable=self.LoRAstrength, width=30).grid(row=4, column=1, padx=10, pady=10)
        # prompt
ttk.Label(master, text="プロンプト").grid(row=5, column=0, padx=10, pady=10, sticky="w")
self.prompt = tk.StringVar()
ttk.Entry(master, textvariable=self.prompt, width=30).grid(row=5, column=1, padx=10, pady=10)
# strength
ttk.Label(master, text="strength").grid(row=6, column=0, padx=10, pady=10, sticky="w")
self.strength = tk.StringVar(value=0.75)
ttk.Entry(master, textvariable=self.strength, width=30).grid(row=6, column=1, padx=10, pady=10)
# num_inference_steps
ttk.Label(master, text="num_inference_steps").grid(row=7, column=0, padx=10, pady=10, sticky="w")
self.num_inference_steps = tk.StringVar(value=8)
ttk.Entry(master, textvariable=self.num_inference_steps, width=30).grid(row=7, column=1, padx=10, pady=10)
        # screen capture
capture_button = ttk.Button(master, text="キャプチャ開始", command=self.capture_screen)
capture_button.grid(row=8, column=0, columnspan=2, padx=10, pady=10, sticky="ew")
        # parameter update
capture_button = ttk.Button(master, text="パラメータ更新", command=self.update_param)
capture_button.grid(row=9, column=0, columnspan=2, padx=10, pady=10, sticky="ew")
def update_param(self):
self.num_inference_steps_value = int(self.num_inference_steps.get())
self.strength_value = float(self.strength.get())
self.LoRAstrength_value = float(self.LoRAstrength.get())
def capture_screen(self):
if self.run_thread is not None:
self.running = False
self.run_thread.join()
|
class ConfigWindow:
def __init__(self):
master = tk.Tk()
self.run_thread = None
self.running = False
self.master = master
master.title("Configuration")
        master.geometry("400x500") # set the window size
        master.attributes("-topmost", True) # keep the window always on top
style = ttk.Style()
style.configure("TLabel", font=("Arial", 12))
style.configure("TEntry", padding=5)
style.configure("TButton", padding=5, font=("Arial", 10))
        # LCM model name
ttk.Label(master, text="LCMモデル名").grid(row=0, column=0, padx=10, pady=10, sticky="w")
self.lcm_model_name = tk.StringVar(value="latent-consistency/lcm-lora-sdv1-5")
ttk.Entry(master, textvariable=self.lcm_model_name, width=30).grid(row=0, column=1, padx=10, pady=10)
        # generation model name
ttk.Label(master, text="生成モデル名").grid(row=1, column=0, padx=10, pady=10, sticky="w")
self.generation_model_name = tk.StringVar(value="852wa/SDHK")
ttk.Entry(master, textvariable=self.generation_model_name, width=30).grid(row=1, column=1, padx=10, pady=10)
        # VAE model path
ttk.Label(master, text="vaeモデルパス").grid(row=2, column=0, padx=10, pady=10, sticky="w")
self.vae_model_path = tk.StringVar()
ttk.Entry(master, textvariable=self.vae_model_path, width=30).grid(row=2, column=1, padx=10, pady=10)
        # LoRA model path
ttk.Label(master, text="LoRAモデルパス").grid(row=3, column=0, padx=10, pady=10, sticky="w")
self.LoRA_model_path = tk.StringVar()
ttk.Entry(master, textvariable=self.LoRA_model_path, width=30).grid(row=3, column=1, padx=10, pady=10)
# LoRAstrength
ttk.Label(master, text="LoRAstrength").grid(row=4, column=0, padx=10, pady=10, sticky="w")
self.LoRAstrength = tk.StringVar(value=1.0)
ttk.Entry(master, textvariable=self.LoRAstrength, width=30).grid(row=4, column=1, padx=10, pady=10)
        # prompt
ttk.Label(master, text="プロンプト").grid(row=5, column=0, padx=10, pady=10, sticky="w")
self.prompt = tk.StringVar()
ttk.Entry(master, textvariable=self.prompt, width=30).grid(row=5, column=1, padx=10, pady=10)
# strength
ttk.Label(master, text="strength").grid(row=6, column=0, padx=10, pady=10, sticky="w")
self.strength = tk.StringVar(value=0.75)
ttk.Entry(master, textvariable=self.strength, width=30).grid(row=6, column=1, padx=10, pady=10)
# num_inference_steps
ttk.Label(master, text="num_inference_steps").grid(row=7, column=0, padx=10, pady=10, sticky="w")
self.num_inference_steps = tk.StringVar(value=8)
ttk.Entry(master, textvariable=self.num_inference_steps, width=30).grid(row=7, column=1, padx=10, pady=10)
        # screen capture
capture_button = ttk.Button(master, text="キャプチャ開始", command=self.capture_screen)
capture_button.grid(row=8, column=0, columnspan=2, padx=10, pady=10, sticky="ew")
        # parameter update
capture_button = ttk.Button(master, text="パラメータ更新", command=self.update_param)
capture_button.grid(row=9, column=0, columnspan=2, padx=10, pady=10, sticky="ew")
def update_param(self):
self.num_inference_steps_value = int(self.num_inference_steps.get())
self.strength_value = float(self.strength.get())
self.LoRAstrength_value = float(self.LoRAstrength.get())
def capture_screen(self):
if self.run_thread is not None:
self.running = False
self.run_thread.join()
| self.screen_capture = ScreenCapture() | 2 | 2023-11-17 08:10:27+00:00 | 4k |
jeromeleong/mirrors-zhile-io-pandora | src/pandora/turbo/chat.py | [
{
"identifier": "Conversations",
"path": "src/pandora/turbo/base.py",
"snippet": "class Conversations:\n def __init__(self):\n self.__data = []\n\n def list(self, offset, limit):\n return len(self.__data), self.__data[offset: limit]\n\n def clear(self):\n self.__data = []\n\n def delete(self, conversation):\n self.__data = [x for x in self.__data if conversation.conversation_id != x.conversation_id]\n\n def new(self):\n conversation = Conversation()\n self.__data.insert(0, conversation)\n\n return conversation\n\n def get(self, conversation_id):\n for x in self.__data:\n if x.conversation_id == conversation_id:\n return x\n\n return None\n\n def guard_get(self, conversation_id):\n conversation = self.get(conversation_id)\n if not conversation:\n raise Exception('Can\\'t load conversation {}'.format(conversation_id))\n\n return conversation"
},
{
"identifier": "UserPrompt",
"path": "src/pandora/turbo/base.py",
"snippet": "class UserPrompt(Prompt):\n def __init__(self, prompt_id, content, parent):\n super().__init__(prompt_id=prompt_id, role='user', content=content, parent=parent)\n\n def get_message(self, end=True):\n return {\n 'id': self.prompt_id,\n 'author': {\n 'role': self.role,\n 'name': None,\n 'metadata': {}\n },\n 'create_time': self.create_time,\n 'update_time': None,\n 'content': {\n 'content_type': 'text',\n 'parts': [self.content]\n },\n 'end_turn': None,\n 'weight': 1.0,\n 'metadata': {\n 'timestamp_': 'absolute',\n 'message_type': None\n },\n 'recipient': 'all'\n }"
},
{
"identifier": "Prompt",
"path": "src/pandora/turbo/base.py",
"snippet": "class Prompt:\n def __init__(self, prompt_id=None, role=None, content=None, parent=None):\n self.prompt_id = prompt_id or str(uuid.uuid4())\n self.parent_id = None\n self.role = role\n self.content = content\n self.children = []\n self.create_time = dt.now().timestamp()\n\n if parent:\n self.parent_id = parent.prompt_id\n parent.add_child(self.prompt_id)\n\n def add_child(self, prompt_id):\n self.children.append(prompt_id)\n\n def get_message(self, end=True):\n return None\n\n def get_info(self):\n return {\n 'id': self.prompt_id,\n 'message': self.get_message(),\n 'parent': self.parent_id,\n 'children': self.children\n }"
},
{
"identifier": "SystemPrompt",
"path": "src/pandora/turbo/base.py",
"snippet": "class SystemPrompt(Prompt):\n def __init__(self, content, parent):\n super().__init__(role='system', content=content, parent=parent)\n\n def get_message(self, end=True):\n return {\n 'id': self.prompt_id,\n 'author': {\n 'role': self.role,\n 'name': None,\n 'metadata': {}\n },\n 'create_time': self.create_time,\n 'update_time': None,\n 'content': {\n 'content_type': 'text',\n 'parts': ['']\n },\n 'end_turn': True,\n 'weight': 1.0,\n 'metadata': {},\n 'recipient': 'all'\n }"
},
{
"identifier": "ChatCompletion",
"path": "src/pandora/openai/api.py",
"snippet": "class ChatCompletion(API):\n def __init__(self, proxy=None):\n self.session = requests.Session()\n self.req_kwargs = {\n 'proxies': {\n 'http': proxy,\n 'https': proxy,\n } if proxy else None,\n 'verify': where(),\n 'timeout': 600,\n 'allow_redirects': False,\n }\n\n self.user_agent = 'pandora/{}'.format(__version__)\n\n super().__init__(proxy, self.req_kwargs['verify'])\n\n def __get_headers(self, api_key):\n return {\n 'Authorization': 'Bearer ' + api_key,\n 'User-Agent': self.user_agent,\n 'Content-Type': 'application/json',\n }\n\n def request(self, api_key, model, messages, stream=True, **kwargs):\n data = {\n 'model': model,\n 'messages': messages,\n **kwargs,\n 'stream': stream,\n }\n\n return self.__request_conversation(api_key, data, stream)\n\n def __request_conversation(self, api_key, data, stream):\n default = default_api_prefix()\n\n if api_key.startswith('fk-') or api_key.startswith('pk-'):\n prefix = default\n else:\n prefix = getenv('OPENAI_API_PREFIX', default)\n url = '{}/v1/chat/completions'.format(prefix)\n\n if stream:\n headers = {**self.__get_headers(api_key), 'Accept': 'text/event-stream'}\n return self._request_sse(url, headers, data)\n\n resp = self.session.post(url=url, headers=self.__get_headers(api_key), json=data, **self.req_kwargs)\n\n def __generate_wrap():\n yield resp.json()\n\n return resp.status_code, resp.headers, __generate_wrap()"
},
{
"identifier": "gpt_num_tokens",
"path": "src/pandora/openai/token.py",
"snippet": "def gpt_num_tokens(messages, model='gpt-3.5-turbo'):\n encoding = tiktoken.encoding_for_model(model)\n\n num_tokens = 0\n for message in messages:\n num_tokens += 4\n for key, value in message.items():\n num_tokens += len(encoding.encode(value))\n if 'name' == key:\n num_tokens -= 1\n num_tokens += 2\n\n return num_tokens"
}
] | import json
from datetime import datetime as dt
from os import getenv
from requests import Response
from .base import Conversations, UserPrompt, Prompt, SystemPrompt
from ..openai.api import ChatCompletion
from ..openai.token import gpt_num_tokens | 3,159 | return resp.json()
def clear_conversations(self, raw=False, token=None):
def __shadow():
self.__get_conversations(token).clear()
result = {
'success': True
}
return self.__wrap_response(result)
resp = __shadow()
if raw:
return resp
return resp.json()['success']
def del_conversation(self, conversation_id, raw=False, token=None):
def __shadow():
conversations = self.__get_conversations(token)
try:
conversation = conversations.guard_get(conversation_id)
except Exception as e:
return self.__out_error(str(e), 404)
conversations.delete(conversation)
result = {
'success': True
}
return self.__wrap_response(result)
resp = __shadow()
if raw:
return resp
if resp.status_code != 200:
raise Exception('delete conversation failed: ' + resp.json()['detail'])
return resp.json()['success']
def gen_conversation_title(self, conversation_id, model, message_id, raw=False, token=None):
def __shadow():
conversation = self.__get_conversations(token).get(conversation_id)
if not conversation:
return self.__out_error('Conversation not found', 404)
if 'New chat' != conversation.title:
message = {
'message': 'Conversation {} already has title \'{}\''.format(conversation_id, conversation.title)
}
return self.__wrap_response(message)
messages = conversation.get_messages_directly(message_id)
messages.append({'role': 'user', 'content': self.TITLE_PROMPT})
status, header, generator = self.api.request(self.get_access_token(token), model, messages, False)
last_ok, last = self.__get_completion(status, next(generator))
if not last_ok:
return self.__out_error(last['detail'], status)
conversation.set_title(last.strip('"'))
result = {
'title': conversation.title
}
return self.__wrap_response(result)
resp = __shadow()
if raw:
return resp
if resp.status_code != 200:
raise Exception('generate title failed: ' + resp.text)
return resp.json()['title']
def set_conversation_title(self, conversation_id, title, raw=False, token=None):
def __shadow():
try:
conversation = self.__get_conversations(token).guard_get(conversation_id)
except Exception as e:
return self.__out_error(str(e), 404)
conversation.set_title(title)
result = {
'success': True
}
return self.__wrap_response(result)
resp = __shadow()
if raw:
return resp
if resp.status_code != 200:
raise Exception('update conversation failed: ' + resp.json()['detail'])
return resp.json()['success']
def talk(self, content, model, message_id, parent_message_id, conversation_id=None, stream=True, token=None):
system_prompt = None
if conversation_id:
conversation = self.__get_conversations(token).get(conversation_id)
if not conversation:
return self.__out_error_stream('Conversation not found', 404)
parent = conversation.get_prompt(parent_message_id)
else:
conversation = self.__get_conversations(token).new()
| # -*- coding: utf-8 -*-
class TurboGPT:
DEFAULT_SYSTEM_PROMPT = 'You are ChatGPT, a large language model trained by OpenAI. ' \
'Answer as concisely as possible.\nKnowledge cutoff: 2021-09-01\n' \
'Current date: {}'.format(dt.now().strftime('%Y-%m-%d'))
TITLE_PROMPT = 'Generate a brief title for our conversation.'
MAX_TOKENS = {
'gpt-3.5-turbo': 4096,
'gpt-4': 8192,
'gpt-4-32k': 32768,
}
FAKE_TOKENS = {
'gpt-3.5-turbo': 8191,
'gpt-4': 4095,
'gpt-4-32k': 8195,
}
def __init__(self, api_keys: dict, proxy=None):
self.api_keys = api_keys
self.api_keys_key_list = list(api_keys)
self.default_api_keys_key = self.api_keys_key_list[0]
self.api = ChatCompletion(proxy)
self.conversations_map = {}
self.system_prompt = getenv('API_SYSTEM_PROMPT', self.DEFAULT_SYSTEM_PROMPT)
def __get_conversations(self, api_keys_key=None):
if api_keys_key is None:
api_keys_key = self.default_api_keys_key
if api_keys_key not in self.conversations_map:
self.conversations_map[api_keys_key] = Conversations()
return self.conversations_map[api_keys_key]
def __is_fake_api(self, token=None):
api_key = self.get_access_token(token)
return api_key.startswith('fk-') or api_key.startswith('pk-')
def get_access_token(self, token_key=None):
return self.api_keys[token_key or self.default_api_keys_key]
def list_token_keys(self):
return self.api_keys_key_list
def list_models(self, raw=False, token=None):
fake_api = self.__is_fake_api(token)
models = {
'models': [
{
'slug': 'gpt-3.5-turbo',
'max_tokens': self.FAKE_TOKENS['gpt-3.5-turbo'] if fake_api else self.MAX_TOKENS['gpt-3.5-turbo'],
'title': 'GPT-3.5',
'description': 'Turbo is the api model that powers ChatGPT',
'tags': []
},
{
'slug': 'gpt-4',
'max_tokens': self.FAKE_TOKENS['gpt-4'] if fake_api else self.MAX_TOKENS['gpt-4'],
'title': 'GPT-4',
'description': 'More capable than any GPT-3.5, able to do complex tasks, and optimized for chat',
'tags': []
},
{
'slug': 'gpt-4-32k',
'max_tokens': self.FAKE_TOKENS['gpt-4-32k'] if fake_api else self.MAX_TOKENS['gpt-4-32k'],
'title': 'GPT-4 32K',
'description': 'Same capabilities as the base gpt-4 mode but with 4x the context length',
'tags': []
}
]
}
if raw:
return self.__wrap_response(models)
return models['models']
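    # For example (editor-added sketch; the 'default' key name and key value are
    # hypothetical):
    # api = TurboGPT(api_keys={'default': 'sk-...'})
    # [m['slug'] for m in api.list_models()]  # -> ['gpt-3.5-turbo', 'gpt-4', 'gpt-4-32k']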
def list_conversations(self, offset, limit, raw=False, token=None):
offset = int(offset)
limit = int(limit)
total, items = self.__get_conversations(token).list(offset, limit)
stripped = []
for item in items:
stripped.append({
'id': item.conversation_id,
'title': item.title,
'create_time': dt.utcfromtimestamp(item.create_time).isoformat(),
})
result = {'items': stripped, 'total': total, 'limit': limit, 'offset': offset}
if raw:
return self.__wrap_response(result)
return result
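    # Illustrative return value (editor sketch): with no stored conversations,
    # list_conversations(0, 20) yields
    # {'items': [], 'total': 0, 'limit': 20, 'offset': 0}.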
def get_conversation(self, conversation_id, raw=False, token=None):
def __shadow():
try:
conversation = self.__get_conversations(token).guard_get(conversation_id)
except Exception as e:
return self.__out_error(str(e), 404)
return self.__wrap_response(conversation.get_info())
resp = __shadow()
if raw:
return resp
if resp.status_code != 200:
raise Exception('get conversation failed: ' + resp.json()['detail'])
return resp.json()
def clear_conversations(self, raw=False, token=None):
def __shadow():
self.__get_conversations(token).clear()
result = {
'success': True
}
return self.__wrap_response(result)
resp = __shadow()
if raw:
return resp
return resp.json()['success']
def del_conversation(self, conversation_id, raw=False, token=None):
def __shadow():
conversations = self.__get_conversations(token)
try:
conversation = conversations.guard_get(conversation_id)
except Exception as e:
return self.__out_error(str(e), 404)
conversations.delete(conversation)
result = {
'success': True
}
return self.__wrap_response(result)
resp = __shadow()
if raw:
return resp
if resp.status_code != 200:
raise Exception('delete conversation failed: ' + resp.json()['detail'])
return resp.json()['success']
def gen_conversation_title(self, conversation_id, model, message_id, raw=False, token=None):
def __shadow():
conversation = self.__get_conversations(token).get(conversation_id)
if not conversation:
return self.__out_error('Conversation not found', 404)
if 'New chat' != conversation.title:
message = {
'message': 'Conversation {} already has title \'{}\''.format(conversation_id, conversation.title)
}
return self.__wrap_response(message)
messages = conversation.get_messages_directly(message_id)
messages.append({'role': 'user', 'content': self.TITLE_PROMPT})
status, header, generator = self.api.request(self.get_access_token(token), model, messages, False)
last_ok, last = self.__get_completion(status, next(generator))
if not last_ok:
return self.__out_error(last['detail'], status)
conversation.set_title(last.strip('"'))
result = {
'title': conversation.title
}
return self.__wrap_response(result)
resp = __shadow()
if raw:
return resp
if resp.status_code != 200:
raise Exception('generate title failed: ' + resp.text)
return resp.json()['title']
def set_conversation_title(self, conversation_id, title, raw=False, token=None):
def __shadow():
try:
conversation = self.__get_conversations(token).guard_get(conversation_id)
except Exception as e:
return self.__out_error(str(e), 404)
conversation.set_title(title)
result = {
'success': True
}
return self.__wrap_response(result)
resp = __shadow()
if raw:
return resp
if resp.status_code != 200:
raise Exception('update conversation failed: ' + resp.json()['detail'])
return resp.json()['success']
def talk(self, content, model, message_id, parent_message_id, conversation_id=None, stream=True, token=None):
system_prompt = None
if conversation_id:
conversation = self.__get_conversations(token).get(conversation_id)
if not conversation:
return self.__out_error_stream('Conversation not found', 404)
parent = conversation.get_prompt(parent_message_id)
else:
conversation = self.__get_conversations(token).new() | parent = conversation.add_prompt(Prompt(parent_message_id)) | 2 | 2023-11-12 10:31:05+00:00 | 4k |
leeyuentuen/polestar_api | custom_components/polestar_api/polestar.py | [
{
"identifier": "PolestarApiException",
"path": "custom_components/polestar_api/pypolestar/exception.py",
"snippet": "class PolestarApiException(Exception):\n \"\"\"Base class for exceptions in this module.\"\"\""
},
{
"identifier": "PolestarAuthException",
"path": "custom_components/polestar_api/pypolestar/exception.py",
"snippet": "class PolestarAuthException(Exception):\n \"\"\"Base class for exceptions in Auth module.\"\"\"\n\n error_code: int = None\n message: str = None\n\n def __init__(self, message, error_code) -> None:\n \"\"\"Initialize the Polestar API.\"\"\"\n super().__init__(message)\n self.error_code = error_code"
},
{
"identifier": "PolestarApi",
"path": "custom_components/polestar_api/pypolestar/polestar.py",
"snippet": "class PolestarApi:\n \"\"\"Main class for handling connections with the Polestar API.\"\"\"\n\n def __init__(self, username: str, password: str) -> None:\n \"\"\"Initialize the Polestar API.\"\"\"\n self.auth = PolestarAuth(username, password)\n self.updating = False\n self.cache_data = {}\n self.latest_call_code = None\n self._client_session = httpx.AsyncClient()\n self.next_update = None\n\n\n async def init(self):\n \"\"\"Initialize the Polestar API.\"\"\"\n try:\n await self.auth.get_token()\n\n if self.auth.access_token is None:\n return\n\n await self._get_vehicle_data()\n\n except PolestarAuthException as e:\n _LOGGER.exception(\"Auth Exception: %s\", str(e))\n\n def get_latest_data(self, query: str, field_name: str) -> dict or bool or None:\n \"\"\"Get the latest data from the Polestar API.\"\"\"\n if self.cache_data and self.cache_data[query]:\n data = self.cache_data[query]['data']\n if data is None:\n return False\n return self._get_field_name_value(field_name, data)\n\n def _get_field_name_value(self, field_name: str, data: dict) -> str or bool or None:\n if field_name is None or data is None:\n return None\n\n if '/' in field_name:\n field_names = field_name.split('/')\n for key in field_names:\n if isinstance(data, dict) and key in data:\n data = data[key]\n else:\n return None\n return data\n\n if isinstance(data, dict) and field_name in data:\n return data[field_name]\n\n return None\n\n async def _get_odometer_data(self, vin: str):\n \"\"\"\" Get the latest odometer data from the Polestar API.\"\"\"\n params = {\n \"query\": \"query GetOdometerData($vin: String!) { getOdometerData(vin: $vin) { averageSpeedKmPerHour eventUpdatedTimestamp { iso unix __typename } odometerMeters tripMeterAutomaticKm tripMeterManualKm __typename }}\",\n \"operationName\": \"GetOdometerData\",\n \"variables\": \"{\\\"vin\\\":\\\"\" + vin + \"\\\"}\"\n }\n result = await self.get_graph_ql(params)\n\n if result and result['data']:\n # put result in cache\n self.cache_data[ODO_METER_DATA] = {\n 'data': result['data'][ODO_METER_DATA], 'timestamp': datetime.now()}\n\n async def _get_battery_data(self, vin: str):\n params = {\n \"query\": \"query GetBatteryData($vin: String!) 
{ getBatteryData(vin: $vin) { averageEnergyConsumptionKwhPer100Km batteryChargeLevelPercentage chargerConnectionStatus chargingCurrentAmps chargingPowerWatts chargingStatus estimatedChargingTimeMinutesToTargetDistance estimatedChargingTimeToFullMinutes estimatedDistanceToEmptyKm estimatedDistanceToEmptyMiles eventUpdatedTimestamp { iso unix __typename } __typename }}\",\n \"operationName\": \"GetBatteryData\",\n \"variables\": \"{\\\"vin\\\":\\\"\" + vin + \"\\\"}\"\n }\n\n result = await self.get_graph_ql(params)\n\n if result and result['data']:\n # put result in cache\n self.cache_data[BATTERY_DATA] = {\n 'data': result['data'][BATTERY_DATA], 'timestamp': datetime.now()}\n\n async def _get_vehicle_data(self):\n \"\"\"\" Get the latest vehicle data from the Polestar API.\"\"\"\n # get Vehicle Data\n params = {\n \"query\": \"query getCars { getConsumerCarsV2 { vin internalVehicleIdentifier modelYear content { model { code name __typename } images { studio { url angles __typename } __typename } __typename } hasPerformancePackage registrationNo deliveryDate currentPlannedDeliveryDate __typename }}\",\n \"operationName\": \"getCars\",\n \"variables\": \"{}\"\n }\n\n result = await self.get_graph_ql(params)\n if result and result['data']:\n # check if there are cars in the account\n if result['data'][CAR_INFO_DATA] is None or len(result['data'][CAR_INFO_DATA]) == 0:\n _LOGGER.exception(\"No cars found in account\")\n # throw new exception\n raise PolestarNoDataException(\"No cars found in account\")\n\n self.cache_data[CAR_INFO_DATA] = {\n 'data': result['data'][CAR_INFO_DATA][0], 'timestamp': datetime.now()}\n\n async def get_ev_data(self, vin: str):\n \"\"\"Get the latest ev data from the Polestar API.\"\"\"\n if self.updating:\n return\n\n if self.next_update is not None and self.next_update > datetime.now():\n _LOGGER.debug(\"Skipping update, next update at %s\", self.next_update)\n return\n\n self.updating = True\n\n try:\n if self.auth.token_expiry is None:\n raise PolestarAuthException(\"No token expiry found\", 500)\n if (self.auth.token_expiry - datetime.now()).total_seconds() < 300:\n await self.auth.get_token(refresh=True)\n except PolestarAuthException as e:\n self.latest_call_code = 500\n _LOGGER.warning(\"Auth Exception: %s\", str(e))\n self.updating = False\n return\n\n async def call_api(func):\n try:\n await func()\n except PolestarNotAuthorizedException:\n await self.auth.get_token()\n except PolestarApiException as e:\n self.latest_call_code = 500\n _LOGGER.warning('Failed to get %s data %s',\n func.__name__, str(e))\n\n await call_api(lambda: self._get_odometer_data(vin))\n await call_api(lambda: self._get_battery_data(vin))\n\n self.updating = False\n self.next_update = datetime.now() + timedelta(seconds=5)\n\n def get_cache_data(self, query: str, field_name: str, skip_cache: bool = False):\n \"\"\"\" Get the latest data from the cache.\"\"\"\n if query is None:\n return None\n\n if self.cache_data and self.cache_data.get(query):\n cache_entry = self.cache_data[query]\n data = cache_entry['data']\n if data is not None:\n if skip_cache is True or cache_entry['timestamp'] + timedelta(seconds=CACHE_TIME) > datetime.now():\n return self._get_field_name_value(field_name, data)\n return None\n\n async def get_graph_ql(self, params: dict):\n \"\"\"Get the latest data from the Polestar API.\"\"\"\n headers = {\n \"Content-Type\": \"application/json\",\n \"authorization\": f\"Bearer {self.auth.access_token}\"\n }\n\n url = \"https://pc-api.polestar.com/eu-north-1/my-star/\"\n 
result = await self._client_session.get(url, params=params, headers=headers)\n self.latest_call_code = result.status_code\n\n if result.status_code == 401:\n raise PolestarNotAuthorizedException(\"Unauthorized Exception\")\n\n if result.status_code != 200:\n raise PolestarApiException(f\"Get GraphQL error: {result.text}\")\n\n resultData = result.json()\n if resultData.get('errors'):\n self.latest_call_code = 500\n error_message = resultData['errors'][0]['message']\n if error_message == \"User not authenticated\":\n raise PolestarNotAuthorizedException(\"Unauthorized Exception\")\n _LOGGER.error(resultData.get('errors'))\n raise PolestarApiException(error_message)\n\n _LOGGER.debug(resultData)\n return resultData"
}
] | from datetime import datetime, timedelta
from urllib3 import disable_warnings
from homeassistant.core import HomeAssistant
from homeassistant.util.unit_system import METRIC_SYSTEM, UnitSystem
from .pypolestar.exception import PolestarApiException, PolestarAuthException
from .pypolestar.polestar import PolestarApi
import logging
import httpx | 2,185 | """Polestar API for Polestar integration."""
POST_HEADER_JSON = {"Content-Type": "application/json"}
_LOGGER = logging.getLogger(__name__)
class Polestar:
"""Polestar EV integration."""
def __init__(self,
hass: HomeAssistant,
username: str,
password: str
) -> None:
self.id = None
self.name = "Polestar "
| """Polestar API for Polestar integration."""
POST_HEADER_JSON = {"Content-Type": "application/json"}
_LOGGER = logging.getLogger(__name__)
class Polestar:
"""Polestar EV integration."""
def __init__(self,
hass: HomeAssistant,
username: str,
password: str
) -> None:
self.id = None
self.name = "Polestar " | self.polestarApi = PolestarApi(username, password) | 2 | 2023-11-17 21:24:36+00:00 | 4k |
dubverse-ai/MahaTTS | tts.py | [
{
"identifier": "config",
"path": "maha_tts/config.py",
"snippet": "class config:\n \n semantic_model_centroids = 10000 + 1\n seed_value = 3407\n\n # Text to Semantic\n t2s_position = 4096\n langs = ['english','tamil', 'telugu', 'punjabi', 'marathi', 'hindi', 'gujarati', 'bengali', 'assamese']\n lang_index = {i:j for j,i in enumerate(langs)}\n # Semantic to acoustic\n sa_timesteps_max = 1000\n\n #Acoustic Properties\n CLIP_LENGTH = 500\n MAX_WAV_VALUE=32768.0\n filter_length=1024\n hop_length=256 #256\n window = 'hann'\n win_length=1024\n n_mel_channels=80\n sampling_rate=22050\n mel_fmin=0.0\n mel_fmax=8000.0"
},
{
"identifier": "load_models",
"path": "maha_tts/inference.py",
"snippet": "def load_models(name,device=torch.device('cpu')):\n '''\n Load pre-trained models for different components of a text-to-speech system.\n\n Args:\n device (str): The target device for model loading (e.g., 'cpu' or 'cuda').\n checkpoint_diff (str): File path to the pre-trained model checkpoint for the diffusion model.\n checkpoint_ts (str): File path to the pre-trained model checkpoint for the text-to-semantic model.\n checkpoint_voco (str): File path to the pre-trained model checkpoint for the vocoder model.\n voco_config_path (str): File path to the configuration file for the vocoder model.\n\n Returns:\n diff_model (object): Loaded diffusion model for semantic-to-acoustic tokens.\n ts_model (object): Loaded text-to-semantic model for converting text-to-semantic tokens.\n vocoder (object): Loaded vocoder model for generating waveform from acoustic tokens.\n diffuser (object): Configured diffuser object for use in the diffusion model.\n '''\n\n assert name in model_dirs, \"no model name \"+name\n\n checkpoint_diff = os.path.join(DEFAULT_MODELS_DIR,name,'s2a_latest.pt')\n checkpoint_ts = os.path.join(DEFAULT_MODELS_DIR,name,'t2s_best.pt')\n checkpoint_voco = os.path.join(DEFAULT_MODELS_DIR,'hifigan','g_02500000')\n voco_config_path = os.path.join(DEFAULT_MODELS_DIR,'hifigan','config.json')\n \n # for i in [checkpoint_diff,checkpoint_ts,checkpoint_voco,voco_config_path]:\n if not os.path.exists(checkpoint_diff) or not os.path.exists(checkpoint_ts):\n download_model(name)\n \n if not os.path.exists(checkpoint_voco) or not os.path.exists(voco_config_path):\n download_model('hifigan')\n\n diff_model = load_diff_model(checkpoint_diff,device)\n ts_model = load_TS_model(checkpoint_ts,device,name)\n vocoder = load_vocoder_model(voco_config_path,checkpoint_voco,device)\n diffuser = load_diffuser()\n\n return diff_model,ts_model,vocoder,diffuser"
},
{
"identifier": "load_diffuser",
"path": "maha_tts/inference.py",
"snippet": "def load_diffuser(timesteps = 100, guidance=3):\n '''\n Load and configure a diffuser for denoising and guidance in the diffusion model.\n\n Args:\n timesteps (int): Number of denoising steps out of 1000. Default is 100.\n guidance (int): Conditioning-free guidance parameter. Default is 3.\n\n Returns:\n diffuser (object): Configured diffuser object for use in the diffusion model.\n\n Description:\n The `load_diffuser` function initializes a diffuser with specific settings for denoising and guidance.\n '''\n betas = get_named_beta_schedule('linear',config.sa_timesteps_max)\n diffuser = SpacedDiffusion(use_timesteps=space_timesteps(1000, [timesteps]), model_mean_type='epsilon',\n model_var_type='learned_range', loss_type='rescaled_mse', betas=betas,\n conditioning_free=True, conditioning_free_k=guidance)\n diffuser.training=False\n return diffuser"
},
{
"identifier": "infer_tts",
"path": "maha_tts/inference.py",
"snippet": "def infer_tts(text,ref_clips,diffuser,diff_model,ts_model,vocoder,language=None):\n '''\n Generate audio from the given text using a text-to-speech (TTS) pipeline.\n\n Args:\n text (str): The input text to be synthesized into speech.\n ref_clips (list): A list of paths to reference audio clips, preferably more than 3 clips.\n diffuser (object): A diffusion object used for denoising and guidance in the diffusion model. It should be obtained using load_diffuser.\n diff_model: diffusion model for semantic-to-acoustic tokens.\n ts_model: text-to-semantic model for converting text-to-semantic tokens.\n vocoder: vocoder model for generating waveform from acoustic tokens.\n\n Returns:\n audio (numpy.ndarray): Generated audio waveform.\n sampling_rate (int): Sampling rate of the generated audio.\n\n Description:\n The `infer_tts` function takes input text and reference audio clips, and processes them through a TTS pipeline.\n It first performs text preprocessing and generates semantic tokens using the specified text synthesis model.\n Then, it infers mel-spectrogram features using the diffusion model and the provided diffuser.\n Finally, it generates audio from the mel-spectrogram using the vocoder.\n\n Note: The function requires properly configured diff_model, ts_model, and vocoder objects for successful TTS.\n\n Example usage:\n audio, sampling_rate = infer_tts(\"Hello, how are you?\", ref_clips, diffuser, diff_model, ts_model, vocoder)\n '''\n device = next(ts_model.parameters()).device\n text = english_cleaners(text)\n ref_mels = get_ref_mels(ref_clips)\n with torch.no_grad():\n sem_tok,_ = generate_semantic_tokens(\n text,\n ts_model,\n ref_mels,\n language,\n temp = 0.7,\n top_p= 0.8,\n top_k= 5,\n n_tot_steps = 1000,\n device = device\n )\n mel = infer_mel(diff_model,int(((sem_tok.shape[-1] * 320 / 16000) * 22050/256)+1),sem_tok.unsqueeze(0) + 1,\n normalize_tacotron_mel(ref_mels),diffuser,temperature=0.5)\n\n audio = infer_wav(mel,vocoder)\n \n return audio,config.sampling_rate"
}
] | import torch,glob
from maha_tts import load_diffuser,load_models,infer_tts,config
from scipy.io.wavfile import write | 1,667 |
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('Using:',device)
text = 'Printing, in the only sense with which we are at present concerned, differs from most if not from all the arts and crafts represented in the Exhibition.'
langauge = 'english'
|
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('Using:',device)
text = 'Printing, in the only sense with which we are at present concerned, differs from most if not from all the arts and crafts represented in the Exhibition.'
langauge = 'english' | language = torch.tensor(config.lang_index[langauge]).to(device).unsqueeze(0) | 0 | 2023-11-16 09:44:54+00:00 | 4k |
wjun0830/CGDETR | cg_detr/start_end_dataset.py | [
{
"identifier": "load_jsonl",
"path": "utils/basic_utils.py",
"snippet": "def load_jsonl(filename):\n with open(filename, \"r\") as f:\n return [json.loads(l.strip(\"\\n\")) for l in f.readlines()]"
},
{
"identifier": "l2_normalize_np_array",
"path": "utils/basic_utils.py",
"snippet": "def l2_normalize_np_array(np_array, eps=1e-5):\n \"\"\"np_array: np.ndarray, (*, D), where the last dim will be normalized\"\"\"\n return np_array / (np.linalg.norm(np_array, axis=-1, keepdims=True) + eps)"
},
{
"identifier": "pad_sequences_1d",
"path": "utils/tensor_utils.py",
"snippet": "def pad_sequences_1d(sequences, dtype=torch.long, device=torch.device(\"cpu\"), fixed_length=None):\n \"\"\" Pad a single-nested list or a sequence of n-d array (torch.tensor or np.ndarray)\n into a (n+1)-d array, only allow the first dim has variable lengths.\n Args:\n sequences: list(n-d tensor or list)\n dtype: np.dtype or torch.dtype\n device:\n fixed_length: pad all seq in sequences to fixed length. All seq should have a length <= fixed_length.\n return will be of shape [len(sequences), fixed_length, ...]\n Returns:\n padded_seqs: ((n+1)-d tensor) padded with zeros\n mask: (2d tensor) of the same shape as the first two dims of padded_seqs,\n 1 indicate valid, 0 otherwise\n Examples:\n >>> test_data_list = [[1,2,3], [1,2], [3,4,7,9]]\n >>> pad_sequences_1d(test_data_list, dtype=torch.long)\n >>> test_data_3d = [torch.randn(2,3,4), torch.randn(4,3,4), torch.randn(1,3,4)]\n >>> pad_sequences_1d(test_data_3d, dtype=torch.float)\n >>> test_data_list = [[1,2,3], [1,2], [3,4,7,9]]\n >>> pad_sequences_1d(test_data_list, dtype=np.float32)\n >>> test_data_3d = [np.random.randn(2,3,4), np.random.randn(4,3,4), np.random.randn(1,3,4)]\n >>> pad_sequences_1d(test_data_3d, dtype=np.float32)\n \"\"\"\n if isinstance(sequences[0], list):\n if \"torch\" in str(dtype):\n sequences = [torch.tensor(s, dtype=dtype, device=device) for s in sequences]\n else:\n sequences = [np.asarray(s, dtype=dtype) for s in sequences]\n\n extra_dims = sequences[0].shape[1:] # the extra dims should be the same for all elements\n lengths = [len(seq) for seq in sequences]\n if fixed_length is not None:\n max_length = fixed_length\n else:\n max_length = max(lengths)\n if isinstance(sequences[0], torch.Tensor):\n assert \"torch\" in str(dtype), \"dtype and input type does not match\"\n padded_seqs = torch.zeros((len(sequences), max_length) + extra_dims, dtype=dtype, device=device)\n mask = torch.zeros((len(sequences), max_length), dtype=torch.float32, device=device)\n else: # np\n assert \"numpy\" in str(dtype), \"dtype and input type does not match\"\n padded_seqs = np.zeros((len(sequences), max_length) + extra_dims, dtype=dtype)\n mask = np.zeros((len(sequences), max_length), dtype=np.float32)\n\n for idx, seq in enumerate(sequences):\n end = lengths[idx]\n padded_seqs[idx, :end] = seq\n mask[idx, :end] = 1\n return padded_seqs, mask # , lengths"
},
{
"identifier": "span_xx_to_cxw",
"path": "cg_detr/span_utils.py",
"snippet": "def span_xx_to_cxw(xx_spans):\n \"\"\"\n Args:\n xx_spans: tensor, (#windows, 2) or (..., 2), each row is a window of format (st, ed)\n\n Returns:\n cxw_spans: tensor, (#windows, 2), each row is a window of format (center=(st+ed)/2, width=(ed-st))\n >>> spans = torch.Tensor([[0, 1], [0.2, 0.4]])\n >>> span_xx_to_cxw(spans)\n tensor([[0.5000, 1.0000],\n [0.3000, 0.2000]])\n >>> spans = torch.Tensor([[[0, 1], [0.2, 0.4]]])\n >>> span_xx_to_cxw(spans)\n tensor([[[0.5000, 1.0000],\n [0.3000, 0.2000]]])\n \"\"\"\n center = xx_spans.sum(-1) * 0.5\n width = xx_spans[..., 1] - xx_spans[..., 0]\n return torch.stack([center, width], dim=-1)"
}
] | import torch
import numpy as np
import random
import logging
import torch.nn as nn
from torch.utils.data import Dataset
from tqdm import tqdm
from os.path import join, exists
from utils.basic_utils import load_jsonl, l2_normalize_np_array
from utils.tensor_utils import pad_sequences_1d
from cg_detr.span_utils import span_xx_to_cxw
from torchtext import vocab | 2,769 | 'train': ['kLxoNp-UchI', 'NyBmCxDoHJU', 'jcoYJXDG9sw', '-esJrBWj2d8'],
'val': ['E11zDS9XGzg']
},
'FM': {
'train': ['_xMr-HKMfVA', 'byxOvuiIJV0', 'VuWGsYPqAX8', 'xmEERLqJ2kU'],
'val': ['JKpqYvAdIsw']
},
'GA': {
'train': ['xxdtq8mxegs', 'i3wAGJaaktw', '0tmA_C6XwfM', '3eYKfiOEJNs'],
'val': ['Bhxk-O1Y7Ho']
},
'MS': {
'train': ['Hl-__g2gn_A', 'WG0MBPpPC6I', 'LRw_obCPUt0', '37rzWOQsNIw'],
'val': ['Yi4Ij2NM7U4']
},
'PK': {
'train': ['GsAD1KT1xo8', 'XkqCExn6_Us', 'b626MiF1ew4', 'PJrm840pAUI'],
'val': ['cjibtmSLxQ4']
},
'PR': {
'train': ['RBCABdttQmI', 'z_6gVvQb2d0', '4wU_LUjG5Ic', '91IHQYk1IQM'],
'val': ['fWutDQy1nnY']
},
'VT': {
'train': ['gzDbaEs1Rlg', 'XzYM3PfTM4w', '98MoyGZKHXc', 'AwmHb44_ouw'],
'val': ['J0nA4VgnoCo']
},
'VU': {
'train': ['akI8YFjEmUw', 'HT5vyqe0Xaw', 'vdmoEJ5YbrQ', 'xwqBXPGE9pQ'],
'val': ['sTEELN-vY30']
}
}
class StartEndDataset(Dataset):
Q_FEAT_TYPES = ["pooler_output", "last_hidden_state"]
"""One line in data loaded from data_path."
{
"qid": 7803,
"query": "Man in gray top walks from outside to inside.",
"duration": 150,
"vid": "RoripwjYFp8_360.0_510.0",
"relevant_clip_ids": [13, 14, 15, 16, 17],
"relevant_windows": [[26, 36]]
}
"""
def __init__(self, dset_name, data_path, v_feat_dirs, q_feat_dir,
q_feat_type="last_hidden_state",
max_q_l=32, max_v_l=75, data_ratio=1.0, ctx_mode="video",
normalize_v=True, normalize_t=True, load_labels=True,
clip_len=2, max_windows=5, span_loss_type="l1", txt_drop_ratio=0,
dset_domain=None):
self.dset_name = dset_name
self.data_path = data_path
self.data_ratio = data_ratio
self.v_feat_dirs = v_feat_dirs \
if isinstance(v_feat_dirs, list) else [v_feat_dirs]
self.q_feat_dir = q_feat_dir
self.q_feat_type = q_feat_type
if max_v_l == -1:
max_v_l = 100000000
if max_q_l == -1:
max_q_l = 100
self.max_q_l = max_q_l
self.max_v_l = max_v_l
self.ctx_mode = ctx_mode
self.use_tef = "tef" in ctx_mode
self.use_video = "video" in ctx_mode
self.normalize_t = normalize_t
self.normalize_v = normalize_v
self.load_labels = load_labels
self.clip_len = clip_len
self.max_windows = max_windows # maximum number of windows to use as labels
self.span_loss_type = span_loss_type
self.txt_drop_ratio = txt_drop_ratio
if "val" in data_path or "test" in data_path:
assert txt_drop_ratio == 0
# checks
assert q_feat_type in self.Q_FEAT_TYPES
# data
self.data = self.load_data()
# load specific domain data for tvsum dataset
if self.dset_name in ['tvsum', 'tvsum_sfc']:
target_domain = dset_domain
assert target_domain in ["BK", "BT", "DS", "FM", "GA", "MS", "PK", "PR", "VT", "VU"]
new_data = []
for d in self.data:
if target_domain == d['domain']:
new_data.append(d)
self.data = new_data
# load specific domain data for youtube-hl dataset
if self.dset_name == 'youtube_uni':
target_domain = dset_domain
assert target_domain in ["dog", "gymnastics", "parkour", "skating", "skiing", "surfing"]
new_data = []
for d in self.data:
if target_domain == d['domain']:
new_data.append(d)
self.data = new_data
self.use_glove = False
self.use_glove = 'vgg' in self.v_feat_dirs[0]
if self.dset_name == 'charadesSTA' and self.use_glove:
self.vocab = vocab.pretrained_aliases['glove.6B.300d']()
self.vocab.itos.extend(['<unk>'])
self.vocab.stoi['<unk>'] = self.vocab.vectors.shape[0]
self.vocab.vectors = torch.cat(
(self.vocab.vectors, torch.zeros(1, self.vocab.dim)), dim=0)
self.embedding = nn.Embedding.from_pretrained(self.vocab.vectors)
def load_data(self):
|
logger = logging.getLogger(__name__)
TVSUM_SPLITS = {
'BK': {
'train': ['WxtbjNsCQ8A', 'EE-bNr36nyA', 'oDXZc0tZe04', 'uGu_10sucQo'],
'val': ['Se3oxnaPsz0']
},
'BT': {
'train': ['eQu1rNs0an0', 'qqR6AEXwxoQ', 'EYqVtI9YWJA', 'iVt07TCkFM0'],
'val': ['JgHubY5Vw3Y']
},
'DS': {
'train': ['kLxoNp-UchI', 'NyBmCxDoHJU', 'jcoYJXDG9sw', '-esJrBWj2d8'],
'val': ['E11zDS9XGzg']
},
'FM': {
'train': ['_xMr-HKMfVA', 'byxOvuiIJV0', 'VuWGsYPqAX8', 'xmEERLqJ2kU'],
'val': ['JKpqYvAdIsw']
},
'GA': {
'train': ['xxdtq8mxegs', 'i3wAGJaaktw', '0tmA_C6XwfM', '3eYKfiOEJNs'],
'val': ['Bhxk-O1Y7Ho']
},
'MS': {
'train': ['Hl-__g2gn_A', 'WG0MBPpPC6I', 'LRw_obCPUt0', '37rzWOQsNIw'],
'val': ['Yi4Ij2NM7U4']
},
'PK': {
'train': ['GsAD1KT1xo8', 'XkqCExn6_Us', 'b626MiF1ew4', 'PJrm840pAUI'],
'val': ['cjibtmSLxQ4']
},
'PR': {
'train': ['RBCABdttQmI', 'z_6gVvQb2d0', '4wU_LUjG5Ic', '91IHQYk1IQM'],
'val': ['fWutDQy1nnY']
},
'VT': {
'train': ['gzDbaEs1Rlg', 'XzYM3PfTM4w', '98MoyGZKHXc', 'AwmHb44_ouw'],
'val': ['J0nA4VgnoCo']
},
'VU': {
'train': ['akI8YFjEmUw', 'HT5vyqe0Xaw', 'vdmoEJ5YbrQ', 'xwqBXPGE9pQ'],
'val': ['sTEELN-vY30']
}
}
class StartEndDataset(Dataset):
Q_FEAT_TYPES = ["pooler_output", "last_hidden_state"]
"""One line in data loaded from data_path."
{
"qid": 7803,
"query": "Man in gray top walks from outside to inside.",
"duration": 150,
"vid": "RoripwjYFp8_360.0_510.0",
"relevant_clip_ids": [13, 14, 15, 16, 17],
"relevant_windows": [[26, 36]]
}
"""
def __init__(self, dset_name, data_path, v_feat_dirs, q_feat_dir,
q_feat_type="last_hidden_state",
max_q_l=32, max_v_l=75, data_ratio=1.0, ctx_mode="video",
normalize_v=True, normalize_t=True, load_labels=True,
clip_len=2, max_windows=5, span_loss_type="l1", txt_drop_ratio=0,
dset_domain=None):
self.dset_name = dset_name
self.data_path = data_path
self.data_ratio = data_ratio
self.v_feat_dirs = v_feat_dirs \
if isinstance(v_feat_dirs, list) else [v_feat_dirs]
self.q_feat_dir = q_feat_dir
self.q_feat_type = q_feat_type
if max_v_l == -1:
max_v_l = 100000000
if max_q_l == -1:
max_q_l = 100
self.max_q_l = max_q_l
self.max_v_l = max_v_l
self.ctx_mode = ctx_mode
self.use_tef = "tef" in ctx_mode
self.use_video = "video" in ctx_mode
self.normalize_t = normalize_t
self.normalize_v = normalize_v
self.load_labels = load_labels
self.clip_len = clip_len
self.max_windows = max_windows # maximum number of windows to use as labels
self.span_loss_type = span_loss_type
self.txt_drop_ratio = txt_drop_ratio
if "val" in data_path or "test" in data_path:
assert txt_drop_ratio == 0
# checks
assert q_feat_type in self.Q_FEAT_TYPES
# data
self.data = self.load_data()
# load specific domain data for tvsum dataset
if self.dset_name in ['tvsum', 'tvsum_sfc']:
target_domain = dset_domain
assert target_domain in ["BK", "BT", "DS", "FM", "GA", "MS", "PK", "PR", "VT", "VU"]
new_data = []
for d in self.data:
if target_domain == d['domain']:
new_data.append(d)
self.data = new_data
# load specific domain data for youtube-hl dataset
if self.dset_name == 'youtube_uni':
target_domain = dset_domain
assert target_domain in ["dog", "gymnastics", "parkour", "skating", "skiing", "surfing"]
new_data = []
for d in self.data:
if target_domain == d['domain']:
new_data.append(d)
self.data = new_data
self.use_glove = False
self.use_glove = 'vgg' in self.v_feat_dirs[0]
if self.dset_name == 'charadesSTA' and self.use_glove:
self.vocab = vocab.pretrained_aliases['glove.6B.300d']()
self.vocab.itos.extend(['<unk>'])
self.vocab.stoi['<unk>'] = self.vocab.vectors.shape[0]
self.vocab.vectors = torch.cat(
(self.vocab.vectors, torch.zeros(1, self.vocab.dim)), dim=0)
self.embedding = nn.Embedding.from_pretrained(self.vocab.vectors)
def load_data(self): | datalist = load_jsonl(self.data_path) | 0 | 2023-11-10 12:45:25+00:00 | 4k |
WCGKING/KINGUSERBOT | Branded/plugins/pmguard.py | [
{
"identifier": "approve",
"path": "Branded/modules/data.py",
"snippet": "async def approve(user_ud: int):\n pm = await is_approved()\n pm.append(user_ud)\n await permitdb.update_one(\n {'permit': 'protection'},\n {\n '$set': {\n 'users': pm\n }\n },\n upsert=True\n )"
},
{
"identifier": "disapprove",
"path": "Branded/modules/data.py",
"snippet": "async def disapprove(user_ud: int):\n pm = await is_approved()\n pm.remove(user_ud)\n await permitdb.update_one(\n {'permit': 'protection'},\n {\n '$set': {\n 'users': pm\n }\n },\n upsert=True\n )"
},
{
"identifier": "is_approved",
"path": "Branded/modules/data.py",
"snippet": "async def is_approved() -> list:\n pm = await permitdb.find_one({'permit': 'protection'})\n if not pm:\n return []\n return pm['users']"
}
] | import asyncio
from pyrogram import Client, filters
from pyrogram.enums import ChatType
from pyrogram.types import *
from .. import *
from ..modules.data import approve, disapprove, is_approved | 1,611 |
DEFAULT = """
WELCOME....
ʜɪ, ᴛʜɪꜱ ɪꜱ ᴛʜᴇ ᴋᴇᴇᴘᴇʀ ᴏꜰ ᴘʀɪᴠᴀᴛᴇ ᴍᴇꜱꜱᴀɢᴇꜱ. ᴅᴏɴ'ᴛ ꜱᴘᴀᴍ ʏᴀ ᴏʀ ɪ'ʟʟ ʙʟᴏᴄᴋ ʏᴏᴜ. ᴡᴀɪᴛ ᴜɴᴛɪʟ ᴍʏ ᴍᴀꜱᴛᴇʀ ʀᴇᴄᴇɪᴠᴇꜱ ʏᴏᴜʀ ᴍᴇꜱꜱᴀɢᴇ.ɪ ᴀᴍ ᴀɴ ᴀᴅᴠᴀɴᴄᴇᴅ ᴀɴᴅ sᴜᴘᴇʀғᴀsᴛ ᴜꜱᴇʀʙᴏᴛ ᴡɪᴛʜ 24x7 ᴀᴄᴛɪᴠᴇ » ғᴏʀ ᴛᴇʟᴇɢʀᴀᴍ ɪᴅ
"""
@app.on_message(
(
filters.private
& filters.incoming
& ~filters.service
& ~filters.me
& ~filters.bot
& ~filters.via_bot
)
)
async def pmpermit_func(client: Client, message: Message):
user_ = message.from_user
approved = await is_approved()
pmper = var.PMPERMIT
if pmper == str(False):
return True
if user_.is_bot:
return
if user_.is_self:
return
if user_.is_contact:
return
if user_.is_verified:
return
if user_.is_scam:
await message.reply_text("Imposter Detected!\nAutomatic Blocking!!!")
await client.block_user(user_.id)
return
if user_.is_support:
return
if user_.id in approved:
return
limits = var.PERMIT_LIMIT
async for m in client.get_chat_history(user_.id, limit=limits):
if m.reply_markup:
await m.delete()
if str(user_.id) in flood:
flood[str(user_.id)] += 1
else:
flood[str(user_.id)] = 1
if flood[str(user_.id)] > limits:
await message.reply_text("Spammer Detected!\nAutomatic Blocking User!!!")
if str(user_.id) in OLD_MSG:
OLD_MSG.pop(str(user_.id))
flood.update({user_.id: 0})
return await client.block_user(user_.id)
getmsg = Config.PERMIT_MSG
pm_message = DEFAULT if not getmsg else getmsg
msg_dlt = await client.send_message(
user_.id,
MSG_PERMIT.format(pm_message, flood[str(user_.id)], limits),
)
if str(user_.id) in OLD_MSG:
try:
await OLD_MSG[str(user_.id)].delete()
except BaseException:
pass
OLD_MSG[str(user_.id)] = msg_dlt
@app.on_message(commandx(["approve", "a"]))
async def pm_approve(client: Client, message: Message):
permit = await is_approved()
if message.reply_to_message:
reply = message.reply_to_message
replied_user = reply.from_user
if replied_user.is_self:
await message.edit("You can't do that to yourself.")
return
uid = replied_user.id
if uid in permit:
return await message.reply("This user already exists in the database.")
await approve(uid)
xnxx = await message.reply("Your message was received.")
if str(uid) in OLD_MSG and str(uid) in flood:
await OLD_MSG[str(uid)].delete()
flood[str(uid)] = 0
await asyncio.sleep(3)
await xnxx.delete()
else:
aname = message.chat
if not aname.type == ChatType.PRIVATE:
await message.reply(
"You're not currently in PM and you haven't replied to someone's messages."
)
return
uid = aname.id
if uid in permit:
return await message.reply("This user already exists in the database")
await approve(uid)
xnxx = await message.reply("Your message was received.")
try:
if str(uid) in OLD_MSG and str(uid) in flood:
await OLD_MSG[str(uid)].delete()
flood[str(uid)] = 0
except BaseException:
pass
await asyncio.sleep(3)
await xnxx.delete()
@app.on_message(commandx(["disapprove", "da"]))
async def pm_disapprove(client: Client, message: Message):
permit = await is_approved()
if message.reply_to_message:
reply = message.reply_to_message
replied_user = reply.from_user
if replied_user.is_self:
await message.reply("You can't do that to yourself.")
return
uid = replied_user.id
if uid not in permit:
return await message.reply("User does not exist in database.")
|
MSG_PERMIT = """
PM_SECURITY BRANDED-USERBOT
{}
await message.reply_photo="https://te.legra.ph/file/11cfa74175b590014bd16.jpg"
▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂
⍟ You have {}/{} warning!!!
"""
DEFAULT = """
WELCOME....
ʜɪ, ᴛʜɪꜱ ɪꜱ ᴛʜᴇ ᴋᴇᴇᴘᴇʀ ᴏꜰ ᴘʀɪᴠᴀᴛᴇ ᴍᴇꜱꜱᴀɢᴇꜱ. ᴅᴏɴ'ᴛ ꜱᴘᴀᴍ ʏᴀ ᴏʀ ɪ'ʟʟ ʙʟᴏᴄᴋ ʏᴏᴜ. ᴡᴀɪᴛ ᴜɴᴛɪʟ ᴍʏ ᴍᴀꜱᴛᴇʀ ʀᴇᴄᴇɪᴠᴇꜱ ʏᴏᴜʀ ᴍᴇꜱꜱᴀɢᴇ.ɪ ᴀᴍ ᴀɴ ᴀᴅᴠᴀɴᴄᴇᴅ ᴀɴᴅ sᴜᴘᴇʀғᴀsᴛ ᴜꜱᴇʀʙᴏᴛ ᴡɪᴛʜ 24x7 ᴀᴄᴛɪᴠᴇ » ғᴏʀ ᴛᴇʟᴇɢʀᴀᴍ ɪᴅ
"""
@app.on_message(
(
filters.private
& filters.incoming
& ~filters.service
& ~filters.me
& ~filters.bot
& ~filters.via_bot
)
)
async def pmpermit_func(client: Client, message: Message):
user_ = message.from_user
approved = await is_approved()
pmper = var.PMPERMIT
if pmper == str(False):
return True
if user_.is_bot:
return
if user_.is_self:
return
if user_.is_contact:
return
if user_.is_verified:
return
if user_.is_scam:
await message.reply_text("Imposter Detected!\nAutomatic Blocking!!!")
await client.block_user(user_.id)
return
if user_.is_support:
return
if user_.id in approved:
return
limits = var.PERMIT_LIMIT
async for m in client.get_chat_history(user_.id, limit=limits):
if m.reply_markup:
await m.delete()
if str(user_.id) in flood:
flood[str(user_.id)] += 1
else:
flood[str(user_.id)] = 1
if flood[str(user_.id)] > limits:
await message.reply_text("Spammer Detected!\nAutomatic Blocking User!!!")
if str(user_.id) in OLD_MSG:
OLD_MSG.pop(str(user_.id))
flood.update({user_.id: 0})
return await client.block_user(user_.id)
getmsg = Config.PERMIT_MSG
pm_message = DEFAULT if not getmsg else getmsg
msg_dlt = await client.send_message(
user_.id,
MSG_PERMIT.format(pm_message, flood[str(user_.id)], limits),
)
if str(user_.id) in OLD_MSG:
try:
await OLD_MSG[str(user_.id)].delete()
except BaseException:
pass
OLD_MSG[str(user_.id)] = msg_dlt
@app.on_message(commandx(["approve", "a"]))
async def pm_approve(client: Client, message: Message):
permit = await is_approved()
if message.reply_to_message:
reply = message.reply_to_message
replied_user = reply.from_user
if replied_user.is_self:
await message.edit("You can't do that to yourself.")
return
uid = replied_user.id
if uid in permit:
return await message.reply("This user already exists in the database.")
await approve(uid)
xnxx = await message.reply("Your message was received.")
if str(uid) in OLD_MSG and str(uid) in flood:
await OLD_MSG[str(uid)].delete()
flood[str(uid)] = 0
await asyncio.sleep(3)
await xnxx.delete()
else:
aname = message.chat
if not aname.type == ChatType.PRIVATE:
await message.reply(
"You're not currently in PM and you haven't replied to someone's messages."
)
return
uid = aname.id
if uid in permit:
return await message.reply("This user already exists in the database")
await approve(uid)
xnxx = await message.reply("Your message was received.")
try:
if str(uid) in OLD_MSG and str(uid) in flood:
await OLD_MSG[str(uid)].delete()
flood[str(uid)] = 0
except BaseException:
pass
await asyncio.sleep(3)
await xnxx.delete()
@app.on_message(commandx(["disapprove", "da"]))
async def pm_disapprove(client: Client, message: Message):
permit = await is_approved()
if message.reply_to_message:
reply = message.reply_to_message
replied_user = reply.from_user
if replied_user.is_self:
await message.reply("You can't do that to yourself.")
return
uid = replied_user.id
if uid not in permit:
return await message.reply("User does not exist in database.") | await disapprove(uid) | 1 | 2023-11-14 13:24:26+00:00 | 4k |
kudelskisecurity/fuzzomatic | fuzzomatic/approaches/functions.py | [
{
"identifier": "prompts",
"path": "fuzzomatic/tools/prompts.py",
"snippet": "def load_file_contents(path):\ndef readme_prompt(readme):\ndef fix_prompt(code_snippet, error):\ndef example_prompt(example_code):\ndef unit_test_prompt(test_source_code, use_statements):\ndef unit_test_prompt_with_additional_function(\n test_function_code, additional_function_code, use_statements\n):"
},
{
"identifier": "llm_attempt_fix_error",
"path": "fuzzomatic/approaches/common.py",
"snippet": "def llm_attempt_fix_error(\n codebase_dir, target_name, code_snippet, error, remaining_attempts=2\n):\n # try to fix missing cargo dependencies deterministically\n build_success, error, code_snippet = add_missing_cargo_dependencies(\n codebase_dir, error, code_snippet, target_name\n )\n if build_success:\n fuzz_target_path = write_fuzz_target(code_snippet, codebase_dir, target_name)\n return True, fuzz_target_path\n else:\n print(\"Failed to fix cargo dependencies. Resuming...\")\n\n fix_prompt = prompts.fix_prompt(code_snippet, error)\n print(\"Asking LLM to fix the code...\")\n response = llm.ask_llm(fix_prompt)\n print(\"Response:\")\n print(response)\n code_snippet = llm.extract_fuzz_target(response, codebase_dir)\n print(\"Extracted code snippet\")\n print(\"======\")\n print(code_snippet)\n fix = True\n\n if code_snippet is not None:\n fuzz_target_path = write_fuzz_target(code_snippet, codebase_dir, target_name)\n # try to build the target\n build_success, error, built_code = build_target(codebase_dir, target_name)\n else:\n build_success = False\n error = None\n remaining_attempts = 0\n fix = False\n\n if build_success:\n return build_success, fuzz_target_path\n elif remaining_attempts > 0:\n if fix:\n print(\"Failed to fix the code. Retrying...\")\n return llm_attempt_fix_error(\n codebase_dir,\n target_name,\n built_code,\n error,\n remaining_attempts=remaining_attempts - 1,\n )\n else:\n print(\"None snippet detected\")\n return False, None\n else:\n print(\"Failed to fix the code and no more remaining attempts\")\n return False, None"
},
{
"identifier": "parse_cargo_doc_json",
"path": "fuzzomatic/tools/cargo_doc.py",
"snippet": "def parse_cargo_doc_json(path):\n with open(path) as f:\n jso = json.loads(f.read())\n\n # get functions that take only one parameter and that are public\n root = jso[\"root\"]\n index = jso[\"index\"]\n root_elem = index[root]\n root_inner_items = root_elem[\"inner\"][\"module\"][\"items\"]\n\n functions = []\n\n for elem in root_inner_items:\n path = []\n e = index[elem]\n funcs = parse_item(index, e, path)\n functions.extend(funcs)\n\n return functions"
},
{
"identifier": "generate_cargo_doc_json",
"path": "fuzzomatic/tools/cargo_doc.py",
"snippet": "def generate_cargo_doc_json(codebase_dir, root_codebase_dir=None):\n cmd = [\n \"cargo\",\n \"+nightly\",\n \"rustdoc\",\n \"--lib\",\n \"--\",\n \"--output-format\",\n \"json\",\n \"-Z\",\n \"unstable-options\",\n \"-A\",\n \"rustdoc::all\",\n ]\n\n json_file_path = None\n\n try:\n subprocess.check_call(cmd, cwd=codebase_dir)\n target_root = codebase_dir\n if root_codebase_dir is not None:\n target_root = root_codebase_dir\n target = os.path.join(target_root, \"target\", \"doc\")\n crate_name = detect_crate_name(codebase_dir)\n json_file_path = os.path.join(target, f\"{crate_name}.json\")\n if os.path.exists(json_file_path):\n return json_file_path\n else:\n for f in glob.glob(f\"{target}/*.json\"):\n json_file_path = f\n except subprocess.CalledProcessError:\n print(\"Error: failed to generate cargo doc json\")\n\n return json_file_path"
},
{
"identifier": "DEFAULT_TARGET_NAME",
"path": "fuzzomatic/tools/constants.py",
"snippet": "DEFAULT_TARGET_NAME = \"auto\""
},
{
"identifier": "write_fuzz_target",
"path": "fuzzomatic/tools/utils.py",
"snippet": "def write_fuzz_target(code_snippet, codebase_dir, target_name):\n # write snippet to file\n fuzz_target_path = build_fuzz_target_path(codebase_dir, target_name)\n with open(fuzz_target_path, \"w\") as fout:\n fout.write(code_snippet)\n\n return fuzz_target_path"
},
{
"identifier": "build_target",
"path": "fuzzomatic/tools/utils.py",
"snippet": "def build_target(codebase_dir, target_name):\n # sanitize fuzz target\n target_path = os.path.join(\n codebase_dir, \"fuzz\", \"fuzz_targets\", f\"{target_name}.rs\"\n )\n autofix_fuzz_target(target_path)\n\n # pretty format code\n rustfmt_target(target_path)\n\n # build target\n cmd = [\n \"cargo\",\n # force usage of nightly compiler in case there's\n # a rust toolchain override in the project\n \"+nightly\",\n \"fuzz\",\n \"build\",\n target_name,\n ]\n\n built_code = None\n with open(target_path) as f:\n built_code = f.read()\n\n try:\n print(\"Building target...\")\n # do not show warnings\n env = os.environ.copy()\n env[\"RUSTFLAGS\"] = \"-A warnings\"\n\n subprocess.check_output(\n cmd, cwd=codebase_dir, stderr=subprocess.STDOUT, env=env\n )\n print(\"Build success.\")\n return True, None, built_code\n except subprocess.CalledProcessError as e:\n print(\"Failed to build fuzz target\")\n error = e.output.decode(\"utf-8\")\n print(error)\n return False, error, built_code"
}
] | from jinja2 import Template
from fuzzomatic.tools import prompts
from fuzzomatic.approaches.common import llm_attempt_fix_error
from fuzzomatic.tools.cargo_doc import parse_cargo_doc_json, generate_cargo_doc_json
from fuzzomatic.tools.constants import DEFAULT_TARGET_NAME
from fuzzomatic.tools.utils import write_fuzz_target, build_target
import fuzzomatic.tools.utils | 3,437 | }
function_args = f[2]
template_path = str_template_path
extra_args = {}
if len(function_args) == 1:
try:
function_arg_type = function_args[0]
template_path = template_paths[function_arg_type]
except KeyError:
byte_array_length_template_path = (
"templates/fuzz_target/fuzz_target_byte_array_length.j2"
)
if type(function_arg_type) == tuple:
if function_arg_type[0] == "&array":
primitive_type = function_arg_type[1]
size = function_arg_type[2]
if primitive_type == "u8":
template_path = byte_array_length_template_path
extra_args = dict(array_length=size)
elif function_arg_type != "unknown":
# try the primitive type template
template_path = primitive_template_path
elif len(function_args) > 1:
template_path = "templates/fuzz_target/multiple_args/base.j2"
literal_args = []
struct_lifetime_needed = False
for arg in function_args:
if type(arg) == tuple and arg[0] == "&array":
primitive_type = arg[1]
size = arg[2]
struct_type = f"[{primitive_type}; {size}]"
call_prefix = "&"
literal_args.append((struct_type, call_prefix))
else:
if arg.startswith("&"):
struct_lifetime_needed = True
struct_type = arg.replace("&", "&'a ")
call_prefix = ""
literal_args.append((struct_type, call_prefix))
print("Literal args:")
print(literal_args)
extra_args = dict(
args=literal_args, struct_lifetime_needed=struct_lifetime_needed
)
success, fuzz_target_path = try_with_template(
template_path, codebase_dir, target_name, f, crate_name, extra_args
)
if success:
return True, fuzz_target_path
return False, None
def try_with_template(
template_path, codebase_dir, target_name, f, crate_name, extra_args
):
path = f[0]
function_name = f[1]
arg_type = f[2]
print(f"{arg_type=}")
if len(arg_type) == 1:
arg_type = arg_type[0]
import_path = ""
if len(path) > 0:
import_path += "::"
import_path += "::".join(path)
elif len(path) == 0:
import_path += f"::{function_name}"
usage_path = ""
if len(path) > 0:
usage_path = path[-1] + "::"
usage_path += function_name
print(f"{import_path=}")
print(f"{usage_path=}")
t = Template(prompts.load_file_contents(template_path))
fuzz_target_code = t.render(
crate_name=crate_name,
function_name=function_name,
import_path=import_path,
usage_path=usage_path,
arg_type=arg_type,
**extra_args,
)
fuzz_target_path = write_fuzz_target(fuzz_target_code, codebase_dir, target_name)
success, error, built_code = build_target(codebase_dir, target_name)
print("Generated code:")
print("-" * 10)
print(built_code)
print("-" * 10)
if success:
return True, fuzz_target_path
else:
print("Failed to build target")
print("Error:")
print(error)
# ask LLM to fix the code
fix_success, error = llm_attempt_fix_error(
codebase_dir, target_name, built_code, error
)
if fix_success:
return True, fuzz_target_path
return False, None
def find_target_functions_via_cargo_doc(codebase_dir, root_codebase_dir=None):
json_path = generate_cargo_doc_json(
codebase_dir, root_codebase_dir=root_codebase_dir
)
if json_path is not None:
print(f"Using cargo doc file: {json_path}")
|
def try_functions_approach(
codebase_dir,
target_name=DEFAULT_TARGET_NAME,
root_codebase_dir=None,
args=None,
**_kwargs,
):
functions = find_target_functions_via_cargo_doc(
codebase_dir, root_codebase_dir=root_codebase_dir
)
if functions is None:
print("Failed to detect functions")
return
ordered_functions = score_functions(functions)
print(f"{len(ordered_functions)} functions detected")
print("Detected target functions:")
for f in ordered_functions:
print(f)
max_functions = 8 # try max N functions
max_negative_score_functions = 2
negative_score_functions = 0
for f in ordered_functions[:max_functions]:
path = f[0]
function_name = f[1]
score = f[3]
# skip functions matching deny list
if args is not None and args.functions_denylist is not None:
skip_function = False
fully_qualified_function_name = "::".join(path)
if len(fully_qualified_function_name) > 0:
fully_qualified_function_name += "::"
fully_qualified_function_name += function_name
for word in args.functions_denylist:
if word in fully_qualified_function_name:
skip_function = True
if skip_function:
print(
f"Skipping function {fully_qualified_function_name} "
f"because of deny list: {args.functions_denylist}"
)
continue
print("Attempting function:")
print(f)
if score <= 0:
negative_score_functions += 1
success, fuzz_target_path = try_function(f, codebase_dir, target_name)
if success:
yield fuzz_target_path
if negative_score_functions >= max_negative_score_functions:
break
def score_functions(functions):
interesting_function_names = ["parse", "load", "read", "str", "eval"]
# order functions by most interesting first
ordered_functions = []
for f in functions:
function_name = f[1]
args = f[2]
priority = 0
is_name_interesting = False
for pattern in interesting_function_names:
if pattern in function_name:
is_name_interesting = True
if len(args) == 1:
arg_type = args[0]
if arg_type == "&str":
priority = 100
elif arg_type == "&[u8]":
priority = 100
elif arg_type == "String":
priority = 100
elif arg_type == "bool":
priority = 0
elif arg_type == "unknown":
priority = 10
elif type(arg_type) == tuple and arg_type[0] == "&array":
priority = 100
elif is_name_interesting:
priority = 100
if args[0] == "self":
priority = -15
elif args[0] == "self":
# functions with "self" as first argument
priority = -50
else:
priority = 50
elif len(args) > 1:
known_types = 0
for arg in args:
if arg != "unknown":
known_types += 1
if known_types == len(args):
priority = 30
if "&str" in args or "&[u8]" in args or "String" in args:
priority = 75
if any(type(arg) == tuple and arg[0] == "&array" for arg in args):
priority = 75
else:
# functions with multiple arguments where not all types are known
priority = -10
if args[0] == "self":
# functions with "self" as first argument
priority = -50
else:
# skip functions with no arguments
priority = -100
# give low priority to functions that are likely to load something by filename
if "file" in function_name and arg_type == "&str":
priority = 0
augmented_function = [*f, priority]
ordered_functions.append(augmented_function)
ordered_functions = sorted(ordered_functions, key=lambda x: x[3], reverse=True)
return ordered_functions
def try_function(f, codebase_dir, target_name):
crate_name = fuzzomatic.tools.utils.detect_crate_name(codebase_dir)
str_template_path = "templates/fuzz_target/fuzz_target_str.j2"
string_template_path = "templates/fuzz_target/fuzz_target_string.j2"
byte_slice_template_path = "templates/fuzz_target/fuzz_target_byte_array.j2"
primitive_template_path = "templates/fuzz_target/fuzz_target_primitive.j2"
bool_template_path = "templates/fuzz_target/fuzz_target_bool.j2"
template_paths = {
"&str": str_template_path,
"String": string_template_path,
"&[u8]": byte_slice_template_path,
"bool": bool_template_path,
"unknown": str_template_path,
}
function_args = f[2]
template_path = str_template_path
extra_args = {}
if len(function_args) == 1:
try:
function_arg_type = function_args[0]
template_path = template_paths[function_arg_type]
except KeyError:
byte_array_length_template_path = (
"templates/fuzz_target/fuzz_target_byte_array_length.j2"
)
if type(function_arg_type) == tuple:
if function_arg_type[0] == "&array":
primitive_type = function_arg_type[1]
size = function_arg_type[2]
if primitive_type == "u8":
template_path = byte_array_length_template_path
extra_args = dict(array_length=size)
elif function_arg_type != "unknown":
# try the primitive type template
template_path = primitive_template_path
elif len(function_args) > 1:
template_path = "templates/fuzz_target/multiple_args/base.j2"
literal_args = []
struct_lifetime_needed = False
for arg in function_args:
if type(arg) == tuple and arg[0] == "&array":
primitive_type = arg[1]
size = arg[2]
struct_type = f"[{primitive_type}; {size}]"
call_prefix = "&"
literal_args.append((struct_type, call_prefix))
else:
if arg.startswith("&"):
struct_lifetime_needed = True
struct_type = arg.replace("&", "&'a ")
call_prefix = ""
literal_args.append((struct_type, call_prefix))
print("Literal args:")
print(literal_args)
extra_args = dict(
args=literal_args, struct_lifetime_needed=struct_lifetime_needed
)
success, fuzz_target_path = try_with_template(
template_path, codebase_dir, target_name, f, crate_name, extra_args
)
if success:
return True, fuzz_target_path
return False, None
def try_with_template(
template_path, codebase_dir, target_name, f, crate_name, extra_args
):
path = f[0]
function_name = f[1]
arg_type = f[2]
print(f"{arg_type=}")
if len(arg_type) == 1:
arg_type = arg_type[0]
import_path = ""
if len(path) > 0:
import_path += "::"
import_path += "::".join(path)
elif len(path) == 0:
import_path += f"::{function_name}"
usage_path = ""
if len(path) > 0:
usage_path = path[-1] + "::"
usage_path += function_name
print(f"{import_path=}")
print(f"{usage_path=}")
t = Template(prompts.load_file_contents(template_path))
fuzz_target_code = t.render(
crate_name=crate_name,
function_name=function_name,
import_path=import_path,
usage_path=usage_path,
arg_type=arg_type,
**extra_args,
)
fuzz_target_path = write_fuzz_target(fuzz_target_code, codebase_dir, target_name)
success, error, built_code = build_target(codebase_dir, target_name)
print("Generated code:")
print("-" * 10)
print(built_code)
print("-" * 10)
if success:
return True, fuzz_target_path
else:
print("Failed to build target")
print("Error:")
print(error)
# ask LLM to fix the code
fix_success, error = llm_attempt_fix_error(
codebase_dir, target_name, built_code, error
)
if fix_success:
return True, fuzz_target_path
return False, None
def find_target_functions_via_cargo_doc(codebase_dir, root_codebase_dir=None):
json_path = generate_cargo_doc_json(
codebase_dir, root_codebase_dir=root_codebase_dir
)
if json_path is not None:
print(f"Using cargo doc file: {json_path}") | functions = parse_cargo_doc_json(json_path) | 2 | 2023-11-14 09:52:59+00:00 | 4k |
muyuworks/myla | tests/myla/vectorstores/faiss_group_test.py | [
{
"identifier": "FAISSGroup",
"path": "myla/vectorstores/faiss_group.py",
"snippet": "class FAISSGroup(VectorStore):\n def __init__(self, path: str, embeddings: Embeddings = None) -> None:\n self._path = path\n self._embeddings = embeddings\n self._faiss = _import_faiss()\n\n os.makedirs(name=self._path, exist_ok=True)\n\n # lock the vs\n self._vs_lock = threading.Lock()\n self._col_locks = {}\n\n self._data = {}\n self._indexes = {}\n self._ids = {}\n\n def create_collection(self, collection: str, schema: Dict[str, type] = None, mode=\"create\"):\n \"\"\"Create a collection.\n\n :type mode:str\n :param mode: creation mode, create: raise FAISSGroupException if exists; overwrite: drop it if exists\n \"\"\"\n col_path = os.path.join(self._path, collection)\n\n if mode == 'create' and os.path.exists(col_path):\n raise FAISSGroupException(f\"Collection exists: {collection}\")\n\n if mode == 'overwrite' and os.path.exists(col_path):\n self.drop(collection=collection)\n\n if not os.path.exists(col_path):\n os.mkdir(col_path)\n\n def add(\n self,\n collection: str,\n records: List[Record],\n embeddings_columns: Optional[List[str]] = None,\n vectors: Optional[List[List[float]]] = None,\n **kwargs\n ):\n \"\"\"Add records to the collection.\"\"\"\n group_by = kwargs.get('group_by')\n\n self._check_collection_exists(collection=collection)\n\n if records is None:\n raise ValueError(f\"Invalid records: None\")\n\n if not os.path.exists(os.path.join(self._path, collection)):\n raise FAISSGroupException(f\"Collection not exists: {collection}\")\n\n groups = self._group_records(records=records, group_by=group_by)\n\n if vectors:\n if len(vectors) != len(records):\n raise ValueError(\"The length of records must be the same as the length of vecotors.\")\n else:\n text_to_embed = []\n for r in records:\n text_to_embed.append(Record.values_to_text(r, props=embeddings_columns))\n vectors = self._embeddings.embed_batch(texts=text_to_embed, instruction=kwargs.get('instruction'))\n\n with self._get_collection_lock(collection=collection):\n data, indexes, ids = self._load(collection=collection)\n\n idx_start = len(data)\n\n # add records to data\n for r in records:\n data.append(r)\n\n for gid, g_records_ids in groups.items():\n index = indexes.get(gid)\n id_map = ids.get(gid)\n\n if not index:\n index = self._faiss.IndexFlatL2(len(vectors[0]))\n id_map = []\n indexes[gid] = index\n ids[gid] = id_map\n\n g_vectors = []\n\n for i in g_records_ids:\n g_vectors.append(vectors[i])\n id = idx_start + i\n id_map.append(id)\n\n g_vectors_npa = np.array(g_vectors, dtype=np.float32)\n self._faiss.normalize_L2(g_vectors_npa)\n index.add(g_vectors_npa)\n\n # save data\n self._save_data(collection=collection, data=data)\n gid_to_saved = groups.keys()\n for gid in gid_to_saved:\n self._save_group(collection=collection, gid=gid, index=indexes[gid], ids=ids[gid])\n\n def _group_id(self, v=None):\n if v is None or pd.isnull(v):\n v = \"\"\n if not isinstance(v, str):\n v = str(v)\n return utils.sha256(v.encode()).hex()\n\n def _group_records(self, records: List[Record], group_by: str):\n groups = {}\n\n if not group_by:\n groups[self._group_id(\"\")] = range(len(records))\n else:\n for i in range(len(records)):\n gid = records[i].get(group_by)\n gid = self._group_id(gid)\n g = groups.get(gid)\n if not g:\n g = []\n groups[gid] = g\n g.append(i)\n return groups\n\n def _save_data(self, collection, data):\n fname = os.path.join(self._path, collection, \"data.pkl\")\n with open(fname, 'wb') as f:\n pickle.dump(data, f)\n\n def _save_group(self, collection, gid, index, ids):\n index_fname = 
os.path.join(self._path, collection, f\"{gid}.index\")\n self._faiss.write_index(index, index_fname)\n ids_fname = os.path.join(self._path, collection, f\"{gid}.ids\")\n with open(ids_fname, 'wb') as f:\n pickle.dump(ids, f)\n\n def delete(self, collection: str, query: str):\n return super().delete(collection, query)\n\n def search(\n self,\n collection: str = None,\n query: str = None,\n vector: List = None,\n filter: Any = None,\n limit: int = 20,\n columns: Optional[List[str]] = None,\n with_vector: bool = False,\n with_distance: bool = False,\n **kwargs\n ) -> Optional[List[Record]]:\n group_ids = kwargs.get('group_ids')\n\n self._check_collection_exists(collection=collection)\n\n if not query and not vector:\n raise FAISSGroupException(\"FAISSGroup search must provide query or vector.\")\n\n if query and not vector and self._embeddings:\n vector = self._embeddings.embed(text=query, instruction=kwargs.get('instruction'))\n if not vector:\n raise FAISSGroupException(\"FAISSGroup search must provide Embeddings function.\")\n\n vector = np.array([vector], dtype=np.float32)\n self._faiss.normalize_L2(vector)\n\n if group_ids is None:\n group_ids = [self._group_id()]\n else:\n group_ids = [self._group_id(v) for v in group_ids]\n\n if filter is not None:\n filter = {\n key: [value] if not isinstance(value, list) else value for key, value in filter.items()\n }\n\n data, indexes, ids = self._load(collection=collection)\n\n r_records = []\n for gid in group_ids:\n index = indexes.get(gid)\n id_map = ids.get(gid)\n if not index or not id_map:\n raise FAISSGroupException(f\"group_id not exists: {gid}\")\n\n distances, indices = index.search(vector, limit)\n for j, i in enumerate(indices[0]):\n if i == -1:\n # This happens when not enough docs are returned.\n continue\n _id = id_map[i]\n _distance = distances[0][j]\n record = data[_id]\n record['_distance'] = float(_distance)\n\n if filter:\n if all(record.get(key) in value for key, value in filter.items()):\n r_records.append(record)\n else:\n r_records.append(record)\n\n distance_threshold = kwargs.get(\"distance_threshold\")\n if distance_threshold is not None:\n r_records = [\n r for r in r_records if r['_distance'] < distance_threshold\n ]\n\n r_records.sort(key=lambda r: r['_distance'])\n\n return r_records[:limit]\n\n def drop(self, collection: str):\n \"\"\"\"\"\"\n\n def _check_collection_exists(self, collection):\n if collection is None:\n raise ValueError(f\"Invalid collection name: {collection}\")\n if not os.path.exists(os.path.join(self._path, collection)):\n raise FAISSGroupException(f\"Collection not exists: {collection}\")\n\n def _load(self, collection: str):\n \"\"\"Load collection data.\"\"\"\n\n with self._vs_lock:\n data = self._data.get(collection)\n indexes = self._indexes.get(collection)\n ids = self._ids.get(collection)\n\n if not data:\n data = []\n indexes = {}\n ids = {}\n\n path = os.path.join(self._path, collection)\n for fname in os.listdir(path=path):\n if fname == \"data.pkl\":\n with open(os.path.join(path, fname), \"rb\") as f:\n data = pickle.load(f)\n if fname.endswith(\".index\"):\n gid = fname.replace(\".index\", \"\")\n idx = self._faiss.read_index(os.path.join(path, fname))\n indexes[gid] = idx\n\n id_map = []\n with open(os.path.join(path, f\"{gid}.ids\"), \"rb\") as f:\n id_map = pickle.load(f)\n ids[gid] = id_map\n\n self._data[collection] = data\n self._indexes[collection] = indexes\n self._ids[collection] = ids\n\n return data, indexes, ids\n\n def _unload(self, collection: str):\n \"\"\"Unload 
collection.\"\"\"\n with self._vs_lock:\n if collection in self._data:\n del self._data[collection]\n del self._indexes[collection]\n del self._ids[collection]\n\n return gc.collect()\n\n def _get_collection_lock(self, collection):\n with self._vs_lock:\n lock = self._col_locks.get(collection)\n if not lock:\n lock = threading.Lock()\n self._col_locks[collection] = lock\n return lock"
},
{
"identifier": "random_id",
"path": "myla/utils.py",
"snippet": "def random_id():\n return base32(sha1(uuid().bytes)).decode().lower()"
},
{
"identifier": "sha256",
"path": "myla/utils.py",
"snippet": "def sha256(s: bytes):\n m = hashlib.sha256()\n m.update(s)\n return m.digest()"
}
] | import os
import shutil
import unittest
from myla.vectorstores.faiss_group import FAISSGroup
from myla.utils import random_id, sha256 | 2,848 |
here = os.path.abspath(os.path.dirname(__file__))
class FAISSGroupTests(unittest.TestCase):
def setUp(self) -> None:
self._vectors = [
[0, 0],
[1, 1],
[2, 2],
[3, 3],
[4, 4]
]
self._records = [
{
'id': 0,
'gid': 'g0',
},
{
'id': 1,
'gid': 'g0',
},
{
'id': 2,
'gid': 'g2',
},
{
'id': 3,
'gid': 'g3',
},
{
'id': 4,
}
]
self._data = os.path.abspath(os.path.join(here, os.pardir, os.pardir, 'data', random_id()))
def tearDown(self) -> None:
if os.path.exists(self._data):
shutil.rmtree(self._data)
pass
def test_create_collection(self):
vs = FAISSGroup(path=self._data)
vs.create_collection(collection='col')
def test_add(self):
vs = FAISSGroup(path=self._data)
vs.create_collection(collection='col')
vs.add(collection='col', records=self._records, vectors=self._vectors, group_by='gid')
self.assertIsNotNone(vs._data.get('col'))
self.assertEqual(vs._data.get('col'), self._records)
self.assertIsNotNone(vs._indexes.get('col'))
self.assertIsNotNone(vs._ids.get('col'))
self.assertEqual(len(vs._indexes.get('col')), 4)
self.assertEqual(len(vs._ids.get('col')), 4)
self.assertEqual(vs._indexes.get('col').keys(), vs._ids.get('col').keys())
gids = list(vs._indexes.get('col').keys())
gids.sort()
gids_1 = []
for r in self._records:
|
here = os.path.abspath(os.path.dirname(__file__))
class FAISSGroupTests(unittest.TestCase):
def setUp(self) -> None:
self._vectors = [
[0, 0],
[1, 1],
[2, 2],
[3, 3],
[4, 4]
]
self._records = [
{
'id': 0,
'gid': 'g0',
},
{
'id': 1,
'gid': 'g0',
},
{
'id': 2,
'gid': 'g2',
},
{
'id': 3,
'gid': 'g3',
},
{
'id': 4,
}
]
self._data = os.path.abspath(os.path.join(here, os.pardir, os.pardir, 'data', random_id()))
def tearDown(self) -> None:
if os.path.exists(self._data):
shutil.rmtree(self._data)
pass
def test_create_collection(self):
vs = FAISSGroup(path=self._data)
vs.create_collection(collection='col')
def test_add(self):
vs = FAISSGroup(path=self._data)
vs.create_collection(collection='col')
vs.add(collection='col', records=self._records, vectors=self._vectors, group_by='gid')
self.assertIsNotNone(vs._data.get('col'))
self.assertEqual(vs._data.get('col'), self._records)
self.assertIsNotNone(vs._indexes.get('col'))
self.assertIsNotNone(vs._ids.get('col'))
self.assertEqual(len(vs._indexes.get('col')), 4)
self.assertEqual(len(vs._ids.get('col')), 4)
self.assertEqual(vs._indexes.get('col').keys(), vs._ids.get('col').keys())
gids = list(vs._indexes.get('col').keys())
gids.sort()
gids_1 = []
for r in self._records: | gids_1.append(sha256(r.get('gid', '').encode()).hex()) | 2 | 2023-11-15 01:05:03+00:00 | 4k |
AdmTal/music-graphs | music_graphs.py | [
{
"identifier": "generate_music_graph",
"path": "src/generate_music_graph.py",
"snippet": "def generate_music_graph(\n midi_file_path,\n default_theme_file_path,\n theme_file_path,\n output_path,\n soundfont_file,\n):\n theme = Theme(theme_file_path, default_theme_file_path)\n track_events_frames = get_note_start_times_in_frames(\n midi_file_path,\n theme.frame_rate,\n squash_tracks=theme.squash_tracks,\n group_notes_by_track=theme.group_notes_by_track,\n )\n\n song_graph = create_graphviz(theme, track_events_frames)\n\n base_image, nodes, edges, offsets = parse_graph(song_graph, theme)\n\n if theme.debug_show_base_image:\n base_image.show()\n cleanup_cache_dir(get_cache_dir())\n exit()\n\n FRAMES = AnimationFrames()\n\n click.echo(\"Planning out frames...\", nl=False)\n\n for track in track_events_frames.keys():\n if theme.skip_track(track):\n continue\n curr_track = track_events_frames[track]\n curr_frame = min(curr_track) - 1\n prev_notes = None\n prev_notes_frame = None\n num_notes_processed = 0\n click.echo() # NL\n\n max_notes = len(track_events_frames[track])\n\n while num_notes_processed < max_notes and curr_frame <= max(curr_track):\n curr_frame += 1\n\n if curr_frame not in curr_track:\n continue\n\n usage = size(psutil.Process().memory_info().rss)\n click.echo(\n f\"\\r[{track}] Processing {num_notes_processed + 1} of {max_notes} notes... (memory usage={usage})\",\n nl=False,\n )\n\n num_notes_processed += 1\n curr_note_tuples = curr_track[curr_frame]\n\n # Animate the Node pulses\n for (\n current_note,\n curr_note_velocity,\n curr_note_frame_len,\n ) in curr_note_tuples:\n frames = []\n for i in range(curr_note_frame_len):\n frame = [\n animate_ellipsis_blur,\n {\n \"track\": track,\n \"points\": nodes[current_note].e_points,\n \"frame_number\": i,\n \"animation_len\": curr_note_frame_len,\n \"velocity\": curr_note_velocity,\n },\n ]\n\n frames.append(frame)\n FRAMES.add_frames_to_layer(\n f\"l2-{track}-{current_note}\", curr_frame, frames\n )\n\n if theme.pulses_only:\n continue\n\n # Animate the Chord Lines\n if len(curr_note_tuples) > 1:\n # Split notes in chord up by the frame length, cause multiple chords might be playing\n notes_in_cords = {}\n for note, velocity, frame_len in curr_note_tuples:\n if frame_len not in notes_in_cords:\n notes_in_cords[frame_len] = []\n notes_in_cords[frame_len].append(note)\n\n # For each individual chord, draw the lines\n for frame_len, all_notes in notes_in_cords.items():\n # The chord lines shoudl not overlap, so sort them according to sort order\n if theme.nodes_sorted:\n if isinstance(theme.nodes_sorted, bool):\n all_notes = sorted(\n all_notes,\n key=lambda i: int(i.split(TRACK_NOTE_DELIMITER)[1]),\n )\n else:\n all_notes = filter_and_order_custom(\n theme.nodes_sorted, all_notes\n )\n # Use `overlapping_pairs` to make the notes connect as a circle\n pairs = overlapping_pairs(all_notes)\n for a, b in pairs:\n frames = []\n for i in range(frame_len):\n if b not in edges[a]:\n continue\n frames.append(\n [\n draw_fading_bezier_curve,\n {\n \"track\": track,\n \"points\": edges[a][b].b_points,\n \"frame_number\": i,\n \"animation_len\": frame_len,\n },\n ]\n )\n FRAMES.add_frames_to_layer(\n f\"l1-{track}-{a}-{b}-line\", curr_frame, frames\n )\n\n curr_notes = [curr_note_tuple[0] for curr_note_tuple in curr_note_tuples]\n\n # Animate the \"next note\" balls\n if prev_notes:\n animation_length_in_frames = curr_frame - prev_notes_frame\n drawn_to = set()\n source_usage = {note: 0 for note in prev_notes}\n\n # New Rule: Check if there are more destinations than sources to determine max usage\n max_usage = 2 if 
len(curr_notes) > len(prev_notes) else 1\n\n if animation_length_in_frames / theme.frame_rate <= 10:\n for a in prev_notes:\n for b in curr_notes:\n if (\n b in drawn_to\n or (a == b and not theme.allow_self_notes(track))\n or source_usage[a] >= max_usage\n or b not in edges[a]\n ):\n continue\n\n frames = []\n for i in range(animation_length_in_frames):\n frame = [\n animate_bezier_point,\n {\n \"track\": track,\n \"points\": edges[a][b].b_points,\n \"frame_number\": i,\n \"animation_length_in_frames\": animation_length_in_frames,\n },\n ]\n\n frames.append(frame)\n FRAMES.add_frames_to_layer(\n f\"l3-{track}-{a}-{b}-balls\", prev_notes_frame, frames\n )\n drawn_to.add(b)\n source_usage[a] += 1\n\n prev_notes = curr_notes\n prev_notes_frame = curr_frame\n\n num_frames = len(FRAMES)\n if theme.debug_max_frames:\n num_frames = theme.debug_max_frames\n\n writer_context = initialize_video_writer(theme.frame_rate)\n frames_written = 0\n click.echo(\"\\nDrawing frames, writing videos...\")\n NUM_WORKERS = os.cpu_count()\n with writer_context as (writer, video_file_path):\n while frames_written < num_frames:\n with ThreadPoolExecutor(max_workers=NUM_WORKERS) as executor:\n future_to_frame = {\n executor.submit(\n process_frame,\n current_frame=i,\n base_image=base_image.copy(),\n theme=theme,\n offsets=offsets,\n FRAMES=FRAMES,\n ): i\n for i in range(\n frames_written, min(frames_written + NUM_WORKERS, num_frames)\n )\n }\n\n results = []\n for future in as_completed(future_to_frame):\n frame_index = future_to_frame[future]\n frame_image = future.result()\n results.append((frame_index, frame_image))\n\n for frame_index, frame_image in sorted(results, key=lambda x: x[0]):\n add_frame_to_video(writer, frame_image)\n frames_written += 1\n\n usage = size(psutil.Process().memory_info().rss)\n click.echo(\n f\"\\rProcessed {frames_written} of {num_frames}... (memory usage={usage})\",\n nl=False,\n )\n\n finalize_video_with_music(\n writer,\n video_file_path,\n output_path,\n midi_file_path,\n theme.frame_rate,\n soundfont_file,\n frames_written,\n )"
},
{
"identifier": "SOUND_FONT_FILE",
"path": "src/midi_stuff.py",
"snippet": "SOUND_FONT_FILE = \"assets/GeneralUser GS 1.471/GeneralUser GS v1.471.sf2\""
},
{
"identifier": "DARK_THEME_FILE",
"path": "src/theme_stuff.py",
"snippet": "DARK_THEME_FILE = \"assets/default_theme_dark.yaml\""
},
{
"identifier": "LIGHT_THEME_FILE",
"path": "src/theme_stuff.py",
"snippet": "LIGHT_THEME_FILE = \"assets/default_theme_light.yaml\""
}
] | import os
import click
from src.generate_music_graph import generate_music_graph
from src.midi_stuff import SOUND_FONT_FILE
from src.theme_stuff import DARK_THEME_FILE, LIGHT_THEME_FILE | 2,122 |
def get_filename_without_extension(path):
filename_with_extension = os.path.basename(path)
filename_without_extension, _ = os.path.splitext(filename_with_extension)
return filename_without_extension
@click.command()
@click.option(
"--midi",
required=True,
type=click.Path(exists=True),
help="Path to a MIDI file.",
)
@click.option(
"--theme",
type=click.Path(exists=True),
help="Path to a YAML theme file.",
)
@click.option(
"--dark",
type=bool,
help="True if dark theme should be the used.",
default=False,
is_flag=True,
)
@click.option(
"--output_filename",
type=click.Path(),
help="Output filename (path).",
default=None,
)
@click.option(
"--soundfont_file",
type=click.Path(),
help="Path to a Soundfont file",
default=SOUND_FONT_FILE,
)
def main(midi, theme, output_filename, soundfont_file, dark):
default_theme_file = LIGHT_THEME_FILE
if dark:
default_theme_file = DARK_THEME_FILE
if not theme:
theme = default_theme_file
if not output_filename:
output_filename = get_filename_without_extension(midi)
|
def get_filename_without_extension(path):
filename_with_extension = os.path.basename(path)
filename_without_extension, _ = os.path.splitext(filename_with_extension)
return filename_without_extension
@click.command()
@click.option(
"--midi",
required=True,
type=click.Path(exists=True),
help="Path to a MIDI file.",
)
@click.option(
"--theme",
type=click.Path(exists=True),
help="Path to a YAML theme file.",
)
@click.option(
"--dark",
type=bool,
help="True if dark theme should be the used.",
default=False,
is_flag=True,
)
@click.option(
"--output_filename",
type=click.Path(),
help="Output filename (path).",
default=None,
)
@click.option(
"--soundfont_file",
type=click.Path(),
help="Path to a Soundfont file",
default=SOUND_FONT_FILE,
)
def main(midi, theme, output_filename, soundfont_file, dark):
default_theme_file = LIGHT_THEME_FILE
if dark:
default_theme_file = DARK_THEME_FILE
if not theme:
theme = default_theme_file
if not output_filename:
output_filename = get_filename_without_extension(midi)
| generate_music_graph( | 0 | 2023-11-17 17:56:04+00:00 | 4k |
FISHers6/CodeLearn-Agent | codelearn/project/project_manager.py | [
{
"identifier": "LOCAL_PROJECT_PATH",
"path": "codelearn/base.py",
"snippet": "LOCAL_PROJECT_PATH = os.path.join(BASE_PROJECT_PATH, \"projects\")"
},
{
"identifier": "Indexer",
"path": "codelearn/index/indexer.py",
"snippet": "class Indexer(ABC):\n @abstractmethod\n def index(self, project: Project, splitter: Splitter, embedding: Embeddings, vector_db: VectorStoreBase) -> None:\n pass"
},
{
"identifier": "Metadata",
"path": "codelearn/index/indexer.py",
"snippet": "class Metadata:\n def __init__(self, id: str, **kwargs):\n self.id = id\n self.extra_fields = {k: v for k, v in kwargs.items()}\n \n def __str__(self):\n components = [f\"id={self.id}\"]\n for field, value in self.extra_fields:\n if field and value:\n components.append(f\"{field}={value}\")\n return f\"<({', '.join(components)})>\""
},
{
"identifier": "ProjectLoader",
"path": "codelearn/loader/loader.py",
"snippet": "class ProjectLoader(ABC):\n\n source_provider: SourceProvider\n\n @abstractmethod\n def load_project(self, project_info: Dict[str, Any]) -> Project:\n print(\"ProjectLoader load_project\")\n pass"
},
{
"identifier": "Project",
"path": "codelearn/project/project.py",
"snippet": "class Project:\n\n def __init__(self, id: str, local_dir: str, source_content: FileTree, repo_url: str = None, last_updated_time = None):\n \"\"\"\n :param name: 项目名称\n :param contents: 一个字典,其中键是文件路径,值是文件内容\n \"\"\"\n self.id = id\n self.local_dir = local_dir\n self.repo_url = repo_url\n self.contents = source_content\n self.last_updated_time = last_updated_time"
},
{
"identifier": "RetrieveResults",
"path": "codelearn/retrieval/retriever.py",
"snippet": "class RetrieveResults:\n def __init__(self, content: str, metadata: Metadata, score: float = None):\n self.content = content\n self.metadata = metadata\n self.score = score\n\n def __repr__(self):\n return f\"<RetrieveResults(content={self.content}, metadata={self.metadata}, score={self.score})>\""
},
{
"identifier": "Retriever",
"path": "codelearn/retrieval/retriever.py",
"snippet": "class Retriever(ABC):\n \n @abstractmethod\n def retrieve(self, query: str, project: Project, top_k: int = 5, search_kwargs: Optional[dict] = None) -> List[Tuple[Document, float]]:\n pass\n\n def rank(self, query: str, results: List[RetrieveResults]):\n return results"
},
{
"identifier": "Splitter",
"path": "codelearn/splitter/splitter.py",
"snippet": "class Splitter(ABC):\n @abstractmethod\n def split(self, file_tree: FileTree) -> List[ChunkInfo]:\n pass"
},
{
"identifier": "ProjectStorageManager",
"path": "codelearn/storage/project_storage.py",
"snippet": "class ProjectStorageManager:\n def __init__(self, storage: ProjectStorage, cache: ProjectCache):\n self.storage = storage\n self.cache = cache\n\n def _fetch_from_storage(self, project_id, loader, repo_url=None, local_dir = None):\n project: Optional[ProjectModel] = self.storage.get_project(project_id, repo_url=repo_url, local_dir=local_dir)\n if not project:\n print(\"_fetch_from_storage none\")\n return None\n print(\"_fetch_from_storage load start\")\n project = loader.load_project({\n \"id\": project.id,\n \"local_dir\": project.local_dir,\n \"repo_url\": project.repo_url,\n \"last_updated_time\": project.last_updated\n })\n print(f\"_fetch_from_storage load end return project from storage, {project}\")\n return project\n\n def get_project(self, project_id, loader, repo_url=None, local_dir = None) -> Optional[Project]:\n cached_project = self.cache.get_project(project_id)\n if cached_project:\n print(\"return cached_project\")\n return cached_project\n return self._fetch_from_storage(project_id, loader, repo_url, local_dir)\n\n def store_project(self, project: Project):\n self.storage.store_project(project)\n self.cache.cached_project(project)"
},
{
"identifier": "VectorStoreBase",
"path": "codelearn/storage/vector.py",
"snippet": "class VectorStoreBase(ABC):\n \"\"\"Base VectorStorage class for VectorStore.\"\"\"\n\n @abstractmethod\n def save_local(self, vector_store: VectorStore, folder_path: str, index_name: str = \"code\"):\n pass\n\n @abstractmethod\n def load_local(\n self,\n folder_path: str,\n embeddings: Embeddings,\n index_name: str = \"code\",\n **kwargs: Any\n ) -> Optional[VectorStore]:\n pass\n\n @abstractmethod\n def embending(self, project: Project, documents: List[Document], embedding: Embeddings, vector_store: Optional[VectorStore] = None, index_name=\"code\"):\n pass"
},
{
"identifier": "AsyncQueue",
"path": "codelearn/utils/clearn_task_queue.py",
"snippet": "class AsyncQueue:\n def __init__(self):\n self.tasks = asyncio.Queue()\n\n async def add_task(self, coro):\n await self.tasks.put(coro)\n\n async def run(self):\n while True:\n task = await self.tasks.get()\n await task\n self.tasks.task_done()"
},
{
"identifier": "async_cleanup",
"path": "codelearn/utils/clearn_task_queue.py",
"snippet": "async def async_cleanup(storage_path):\n max_size = project_config.max_clean_threadshold_size\n cleaned_size = project_config.after_cleaned_threadshold_size\n current_size = get_directory_size(storage_path)\n\n if current_size > max_size:\n # 删除最近最少访问的文件\n lru_files = get_least_recently_used_files(storage_path)\n for file in lru_files:\n if current_size <= cleaned_size:\n break\n size = os.path.getsize(file)\n os.remove(file)\n current_size -= size"
},
{
"identifier": "sync_cleanup",
"path": "codelearn/utils/clearn_task_queue.py",
"snippet": "def sync_cleanup(storage_path):\n max_size = project_config.max_clean_threadshold_size\n cleaned_size = project_config.after_cleaned_threadshold_size\n current_size = get_directory_size(storage_path)\n\n if current_size > max_size:\n # 删除最近最少访问的文件\n lru_files = get_least_recently_used_files(storage_path)\n for file in lru_files:\n if current_size <= cleaned_size:\n break\n size = os.path.getsize(file)\n os.remove(file)\n current_size -= size"
},
{
"identifier": "LOCAL_PROJECT_PATH",
"path": "codelearn/base.py",
"snippet": "LOCAL_PROJECT_PATH = os.path.join(BASE_PROJECT_PATH, \"projects\")"
}
] | import asyncio
import os
import time
import uuid
from typing import Any, Dict, List, Optional
from openai import Embedding
from codelearn.base import LOCAL_PROJECT_PATH
from codelearn.index.indexer import Indexer, Metadata
from codelearn.loader.loader import ProjectLoader
from datetime import datetime, timedelta
from codelearn.project.project import Project
from codelearn.retrieval.retriever import RetrieveResults, Retriever
from codelearn.splitter.splitter import Splitter
from codelearn.storage.project_storage import ProjectStorageManager
from codelearn.storage.vector import VectorStoreBase
from codelearn.utils.clearn_task_queue import AsyncQueue, async_cleanup, sync_cleanup
from codelearn.base import LOCAL_PROJECT_PATH | 1,734 |
class ProjectManager:
PROJECT_UPDATE_THRESHOLD = 30 * 24 * 60 * 60
def __init__(self,
loaders: Dict[str, ProjectLoader],
splitters: Dict[str, Splitter],
|
class ProjectManager:
PROJECT_UPDATE_THRESHOLD = 30 * 24 * 60 * 60
def __init__(self,
loaders: Dict[str, ProjectLoader],
splitters: Dict[str, Splitter], | indexers: Dict[str, Indexer], | 1 | 2023-11-12 13:13:30+00:00 | 4k |
kirill-vish/Beyond-INet | inference/modelvshuman/model_evaluator.py | [
{
"identifier": "load_model_transform",
"path": "utils/misc.py",
"snippet": "def load_model_transform(model_name, pretrained_dir, img_size=224):\n print(f\"Loading {model_name}\")\n checkpoint_path = None\n transform_val = None\n if model_name == \"deit3_21k\":\n model = models_deit.deit_base_patch16_LS(img_size=img_size)\n checkpoint_path = os.path.join(pretrained_dir,\n \"deit_3_base_224_21k.pth\")\n elif model_name == \"convnext_base_21k\":\n model = models_convnextv1.convnext_base()\n checkpoint_path = os.path.join(pretrained_dir,\n \"convnext_base_22k_1k_224.pth\")\n elif model_name == \"vit_clip\":\n model, _, transform_val = open_clip.create_model_and_transforms(\n 'ViT-B-16', pretrained='laion400m_e31', force_image_size=img_size)\n model = models_clip.CLIPModel(model=model, model_name='ViT-B-16')\n checkpoint_path = None\n elif model_name == \"convnext_clip\":\n model, _, transform_val = open_clip.create_model_and_transforms(\n 'convnext_base',\n pretrained='laion400m_s13b_b51k',\n force_image_size=img_size)\n model = models_clip.CLIPModel(model=model, model_name='convnext_base')\n checkpoint_path = None\n\n if checkpoint_path is not None:\n checkpoint = torch.load(checkpoint_path)\n state_dict = checkpoint['model']\n if img_size != 224 and model_name == 'deit3_21k':\n state_dict = interpolate_pos_embed(model, state_dict)\n msg = model.load_state_dict(state_dict, strict=False)\n print(msg)\n assert set(checkpoint['model'].keys()) == set(\n model.state_dict().keys())\n assert len(msg.missing_keys) == 0 and len(\n msg.unexpected_keys\n ) == 0, \"Some keys in the state dict do not match\"\n\n return model, transform_val"
},
{
"identifier": "evaluate",
"path": "inference/modelvshuman/evaluation/evaluate.py",
"snippet": "IMAGENET_LABEL_FILE = pjoin(c.CODE_DIR, \"evaluation\", \"imagenet_labels.txt\")\ndef print_performance_to_csv(model_name,\n dataset_name,\n performance,\n metric_name,\n data_parent_dir=c.PERFORMANCES_DIR):\ndef print_predictions_to_console(softmax_output,\n top_n=5,\n labels_path=IMAGENET_LABEL_FILE):\n def __init__(self, model_name, dataset, data_parent_dir=c.RAW_DATA_DIR):\n def create_session_csv(self, session):\n def print_batch_to_csv(self, object_response, batch_targets, paths):\nclass ResultPrinter():"
},
{
"identifier": "load_dataset",
"path": "inference/modelvshuman/utils.py",
"snippet": "def load_dataset(name, *args, **kwargs):\n default_kwargs = {\"batch_size\": 16, \"num_workers\": 4}\n kwargs = {**default_kwargs, **kwargs}\n logger.info(f\"Loading dataset {name}\")\n supported_datasets = dataset_module.list_datasets()\n module_name = supported_datasets.get(name, None)\n if module_name is None:\n raise NameError(\n f\"Dataset {name} is not supported, \"\n f\"please select from {list(supported_datasets.keys())}\")\n elif os.path.exists(join(c.DATASET_DIR, name)):\n return eval(f\"dataset_module.{module_name}\")(*args, **kwargs)\n elif try_download_dataset_from_github(name):\n return eval(f\"dataset_module.{module_name}\")(*args, **kwargs)\n else:\n raise NotImplementedError(\n f\"Dataset {name} not available for download, please obtain the dataset \"\n f\"yourself and save it to {c.DATASET_DIR}\")"
},
{
"identifier": "load_model",
"path": "inference/modelvshuman/utils.py",
"snippet": "def load_model(model_name, *args):\n if model_name in zoomodels.__dict__:\n model = eval(\"pytorch_model_zoo.model_pytorch\")(model_name, *args)\n framework = 'pytorch'\n else:\n model = eval(f\"pytorch_model_zoo.model_timm\")(model_name, *args)\n framework = 'pytorch'\n return model, framework"
}
] | import copy
import datetime
import logging
import os
import matplotlib as mpl
import torch
from torch.nn.functional import softmax
from tqdm import tqdm
from utils.misc import load_model_transform
from .evaluation import evaluate as e
from .utils import load_dataset, load_model | 1,705 |
logger = logging.getLogger(__name__)
MAX_NUM_MODELS_IN_CACHE = 3
mpl.rcParams['font.size'] = 22
def device():
return torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class ModelEvaluator:
def _pytorch_evaluator(self, model_name, model, dataset, *args, **kwargs):
"""
Evaluate Model on the given dataset and return the accuracy.
Args:
model_name:
model:
dataset:
*args:
**kwargs:
"""
logging_info = f"Evaluating model {model_name} on dataset {dataset.name} using Pytorch Evaluator"
logger.info(logging_info)
print(logging_info)
for metric in dataset.metrics:
metric.reset()
with torch.no_grad():
result_writer = e.ResultPrinter(model_name=model_name,
dataset=dataset)
for images, target, paths in tqdm(dataset.loader):
images = images.to(device())
if "forward_batch" in dir(model):
logits = model.forward_batch(images)
softmax_output = model.softmax(logits)
else:
logits = model(images)
softmax_output = softmax(logits,
dim=1).detach().cpu().numpy()
if isinstance(target, torch.Tensor):
batch_targets = model.to_numpy(target)
else:
batch_targets = target
predictions = dataset.decision_mapping(softmax_output)
for metric in dataset.metrics:
metric.update(predictions, batch_targets, paths)
if kwargs["print_predictions"]:
result_writer.print_batch_to_csv(
object_response=predictions,
batch_targets=batch_targets,
paths=paths)
def _get_datasets(self, dataset_names, *args, **kwargs):
dataset_list = []
for dataset in dataset_names:
dataset = load_dataset(dataset, *args, **kwargs)
dataset_list.append(dataset)
return dataset_list
def _get_evaluator(self, framework):
if framework == 'pytorch':
return self._pytorch_evaluator
else:
raise NameError("Unsupported evaluator")
def _remove_model_from_cache(self, framework, model_name):
def _format_name(name):
return name.lower().replace("-", "_")
try:
if framework == "pytorch":
cachedir = "/root/.cache/torch/checkpoints/"
downloaded_models = os.listdir(cachedir)
for dm in downloaded_models:
if _format_name(dm).startswith(_format_name(model_name)):
os.remove(os.path.join(cachedir, dm))
except:
pass
def __call__(self, models, dataset_names, *args, **kwargs):
"""
Wrapper call to _evaluate function.
Args:
models:
dataset_names:
*args:
**kwargs:
Returns:
"""
logging.info("Model evaluation.")
_datasets = self._get_datasets(dataset_names, *args, **kwargs)
for model_name in models:
datasets = _datasets
if model_name in [
"deit3_21k", "convnext_base_21k", "convnextv2_base",
"vit_clip", "convnext_clip"
]:
|
logger = logging.getLogger(__name__)
MAX_NUM_MODELS_IN_CACHE = 3
mpl.rcParams['font.size'] = 22
def device():
return torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class ModelEvaluator:
def _pytorch_evaluator(self, model_name, model, dataset, *args, **kwargs):
"""
Evaluate Model on the given dataset and return the accuracy.
Args:
model_name:
model:
dataset:
*args:
**kwargs:
"""
logging_info = f"Evaluating model {model_name} on dataset {dataset.name} using Pytorch Evaluator"
logger.info(logging_info)
print(logging_info)
for metric in dataset.metrics:
metric.reset()
with torch.no_grad():
result_writer = e.ResultPrinter(model_name=model_name,
dataset=dataset)
for images, target, paths in tqdm(dataset.loader):
images = images.to(device())
if "forward_batch" in dir(model):
logits = model.forward_batch(images)
softmax_output = model.softmax(logits)
else:
logits = model(images)
softmax_output = softmax(logits,
dim=1).detach().cpu().numpy()
if isinstance(target, torch.Tensor):
batch_targets = model.to_numpy(target)
else:
batch_targets = target
predictions = dataset.decision_mapping(softmax_output)
for metric in dataset.metrics:
metric.update(predictions, batch_targets, paths)
if kwargs["print_predictions"]:
result_writer.print_batch_to_csv(
object_response=predictions,
batch_targets=batch_targets,
paths=paths)
def _get_datasets(self, dataset_names, *args, **kwargs):
dataset_list = []
for dataset in dataset_names:
dataset = load_dataset(dataset, *args, **kwargs)
dataset_list.append(dataset)
return dataset_list
def _get_evaluator(self, framework):
if framework == 'pytorch':
return self._pytorch_evaluator
else:
raise NameError("Unsupported evaluator")
def _remove_model_from_cache(self, framework, model_name):
def _format_name(name):
return name.lower().replace("-", "_")
try:
if framework == "pytorch":
cachedir = "/root/.cache/torch/checkpoints/"
downloaded_models = os.listdir(cachedir)
for dm in downloaded_models:
if _format_name(dm).startswith(_format_name(model_name)):
os.remove(os.path.join(cachedir, dm))
except:
pass
def __call__(self, models, dataset_names, *args, **kwargs):
"""
Wrapper call to _evaluate function.
Args:
models:
dataset_names:
*args:
**kwargs:
Returns:
"""
logging.info("Model evaluation.")
_datasets = self._get_datasets(dataset_names, *args, **kwargs)
for model_name in models:
datasets = _datasets
if model_name in [
"deit3_21k", "convnext_base_21k", "convnextv2_base",
"vit_clip", "convnext_clip"
]: | model, transform_val = load_model_transform( | 0 | 2023-11-15 22:22:06+00:00 | 4k |
shengliu66/ICV | tasks/base.py | [
{
"identifier": "hf_datasets_root",
"path": "anchor.py",
"snippet": ""
},
{
"identifier": "TokenizedForStyleRightPad",
"path": "tasks/loader.py",
"snippet": "class TokenizedForStyleRightPad(Dataset):\n def __init__(self, data, tok: PreTrainedTokenizer, prompt_fn, mode = 'eval', no_padding=False, prefix=''):\n # data: [query: str, choices: list(str)]\n self.tok = tok\n self.prompt_fn = prompt_fn\n self.references = None\n self.max_length = self._find_max_length(data, mode=mode)\n if mode == 'ft':\n self.data = self._build_ft_data(data)\n elif mode == 'eval':\n self.data, self.references = self._build_eval_data(data, no_padding=no_padding, prefix=prefix)\n else:\n raise NotImplementedError\n logger.info(f\"Tokenization finished: {len(self.data)}, max_length={self.max_length}\")\n\n def _find_max_length(self, data, mode=eval):\n max_len = 0\n\n def tok_len(t):\n return len(self.tok.encode(t))\n\n for ex in tqdm(data, desc=\"Data preprocessing(1/2)\"):\n query = ex[\"query\"]\n if mode == 'eval':\n len_query = len(self.prompt_fn(query)[0])\n elif mode == 'ft':\n len_query = len(self.prompt_fn(query)[1])\n else:\n raise NotImplementedError\n max_len = max(max_len, len_query)\n return max_len\n\n def _build_eval_data(self, data, no_padding=False, prefix=''):\n processed = []\n references = []\n for ex in tqdm(data, desc=\"Data preprocessing(2/2)\"):\n query = ex[\"query\"]\n processed_input = self.prompt_fn(query, return_reference = True, Instruction = prefix)\n t_query, t_full, t_reference = processed_input\n processed_input = self.tokenize(t_full, t_query, no_padding=no_padding)\n processed.append(processed_input)\n references.append(t_reference)\n\n logger.info(\"Style dataset: finish!\")\n return processed, references\n\n def _build_ft_data(self, data):\n processed = []\n for ex in tqdm(data, desc=\"Data preprocessing(2/2)\"):\n query = ex[\"query\"]\n processed_input = self.prompt_fn(query)\n t_query, t_full = processed_input\n processed_input = self.tokenize(t_query, t_full)\n processed.append(processed_input)\n\n logger.info(\"Finetuning dataset: finish!\")\n return processed\n\n def tokenize_demonstration(self, demonstration):\n e = self.tok(demonstration)\n return torch.LongTensor(e[\"input_ids\"]), torch.LongTensor(e[\"attention_mask\"]) # no padding\n\n def tokenize_each_demonstration(self, demonstration_list, dataset_name=None):\n tokenized_demonstration_list = []\n for exp_id in range(len(demonstration_list)):\n demonstration_list[exp_id] = (demonstration_list[exp_id][0].strip(\" .\").strip(\".\"), demonstration_list[exp_id][1].strip(\" .\").strip(\".\"))\n\n e_original = self.tok(demonstration_list[exp_id][0]) \n e_rewrite = self.tok(demonstration_list[exp_id][1])\n tokenized_demonstration_list.append((e_original, e_rewrite)) \n return tokenized_demonstration_list\n\n def tokenize(self, only_query, full_text, no_padding = False):\n tok_only_query = self.tok(only_query, add_special_tokens=False)\n tok_full_no_padding = self.tok(full_text, add_special_tokens=False)\n tok_full = self.tok(\n full_text,\n padding=\"max_length\",\n max_length=self.max_length,\n add_special_tokens=False,\n ) # <pad> is not a special token\n\n if no_padding: \n e = {\n \"input_ids\": tok_full_no_padding.input_ids,\n \"attention_mask\": tok_full_no_padding.attention_mask,\n }\n else:\n e = {\n \"input_ids\": tok_full.input_ids,\n \"attention_mask\": tok_full.attention_mask,\n }\n\n return e\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, idx):\n\n es = self.data[idx]\n\n if self.references:\n return torch.LongTensor(es[\"input_ids\"]), torch.LongTensor(es[\"attention_mask\"]), self.references[idx]\n else:\n return es"
},
{
"identifier": "RandomContext",
"path": "utils/rng_ctx.py",
"snippet": "class RandomContext:\n \"\"\"Save and restore state of PyTorch, NumPy, Python RNGs.\"\"\"\n\n def __init__(self, seed=None):\n outside_state = RandomState()\n\n random.seed(seed)\n np.random.seed(seed)\n if seed is None:\n torch.manual_seed(random.randint(-sys.maxsize - 1, sys.maxsize))\n else:\n torch.manual_seed(seed)\n # torch.cuda.manual_seed_all is called by torch.manual_seed\n self.inside_state = RandomState()\n\n outside_state.restore()\n\n self._active = False\n\n def __enter__(self):\n if self._active:\n raise Exception(\"RandomContext can be active only once\")\n\n self.outside_state = RandomState()\n self.inside_state.restore()\n self._active = True\n\n def __exit__(self, exception_type, exception_value, traceback):\n self.inside_state = RandomState()\n self.outside_state.restore()\n self.outside_state = None\n\n self._active = False"
},
{
"identifier": "EmptyContext",
"path": "utils/rng_ctx.py",
"snippet": "class EmptyContext:\n def __enter__(self):\n pass\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n pass"
},
{
"identifier": "PCA",
"path": "utils/pca.py",
"snippet": "class PCA(nn.Module):\n def __init__(self, n_components):\n super().__init__()\n self.n_components = n_components\n\n @torch.no_grad()\n def fit(self, X):\n n, d = X.size()\n if self.n_components is not None:\n d = min(self.n_components, d)\n self.register_buffer(\"mean_\", X.mean(0, keepdim=True))\n Z = X - self.mean_ # center\n U, S, Vh = torch.linalg.svd(Z, full_matrices=False)\n Vt = Vh\n U, Vt = svd_flip(U, Vt)\n self.register_buffer(\"components_\", Vt[:d])\n return self\n\n def forward(self, X):\n return self.transform(X)\n\n def transform(self, X):\n assert hasattr(self, \"components_\"), \"PCA must be fit before use.\"\n return torch.matmul(X - self.mean_, self.components_.t())\n\n def fit_transform(self, X):\n self.fit(X)\n return self.transform(X)\n\n def inverse_transform(self, Y):\n assert hasattr(self, \"components_\"), \"PCA must be fit before use.\"\n return torch.matmul(Y, self.components_) + self.mean_"
},
{
"identifier": "modified_forward_context_manager",
"path": "utils/context_manager.py",
"snippet": "def modified_forward_context_manager(model, forward_modifiers=()):\n context_manager = CombinedContextManager([*forward_modifiers])\n return context_manager"
},
{
"identifier": "traced_forward_context_manager",
"path": "utils/context_manager.py",
"snippet": "def traced_forward_context_manager(model, with_submodules=False):\n forward_trace = ForwardTrace()\n context_manager = ForwardTracer(model, forward_trace, with_submodules=with_submodules)\n return context_manager, forward_trace"
}
] | import json
import logging
import random
import re
import torch
import numpy as np
import datasets
from collections import defaultdict
from anchor import hf_datasets_root
from tasks.loader import TokenizedForStyleRightPad
from utils.rng_ctx import RandomContext, EmptyContext
from utils.pca import PCA
from utils.context_manager import modified_forward_context_manager, traced_forward_context_manager | 2,004 |
logger = logging.getLogger("task")
class BaseProbInference:
def __init__(self, prompt_version):
if prompt_version == "default":
self.prompt_version = self.default_prompt_version()
else:
self.prompt_version = prompt_version
self.raw_data_sample = None
self.raw_data_dev = None
self.can_be_stratified = False
self.num_base_shot = 1
self._rng_context = EmptyContext()
self._cached_prefix = None
self._cached_ex_list = None
self._cahced_selected_exemplar = None
self.shuffled_mapping = None
def default_prompt_version(self):
raise NotImplementedError
def set_seed(self, seed):
|
logger = logging.getLogger("task")
class BaseProbInference:
def __init__(self, prompt_version):
if prompt_version == "default":
self.prompt_version = self.default_prompt_version()
else:
self.prompt_version = prompt_version
self.raw_data_sample = None
self.raw_data_dev = None
self.can_be_stratified = False
self.num_base_shot = 1
self._rng_context = EmptyContext()
self._cached_prefix = None
self._cached_ex_list = None
self._cahced_selected_exemplar = None
self.shuffled_mapping = None
def default_prompt_version(self):
raise NotImplementedError
def set_seed(self, seed): | self._rng_context = RandomContext(seed=seed) | 2 | 2023-11-11 18:20:45+00:00 | 4k |
Mohamad-Hussein/speech-assistant | src/parent.py | [
{
"identifier": "service",
"path": "src/model_inference.py",
"snippet": "def service(queue, event):\n # Configure the logging settings\n logging.basicConfig(\n level=logging.DEBUG,\n format=\"%(asctime)s - %(levelname)s - %(message)s\",\n filename=join(\"logs\", \"model.log\"),\n filemode=\"w\",\n )\n logger = logging.getLogger(__name__)\n\n # Checking for GPU\n device, device_name, torch_dtype = find_gpu_config(logger)\n\n # Setting cache dir\n local_cache_dir = join(\".\", \"model\")\n\n # Creating model\n model = AutoModelForSpeechSeq2Seq.from_pretrained(\n MODEL_ID,\n torch_dtype=torch_dtype,\n low_cpu_mem_usage=True,\n use_safetensors=True,\n cache_dir=local_cache_dir,\n )\n model.to(device)\n\n # Makes inference faster for transformers\n if \"cuda\" in device.type or \"cpu\" in device.type:\n from optimum.bettertransformer import BetterTransformer\n\n model = BetterTransformer.transform(model)\n\n # Making pipeline for inference\n processor = AutoProcessor.from_pretrained(MODEL_ID, cache_dir=local_cache_dir)\n\n model_pipe = pipeline(\n \"automatic-speech-recognition\",\n model=model,\n tokenizer=processor.tokenizer,\n feature_extractor=processor.feature_extractor,\n max_new_tokens=128,\n chunk_length_s=15,\n batch_size=16,\n torch_dtype=torch_dtype,\n device=device,\n )\n\n # Checking if GPU or CPU used\n if device_name:\n print(f\"\\n\\n\\033[1m{MODEL_ID}\\033[0m loaded to {device_name}\\n\\n\")\n else:\n print(\n f\"\\n\\033[1m{MODEL_ID}\\033[0m loaded to physical memory and CPU is used.\\n\"\n + \"WARNING: Unfortunatly these models are not optimal to be computed on CPU!\\n\\n\"\n )\n del device, torch_dtype, local_cache_dir, processor\n\n # Telling parent that model is loaded\n event.set()\n # To make sure event is cleared before model inference\n sleep(1)\n\n # Make sure event is cleared before then\n try:\n while 1:\n # Waits in standy for inference, no need for this.\n # event.wait()\n\n # Get audio bytes from queue\n audio_bytes = queue.get(block=True)\n t0 = time()\n\n # Transcribing.\n result = model_pipe(audio_bytes)\n logger.info(f\"Time for inference: {time() - t0:.4f} seconds\")\n\n # Process text\n processed_text = process_text(result[\"text\"])\n\n # Write text\n WRITE(processed_text)\n\n # Action report\n speech_to_text_time = time() - t0\n print(\n f\"\\nPrinted text: {result['text']}\\nSpeech-to-text time: {speech_to_text_time:.3f}s\\n\"\n )\n\n # Resetting\n logger.debug(f\"Result: {result}\")\n event.clear()\n\n except KeyboardInterrupt:\n print(\"\\n\\033[92m\\033[4mmodel_inference.py\\033[0m \\033[92mprocess ended\\033[0m\")\n except Exception as e:\n logger.error(f\"Exception hit: {e}\")\n print(\"\\n\\033[91m\\033[4mmodel_inference.py\\033[0m \\033[91mprocess ended\\033[0m\")\n exit(1)\n finally:\n pass"
},
{
"identifier": "run_listener",
"path": "src/funcs.py",
"snippet": "def run_listener(child_pipe, start_event, model_event):\n \"\"\"\n Runs the key listener based on the OS.\n\n Args:\n child_pipe (multiprocessing.Pipe): Pipe for communication with the child process\n start_event (multiprocessing.Event): Event to tell the child process that the model is loaded\n model_event (multiprocessing.Event): Event to tell the child process that the model is loaded\n\n Returns:\n None\n \"\"\"\n # Differentiate between windows and linux\n if system() == \"Windows\":\n from src.key_listener_win import Listener\n else:\n from src.key_listener import Listener\n\n a = Listener(child_pipe, start_event, model_event)\n a.run()"
},
{
"identifier": "get_audio",
"path": "src/funcs.py",
"snippet": "def get_audio():\n \"\"\"Creates the audio stream for recording audio from the microphone.\"\"\"\n\n audio = PyAudio()\n stream_input = audio.open(\n format=paInt16,\n channels=1,\n rate=44100,\n input=True,\n frames_per_buffer=1024,\n )\n\n return audio, stream_input"
},
{
"identifier": "create_sound_file",
"path": "src/funcs.py",
"snippet": "def create_sound_file(file_name=\"tmp.wav\"):\n \"\"\"Creates a sound file for writing\"\"\"\n # Copying soundbyte for debugging purposes\n sound_file = wave.open(file_name, \"wb\")\n sound_file.setnchannels(1)\n sound_file.setsampwidth(2) # 2 bytes = 16 bits p\n sound_file.setframerate(44100)\n\n return sound_file"
},
{
"identifier": "pcm_to_wav",
"path": "src/funcs.py",
"snippet": "def pcm_to_wav(input_pcm):\n \"\"\"\n Converts PCM bytes to WAV bytes so that the HuggingFace pipeline receives\n bytes that ffmpeg could interpret.\n\n Args:\n input_pcm (bytes): PCM bytes from pyaudio\n\n Returns:\n wav_data (bytes): WAV bytes\n \"\"\"\n with io.BytesIO() as wav_file:\n wav_writer = wave.open(wav_file, \"wb\")\n\n try:\n wav_writer.setframerate(44100)\n wav_writer.setsampwidth(2)\n wav_writer.setnchannels(1)\n wav_writer.writeframes(input_pcm)\n wav_data = wav_file.getvalue()\n except Exception:\n logger.error(f\"Exception on pcm_to_wav: {traceback.format_exc()}\")\n finally:\n wav_writer.close()\n\n return wav_data"
}
] | from time import time, sleep
from os.path import join
from shutil import copy
from multiprocessing import Process, Event, Pipe, Queue
from threading import Thread
from src.model_inference import service
from src.funcs import run_listener
from src.funcs import get_audio, create_sound_file, pcm_to_wav
from playsound import playsound
import logging
import traceback | 1,851 |
# Global variables
# -------------------------
# Change to True if you want to save audio to a file called recording.wav
SAVE_AUDIO = False
# Create a logger instance
logger = logging.getLogger(__name__)
# Getting audio inputs
audio, stream_input = get_audio()
# No audio being recorded
stream_input.stop_stream()
# -------------------------
def start_recording(start_event, model_event, queue):
logger.info("sound-high played")
t0 = time()
# This line to wake device from sleep state
# Huge performance gain from Threading and playsound
sound1 = Thread(
target=playsound, args=(join("effects", "button-high.wav"),), name="play-sound1"
)
sound2 = Thread(
target=playsound, args=(join("effects", "button-low.wav"),), name="play-sound2"
)
# Start stream
stream_input.start_stream()
logger.debug(f"Get read: {stream_input.get_read_available()}")
if not stream_input.is_active():
print("Stream is not active")
return
# Capturing audio
frames = []
try:
# Playing start sound
sound1.start()
logger.info(f"From start to capture: {time() - t0:.2f}s")
# Capturing audio
print("Capture STARTED")
while start_event.is_set():
data = stream_input.read(1024)
frames.append(data)
print("Capture FINISHED")
# Converting to wav
|
# Global variables
# -------------------------
# Change to True if you want to save audio to a file called recording.wav
SAVE_AUDIO = False
# Create a logger instance
logger = logging.getLogger(__name__)
# Getting audio inputs
audio, stream_input = get_audio()
# No audio being recorded
stream_input.stop_stream()
# -------------------------
def start_recording(start_event, model_event, queue):
logger.info("sound-high played")
t0 = time()
# This line to wake device from sleep state
# Huge performance gain from Threading and playsound
sound1 = Thread(
target=playsound, args=(join("effects", "button-high.wav"),), name="play-sound1"
)
sound2 = Thread(
target=playsound, args=(join("effects", "button-low.wav"),), name="play-sound2"
)
# Start stream
stream_input.start_stream()
logger.debug(f"Get read: {stream_input.get_read_available()}")
if not stream_input.is_active():
print("Stream is not active")
return
# Capturing audio
frames = []
try:
# Playing start sound
sound1.start()
logger.info(f"From start to capture: {time() - t0:.2f}s")
# Capturing audio
print("Capture STARTED")
while start_event.is_set():
data = stream_input.read(1024)
frames.append(data)
print("Capture FINISHED")
# Converting to wav | sound_byte_wav = pcm_to_wav(b"".join(frames)) | 4 | 2023-11-12 01:20:50+00:00 | 4k |
codereport/jello | jello.py | [
{
"identifier": "Grid",
"path": "grid.py",
"snippet": "class Grid:\n def __init__(self, n):\n self.n = n * 2\n self.grid = [[\" \"] * self.n, [\" \"] * self.n]\n\n def add_level(self):\n self.grid.append([\" \"] * self.n)\n self.grid.append([\" \"] * self.n)\n\n def add_subtree(self, level, start, end, s):\n if s in [\"W\", \"m\", \"d\"]:\n self.grid[level * 2 ][start] = VERT\n self.grid[level * 2 + 1][start] = s\n return\n if (level + 1) * 2 > len(self.grid):\n self.add_level()\n mid = (start + end) // 2\n self.grid[level * 2][start ] = START\n self.grid[level * 2][end ] = END\n self.grid[level * 2][start + 1:end ] = list(HORIZ * (end - start -1 ))\n self.grid[level * 2][(start + end) // 2] = MID\n self.grid[level * 2 + 1][mid - len(s) // 2:mid - len(s) // 2 + len(s)] = list(s)\n\n def fill_in_vertical_bars(self):\n for column in range(0, self.n):\n found_start_end = False\n for row in reversed(range(len(self.grid))):\n c = self.grid[row][column]\n if c in [START, END]:\n found_start_end = True\n elif found_start_end:\n if c == \" \":\n self.grid[row][column] = \"⋮\" # │ alternative\n else:\n found_start_end = False\n\n # combinator chain sequence\n def ccs(self):\n first_two = \"\".join(\"\".join(row).strip()[0:2] for row in self.grid)\n no_bars = \"\".join(c for c in first_two if c not in \"─└ ⋮┬│\")\n while \"h₁\" in no_bars:\n no_bars = no_bars.replace(\"h₁\", \"\")\n return no_bars\n\n def display(self, indent = 0):\n for row in self.grid:\n print(\" \" * indent + \"\".join(row))"
},
{
"identifier": "Chain",
"path": "utils.py",
"snippet": "class Chain(Enum):\n MONADIC = 1\n DYADIC = 2"
},
{
"identifier": "Quick",
"path": "utils.py",
"snippet": "class Quick(Enum):\n QUICK = 3\n EACH = 10\n FLIP = 50"
},
{
"identifier": "Separator",
"path": "utils.py",
"snippet": "class Separator(Enum):\n MONADIC = 20\n DYADIC = 21"
}
] | import subprocess
import algorithm
import arity_notation
import draw
import tokens
import utils
from colorama import Fore, init
from prompt_toolkit import prompt
from prompt_toolkit.completion import WordCompleter
from prompt_toolkit.history import FileHistory
from prompt_toolkit.shortcuts import CompleteStyle
from grid import Grid
from utils import Chain, Quick, Separator | 1,843 |
def clear_screen():
subprocess.call("clear", shell=True)
def run_jelly(expr: str, args: list[str]):
try:
command = ["jelly", "eun", expr, *args]
result = subprocess.run(command, text=True, capture_output=True, check=True)
output_text = result.stdout.strip()
draw.cprint(output_text, Fore.GREEN, True)
except subprocess.CalledProcessError as e:
# Print the stderr output for more information about the error
print(Fore.RED + f"Error: {e}")
print(Fore.RED + "stderr:", e.stderr)
completer = WordCompleter(
[k for k in sorted(
list(tokens.niladic.keys()) +
list(tokens.monadic.keys()) +
list(tokens.dyadic.keys()) +
list(tokens.quick.keys()) +
list(tokens.separators.keys())) if len(k) > 1])
history = FileHistory("jello_history.txt")
def is_nilad_array(s: str) -> bool:
return set(list(s)).issubset(list("0123456789,[]"))
def to_jelly(token: str) -> str:
if token in tokens.monadic: return tokens.monadic[token]
if token in tokens.dyadic: return tokens.dyadic[token]
if token in tokens.niladic: return tokens.niladic[token]
if token in tokens.quick: return tokens.quick[token]
if token in tokens.separators: return tokens.separators[token]
if is_nilad_array(token): return token
raise Exception(f"{token} is not a valid Jello keyword.")
def convert(expr: list[str]) -> str:
return "".join([to_jelly(t) for t in expr])
def keyword_arity(k: str) -> int:
if k in tokens.niladic: return 0
if k in tokens.monadic: return 1
if k in tokens.dyadic: return 2
if k == "each": return Quick.EACH
if k == "c": return Quick.FLIP
if k in tokens.quick: return Quick.QUICK
if k == ".": return Separator.MONADIC
if k == ":": return Separator.DYADIC
if is_nilad_array(k): return 0
raise Exception(f"{k} not handled in keyword_arity function.")
def arity_chain_repr(i: int) -> str:
if i in [Quick.QUICK, Quick.EACH, Quick.FLIP]: return "q"
if i in [Separator.MONADIC, Separator.DYADIC]: return "s"
return str(i)
def chain_arity_to_string(chain_arity: list[int]) -> str:
return "-".join([arity_chain_repr(e) for e in chain_arity])
def keyword_color(k: str):
if k in tokens.monadic: return Fore.GREEN
if k in tokens.dyadic: return Fore.BLUE
if k in tokens.quick: return Fore.RED
return Fore.WHITE
def colored_keywords(args, expr):
print(f"> {args} :: {' '.join(keyword_color(k) + k for k in expr.split())}")
def spaced_jelly_atoms(args, expr):
indent = " " * (2 + len(args) + 4)
spaced_jelly_atoms = " ".join(to_jelly(k).center(len(k)) for k in expr.split())
draw.cprint(indent + spaced_jelly_atoms, Fore.YELLOW, True)
def skip_trace(converted_expr: list[str], i: int) -> bool:
if converted_expr[i - 1] in list(tokens.separators.values()) + ["Œ", "œ"]:
return True
if i < len(converted_expr) and converted_expr[i] in tokens.quick.values():
return True
return False
if __name__ == "__main__":
init() # for colorama
print("🟢🟡🔴 Jello 🔴🟡🟢\n")
while True:
try:
user_input = prompt("> ",
completer=completer,
history=history,
reserve_space_for_menu=0,
complete_style=CompleteStyle.MULTI_COLUMN)
if user_input.strip().lower() == "q": break
if user_input.strip() == "?":
arity_notation.explain()
continue
clear_screen()
print("🟢🟡🔴 Jello 🔴🟡🟢\n")
if "::" not in user_input:
print(f"> {user_input}")
draw.cprint(" error: missing :: after args", Fore.RED, True)
continue
[args, expr] = [s.strip() for s in user_input.strip().split("::")] # should consist of keywords
colored_keywords(args, expr)
spaced_jelly_atoms(args, expr)
algorithm.advisor(expr)
expr = utils.remove_all(utils.split_keep_multiple_delimiters(expr, r" \(\)"), ["", " "])
args = args.split()
converted_expr = convert(expr)
| #!/usr/bin/env python3
def clear_screen():
subprocess.call("clear", shell=True)
def run_jelly(expr: str, args: list[str]):
try:
command = ["jelly", "eun", expr, *args]
result = subprocess.run(command, text=True, capture_output=True, check=True)
output_text = result.stdout.strip()
draw.cprint(output_text, Fore.GREEN, True)
except subprocess.CalledProcessError as e:
# Print the stderr output for more information about the error
print(Fore.RED + f"Error: {e}")
print(Fore.RED + "stderr:", e.stderr)
completer = WordCompleter(
[k for k in sorted(
list(tokens.niladic.keys()) +
list(tokens.monadic.keys()) +
list(tokens.dyadic.keys()) +
list(tokens.quick.keys()) +
list(tokens.separators.keys())) if len(k) > 1])
history = FileHistory("jello_history.txt")
def is_nilad_array(s: str) -> bool:
return set(list(s)).issubset(list("0123456789,[]"))
def to_jelly(token: str) -> str:
if token in tokens.monadic: return tokens.monadic[token]
if token in tokens.dyadic: return tokens.dyadic[token]
if token in tokens.niladic: return tokens.niladic[token]
if token in tokens.quick: return tokens.quick[token]
if token in tokens.separators: return tokens.separators[token]
if is_nilad_array(token): return token
raise Exception(f"{token} is not a valid Jello keyword.")
def convert(expr: list[str]) -> str:
return "".join([to_jelly(t) for t in expr])
def keyword_arity(k: str) -> int:
if k in tokens.niladic: return 0
if k in tokens.monadic: return 1
if k in tokens.dyadic: return 2
if k == "each": return Quick.EACH
if k == "c": return Quick.FLIP
if k in tokens.quick: return Quick.QUICK
if k == ".": return Separator.MONADIC
if k == ":": return Separator.DYADIC
if is_nilad_array(k): return 0
raise Exception(f"{k} not handled in keyword_arity function.")
def arity_chain_repr(i: int) -> str:
if i in [Quick.QUICK, Quick.EACH, Quick.FLIP]: return "q"
if i in [Separator.MONADIC, Separator.DYADIC]: return "s"
return str(i)
def chain_arity_to_string(chain_arity: list[int]) -> str:
return "-".join([arity_chain_repr(e) for e in chain_arity])
def keyword_color(k: str):
if k in tokens.monadic: return Fore.GREEN
if k in tokens.dyadic: return Fore.BLUE
if k in tokens.quick: return Fore.RED
return Fore.WHITE
def colored_keywords(args, expr):
print(f"> {args} :: {' '.join(keyword_color(k) + k for k in expr.split())}")
def spaced_jelly_atoms(args, expr):
indent = " " * (2 + len(args) + 4)
spaced_jelly_atoms = " ".join(to_jelly(k).center(len(k)) for k in expr.split())
draw.cprint(indent + spaced_jelly_atoms, Fore.YELLOW, True)
def skip_trace(converted_expr: list[str], i: int) -> bool:
if converted_expr[i - 1] in list(tokens.separators.values()) + ["Œ", "œ"]:
return True
if i < len(converted_expr) and converted_expr[i] in tokens.quick.values():
return True
return False
if __name__ == "__main__":
init() # for colorama
print("🟢🟡🔴 Jello 🔴🟡🟢\n")
while True:
try:
user_input = prompt("> ",
completer=completer,
history=history,
reserve_space_for_menu=0,
complete_style=CompleteStyle.MULTI_COLUMN)
if user_input.strip().lower() == "q": break
if user_input.strip() == "?":
arity_notation.explain()
continue
clear_screen()
print("🟢🟡🔴 Jello 🔴🟡🟢\n")
if "::" not in user_input:
print(f"> {user_input}")
draw.cprint(" error: missing :: after args", Fore.RED, True)
continue
[args, expr] = [s.strip() for s in user_input.strip().split("::")] # should consist of keywords
colored_keywords(args, expr)
spaced_jelly_atoms(args, expr)
algorithm.advisor(expr)
expr = utils.remove_all(utils.split_keep_multiple_delimiters(expr, r" \(\)"), ["", " "])
args = args.split()
converted_expr = convert(expr) | chain_type = Chain.MONADIC if len(args) == 1 else Chain.DYADIC | 1 | 2023-11-18 17:34:06+00:00 | 4k |
davep/tinboard | tinboard/widgets/details.py | [
{
"identifier": "CopyBookmarkURL",
"path": "tinboard/messages/commands.py",
"snippet": "class CopyBookmarkURL(Command):\n \"\"\"Copy the URL for the bookmark to the clipboard.\"\"\""
},
{
"identifier": "EditBookmark",
"path": "tinboard/messages/commands.py",
"snippet": "class EditBookmark(Command):\n \"\"\"Edit the current bookmark.\"\"\""
},
{
"identifier": "ToggleBookmarkRead",
"path": "tinboard/messages/commands.py",
"snippet": "class ToggleBookmarkRead(Command):\n \"\"\"Toggle the read status of the current bookmark.\"\"\""
},
{
"identifier": "ToggleBookmarkPublic",
"path": "tinboard/messages/commands.py",
"snippet": "class ToggleBookmarkPublic(Command):\n \"\"\"Toggle the public status of the current bookmark.\"\"\""
},
{
"identifier": "Bookmark",
"path": "tinboard/widgets/bookmarks.py",
"snippet": "class Bookmark(Option):\n \"\"\"An individual bookmark.\"\"\"\n\n PRIVATE_ICON: Final[str] = Emoji.replace(\":lock:\")\n \"\"\"The icon to use for a private bookmark.\"\"\"\n\n UNREAD_ICON: Final[str] = Emoji.replace(\":see-no-evil_monkey:\")\n \"\"\"The icon to use for an unread bookmark.\"\"\"\n\n def __init__(self, data: BookmarkData) -> None:\n \"\"\"Initialise the bookmark.\n\n Args:\n data: The bookmark data gathered from the server.\n \"\"\"\n self._data = data\n super().__init__(self.prompt, id=data.hash)\n\n @property\n def tags(self) -> list[str]:\n \"\"\"The tags of the bookmark, as a list.\"\"\"\n return self._data.tags.split()\n\n @property\n def prompt(self) -> Group:\n \"\"\"The prompt for the bookmark.\"\"\"\n # Create the title and icons line.\n title = Table.grid(expand=True)\n title.add_column(ratio=1)\n title.add_column(justify=\"right\")\n title.add_row(\n self._data.description,\n f\" {'' if self._data.shared else self.PRIVATE_ICON}{self.UNREAD_ICON if self._data.to_read else ''}\",\n )\n # Create the details line.\n details = Table.grid(expand=True)\n details.add_column(ratio=1)\n details.add_column()\n details.add_row(\n f\"[dim][i]{naturaltime(self._data.time)}[/][/]\",\n f\"[dim]{', '.join(sorted(self.tags, key = str.casefold))}[/]\",\n )\n # Combine them and add a rule afterwards.\n return Group(title, details, Rule(style=\"dim\"))\n\n def is_all(self, *checks: Callable[[\"Bookmark\"], bool]) -> bool:\n \"\"\"Does this bookmark pass all the given tests?\n\n Args:\n checks: The checks to run against the bookmark.\n\n Returns:\n `True` if all tests pass, `False` if not.\n \"\"\"\n return all(check(self) for check in checks)\n\n def is_tagged(self, *tags: str) -> bool:\n \"\"\"Is this bookmark tagged with the given tags?\n\n Args:\n tags: The tags to check for.\n\n Returns:\n `True` if the bookmark has all the tags, `False` if not.\n \"\"\"\n return {tag.casefold() for tag in tags}.issubset(\n {tag.casefold() for tag in self.tags}\n )\n\n def has_text(self, search_text: str) -> bool:\n \"\"\"Does the bookmark contain the given text?\n\n Note that this is a case-insensitive test.\n \"\"\"\n return (\n search_text.casefold()\n in f\"{self._data.description} {self._data.extended} {self._data.tags}\".casefold()\n )\n\n @classmethod\n def from_json(cls, data: dict[str, Any]) -> \"Bookmark\":\n \"\"\"Create a bookmark from some JSON data.\n\n Args:\n data: The data to create the bookmark from.\n\n Returns:\n The `Bookmark` instance.\n \"\"\"\n if \"time\" in data:\n data[\"time\"] = datetime.fromisoformat(data[\"time\"])\n return cls(BookmarkData(**data))\n\n @property\n def data(self) -> BookmarkData:\n \"\"\"The bookmark as the underlying bookmark data.\"\"\"\n return self._data\n\n @data.setter\n def data(self, data: BookmarkData) -> None:\n self._data = data\n self.set_prompt(self.prompt)"
},
{
"identifier": "InlineTags",
"path": "tinboard/widgets/tags.py",
"snippet": "class InlineTags(Tags):\n \"\"\"A version of the `Tags` widget intended to embed in another.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n InlineTags > .option-list--option {\n padding: 0;\n }\n \"\"\"\n\n _ICON: Final[str] = Emoji.replace(\":bookmark: \")\n \"\"\"The icon to show before tags.\"\"\"\n\n def _prompt(self, tag: str, count: int) -> RenderableType:\n \"\"\"A prompt for the given tag.\n\n Args:\n The tag to build a prompt for.\n\n Returns:\n The prompt for the tag.\n \"\"\"\n del count\n return f\"{self._ICON} {tag}\""
}
] | from webbrowser import open as open_url
from humanize import naturaltime
from textual import on
from textual.app import ComposeResult
from textual.binding import Binding
from textual.containers import VerticalScroll
from textual.message import Message
from textual.reactive import var
from textual.widgets import Label
from ..messages import (
CopyBookmarkURL,
EditBookmark,
ToggleBookmarkPublic,
ToggleBookmarkRead,
)
from .bookmarks import Bookmark
from .tags import InlineTags | 1,852 | """The details display widget."""
##############################################################################
# Python imports.
##############################################################################
# Humanize imports.
##############################################################################
# Textual imports.
##############################################################################
# Local imports.
##############################################################################
class Link(Label):
"""Widget for showing the link.
This is here mostly to work around the fact that a click action doesn't
propagate in the way you'd expect.
https://github.com/Textualize/textual/issues/3690
"""
class Visit(Message):
"""Message to indicate that the link should be visited."""
def action_visit(self) -> None:
"""Handle a UI request to visit the link."""
self.post_message(self.Visit())
##############################################################################
class Details(VerticalScroll):
"""A widget for displaying details of a bookmark."""
DEFAULT_CSS = """
Details {
scrollbar-gutter: stable;
.hidden {
visibility: hidden;
}
.empty {
display: none;
}
Label {
margin: 0 2 1 2;
width: 1fr;
color: $text;
}
#title {
background: $primary;
padding: 1 2 1 2;
text-align: center;
}
.detail {
background: $boost;
padding: 1 2 1 2;
}
#added-ish {
margin: 0 2 0 2;
padding: 1 2 0 2;
}
#added-exact {
margin: 0 2 1 2;
padding: 0 2 1 2;
text-align: right;
color: $text-muted;
text-style: italic;
}
InlineTags, InlineTags:focus {
margin: 0 2 1 2;
}
}
"""
BINDINGS = [
Binding("enter", "visit_bookmark", "Visit"),
Binding("c", "copy", "Copy to Clipboard"),
Binding("e", "edit", "Edit"),
Binding("ctrl+r", "read"),
Binding("ctrl+v", "public"),
]
CONTEXT_HELP = """
## Bookmark details keys
The following keys are available in the bookmark details:
| Key | Description |
| - | - |
| <kbd>Enter</kbd> | Visit the current bookmark. |
| <kbd>c</kbd> | Copy the URL of the bookmark to the clipboard. |
| <kbd>e</kbd> | Edit the details of the bookmark. |
| <kbd>Ctrl</kbd>+<kbd>r</kbd> | Toggle the read/unread status of the bookmark. |
| <kbd>Ctrl</kbd>+<kbd>v</kbd> | Toggle the visibility of the bookmark. |
"""
| """The details display widget."""
##############################################################################
# Python imports.
##############################################################################
# Humanize imports.
##############################################################################
# Textual imports.
##############################################################################
# Local imports.
##############################################################################
class Link(Label):
"""Widget for showing the link.
This is here mostly to work around the fact that a click action doesn't
propagate in the way you'd expect.
https://github.com/Textualize/textual/issues/3690
"""
class Visit(Message):
"""Message to indicate that the link should be visited."""
def action_visit(self) -> None:
"""Handle a UI request to visit the link."""
self.post_message(self.Visit())
##############################################################################
class Details(VerticalScroll):
"""A widget for displaying details of a bookmark."""
DEFAULT_CSS = """
Details {
scrollbar-gutter: stable;
.hidden {
visibility: hidden;
}
.empty {
display: none;
}
Label {
margin: 0 2 1 2;
width: 1fr;
color: $text;
}
#title {
background: $primary;
padding: 1 2 1 2;
text-align: center;
}
.detail {
background: $boost;
padding: 1 2 1 2;
}
#added-ish {
margin: 0 2 0 2;
padding: 1 2 0 2;
}
#added-exact {
margin: 0 2 1 2;
padding: 0 2 1 2;
text-align: right;
color: $text-muted;
text-style: italic;
}
InlineTags, InlineTags:focus {
margin: 0 2 1 2;
}
}
"""
BINDINGS = [
Binding("enter", "visit_bookmark", "Visit"),
Binding("c", "copy", "Copy to Clipboard"),
Binding("e", "edit", "Edit"),
Binding("ctrl+r", "read"),
Binding("ctrl+v", "public"),
]
CONTEXT_HELP = """
## Bookmark details keys
The following keys are available in the bookmark details:
| Key | Description |
| - | - |
| <kbd>Enter</kbd> | Visit the current bookmark. |
| <kbd>c</kbd> | Copy the URL of the bookmark to the clipboard. |
| <kbd>e</kbd> | Edit the details of the bookmark. |
| <kbd>Ctrl</kbd>+<kbd>r</kbd> | Toggle the read/unread status of the bookmark. |
| <kbd>Ctrl</kbd>+<kbd>v</kbd> | Toggle the visibility of the bookmark. |
"""
| bookmark: var[Bookmark | None] = var(None, always_update=True) | 4 | 2023-11-13 08:19:41+00:00 | 4k |
wurenkai/MHA-UNet | engine.py | [
{
"identifier": "save_imgs",
"path": "utils.py",
"snippet": "def save_imgs(img, msk, msk_pred, i, save_path, datasets, threshold=0.5, test_data_name=None):\r\n img = img.squeeze(0).permute(1,2,0).detach().cpu().numpy()\r\n img = img / 255. if img.max() > 1.1 else img\r\n if datasets == 'retinal':\r\n msk = np.squeeze(msk, axis=0)\r\n msk_pred = np.squeeze(msk_pred, axis=0)\r\n else:\r\n msk = np.where(np.squeeze(msk, axis=0) > 0.5, 1, 0)\r\n msk_pred = np.where(np.squeeze(msk_pred, axis=0) > threshold, 1, 0) \r\n\r\n plt.figure(figsize=(7,15))\r\n\r\n plt.subplot(3,1,1)\r\n plt.imshow(img)\r\n plt.axis('off')\r\n\r\n plt.subplot(3,1,2)\r\n plt.imshow(msk, cmap= 'gray')\r\n plt.axis('off')\r\n\r\n plt.subplot(3,1,3)\r\n plt.imshow(msk_pred, cmap = 'gray')\r\n plt.axis('off')\r\n\r\n if test_data_name is not None:\r\n save_path = save_path + test_data_name + '_'\r\n plt.savefig(save_path + str(i) +'.png')\r\n plt.close()\r"
},
{
"identifier": "save_imgs_explainable",
"path": "utils.py",
"snippet": "def save_imgs_explainable(ig,img,ig2,ig3,ig4,ig5, i,a, save_path, datasets, threshold=0.5, test_data_name=None):\r\n ig = ig.squeeze(0).permute(1,2,0).detach().cpu().numpy()\r\n ig = ig / 255. if ig.max() > 1.1 else ig\r\n\r\n img = img.squeeze(0).permute(1,2,0).detach().cpu().numpy()\r\n img = img / 255. if img.max() > 1.1 else img\r\n\r\n ig2 = ig2.squeeze(0).permute(1,2,0).detach().cpu().numpy()\r\n ig2 = ig2 / 255. if ig2.max() > 1.1 else ig2\r\n\r\n ig3 = ig3.squeeze(0).permute(1,2,0).detach().cpu().numpy()\r\n ig3 = ig3 / 255. if ig3.max() > 1.1 else ig3\r\n\r\n ig4 = ig4.squeeze(0).permute(1,2,0).detach().cpu().numpy()\r\n ig4 = ig4 / 255. if ig4.max() > 1.1 else ig4\r\n\r\n ig5 = ig5.squeeze(0).permute(1,2,0).detach().cpu().numpy()\r\n ig5 = ig5 / 255. if ig5.max() > 1.1 else ig5\r\n\r\n images=[]\r\n\r\n plt.figure(figsize=(7,15))\r\n\r\n plt.subplot(6,1,1)\r\n plt.imshow(ig)\r\n plt.axis('off')\r\n\r\n plt.subplot(6,1,2)\r\n plt.imshow(img[:, :, 0], extent=[0, 32, 0, 32])\r\n plt.axis('off')\r\n images.append(img)\r\n\r\n plt.subplot(6,1,3)\r\n plt.imshow(ig2[:, :, 0], extent=[0, 32, 0, 32])\r\n plt.axis('off')\r\n images.append(ig2)\r\n\r\n plt.subplot(6,1,4)\r\n plt.imshow(ig3[:, :, 0], extent=[0, 32, 0, 32])\r\n plt.axis('off')\r\n images.append(ig3)\r\n\r\n plt.subplot(6,1,5)\r\n plt.imshow(ig4[:, :, 0], extent=[0, 32, 0, 32])\r\n plt.axis('off')\r\n images.append(ig4)\r\n\r\n plt.subplot(6,1,6)\r\n plt.imshow(ig5[:, :, 0], extent=[0, 32, 0, 32])\r\n plt.axis('off')\r\n images.append(ig5)\r\n\r\n # EICA\r\n \"\"\"\r\n Explainable Inference Classification Algorithm\r\n Created on Wed Nov 15 14:23:43 2023\r\n @author: Renkai Wu\r\n \"\"\"\r\n img_gray = (img[:, :, 0] * 255).astype(np.uint8)\r\n ig2_gray = (ig2[:, :, 0] * 255).astype(np.uint8)\r\n ig3_gray = (ig2[:, :, 0] * 255).astype(np.uint8)\r\n ig4_gray = (ig4[:, :, 0] * 255).astype(np.uint8)\r\n ig5_gray = (ig5[:, :, 0] * 255).astype(np.uint8)\r\n\r\n center_x, center_y = img.shape[1] // 2, img.shape[0] // 2\r\n\r\n # img\r\n quadrant_1 = img_gray[:center_y, center_x:]\r\n quadrant_2 = img_gray[:center_y, :center_x]\r\n quadrant_3 = img_gray[center_y:, :center_x]\r\n quadrant_4 = img_gray[center_y:, center_x:]\r\n\r\n # ig2\r\n ig2quadrant_1 = ig2_gray[:center_y, center_x:]\r\n ig2quadrant_2 = ig2_gray[:center_y, :center_x]\r\n ig2quadrant_3 = ig2_gray[center_y:, :center_x]\r\n ig2quadrant_4 = ig2_gray[center_y:, center_x:]\r\n\r\n # ig3\r\n ig3quadrant_1 = ig4_gray[:center_y, center_x:]\r\n ig3quadrant_2 = ig4_gray[:center_y, :center_x]\r\n ig3quadrant_3 = ig4_gray[center_y:, :center_x]\r\n ig3quadrant_4 = ig4_gray[center_y:, center_x:]\r\n\r\n # ig4\r\n ig4quadrant_1 = ig5_gray[:center_y, center_x:]\r\n ig4quadrant_2 = ig5_gray[:center_y, :center_x]\r\n ig4quadrant_3 = ig5_gray[center_y:, :center_x]\r\n ig4quadrant_4 = ig5_gray[center_y:, center_x:]\r\n\r\n # ig5\r\n ig6quadrant_1 = ig3_gray[:center_y, center_x:]\r\n ig6quadrant_2 = ig3_gray[:center_y, :center_x]\r\n ig6quadrant_3 = ig3_gray[center_y:, :center_x]\r\n ig6quadrant_4 = ig3_gray[center_y:, center_x:]\r\n\r\n threshold = 225\r\n\r\n condition_1 = np.max(quadrant_1) > threshold or np.max(quadrant_2) > threshold\r\n condition_2 = np.max(ig2quadrant_4) > threshold\r\n condition_3 = np.max(ig3quadrant_1) > threshold\r\n condition_4 = np.max(ig4quadrant_2) > threshold or np.max(ig4quadrant_3) > threshold\r\n\r\n conditions_met = sum([condition_1, condition_2, condition_3, condition_4])\r\n\r\n output = 1 if conditions_met >= 4 else 0\r\n 
print(output)\r\n s = a + output\r\n\r\n\r\n if test_data_name is not None:\r\n save_path = save_path + test_data_name + '_'\r\n plt.savefig(save_path + str(i) +'.png')\r\n plt.close()\r\n\r\n return s\r"
}
] | import numpy as np
import torch
import torchvision.transforms as transforms
import torch.nn.functional as F
from tqdm import tqdm
from torch.cuda.amp import autocast as autocast
from sklearn.metrics import confusion_matrix
from utils import save_imgs,save_imgs_explainable
from PIL import Image
| 2,981 | optimizer,
scheduler,
epoch,
logger,
config,
scaler=None):
'''
train model for one epoch
'''
# switch to train mode
model.train()
loss_list = []
for iter, data in enumerate(train_loader):
optimizer.zero_grad()
images, targets = data
images, targets = images.cuda(non_blocking=True).float(), targets.cuda(non_blocking=True).float()
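        # Use automatic mixed precision when enabled; otherwise run a standard full-precision step.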
if config.amp:
with autocast():
out, x0, x1, x2, x3, x4 = model(images)
loss = criterion(out, targets)
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
else:
out,x0,x1,x2,x3,x4 = model(images)
loss = criterion(out, targets)
loss.backward()
optimizer.step()
loss_list.append(loss.item())
now_lr = optimizer.state_dict()['param_groups'][0]['lr']
if iter % config.print_interval == 0:
log_info = f'train: epoch {epoch}, iter:{iter}, loss: {np.mean(loss_list):.4f}, lr: {now_lr}'
print(log_info)
logger.info(log_info)
scheduler.step()
def val_one_epoch(test_loader,
model,
criterion,
epoch,
logger,
config):
# switch to evaluate mode
model.eval()
preds = []
gts = []
loss_list = []
with torch.no_grad():
for data in tqdm(test_loader):
img, msk = data
img, msk = img.cuda(non_blocking=True).float(), msk.cuda(non_blocking=True).float()
out,x0,x1,x2,x3,x4 = model(img)
loss = criterion(out, msk)
loss_list.append(loss.item())
gts.append(msk.squeeze(1).cpu().detach().numpy())
if type(out) is tuple:
out = out[0]
out = out.squeeze(1).cpu().detach().numpy()
preds.append(out)
if epoch % config.val_interval == 0:
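        # Flatten all predictions and ground truths, then derive pixel-wise metrics from the confusion matrix.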
preds = np.array(preds).reshape(-1)
gts = np.array(gts).reshape(-1)
y_pre = np.where(preds>=config.threshold, 1, 0)
y_true = np.where(gts>=0.5, 1, 0)
confusion = confusion_matrix(y_true, y_pre)
TN, FP, FN, TP = confusion[0,0], confusion[0,1], confusion[1,0], confusion[1,1]
accuracy = float(TN + TP) / float(np.sum(confusion)) if float(np.sum(confusion)) != 0 else 0
sensitivity = float(TP) / float(TP + FN) if float(TP + FN) != 0 else 0
specificity = float(TN) / float(TN + FP) if float(TN + FP) != 0 else 0
f1_or_dsc = float(2 * TP) / float(2 * TP + FP + FN) if float(2 * TP + FP + FN) != 0 else 0
miou = float(TP) / float(TP + FP + FN) if float(TP + FP + FN) != 0 else 0
log_info = f'val epoch: {epoch}, loss: {np.mean(loss_list):.4f}, miou: {miou}, f1_or_dsc: {f1_or_dsc}, accuracy: {accuracy}, \
specificity: {specificity}, sensitivity: {sensitivity}, confusion_matrix: {confusion}'
print(log_info)
logger.info(log_info)
else:
log_info = f'val epoch: {epoch}, loss: {np.mean(loss_list):.4f}'
print(log_info)
logger.info(log_info)
return np.mean(loss_list)
def test_one_epoch_explainable(test_loader,
model,
criterion,
logger,
config,
test_data_name=None):
# switch to evaluate mode
model.eval()
preds = []
gts = []
a=0
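    # 'a' accumulates the per-image outputs returned by save_imgs_explainable.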
loss_list = []
with torch.no_grad():
for i, data in enumerate(tqdm(test_loader)):
img, msk = data
img, msk = img.cuda(non_blocking=True).float(), msk.cuda(non_blocking=True).float()
out,x0,x1,x2,x3,x4 = model(img)
loss = criterion(out, msk)
loss_list.append(loss.item())
msk = msk.squeeze(1).cpu().detach().numpy()
gts.append(msk)
if type(out) is tuple:
out = out[0]
out = out.squeeze(1).cpu().detach().numpy()
preds.append(out)
|
def train_one_epoch(train_loader,
model,
criterion,
optimizer,
scheduler,
epoch,
logger,
config,
scaler=None):
'''
train model for one epoch
'''
# switch to train mode
model.train()
loss_list = []
for iter, data in enumerate(train_loader):
optimizer.zero_grad()
images, targets = data
images, targets = images.cuda(non_blocking=True).float(), targets.cuda(non_blocking=True).float()
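        # Use automatic mixed precision when enabled; otherwise run a standard full-precision step.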
if config.amp:
with autocast():
out, x0, x1, x2, x3, x4 = model(images)
loss = criterion(out, targets)
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
else:
out,x0,x1,x2,x3,x4 = model(images)
loss = criterion(out, targets)
loss.backward()
optimizer.step()
loss_list.append(loss.item())
now_lr = optimizer.state_dict()['param_groups'][0]['lr']
if iter % config.print_interval == 0:
log_info = f'train: epoch {epoch}, iter:{iter}, loss: {np.mean(loss_list):.4f}, lr: {now_lr}'
print(log_info)
logger.info(log_info)
scheduler.step()
def val_one_epoch(test_loader,
model,
criterion,
epoch,
logger,
config):
# switch to evaluate mode
model.eval()
preds = []
gts = []
loss_list = []
with torch.no_grad():
for data in tqdm(test_loader):
img, msk = data
img, msk = img.cuda(non_blocking=True).float(), msk.cuda(non_blocking=True).float()
out,x0,x1,x2,x3,x4 = model(img)
loss = criterion(out, msk)
loss_list.append(loss.item())
gts.append(msk.squeeze(1).cpu().detach().numpy())
if type(out) is tuple:
out = out[0]
out = out.squeeze(1).cpu().detach().numpy()
preds.append(out)
if epoch % config.val_interval == 0:
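        # Flatten all predictions and ground truths, then derive pixel-wise metrics from the confusion matrix.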
preds = np.array(preds).reshape(-1)
gts = np.array(gts).reshape(-1)
y_pre = np.where(preds>=config.threshold, 1, 0)
y_true = np.where(gts>=0.5, 1, 0)
confusion = confusion_matrix(y_true, y_pre)
TN, FP, FN, TP = confusion[0,0], confusion[0,1], confusion[1,0], confusion[1,1]
accuracy = float(TN + TP) / float(np.sum(confusion)) if float(np.sum(confusion)) != 0 else 0
sensitivity = float(TP) / float(TP + FN) if float(TP + FN) != 0 else 0
specificity = float(TN) / float(TN + FP) if float(TN + FP) != 0 else 0
f1_or_dsc = float(2 * TP) / float(2 * TP + FP + FN) if float(2 * TP + FP + FN) != 0 else 0
miou = float(TP) / float(TP + FP + FN) if float(TP + FP + FN) != 0 else 0
log_info = f'val epoch: {epoch}, loss: {np.mean(loss_list):.4f}, miou: {miou}, f1_or_dsc: {f1_or_dsc}, accuracy: {accuracy}, \
specificity: {specificity}, sensitivity: {sensitivity}, confusion_matrix: {confusion}'
print(log_info)
logger.info(log_info)
else:
log_info = f'val epoch: {epoch}, loss: {np.mean(loss_list):.4f}'
print(log_info)
logger.info(log_info)
return np.mean(loss_list)
def test_one_epoch_explainable(test_loader,
model,
criterion,
logger,
config,
test_data_name=None):
# switch to evaluate mode
model.eval()
preds = []
gts = []
a=0
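    # 'a' accumulates the per-image outputs returned by save_imgs_explainable.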
loss_list = []
with torch.no_grad():
for i, data in enumerate(tqdm(test_loader)):
img, msk = data
img, msk = img.cuda(non_blocking=True).float(), msk.cuda(non_blocking=True).float()
out,x0,x1,x2,x3,x4 = model(img)
loss = criterion(out, msk)
loss_list.append(loss.item())
msk = msk.squeeze(1).cpu().detach().numpy()
gts.append(msk)
if type(out) is tuple:
out = out[0]
out = out.squeeze(1).cpu().detach().numpy()
preds.append(out)
| a = save_imgs_explainable(img,x0,x1,x2,x3,x4,i,a, config.work_dir + 'outputs/', config.datasets, config.threshold,
| 1 | 2023-11-13 06:59:52+00:00 | 4k |
buptlihang/CVLM | evaluation/MME/evaluate.py | [
{
"identifier": "IMAGE_TOKEN_INDEX",
"path": "model/utils.py",
"snippet": "IMAGE_TOKEN_INDEX = -200"
},
{
"identifier": "DEFAULT_IMAGE_TOKEN",
"path": "model/utils.py",
"snippet": "DEFAULT_IMAGE_TOKEN = \"<image>\""
},
{
"identifier": "DEFAULT_IM_START_TOKEN",
"path": "model/utils.py",
"snippet": "DEFAULT_IM_START_TOKEN = \"<im_start>\""
},
{
"identifier": "DEFAULT_IM_END_TOKEN",
"path": "model/utils.py",
"snippet": "DEFAULT_IM_END_TOKEN = \"<im_end>\""
},
{
"identifier": "build_conversation",
"path": "model/utils.py",
"snippet": "def build_conversation():\n conversation = Conversation(\n system=\n \"A chat between a curious human and an artificial intelligence assistant. \"\n \"The assistant gives helpful, detailed, and polite answers to the human's questions.\",\n roles=[\"USER\", \"ASSISTANT\"],\n version=\"TWO\",\n messages=[],\n offset=0,\n sep_style=SeparatorStyle.TWO,\n sep=\" \",\n sep2=\"</s>\",\n )\n return conversation"
},
{
"identifier": "load_pretrained_model",
"path": "model/utils.py",
"snippet": "def load_pretrained_model(model_path,\n load_8bit=False,\n load_4bit=False,\n device_map=\"auto\",\n device=\"cuda\"):\n kwargs = {\"device_map\": device_map}\n\n if load_8bit:\n kwargs['load_in_8bit'] = True\n elif load_4bit:\n kwargs['load_in_4bit'] = True\n kwargs['quantization_config'] = BitsAndBytesConfig(\n load_in_4bit=True,\n bnb_4bit_compute_dtype=torch.float16,\n bnb_4bit_use_double_quant=True,\n bnb_4bit_quant_type='nf4')\n else:\n kwargs['torch_dtype'] = torch.float16\n\n tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)\n model, output_loading_info = AutoModelForCausalLM.from_pretrained(\n model_path, output_loading_info=True, **kwargs)\n model.resize_token_embeddings(len(tokenizer))\n image_processor = model.model.image_processor\n\n if hasattr(model.config, \"max_sequence_length\"):\n context_len = model.config.max_sequence_length\n else:\n context_len = 2048\n\n return tokenizer, model, image_processor, context_len"
},
{
"identifier": "disable_torch_init",
"path": "model/utils.py",
"snippet": "def disable_torch_init():\n \"\"\"\n Disable the redundant torch default initialization to accelerate model creation.\n \"\"\"\n import torch\n setattr(torch.nn.Linear, \"reset_parameters\", lambda self: None)\n setattr(torch.nn.LayerNorm, \"reset_parameters\", lambda self: None)"
},
{
"identifier": "get_model_name_from_path",
"path": "model/utils.py",
"snippet": "def get_model_name_from_path(model_path):\n model_path = model_path.strip(\"/\")\n model_paths = model_path.split(\"/\")\n if model_paths[-1].startswith('checkpoint-'):\n return model_paths[-2] + \"_\" + model_paths[-1]\n else:\n return model_paths[-1]"
},
{
"identifier": "tokenizer_image_token",
"path": "model/utils.py",
"snippet": "def tokenizer_image_token(prompt,\n tokenizer,\n image_token_index=IMAGE_TOKEN_INDEX,\n return_tensors=None):\n prompt_chunks = [\n tokenizer(chunk).input_ids for chunk in prompt.split('<image>')\n ]\n\n def insert_separator(X, sep):\n return [ele for sublist in zip(X, [sep] * len(X))\n for ele in sublist][:-1]\n\n input_ids = []\n offset = 0\n if len(prompt_chunks) > 0 and len(\n prompt_chunks[0]\n ) > 0 and prompt_chunks[0][0] == tokenizer.bos_token_id:\n offset = 1\n input_ids.append(prompt_chunks[0][0])\n\n for x in insert_separator(prompt_chunks,\n [image_token_index] * (offset + 1)):\n input_ids.extend(x[offset:])\n\n if return_tensors is not None:\n if return_tensors == 'pt':\n return torch.tensor(input_ids, dtype=torch.long)\n raise ValueError(f'Unsupported tensor type: {return_tensors}')\n return input_ids"
},
{
"identifier": "process_images",
"path": "model/utils.py",
"snippet": "def process_images(images, image_processor, model_cfg):\n new_images = []\n for image in images:\n image = expand2square(\n image, tuple(int(x * 255) for x in image_processor.image_mean))\n image = image_processor.preprocess(\n image, return_tensors='pt')['pixel_values'][0]\n new_images.append(image)\n if all(x.shape == new_images[0].shape for x in new_images):\n new_images = torch.stack(new_images, dim=0)\n return new_images"
}
] | import argparse
import torch
import os
import json
import math
from tqdm import tqdm
from model.utils import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
from model.utils import build_conversation, load_pretrained_model, disable_torch_init, get_model_name_from_path
from model.utils import tokenizer_image_token, process_images
from torch.utils.data import Dataset, DataLoader
from PIL import Image
from collections import defaultdict | 1,765 |
def split_list(lst, n):
"""Split a list into n (roughly) equal-sized chunks"""
    chunk_size = math.ceil(len(lst) / n)  # ceiling division
return [lst[i:i + chunk_size] for i in range(0, len(lst), chunk_size)]
def get_chunk(lst, n, k):
chunks = split_list(lst, n)
return chunks[k]
def get_gt(data_path):
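    # Walk each category folder and collect ground-truth answers keyed by (category, file, question).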
GT = {}
for category in os.listdir(data_path):
category_dir = os.path.join(data_path, category)
if not os.path.isdir(category_dir):
continue
if os.path.exists(os.path.join(category_dir, 'images')):
image_path = os.path.join(category_dir, 'images')
qa_path = os.path.join(category_dir, 'questions_answers_YN')
else:
image_path = qa_path = category_dir
assert os.path.isdir(image_path), image_path
assert os.path.isdir(qa_path), qa_path
for file in os.listdir(qa_path):
if not file.endswith('.txt'):
continue
for line in open(os.path.join(qa_path, file)):
question, answer = line.strip().split('\t')
GT[(category, file, question)] = answer
return GT
# Custom dataset class
class CustomDataset(Dataset):
def __init__(self, questions, image_folder, tokenizer, image_processor,
model_config):
self.questions = questions
self.image_folder = image_folder
self.tokenizer = tokenizer
self.image_processor = image_processor
self.model_config = model_config
def __getitem__(self, index):
line = self.questions[index]
image_file = line["image"]
qs = line["text"]
qs = DEFAULT_IMAGE_TOKEN + '\n' + qs
conv = build_conversation()
conv.append_message(conv.roles[0], qs)
conv.append_message(conv.roles[1], None)
prompt = conv.get_prompt()
image = Image.open(os.path.join(self.image_folder,
image_file)).convert('RGB')
image_tensor = process_images([image], self.image_processor,
self.model_config)[0]
input_ids = tokenizer_image_token(prompt,
self.tokenizer,
IMAGE_TOKEN_INDEX,
return_tensors='pt')
return input_ids, image_tensor
def __len__(self):
return len(self.questions)
# DataLoader
def create_data_loader(questions,
image_folder,
tokenizer,
image_processor,
model_config,
batch_size=1,
num_workers=4):
assert batch_size == 1, "batch_size must be 1"
dataset = CustomDataset(questions, image_folder, tokenizer,
image_processor, model_config)
data_loader = DataLoader(dataset,
batch_size=batch_size,
num_workers=num_workers,
shuffle=False)
return data_loader
def eval_model(args):
# Model
disable_torch_init()
model_path = os.path.expanduser(args.model_path)
model_name = get_model_name_from_path(model_path)
|
def split_list(lst, n):
"""Split a list into n (roughly) equal-sized chunks"""
    chunk_size = math.ceil(len(lst) / n)  # ceiling division
return [lst[i:i + chunk_size] for i in range(0, len(lst), chunk_size)]
def get_chunk(lst, n, k):
chunks = split_list(lst, n)
return chunks[k]
def get_gt(data_path):
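    # Walk each category folder and collect ground-truth answers keyed by (category, file, question).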
GT = {}
for category in os.listdir(data_path):
category_dir = os.path.join(data_path, category)
if not os.path.isdir(category_dir):
continue
if os.path.exists(os.path.join(category_dir, 'images')):
image_path = os.path.join(category_dir, 'images')
qa_path = os.path.join(category_dir, 'questions_answers_YN')
else:
image_path = qa_path = category_dir
assert os.path.isdir(image_path), image_path
assert os.path.isdir(qa_path), qa_path
for file in os.listdir(qa_path):
if not file.endswith('.txt'):
continue
for line in open(os.path.join(qa_path, file)):
question, answer = line.strip().split('\t')
GT[(category, file, question)] = answer
return GT
# Custom dataset class
class CustomDataset(Dataset):
def __init__(self, questions, image_folder, tokenizer, image_processor,
model_config):
self.questions = questions
self.image_folder = image_folder
self.tokenizer = tokenizer
self.image_processor = image_processor
self.model_config = model_config
def __getitem__(self, index):
line = self.questions[index]
image_file = line["image"]
qs = line["text"]
qs = DEFAULT_IMAGE_TOKEN + '\n' + qs
conv = build_conversation()
conv.append_message(conv.roles[0], qs)
conv.append_message(conv.roles[1], None)
prompt = conv.get_prompt()
image = Image.open(os.path.join(self.image_folder,
image_file)).convert('RGB')
image_tensor = process_images([image], self.image_processor,
self.model_config)[0]
input_ids = tokenizer_image_token(prompt,
self.tokenizer,
IMAGE_TOKEN_INDEX,
return_tensors='pt')
return input_ids, image_tensor
def __len__(self):
return len(self.questions)
# DataLoader
def create_data_loader(questions,
image_folder,
tokenizer,
image_processor,
model_config,
batch_size=1,
num_workers=4):
assert batch_size == 1, "batch_size must be 1"
dataset = CustomDataset(questions, image_folder, tokenizer,
image_processor, model_config)
data_loader = DataLoader(dataset,
batch_size=batch_size,
num_workers=num_workers,
shuffle=False)
return data_loader
def eval_model(args):
# Model
disable_torch_init()
model_path = os.path.expanduser(args.model_path)
model_name = get_model_name_from_path(model_path) | tokenizer, model, image_processor, context_len = load_pretrained_model( | 5 | 2023-11-10 03:52:46+00:00 | 4k |
vvvm23/TchAIkovsky | train.py | [
{
"identifier": "generate_splits",
"path": "data/dataset.py",
"snippet": "def generate_splits(dataset, splits: Tuple[float, float]):\n length = len(dataset)\n split_size = int(splits[0] * length)\n\n return torch.utils.data.Subset(dataset, range(split_size)), torch.utils.data.Subset(\n dataset, range(split_size, length)\n )"
},
{
"identifier": "get_dataloader",
"path": "data/dataset.py",
"snippet": "def get_dataloader(dataset, **dataloader_kwargs):\n collator = DataCollator(pad_token_id=0, shift_labels=False)\n return torch.utils.data.DataLoader(dataset, collate_fn=collator, **dataloader_kwargs)"
},
{
"identifier": "get_dataset",
"path": "data/dataset.py",
"snippet": "def get_dataset(\n dataset_root: str = \"tokenized_dataset\",\n min_sequence_length=128,\n max_sequence_length=1024,\n subset: float = 1.0,\n):\n files = list(Path(dataset_root).glob(\"**/*.json\"))\n files = files[: int(len(files) * subset)]\n ds = DatasetTok(\n files,\n min_seq_len=min_sequence_length,\n max_seq_len=max_sequence_length,\n one_token_stream=False,\n )\n return ds"
},
{
"identifier": "TchAIkovskyModel",
"path": "model/model.py",
"snippet": "class TchAIkovskyModel(eqx.Module):\n id_embeddings: eqx.Module\n pos_embeddings: eqx.Module\n decoder: eqx.Module\n norm_out: eqx.Module\n out_head: eqx.Module\n\n dtype: jnp.dtype = eqx.field(static=True)\n output_dtype: jnp.dtype = eqx.field(static=True)\n\n def __init__(\n self,\n dim: int,\n num_heads: int,\n num_layers: int,\n vocab_size: int,\n max_positions: int,\n head_dim: Optional[int] = None,\n dropout: float = 0.0,\n key: PRNGKey = None,\n dtype: jnp.dtype = jnp.float32,\n output_dtype: jnp.dtype = jnp.float32,\n ):\n self.dtype = dtype\n self.output_dtype = output_dtype\n id_embeddings_key, pos_embeddings_key, decoder_key, out_key = jax.random.split(key, 4)\n\n self.id_embeddings = eqx.nn.Embedding(vocab_size, dim, key=id_embeddings_key)\n self.pos_embeddings = eqx.nn.Embedding(max_positions, dim, key=pos_embeddings_key)\n\n self.decoder = Decoder(\n decoder_key,\n dim,\n num_heads,\n num_layers,\n head_dim=head_dim,\n dropout=dropout,\n dtype=dtype,\n )\n\n self.norm_out = eqx.nn.LayerNorm(dim)\n self.out_head = eqx.nn.Linear(dim, vocab_size, use_bias=True, key=out_key)\n\n def __call__(self, input_ids, position_ids, mask, key=None):\n causal_mask = make_causal_mask(input_ids)[0]\n mask = jnp.where(mask, causal_mask, 0)\n\n x = jax.vmap(self.id_embeddings)(input_ids) + jax.vmap(self.pos_embeddings)(position_ids)\n x = self.decoder(x, mask, key)\n\n x = jax.vmap(self.norm_out)(x)\n logits = jax.vmap(self.out_head)(x)\n logits = logits.astype(self.output_dtype)\n return logits"
},
{
"identifier": "seed_others",
"path": "utils.py",
"snippet": "def seed_others(seed):\n torch.manual_seed(seed)\n np.random.seed(seed)\n random.seed(seed)"
}
] | import json
import equinox as eqx
import jax
import jax.numpy as jnp
import optax
import orbax.checkpoint as ocp
import tqdm
import wandb
from argparse import ArgumentParser
from datetime import datetime
from pathlib import Path
from jax.experimental import mesh_utils
from jax.sharding import PositionalSharding
from loguru import logger
from data import generate_splits, get_dataloader, get_dataset
from model import TchAIkovskyModel
from utils import seed_others | 1,972 |
def prepare_batch(batch, key=None):
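    # Shift the token ids by one position for next-token prediction and mark padding (id 0)
    # in the labels as -100 so it is ignored by the loss.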
input_ids = jnp.copy(batch["input_ids"][:, :-1])
labels = jnp.copy(batch["input_ids"][:, 1:])
labels = jnp.where(labels == 0, -100, labels)
position_ids = jnp.expand_dims(jnp.arange(labels.shape[-1]), 0).repeat(labels.shape[0], 0)
mask = jnp.asarray(batch["attention_mask"][:, :-1], dtype=bool)
keys = jax.random.split(key, input_ids.shape[0]) if key is not None else None
return dict(input_ids=input_ids, position_ids=position_ids, mask=mask), labels, keys
def loss_fn(model, batch, labels, keys=None):
if keys is None:
logits = jax.vmap(model[0])(**batch)
else:
logits = jax.vmap(model[0])(**batch, key=keys)
num_tokens = (labels != -100).sum()
accuracy = jnp.argmax(logits, axis=-1) == labels
loss = optax.softmax_cross_entropy_with_integer_labels(logits, labels)
accuracy = jnp.where(labels == -100, 0, accuracy).sum() / num_tokens
loss = jnp.where(labels == -100, 0, loss).sum() / num_tokens
return loss, accuracy
def create_train_step(model, optimiser):
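    # Initialise optimiser state over the floating-point array leaves only; static fields are filtered out.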
opt_state = optimiser.init(eqx.filter(model, eqx.is_inexact_array))
# @eqx.debug.assert_max_traces(max_traces=1)
@eqx.filter_jit
def train_step(model, opt_state, batch, key):
# TODO: some of these arguments are different between first and second step
# need to investigate to avoid a double compile.
batch, labels, keys = prepare_batch(batch, key)
(loss, _), grads = eqx.filter_value_and_grad(loss_fn, has_aux=True)(model, batch, labels, keys)
updates, opt_state = optimiser.update(grads, opt_state, eqx.filter(model, eqx.is_inexact_array))
model = eqx.apply_updates(model, updates)
return model, opt_state, loss
# @eqx.debug.assert_max_traces(max_traces=1)
@eqx.filter_jit
def eval_step(model, batch):
batch, labels, _ = prepare_batch(batch)
loss, accuracy = loss_fn(model, batch, labels)
return loss, accuracy
return train_step, eval_step, opt_state
def wandb_init(args):
return wandb.init(
project="tchaikovsky",
config=vars(args),
mode=None if args.wandb else "disabled",
)
def setup_sharding(args):
devices = mesh_utils.create_device_mesh((len(jax.devices()),))
logger.info(devices)
sharding = PositionalSharding(devices)
return sharding, len(devices)
PRINT_INTERVAL = 4
def main(args):
logger.info("Beginning training script.")
key = jax.random.PRNGKey(args.seed)
seed_others(args.seed)
logger.info(f"Using PRNG key {args.seed}")
sharding, num_devices = setup_sharding(args)
if args.micro_batch_size is None:
args.micro_batch_size = args.batch_size
assert args.batch_size % args.micro_batch_size == 0
model_key, key = jax.random.split(key)
logger.info("Initialising model.")
model = TchAIkovskyModel(
dim=args.dim,
num_heads=args.heads,
num_layers=args.num_layers,
vocab_size=args.vocab_size,
max_positions=args.max_sequence_length,
head_dim=args.head_dim,
dropout=args.dropout,
key=model_key,
dtype=jnp.bfloat16 if args.use_bf16 else jnp.float32,
)
num_parameters = jax.tree_util.tree_reduce(lambda s, p: s + (p.size if eqx.is_inexact_array(p) else 0), model, 0)
logger.info(f"Model has {num_parameters:,} parameters.")
if args.use_bf16:
# map all params to bf16
logger.info("Training with bfloat16.")
model = jax.tree_util.tree_map(lambda p: p.astype(jnp.bfloat16) if eqx.is_inexact_array(p) else p, model)
logger.info("Initialising dataset.")
dataset = get_dataset(
dataset_root=args.dataset,
min_sequence_length=args.min_sequence_length,
max_sequence_length=args.max_sequence_length,
subset=args.subset_proportion,
)
val_dataset, train_dataset = generate_splits(dataset, (args.val_proportion, 1.0 - args.val_proportion))
logger.info(f"Training set size: {len(train_dataset):,} Validation set size: {len(val_dataset):,}")
|
def prepare_batch(batch, key=None):
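    # Shift the token ids by one position for next-token prediction and mark padding (id 0)
    # in the labels as -100 so it is ignored by the loss.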
input_ids = jnp.copy(batch["input_ids"][:, :-1])
labels = jnp.copy(batch["input_ids"][:, 1:])
labels = jnp.where(labels == 0, -100, labels)
position_ids = jnp.expand_dims(jnp.arange(labels.shape[-1]), 0).repeat(labels.shape[0], 0)
mask = jnp.asarray(batch["attention_mask"][:, :-1], dtype=bool)
keys = jax.random.split(key, input_ids.shape[0]) if key is not None else None
return dict(input_ids=input_ids, position_ids=position_ids, mask=mask), labels, keys
def loss_fn(model, batch, labels, keys=None):
if keys is None:
logits = jax.vmap(model[0])(**batch)
else:
logits = jax.vmap(model[0])(**batch, key=keys)
num_tokens = (labels != -100).sum()
accuracy = jnp.argmax(logits, axis=-1) == labels
loss = optax.softmax_cross_entropy_with_integer_labels(logits, labels)
accuracy = jnp.where(labels == -100, 0, accuracy).sum() / num_tokens
loss = jnp.where(labels == -100, 0, loss).sum() / num_tokens
return loss, accuracy
def create_train_step(model, optimiser):
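    # Initialise optimiser state over the floating-point array leaves only; static fields are filtered out.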
opt_state = optimiser.init(eqx.filter(model, eqx.is_inexact_array))
# @eqx.debug.assert_max_traces(max_traces=1)
@eqx.filter_jit
def train_step(model, opt_state, batch, key):
# TODO: some of these arguments are different between first and second step
# need to investigate to avoid a double compile.
batch, labels, keys = prepare_batch(batch, key)
(loss, _), grads = eqx.filter_value_and_grad(loss_fn, has_aux=True)(model, batch, labels, keys)
updates, opt_state = optimiser.update(grads, opt_state, eqx.filter(model, eqx.is_inexact_array))
model = eqx.apply_updates(model, updates)
return model, opt_state, loss
# @eqx.debug.assert_max_traces(max_traces=1)
@eqx.filter_jit
def eval_step(model, batch):
batch, labels, _ = prepare_batch(batch)
loss, accuracy = loss_fn(model, batch, labels)
return loss, accuracy
return train_step, eval_step, opt_state
def wandb_init(args):
return wandb.init(
project="tchaikovsky",
config=vars(args),
mode=None if args.wandb else "disabled",
)
def setup_sharding(args):
devices = mesh_utils.create_device_mesh((len(jax.devices()),))
logger.info(devices)
sharding = PositionalSharding(devices)
return sharding, len(devices)
PRINT_INTERVAL = 4
def main(args):
logger.info("Beginning training script.")
key = jax.random.PRNGKey(args.seed)
seed_others(args.seed)
logger.info(f"Using PRNG key {args.seed}")
sharding, num_devices = setup_sharding(args)
if args.micro_batch_size is None:
args.micro_batch_size = args.batch_size
assert args.batch_size % args.micro_batch_size == 0
model_key, key = jax.random.split(key)
logger.info("Initialising model.")
model = TchAIkovskyModel(
dim=args.dim,
num_heads=args.heads,
num_layers=args.num_layers,
vocab_size=args.vocab_size,
max_positions=args.max_sequence_length,
head_dim=args.head_dim,
dropout=args.dropout,
key=model_key,
dtype=jnp.bfloat16 if args.use_bf16 else jnp.float32,
)
num_parameters = jax.tree_util.tree_reduce(lambda s, p: s + (p.size if eqx.is_inexact_array(p) else 0), model, 0)
logger.info(f"Model has {num_parameters:,} parameters.")
if args.use_bf16:
# map all params to bf16
logger.info("Training with bfloat16.")
model = jax.tree_util.tree_map(lambda p: p.astype(jnp.bfloat16) if eqx.is_inexact_array(p) else p, model)
logger.info("Initialising dataset.")
dataset = get_dataset(
dataset_root=args.dataset,
min_sequence_length=args.min_sequence_length,
max_sequence_length=args.max_sequence_length,
subset=args.subset_proportion,
)
val_dataset, train_dataset = generate_splits(dataset, (args.val_proportion, 1.0 - args.val_proportion))
logger.info(f"Training set size: {len(train_dataset):,} Validation set size: {len(val_dataset):,}")
| train_loader = get_dataloader( | 1 | 2023-11-13 07:31:30+00:00 | 4k |
LiquidFun/aoc_tiles | aoc_tiles/drawer.py | [
{
"identifier": "color_similarity",
"path": "aoc_tiles/colors.py",
"snippet": "def color_similarity(color_a, color_b, threshold):\n return abs(luminance(color_a) - luminance(color_b)) < threshold"
},
{
"identifier": "darker_color",
"path": "aoc_tiles/colors.py",
"snippet": "def darker_color(c: Tuple[int, int, int, int]) -> Tuple[int, int, int, int]:\n return c[0] - 10, c[1] - 10, c[2] - 10, 255"
},
{
"identifier": "extension_to_colors",
"path": "aoc_tiles/colors.py",
"snippet": "@lru_cache\ndef extension_to_colors() -> Dict[str, str]:\n extension_to_color = {}\n with open(GITHUB_LANGUAGES_PATH) as file:\n logger.debug(\"Loading github_languages.yaml from {}\", GITHUB_LANGUAGES_PATH)\n yaml_loader = yaml.CLoader if yaml.__with_libyaml__ else yaml.Loader\n if not yaml.__with_libyaml__:\n logger.warning(\"Using slow yaml parser (0.5s vs 0.1s)!\")\n github_languages = yaml.load(file, Loader=yaml_loader)\n logger.debug(\"Loaded github_languages.yaml from {}\", GITHUB_LANGUAGES_PATH)\n for language, data in github_languages.items():\n if \"color\" in data and \"extensions\" in data and data[\"type\"] == \"programming\" and language not in excludes:\n for extension in data[\"extensions\"]:\n extension_to_color[extension.lower()] = data[\"color\"]\n\n extension_to_color.update(includes)\n\n return extension_to_color"
},
{
"identifier": "Config",
"path": "aoc_tiles/config.py",
"snippet": "class Config:\n aoc_dir: Union[str, Path] = field(default=\"./\", metadata={\"help\": \"Path to the AoC directory.\", \"type\": str})\n readme_path: Union[str, Path] = field(init=False)\n session_cookie_path: Union[str, Path] = field(init=False)\n aoc_tiles_dir: Union[str, Path] = field(init=False)\n image_dir: Union[str, Path] = field(init=False)\n cache_dir: Union[str, Path] = field(init=False)\n\n verbose: bool = field(default=False, metadata={\"help\": \"Whether to print debug information.\"})\n\n what_to_show_on_right_side: Literal[\"auto\", \"checkmark\", \"time_and_rank\", \"loc\"] = field(\n default=\"auto\", metadata={\n \"help\": \"What information to display on the right side of each tile. \"\n \"'checkmark' only displays a checkmark for each part if the day is solved. \"\n \"'time_and_rank' displays the time and rank on the global leaderboard (requires session.cookie). \"\n \"'loc' displays the number of lines of code of the solution (not implemented). \"\n \"'auto' will use 'time_and_rank' if session.cookie exists, otherwise 'checkmark'.\"}\n )\n count_as_solved_when: Literal[\"auto\", \"on_leaderboard\", \"file_exists\", \"either\", \"both\"] = field(\n default=\"auto\",\n metadata={\n \"help\": \"Condition to count a task as solved. Note that 'on_leaderboard', 'either' and 'both' require a \"\n \"session cookie. 'auto' will use 'both' if session.cookie exists, otherwise 'file_exists'.\"\n },\n )\n language_sorting: List[str] = field(\n default_factory=list,\n metadata={\n \"help\": \"Preferred language extensions order for sorting. For example 'py,rs,js' will make Python \"\n \"solutions appear first, then Rust, then JavaScript, then everything else (alphabetically).\"\n },\n )\n create_all_days: bool = field(default=False, metadata={\"help\": \"Whether to create entries for all days upfront.\"})\n\n auto_add_tiles_to_git: Literal[\"no\", \"add\", \"amend\"] = field(default=\"no\", metadata={\n \"help\": \"Whether to automatically add the tile images to git. 'add' will add new files, 'amend' will add \"\n \"and amend the commit with the new files. 'no' will not add the files to git.\"})\n only_use_solutions_in_git: bool = field(default=True, metadata={\n \"help\": \"If true, only solutions will be considered which are tracked by git (git added), \"\n \"otherwise all solutions will be used. This is useful for example to ignore auto-generated\"\n \"files, like '.d' in Rust or '.o' files in C++.\"})\n\n show_total_stars_for_all_years: Literal[\"no\", \"auto\", \"yes\"] = field(default=\"auto\", metadata={\n \"help\": \"Whether to add an additional header in front which shows the total collected stars for all years.\"\n \"'auto' will only show the header if you have stars in at least 3 separate years. \"\n \"'yes' will always show the header. 'no' will never show the header.\"})\n\n year_pattern: str = field(\n default=r\"(?<!\\d)(20[123]\\d)(?!\\d)\",\n metadata={\n \"help\": \"Regex pattern for matching years. This extracts the first group as the year and parses it as an \"\n \"integer. Make sure that other numbers are not matched by this pattern! For example, \"\n \"using negative lookbehind and lookaheads is encouraged to avoid matching longer numbers!\"\n },\n )\n day_pattern: str = field(\n default=r\"(?<!\\d)([012]?\\d)(?!\\d)\", metadata={\"help\": \"Regex pattern for matching days. 
Same as year_pattern.\"}\n )\n exclude_patterns: List[str] = field(\n default_factory=list, metadata={\n \"help\": \"A list of comma separated glob patterns to ignore when looking for solutions. \"\n \"Listing the paths works too. \"\n \"For example: '*.py,*.js', '2023/05/05.c' or '2021/**.py'.\"\n \"Make sure to escape the patterns with single quotes when running from the shell! \"\n \"Do NOT escape them when using the flag in the yaml! \"\n \"Otherwise the qoute will be part of the pattern.\"\n }\n )\n\n overwrite_year: int = field(\n default=None,\n metadata={\n \"help\": \"If your repository only contains a single year and it cannot be parsed from the path, then you \"\n \"should use this to overwrite the year. Every solution is presumed to be for this year.\"\n },\n )\n\n contrast_improvement_type: Literal[\"none\", \"outline\", \"dark\"] = field(\n default=\"outline\",\n metadata={\n \"help\": \"Some languages have very light colors and are hard to see with a white font. Here you can choose \"\n \"how the text color changes when the background is too light. 'dark' makes the font dark, \"\n \"'outline' adds a black outline.\"\n },\n )\n contrast_improvement_threshold: int = field(\n default=30, metadata={\"help\": \"Threshold for contrast improvement feature (between 0 and 255).\"}\n )\n outline_color: Union[str, Tuple] = field(\n default=\"#6C6A6A\", metadata={\"help\": \"Color used for outlining elements.\", \"type\": str}\n )\n not_completed_color: Union[str, Tuple] = field(\n default=\"#333333\", metadata={\"help\": \"Color to signify incomplete tasks.\", \"type\": str}\n )\n top100_color: Union[str, Tuple] = field(\n default=\"#ffdd00\", metadata={\"help\": \"Color to highlight top 100 ranking. Only used if session\"\n \"cookie is provided.\", \"type\": str}\n )\n text_color: Union[str, Tuple] = field(default=\"#FFFFFF\", metadata={\"help\": \"Text color.\", \"type\": str})\n\n tile_width_px: str = field(default=\"161px\", metadata={\"help\": \"Width of tiles in pixels. 
You likely don't need\"\n \"to change this.\"})\n\n def __post_init__(self):\n self.aoc_dir = Path(self.aoc_dir)\n\n if not hasattr(self, \"readme_path\"):\n readmes = [path for path in self.aoc_dir.iterdir() if path.name.lower() == \"readme.md\"]\n if len(readmes) == 0:\n exit(f\"[ERROR] No README.md found in the root directory of the repository '{self.aoc_dir}'.\")\n elif len(readmes) > 1:\n exit(f\"[ERROR] Multiple README.md files found in the root directory of the repository {readmes}.\")\n self.readme_path = readmes[0]\n\n if not hasattr(self, \"aoc_tiles_dir\"):\n self.aoc_tiles_dir = self.aoc_dir / \".aoc_tiles\"\n self.aoc_tiles_dir.mkdir(exist_ok=True)\n\n self.running_lock_path = self.aoc_tiles_dir / \"running.lock\"\n\n if not hasattr(self, \"session_cookie_path\"):\n self.session_cookie_path = self.aoc_tiles_dir / \"session.cookie\"\n if not self.session_cookie_path.exists():\n self.session_cookie_path = self.aoc_dir / \"session.cookie\"\n\n if not hasattr(self, \"image_dir\"):\n self.image_dir = self.aoc_tiles_dir / \"tiles\"\n\n if not hasattr(self, \"cache_dir\"):\n self.cache_dir = self.aoc_tiles_dir / \"cache\"\n\n if self.count_as_solved_when == \"auto\":\n self.count_as_solved_when = \"both\" if self.session_cookie_path.exists() else \"file_exists\"\n\n if self.what_to_show_on_right_side == \"auto\":\n self.what_to_show_on_right_side = \"time_and_rank\" if self.session_cookie_path.exists() else \"checkmark\"\n\n self.outline_color = ImageColor.getrgb(self.outline_color)\n self.not_completed_color = ImageColor.getrgb(self.not_completed_color)\n self.text_color = ImageColor.getrgb(self.text_color)\n self.top100_color = ImageColor.getrgb(self.top100_color)\n\n for i, suffix in enumerate(self.language_sorting):\n if not suffix.startswith(\".\"):\n self.language_sorting[i] = \".\" + suffix\n\n logger.remove()\n if self.verbose:\n logger.add(sys.stderr, level=\"DEBUG\")\n\n logger.debug(self)"
},
{
"identifier": "main_font",
"path": "aoc_tiles/fonts.py",
"snippet": "def main_font(size: int) -> ImageFont:\n return get_font(size, FONTS_PATH / \"PaytoneOne.ttf\")"
},
{
"identifier": "secondary_font",
"path": "aoc_tiles/fonts.py",
"snippet": "def secondary_font(size: int) -> ImageFont:\n return get_font(size, FONTS_PATH / \"SourceCodePro-Regular.otf\")"
},
{
"identifier": "DayScores",
"path": "aoc_tiles/leaderboard.py",
"snippet": "class DayScores:\n time1: Union[str, None] = None\n rank1: Union[str, None] = None\n score1: Union[str, None] = None\n time2: Union[str, None] = None\n rank2: Union[str, None] = None\n score2: Union[str, None] = None"
}
] | import math
from functools import partial
from pathlib import Path
from typing import List, Tuple, Union, Dict
from PIL import ImageColor, Image
from PIL.ImageDraw import ImageDraw
from aoc_tiles.colors import color_similarity, darker_color, extension_to_colors
from aoc_tiles.config import Config
from aoc_tiles.fonts import main_font, secondary_font
from aoc_tiles.leaderboard import DayScores | 2,929 |
def format_time(time: str) -> str:
"""Formats time as mm:ss if the time is below 1 hour, otherwise it returns >1h to a max of >24h
>>> format_time("00:58:32")
'58:32'
>>> format_time(">1h")
' >1h'
"""
    time = time.replace("&gt;", ">")
if ">" in time:
formatted = time
else:
h, m, s = time.split(":")
formatted = f">{h}h" if int(h) >= 1 else f"{m:02}:{s:02}"
return f"{formatted:>5}"
class TileDrawer:
def __init__(self, config: Config):
self.config = config
def draw_tile(
self, day: str, languages: List[str], day_scores: Union[DayScores, None], path: Path, stars: int
):
"""Saves a graphic for a given day and year. Returns the path to it."""
image = self.get_alternating_background(languages, stars == 2)
drawer = ImageDraw(image)
text_kwargs = {"fill": self.config.text_color}
# Get all colors of the day, check if any one is similar to TEXT_COLOR
# If yes, add outline
for language in languages:
color = ImageColor.getrgb(extension_to_colors()[language])
|
def format_time(time: str) -> str:
"""Formats time as mm:ss if the time is below 1 hour, otherwise it returns >1h to a max of >24h
>>> format_time("00:58:32")
'58:32'
>>> format_time(">1h")
' >1h'
"""
    time = time.replace("&gt;", ">")
if ">" in time:
formatted = time
else:
h, m, s = time.split(":")
formatted = f">{h}h" if int(h) >= 1 else f"{m:02}:{s:02}"
return f"{formatted:>5}"
class TileDrawer:
def __init__(self, config: Config):
self.config = config
def draw_tile(
self, day: str, languages: List[str], day_scores: Union[DayScores, None], path: Path, stars: int
):
"""Saves a graphic for a given day and year. Returns the path to it."""
image = self.get_alternating_background(languages, stars == 2)
drawer = ImageDraw(image)
text_kwargs = {"fill": self.config.text_color}
# Get all colors of the day, check if any one is similar to TEXT_COLOR
# If yes, add outline
for language in languages:
color = ImageColor.getrgb(extension_to_colors()[language]) | if color_similarity(color, self.config.text_color, self.config.contrast_improvement_threshold): | 0 | 2023-11-14 21:41:12+00:00 | 4k |
etri-crossmodal/gbswt5 | gbswt5/modeling_gbst5.py | [
{
"identifier": "GBSWT5Config",
"path": "gbswt5/configuration_gbst5.py",
"snippet": "class GBSWT5Config(PretrainedConfig):\n \"\"\" Based on models.t5. configuration_t5. T5Config in hf Transformers. \"\"\"\n model_type = \"gbswt5\"\n keys_to_ignore_at_inference = [\"past_key_values\"]\n attribute_map = {\"hidden_size\": \"d_model\",\n \"num_attention_heads\": \"num_heads\",\n \"num_hidden_layers\": \"num_layers\"}\n\n def __init__(\n self,\n vocab_size=384,\n d_model=512,\n d_kv=64,\n d_ff=2048,\n num_layers=6,\n num_decoder_layers=None,\n num_heads=8,\n relative_attention_num_buckets=32,\n relative_attention_max_distance=128,\n dropout_rate=0.1,\n layer_norm_epsilon=1e-6,\n initializer_factor=1.0,\n feed_forward_proj=\"relu\",\n is_encoder_decoder=True,\n use_cache=True,\n pad_token_id=0,\n eos_token_id=1,\n max_subword_block_size=None, # GBSWT-related options here from\n subword_blocks=_BLOCKS,\n downsample_factor=1,\n score_consensus_attn=True,\n z_loss=1e-4,\n gbst_batchnorm=False,\n **kwargs,\n ):\n self.vocab_size = vocab_size\n self.d_model = d_model\n self.d_kv = d_kv\n self.d_ff = d_ff\n self.num_layers = num_layers\n self.num_decoder_layers = (\n num_decoder_layers if num_decoder_layers is not None else self.num_layers\n ) # default = symmetry\n self.num_heads = num_heads\n self.relative_attention_num_buckets = relative_attention_num_buckets\n self.relative_attention_max_distance = relative_attention_max_distance\n self.dropout_rate = dropout_rate\n self.layer_norm_epsilon = layer_norm_epsilon\n self.initializer_factor = initializer_factor\n self.feed_forward_proj = feed_forward_proj\n self.use_cache = use_cache\n\n act_info = self.feed_forward_proj.split(\"-\")\n self.dense_act_fn = act_info[-1]\n self.is_gated_act = act_info[0] == \"gated\"\n\n # GBSWT-related configurations\n self.max_subword_block_size = max_subword_block_size\n self.subword_blocks = subword_blocks\n self.downsample_factor = downsample_factor\n self.score_consensus_attn = score_consensus_attn\n self.gbst_batchnorm = gbst_batchnorm\n\n # z_loss for computational stability.\n # see https://github.com/tensorflow/mesh/blob \\\n # /fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L666\n # (1) logits이 0으로 부터 너무 멀어지게 드리프팅 되지 않도록 하여, bf16에서 발생하는\n # round-off error를 방지하기 위함. (2) 로짓이 normalized log-probabilities가 되도록 제고한다.\n self.z_loss = z_loss\n\n if self.subword_blocks is not None and isinstance(self.subword_blocks, list):\n for idx, elem in enumerate(self.subword_blocks):\n self.subword_blocks[idx] = tuple(elem)\n self.subword_blocks = tuple(self.subword_blocks)\n\n if len(act_info) > 1 and act_info[0] != \"gated\" or len(act_info) > 2:\n raise ValueError(\n f\"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.\"\n \"Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. \"\n \"'gated-gelu' or 'relu'\"\n )\n\n # for backwards compatibility\n if feed_forward_proj == \"gated-gelu\":\n self.dense_act_fn = \"gelu_new\"\n\n super().__init__(\n pad_token_id=pad_token_id,\n eos_token_id=eos_token_id,\n is_encoder_decoder=is_encoder_decoder,\n **kwargs,\n )"
},
{
"identifier": "GBSWT",
"path": "gbswt5/gbst.py",
"snippet": "class GBSWT(nn.Module):\n \"\"\" Gradient-based Sub-Word Tokenizer implementation. \"\"\"\n def __init__(self, embed_tokens,\n max_block_size=None,\n blocks=_BLOCKS,\n downsample_factor=1,\n score_consensus_attn=True,\n use_bn=False,):\n super().__init__()\n num_tokens, dim = embed_tokens.weight.shape\n\n assert (max_block_size is not None) ^ (blocks is not None), \\\n 'max_block_size or blocks must be given.'\n if blocks is None:\n self.blocks = tuple(map(lambda elem: (elem, 0), range(1, max_block_size+1)))\n else:\n if not isinstance(blocks, tuple):\n raise ValueError('blocks must be assigned as a tuple')\n self.blocks = tuple(map(lambda elem: elem if isinstance(elem, tuple) else (elem, 0), blocks))\n if not all([(offset < block_size) for block_size, offset in self.blocks]):\n raise ValueError('Offset must be smaller than given block size.')\n max_block_size = max(list(map(lambda x: x[0], self.blocks)))\n\n assert downsample_factor <= max_block_size, \\\n 'downsample factor must be less than the max_block_size.'\n\n self.downsample_factor = downsample_factor\n self.score_consensus_attn = score_consensus_attn\n self.use_bn = use_bn\n logger.debug(f\"GBSWT Subword Block Combinations: {self.blocks}\")\n logger.debug(f\"GBSWT Downsampling factor: {self.downsample_factor}, use BatchNorm: {self.use_bn}\")\n\n def lcm(*num):\n return int(functools.reduce(lambda x, y: int((x * y) / math.gcd(x, y)), num, 1))\n\n self.block_pad_multiple = lcm(*[block_size for block_size, _ in self.blocks])\n #print(f\"block_pad_multiple: {self.block_pad_multiple}\")\n\n # layer definition\n self.embeds = embed_tokens\n self.positional_convol = nn.Sequential(\n Padding((0, 0, 0, max_block_size-1)),\n Rearrange('b s d -> b d s'),\n Depthwise1dConv(dim, dim, krnl_size=max_block_size, use_bn=self.use_bn,),\n Rearrange('b d s -> b s d'))\n self.cand_scoring = nn.Sequential(\n nn.Linear(dim, 1),\n Rearrange('... () -> ...'))\n\n def _init_weights(self, factor:float=0.05):\n self.positional_convol[2]._init_weights(factor)\n #print(f\"GBSTW weight initialization called: before: {self.cand_scoring[0].weight.data}\")\n self.cand_scoring[0].weight.data.normal_(mean=0.0, std=factor * 1.0)\n #print(f\"GBSTW weight initialization called: after: {self.cand_scoring[0].weight.data}\")\n\n def get_blocks(self):\n \"\"\" return GBST candidate blocking list. 
\"\"\"\n return self.blocks\n\n @torch.cuda.amp.autocast()\n def forward(self, in_tensor, attention_mask=None):\n b, s = in_tensor.shape\n #print(f\"initial shape: b, s : {b}, {s}, in_tensor.shape: {in_tensor.shape}\")\n mask = attention_mask\n #print(f\"mask: {mask}\")\n block_multi, ds_factor = self.block_pad_multiple, self.downsample_factor\n\n in_tensor = self.embeds(in_tensor)\n in_tensor = self.positional_convol(in_tensor)\n in_tensor = pad_to_multiple(in_tensor, block_multi,\n seq_dim=1, dim=-2, value=0.0)\n if mask is not None:\n mask = pad_to_multiple(mask, block_multi,\n seq_dim=1, dim=-1, value=False)\n\n def _masked_mean(in_tensor:Tensor, mask:Tensor, dim:int=-1):\n len_diff = len(in_tensor.shape) - len(mask.shape)\n mask = torch.unsqueeze(mask, dim=-len_diff)\n in_tensor.masked_fill_(~(mask.bool()), 0.)\n\n total_elems = mask.sum(dim=dim)\n mean = in_tensor.sum(dim=dim) / total_elems.clamp(min=1.)\n mean.masked_fill_((total_elems == 0), 0.)\n return mean.float()\n\n block_reprs, block_masks = [], []\n\n # 이제 입력 시퀀스를 cloning해서 후보를 세팅\n for block_size, offset in self.blocks:\n block_in = in_tensor.clone()\n if mask is not None:\n block_mask = mask.clone()\n need_padding = offset > 0\n\n if need_padding:\n loff, roff = (block_size - offset), offset\n #print(f\"loff: {loff}, roff: {roff}\")\n block_in = F.pad(block_in, (0, 0, loff, roff), value=0.0)\n if mask is not None:\n block_mask = F.pad(block_mask, (0, 0, loff, roff), value=False)\n\n blks = rearrange(block_in, 'b (s m) d -> b s m d', m=block_size)\n if mask is not None:\n mask_blks = rearrange(block_mask, 'b (s m) -> b s m', m=block_size)\n blk_repr = _masked_mean(blks, mask_blks, dim=-2)\n else:\n blk_repr = blks.mean(dim=-2)\n\n blk_repr = repeat(blk_repr, 'b s d -> b (s m) d', m=block_size)\n\n if need_padding:\n blk_repr = blk_repr[:, loff:-roff]\n\n block_reprs.append(blk_repr)\n\n if mask is not None:\n mask_blks = torch.any(mask_blks, dim=-1)\n mask_blks = repeat(mask_blks, 'b s -> b (s m)', m=block_size)\n if need_padding:\n mask_blks = mask_blks[:, loff:-roff]\n block_masks.append(mask_blks)\n\n # stack them all\n block_reprs = torch.stack(block_reprs, dim=2,)\n scores = self.cand_scoring(block_reprs)\n\n if mask is not None:\n block_masks = torch.stack(block_masks, dim=2)\n max_neg_val = -torch.finfo(scores.dtype).max\n scores = scores.masked_fill(~block_masks, max_neg_val)\n\n scores = scores.softmax(dim=2)\n\n # cheap consensus attention, as equation (5) in paper.\n if self.score_consensus_attn:\n score_sim = einsum('b i d, b j d -> b i j', scores, scores)\n\n if mask is not None:\n cross_mask = rearrange(mask, 'b i -> b i ()') * rearrange(mask, 'b j -> b () j')\n max_neg_val = -torch.finfo(score_sim.dtype).max\n score_sim = score_sim.masked_fill((~(cross_mask.bool())), max_neg_val)\n\n score_attn = score_sim.softmax(dim=-1)\n scores = einsum('b i j, b j m -> b i m', score_attn, scores)\n\n scores = rearrange(scores, 'b n m -> b n m ()')\n in_tensor = (block_reprs * scores).sum(dim=2)\n\n @torch.jit.script\n def _reshape_input_tensor(in_tensor:Tensor, s:int, d:int):\n # get divisible length to pad\n m = int(math.ceil(s / d) * d)\n #print(f\"_reshape_input_tensor: {m}\")\n return in_tensor[:, :m]\n\n in_tensor = _reshape_input_tensor(in_tensor, s, ds_factor)\n if mask is not None:\n mask = _reshape_input_tensor(mask, s, ds_factor)\n\n # downsample with mean pooling\n in_tensor = rearrange(in_tensor, 'b (n m) d -> b n m d', m=ds_factor)\n if mask is not None:\n mask = rearrange(mask, 'b (n m) -> b n m', 
m=ds_factor)\n in_tensor = _masked_mean(in_tensor, mask, dim=2)\n mask = torch.any(mask, dim=-1)\n else:\n in_tensor = in_tensor.mean(dim=-2)\n\n # tuple을 반환하기 때문에, forward()에서 [0]을 취해 바꿔줘야 한다\n return in_tensor, mask"
}
] | import copy
import torch
from typing import Optional, Union, Tuple
from torch import nn
from transformers import add_start_docstrings
from transformers.utils import logging
from transformers.modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithPastAndCrossAttentions,
Seq2SeqLMOutput,
Seq2SeqModelOutput,
)
from transformers.models.t5.modeling_t5 import (
T5LayerNorm, T5Block, T5Stack,
T5Model, T5PreTrainedModel, T5ForConditionalGeneration, T5EncoderModel,
T5DenseActDense, T5DenseGatedActDense, T5Attention,
T5_START_DOCSTRING
)
from .configuration_gbst5 import GBSWT5Config
from .gbst import GBSWT | 3,357 | """
hf transformers-compatible GBST + T5 Model implementation.
several methods are copied from huggingface/transformers/models/t5/modeling_t5.py
as implementation standards for compatibility (version 4.28.1).
hf transformers' modeling_t5.py file is distributed under Apache 2.0 License.
Copyright (C) 2023, ETRI LIRS, Jong-hun Shin.
"""
logger = logging.get_logger(__name__)
class GBSWT5PreTrainedModel(T5PreTrainedModel):
| """
hf transformers-compatible GBST + T5 Model implementation.
several methods are copied from huggingface/transformers/models/t5/modeling_t5.py
as implementation standards for compatibility (version 4.28.1).
as Implementation Standards for compatibility. (version 4.28.1)
hf transformers' modeling_t5.py file is distributed under Apache 2.0 License.
Copyright (C) 2023, ETRI LIRS, Jong-hun Shin.
"""
logger = logging.get_logger(__name__)
class GBSWT5PreTrainedModel(T5PreTrainedModel): | config_class = GBSWT5Config | 0 | 2023-11-17 02:04:46+00:00 | 4k |
GOAT-AI-lab/GOAT-Storytelling-Agent | goat_storytelling_agent/storytelling_agent.py | [
{
"identifier": "utils",
"path": "goat_storytelling_agent/utils.py",
"snippet": "def split_into_words_w_newline(text):\ndef remove_last_n_words(text, n):\ndef keep_last_n_words(text, n):"
},
{
"identifier": "Plan",
"path": "goat_storytelling_agent/plan.py",
"snippet": "class Plan:\n @staticmethod\n def split_by_act(original_plan):\n # removes only Act texts with newline prepended soemwhere near\n acts = re.split('\\n.{0,5}?Act ', original_plan)\n # remove random short garbage from re split\n acts = [text.strip() for text in acts[:]\n if (text and (len(text.split()) > 3))]\n if len(acts) == 4:\n acts = acts[1:]\n elif len(acts) != 3:\n print('Fail: split_by_act, attempt 1', original_plan)\n acts = original_plan.split('Act ')\n if len(acts) == 4:\n acts = acts[-3:]\n elif len(acts) != 3:\n print('Fail: split_by_act, attempt 2', original_plan)\n return []\n\n # [act1, act2, act3], [Act + act1, act2, act3]\n if acts[0].startswith('Act '):\n acts = [acts[0]] + ['Act ' + act for act in acts[1:]]\n else:\n acts = ['Act ' + act for act in acts[:]]\n return acts\n\n @staticmethod\n def parse_act(act):\n act = re.split(r'\\n.{0,20}?Chapter .+:', act.strip())\n chapters = [text.strip() for text in act[1:]\n if (text and (len(text.split()) > 3))]\n return {'act_descr': act[0].strip(), 'chapters': chapters}\n\n @staticmethod\n def parse_text_plan(text_plan):\n acts = Plan.split_by_act(text_plan)\n if not acts:\n return []\n plan = [Plan.parse_act(act) for act in acts if act]\n plan = [act for act in plan if act['chapters']]\n return plan\n\n @staticmethod\n def normalize_text_plan(text_plan):\n plan = Plan.parse_text_plan(text_plan)\n text_plan = Plan.plan_2_str(plan)\n return text_plan\n\n @staticmethod\n def act_2_str(plan, act_num):\n text_plan = ''\n chs = []\n ch_num = 1\n for i, act in enumerate(plan):\n act_descr = act['act_descr'] + '\\n'\n if not re.search(r'Act \\d', act_descr[0:50]):\n act_descr = f'Act {i+1}:\\n' + act_descr\n for chapter in act['chapters']:\n if (i + 1) == act_num:\n act_descr += f'- Chapter {ch_num}: {chapter}\\n'\n chs.append(ch_num)\n elif (i + 1) > act_num:\n return text_plan.strip(), chs\n ch_num += 1\n text_plan += act_descr + '\\n'\n return text_plan.strip(), chs\n\n @staticmethod\n def plan_2_str(plan):\n text_plan = ''\n ch_num = 1\n for i, act in enumerate(plan):\n act_descr = act['act_descr'] + '\\n'\n if not re.search(r'Act \\d', act_descr[0:50]):\n act_descr = f'Act {i+1}:\\n' + act_descr\n for chapter in act['chapters']:\n act_descr += f'- Chapter {ch_num}: {chapter}\\n'\n ch_num += 1\n text_plan += act_descr + '\\n'\n return text_plan.strip()\n\n @staticmethod\n def save_plan(plan, fpath):\n with open(fpath, 'w') as fp:\n json.dump(plan, fp, indent=4)"
}
] | import sys
import time
import re
import json
import requests
import traceback
from goat_storytelling_agent import utils
from goat_storytelling_agent.plan import Plan
from transformers import LlamaTokenizerFast
from goat_storytelling_agent import prompts | 3,172 | act = self.query_chat(messages)
if act:
act_dict = Plan.parse_act(act)
while len(act_dict['chapters']) < 2:
act = self.query_chat(messages)
act_dict = Plan.parse_act(act)
else:
plan[act_num] = act_dict
text_plan = Plan.plan_2_str(plan)
all_messages.append(messages)
return all_messages, plan
def split_chapters_into_scenes(self, plan):
"""Creates a by-scene breakdown of all chapters
Parameters
----------
plan : Dict
Dict with book plan
Returns
-------
List[Dict]
Used messages for logging
dict
Dict with updated book plan
"""
all_messages = []
act_chapters = {}
for i, act in enumerate(plan, start=1):
text_act, chs = Plan.act_2_str(plan, i)
act_chapters[i] = chs
messages = self.prompt_engine.split_chapters_into_scenes_messages(
i, text_act, self.form)
act_scenes = self.query_chat(messages)
act['act_scenes'] = act_scenes
all_messages.append(messages)
for i, act in enumerate(plan, start=1):
act_scenes = act['act_scenes']
act_scenes = re.split(r'Chapter (\d+)', act_scenes.strip())
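            # The capturing group keeps the chapter numbers in the split result; they mark chapter boundaries below.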
act['chapter_scenes'] = {}
chapters = [text.strip() for text in act_scenes[:]
if (text and text.strip())]
current_ch = None
merged_chapters = {}
for snippet in chapters:
if snippet.isnumeric():
ch_num = int(snippet)
if ch_num != current_ch:
current_ch = snippet
merged_chapters[ch_num] = ''
continue
if merged_chapters:
merged_chapters[ch_num] += snippet
ch_nums = list(merged_chapters.keys()) if len(
merged_chapters) <= len(act_chapters[i]) else act_chapters[i]
merged_chapters = {ch_num: merged_chapters[ch_num]
for ch_num in ch_nums}
for ch_num, chapter in merged_chapters.items():
scenes = re.split(r'Scene \d+.{0,10}?:', chapter)
scenes = [text.strip() for text in scenes[1:]
if (text and (len(text.split()) > 3))]
if not scenes:
continue
act['chapter_scenes'][ch_num] = scenes
return all_messages, plan
@staticmethod
def prepare_scene_text(text):
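        # Drop leading 'Chapter'/'Scene' heading lines and truncate at the next generated heading placeholder.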
lines = text.split('\n')
ch_ids = [i for i in range(5)
if 'Chapter ' in lines[i]]
if ch_ids:
lines = lines[ch_ids[-1]+1:]
sc_ids = [i for i in range(5)
if 'Scene ' in lines[i]]
if sc_ids:
lines = lines[sc_ids[-1]+1:]
placeholder_i = None
for i in range(len(lines)):
if lines[i].startswith('Chapter ') or lines[i].startswith('Scene '):
placeholder_i = i
break
if placeholder_i is not None:
lines = lines[:i]
text = '\n'.join(lines)
return text
def write_a_scene(
self, scene, sc_num, ch_num, plan, previous_scene=None):
"""Generates a scene text for a form
Parameters
----------
scene : str
Scene description
sc_num : int
Scene number
ch_num : int
Chapter number
plan : Dict
Dict with book plan
previous_scene : str, optional
Previous scene text, by default None
Returns
-------
List[Dict]
Used messages for logging
str
Generated scene text
"""
text_plan = Plan.plan_2_str(plan)
messages = self.prompt_engine.scene_messages(
scene, sc_num, ch_num, text_plan, self.form)
if previous_scene:
|
SUPPORTED_BACKENDS = ["hf", "llama.cpp"]
def generate_prompt_parts(
messages, include_roles=set(('user', 'assistant', 'system'))):
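    # Render chat messages into the plain-text prompt format expected by the model ("### USER:" / "### ASSISTANT:").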
last_role = None
messages = [m for m in messages if m['role'] in include_roles]
for idx, message in enumerate(messages):
nl = "\n" if idx > 0 else ""
if message['role'] == 'system':
if idx > 0 and last_role not in (None, "system"):
raise ValueError("system message not at start")
yield f"{message['content']}"
elif message['role'] == 'user':
yield f"{nl}### USER: {message['content']}"
elif message['role'] == 'assistant':
yield f"{nl}### ASSISTANT: {message['content']}"
last_role = message['role']
if last_role != 'assistant':
yield '\n### ASSISTANT:'
def _query_chat_hf(endpoint, messages, tokenizer, retries=3,
request_timeout=120, max_tokens=4096,
extra_options={'do_sample': True}):
endpoint = endpoint.rstrip('/')
prompt = ''.join(generate_prompt_parts(messages))
tokens = tokenizer(prompt, add_special_tokens=True,
truncation=False)['input_ids']
data = {
"inputs": prompt,
"parameters": {
'max_new_tokens': max_tokens - len(tokens),
**extra_options
}
}
headers = {'Content-Type': 'application/json'}
while retries > 0:
try:
response = requests.post(
f"{endpoint}/generate", headers=headers, data=json.dumps(data),
timeout=request_timeout)
if messages and messages[-1]["role"] == "assistant":
result_prefix = messages[-1]["content"]
else:
result_prefix = ''
generated_text = result_prefix + json.loads(
response.text)['generated_text']
return generated_text
except Exception:
traceback.print_exc()
print('Timeout error, retrying...')
retries -= 1
time.sleep(5)
else:
return ''
def _query_chat_llamacpp(endpoint, messages, retries=3, request_timeout=120,
max_tokens=4096, extra_options={}):
endpoint = endpoint.rstrip('/')
headers = {'Content-Type': 'application/json'}
prompt = ''.join(generate_prompt_parts(messages))
print(f"\n\n========== Submitting prompt: >>\n{prompt}", end="")
sys.stdout.flush()
response = requests.post(
f"{endpoint}/tokenize", headers=headers,
data=json.dumps({"content": prompt}),
timeout=request_timeout, stream=False)
tokens = [1, *response.json()["tokens"]]
data = {
"prompt": tokens,
"stream": True,
"n_predict": max_tokens - len(tokens),
**extra_options,
}
jdata = json.dumps(data)
request_kwargs = dict(headers=headers, data=jdata,
timeout=request_timeout, stream=True)
response = requests.post(f"{endpoint}/completion", **request_kwargs)
result = bytearray()
if messages and messages[-1]["role"] == "assistant":
result += messages[-1]["content"].encode("utf-8")
is_first = True
for line in response.iter_lines():
line = line.strip()
if not line:
continue
if line.startswith(b"error:"):
retries -= 1
print(f"\nError(retry={retries}): {line!r}")
if retries < 0:
break
del response
time.sleep(5)
response = requests.post(f"{endpoint}/completion", **request_kwargs)
is_first = True
result.clear()
continue
if not line.startswith(b"data: "):
raise ValueError(f"Got unexpected response: {line!r}")
parsed = json.loads(line[6:])
content = parsed.get("content", b"")
result += bytes(content, encoding="utf-8")
if is_first:
is_first = False
print("<<|", end="")
sys.stdout.flush()
print(content, end="")
sys.stdout.flush()
if parsed.get("stop") is True:
break
print("\nDone reading response.")
return str(result, encoding="utf-8").strip()
class StoryAgent:
def __init__(self, backend_uri, backend="hf", request_timeout=120,
max_tokens=4096, n_crop_previous=400,
prompt_engine=None, form='novel',
extra_options={}, scene_extra_options={}):
self.backend = backend.lower()
if self.backend not in SUPPORTED_BACKENDS:
raise ValueError("Unknown backend")
if self.backend == "hf":
self.tokenizer = LlamaTokenizerFast.from_pretrained(
"GOAT-AI/GOAT-70B-Storytelling")
if prompt_engine is None:
self.prompt_engine = prompts
else:
self.prompt_engine = prompt_engine
self.form = form
self.max_tokens = max_tokens
self.extra_options = extra_options
self.scene_extra_options = extra_options.copy()
self.scene_extra_options.update(scene_extra_options)
self.backend_uri = backend_uri
self.n_crop_previous = n_crop_previous
self.request_timeout = request_timeout
def query_chat(self, messages, retries=3):
if self.backend == "hf":
result = _query_chat_hf(
self.backend_uri, messages, self.tokenizer, retries=retries,
request_timeout=self.request_timeout,
max_tokens=self.max_tokens, extra_options=self.extra_options)
elif self.backend == "llama.cpp":
result = _query_chat_llamacpp(
self.backend_uri, messages, retries=retries,
request_timeout=self.request_timeout,
max_tokens=self.max_tokens, extra_options=self.extra_options)
return result
def parse_book_spec(self, text_spec):
# Initialize book spec dict with empty fields
fields = self.prompt_engine.book_spec_fields
spec_dict = {field: '' for field in fields}
last_field = None
if "\"\"\"" in text_spec[:int(len(text_spec)/2)]:
header, sep, text_spec = text_spec.partition("\"\"\"")
text_spec = text_spec.strip()
# Process raw spec into dict
for line in text_spec.split('\n'):
pseudokey, sep, value = line.partition(':')
pseudokey = pseudokey.lower().strip()
matched_key = [key for key in fields
if (key.lower().strip() in pseudokey)
and (len(pseudokey) < (2 * len(key.strip())))]
if (':' in line) and (len(matched_key) == 1):
last_field = matched_key[0]
if last_field in spec_dict:
spec_dict[last_field] += value.strip()
elif ':' in line:
last_field = 'other'
spec_dict[last_field] = ''
else:
if last_field:
# If line does not contain ':' it should be
# the continuation of the last field's value
spec_dict[last_field] += ' ' + line.strip()
spec_dict.pop('other', None)
return spec_dict
def init_book_spec(self, topic):
"""Creates initial book specification
Parameters
----------
topic : str
Short initial topic
Returns
-------
List[Dict]
Used messages for logging
str
Book specification text
"""
messages = self.prompt_engine.init_book_spec_messages(topic, self.form)
text_spec = self.query_chat(messages)
spec_dict = self.parse_book_spec(text_spec)
text_spec = "\n".join(f"{key}: {value}"
for key, value in spec_dict.items())
# Check and fill in missing fields
for field in self.prompt_engine.book_spec_fields:
while not spec_dict[field]:
messages = self.prompt_engine.missing_book_spec_messages(
field, text_spec)
missing_part = self.query_chat(messages)
key, sep, value = missing_part.partition(':')
if key.lower().strip() == field.lower().strip():
spec_dict[field] = value.strip()
text_spec = "\n".join(f"{key}: {value}"
for key, value in spec_dict.items())
return messages, text_spec
def enhance_book_spec(self, book_spec):
"""Make book specification more detailed
Parameters
----------
book_spec : str
Book specification
Returns
-------
List[Dict]
Used messages for logging
str
Book specification text
"""
messages = self.prompt_engine.enhance_book_spec_messages(
book_spec, self.form)
text_spec = self.query_chat(messages)
spec_dict_old = self.parse_book_spec(book_spec)
spec_dict_new = self.parse_book_spec(text_spec)
# Check and fill in missing fields
for field in self.prompt_engine.book_spec_fields:
if not spec_dict_new[field]:
spec_dict_new[field] = spec_dict_old[field]
text_spec = "\n".join(f"{key}: {value}"
for key, value in spec_dict_new.items())
return messages, text_spec
def create_plot_chapters(self, book_spec):
"""Create initial by-plot outline of form
Parameters
----------
book_spec : str
Book specification
Returns
-------
List[Dict]
Used messages for logging
dict
Dict with book plan
"""
messages = self.prompt_engine.create_plot_chapters_messages(book_spec, self.form)
plan = []
while not plan:
text_plan = self.query_chat(messages)
if text_plan:
plan = Plan.parse_text_plan(text_plan)
return messages, plan
def enhance_plot_chapters(self, book_spec, plan):
"""Enhances the outline to make the flow more engaging
Parameters
----------
book_spec : str
Book specification
plan : Dict
Dict with book plan
Returns
-------
List[Dict]
Used messages for logging
dict
Dict with updated book plan
"""
text_plan = Plan.plan_2_str(plan)
all_messages = []
for act_num in range(3):
messages = self.prompt_engine.enhance_plot_chapters_messages(
act_num, text_plan, book_spec, self.form)
act = self.query_chat(messages)
if act:
act_dict = Plan.parse_act(act)
while len(act_dict['chapters']) < 2:
act = self.query_chat(messages)
act_dict = Plan.parse_act(act)
else:
plan[act_num] = act_dict
text_plan = Plan.plan_2_str(plan)
all_messages.append(messages)
return all_messages, plan
def split_chapters_into_scenes(self, plan):
"""Creates a by-scene breakdown of all chapters
Parameters
----------
plan : Dict
Dict with book plan
Returns
-------
List[Dict]
Used messages for logging
dict
Dict with updated book plan
"""
all_messages = []
act_chapters = {}
for i, act in enumerate(plan, start=1):
text_act, chs = Plan.act_2_str(plan, i)
act_chapters[i] = chs
messages = self.prompt_engine.split_chapters_into_scenes_messages(
i, text_act, self.form)
act_scenes = self.query_chat(messages)
act['act_scenes'] = act_scenes
all_messages.append(messages)
for i, act in enumerate(plan, start=1):
act_scenes = act['act_scenes']
act_scenes = re.split(r'Chapter (\d+)', act_scenes.strip())
act['chapter_scenes'] = {}
chapters = [text.strip() for text in act_scenes[:]
if (text and text.strip())]
current_ch = None
merged_chapters = {}
for snippet in chapters:
if snippet.isnumeric():
ch_num = int(snippet)
if ch_num != current_ch:
current_ch = snippet
merged_chapters[ch_num] = ''
continue
if merged_chapters:
merged_chapters[ch_num] += snippet
ch_nums = list(merged_chapters.keys()) if len(
merged_chapters) <= len(act_chapters[i]) else act_chapters[i]
merged_chapters = {ch_num: merged_chapters[ch_num]
for ch_num in ch_nums}
for ch_num, chapter in merged_chapters.items():
scenes = re.split(r'Scene \d+.{0,10}?:', chapter)
scenes = [text.strip() for text in scenes[1:]
if (text and (len(text.split()) > 3))]
if not scenes:
continue
act['chapter_scenes'][ch_num] = scenes
return all_messages, plan
@staticmethod
def prepare_scene_text(text):
lines = text.split('\n')
ch_ids = [i for i in range(5)
if 'Chapter ' in lines[i]]
if ch_ids:
lines = lines[ch_ids[-1]+1:]
sc_ids = [i for i in range(5)
if 'Scene ' in lines[i]]
if sc_ids:
lines = lines[sc_ids[-1]+1:]
placeholder_i = None
for i in range(len(lines)):
if lines[i].startswith('Chapter ') or lines[i].startswith('Scene '):
placeholder_i = i
break
if placeholder_i is not None:
lines = lines[:i]
text = '\n'.join(lines)
return text
def write_a_scene(
self, scene, sc_num, ch_num, plan, previous_scene=None):
"""Generates a scene text for a form
Parameters
----------
scene : str
Scene description
sc_num : int
Scene number
ch_num : int
Chapter number
plan : Dict
Dict with book plan
previous_scene : str, optional
Previous scene text, by default None
Returns
-------
List[Dict]
Used messages for logging
str
Generated scene text
"""
text_plan = Plan.plan_2_str(plan)
messages = self.prompt_engine.scene_messages(
scene, sc_num, ch_num, text_plan, self.form)
if previous_scene: | previous_scene = utils.keep_last_n_words(previous_scene, | 0 | 2023-11-17 11:53:00+00:00 | 4k |
dazhangyu123/ACMIL | modules/topk/polynomial/sp.py | [
{
"identifier": "divide_and_conquer",
"path": "modules/topk/polynomial/divide_conquer.py",
"snippet": "def divide_and_conquer(x, k, mul):\n \"\"\"\n Divide and conquer method for polynomial expansion\n x is a 2d tensor of size (n_classes, n_roots)\n The objective is to obtain the k first coefficients of the expanded\n polynomial\n \"\"\"\n\n to_merge = []\n\n while x[0].dim() > 1 and x[0].size(0) > 1:\n size = x[0].size(0)\n half = size // 2\n if 2 * half < size:\n to_merge.append([t[-1] for t in x])\n x = mul([t[:half] for t in x],\n [t[half: 2 * half] for t in x])\n\n for row in to_merge:\n x = mul(x, row)\n x = torch.cat(x)\n return x"
},
{
"identifier": "Multiplication",
"path": "modules/topk/polynomial/multiplication.py",
"snippet": "def Multiplication(k):\n \"\"\"\n Generate a function that performs a polynomial multiplication and return coefficients up to degree k\n \"\"\"\n assert isinstance(k, int) and k > 0\n\n def isum(factors):\n init = next(factors)\n return reduce(operator.iadd, factors, init)\n\n def mul_function(x1, x2):\n\n # prepare indices for convolution\n l1, l2 = len(x1), len(x2)\n M = min(k + 1, l1 + l2 - 1)\n indices = [[] for _ in range(M)]\n for (i, j) in itertools.product(range(l1), range(l2)):\n if i + j >= M:\n continue\n indices[i + j].append((i, j))\n\n # wrap with log-tensors for stability\n X1 = [LogTensor(x1[i]) for i in range(l1)]\n X2 = [LogTensor(x2[i]) for i in range(l2)]\n\n # perform convolution\n coeff = []\n for c in range(M):\n coeff.append(isum(X1[i] * X2[j] for (i, j) in indices[c]).torch())\n return coeff\n\n return mul_function"
},
{
"identifier": "d_logS_d_expX",
"path": "modules/topk/polynomial/grad.py",
"snippet": "def d_logS_d_expX(S, X, j, p, grad, thresh, eps=1e-5):\n \"\"\"\n Compute the gradient of log S[j] w.r.t. exp(X).\n For unstable cases, use p-th order approximnation.\n \"\"\"\n\n # ------------------------------------------------------------------------\n # Detect unstabilites\n # ------------------------------------------------------------------------\n\n _X_ = LogTensor(X)\n _S_ = [LogTensor(S[i]) for i in range(S.size(0))]\n\n # recursion of gradient formula (separate terms for stability)\n _N_, _P_ = recursion(_S_, _X_, j)\n\n # deal with edge case where _N_ or _P_ is 0 instead of a LogTensor (happens for k=2):\n # fill with large negative values (numerically equivalent to 0 in log-space)\n if not isinstance(_N_, LogTensor):\n _N_ = LogTensor(-1.0 / eps * torch.ones_like(X))\n if not isinstance(_P_, LogTensor):\n _P_ = LogTensor(-1.0 / eps * torch.ones_like(X))\n\n P, N = _P_.torch(), _N_.torch()\n\n # detect instability: small relative difference in log-space\n diff = (P - N) / (N.abs() + eps)\n\n # split into stable and unstable indices\n u_indices = torch.lt(diff, thresh) # unstable\n s_indices = u_indices.eq(0) # stable\n\n # ------------------------------------------------------------------------\n # Compute d S[j] / d X\n # ------------------------------------------------------------------------\n\n # make grad match size and type of X\n grad = grad.type_as(X).resize_as_(X)\n\n # exact gradient for s_indices (stable) elements\n if s_indices.sum():\n # re-use positive and negative parts of recursion (separate for stability)\n _N_ = LogTensor(_N_.torch()[s_indices])\n _P_ = LogTensor(_P_.torch()[s_indices])\n _X_ = LogTensor(X[s_indices])\n _S_ = [LogTensor(S[i][s_indices]) for i in range(S.size(0))]\n\n # d log S[j] / d exp(X) = (d S[j] / d X) * X / S[j]\n _SG_ = (_P_ - _N_) * _X_ / _S_[j]\n grad.masked_scatter_(s_indices, _SG_.torch().exp())\n\n # approximate gradients for u_indices (unstable) elements\n if u_indices.sum():\n _X_ = LogTensor(X[u_indices])\n _S_ = [LogTensor(S[i][u_indices]) for i in range(S.size(0))]\n\n # positive and negative parts of approximation (separate for stability)\n _N_, _P_ = approximation(_S_, _X_, j, p)\n\n # d log S[j] / d exp(X) = (d S[j] / d X) * X / S[j]\n _UG_ = (_P_ - _N_) * _X_ / _S_[j]\n grad.masked_scatter_(u_indices, _UG_.torch().exp())\n\n return grad"
}
] | import torch
import torch.nn as nn
import torch.autograd as ag
from .divide_conquer import divide_and_conquer
from .multiplication import Multiplication
from .grad import d_logS_d_expX | 1,761 |
class LogSumExp(nn.Module):
def __init__(self, k, p=None, thresh=1e-5):
super(LogSumExp, self).__init__()
self.k = k
self.p = int(1 + 0.2 * k) if p is None else p
self.mul = Multiplication(self.k + self.p - 1)
self.thresh = thresh
self.register_buffer('grad_k', torch.Tensor(0))
self.register_buffer('grad_km1', torch.Tensor(0))
self.buffers = (self.grad_km1, self.grad_k)
def forward(self, x):
f = LogSumExp_F()
return f.apply(x, self.k, self.p, self.thresh, self.mul, self.buffers)
class LogSumExp_F(ag.Function):
@staticmethod
def forward(self, x, k, p, thresh, mul, buffers):
"""
Returns a matrix of size (2, n_samples) with sigma_{k-1} and sigma_{k}
for each sample of the mini-batch.
"""
self.save_for_backward(x)
self.k, self.p, self.thresh = k, p, thresh
# unpack buffers
self.grad_km1, self.grad_k = buffers
# number of samples and number of coefficients to compute
n_s = x.size(0)
kp = self.k + self.p - 1
assert kp <= x.size(1)
# clone to allow in-place operations
x = x.clone()
# pre-compute normalization
x_summed = x.sum(1)
# invert in log-space
x.t_().mul_(-1)
# initialize polynomials (in log-space)
x = [x, x.clone().fill_(0)]
# polynomial multiplications
|
class LogSumExp(nn.Module):
def __init__(self, k, p=None, thresh=1e-5):
super(LogSumExp, self).__init__()
self.k = k
self.p = int(1 + 0.2 * k) if p is None else p
self.mul = Multiplication(self.k + self.p - 1)
self.thresh = thresh
self.register_buffer('grad_k', torch.Tensor(0))
self.register_buffer('grad_km1', torch.Tensor(0))
self.buffers = (self.grad_km1, self.grad_k)
def forward(self, x):
f = LogSumExp_F()
return f.apply(x, self.k, self.p, self.thresh, self.mul, self.buffers)
class LogSumExp_F(ag.Function):
@staticmethod
def forward(self, x, k, p, thresh, mul, buffers):
"""
Returns a matrix of size (2, n_samples) with sigma_{k-1} and sigma_{k}
for each sample of the mini-batch.
"""
self.save_for_backward(x)
self.k, self.p, self.thresh = k, p, thresh
# unpack buffers
self.grad_km1, self.grad_k = buffers
# number of samples and number of coefficients to compute
n_s = x.size(0)
kp = self.k + self.p - 1
assert kp <= x.size(1)
# clone to allow in-place operations
x = x.clone()
# pre-compute normalization
x_summed = x.sum(1)
# invert in log-space
x.t_().mul_(-1)
# initialize polynomials (in log-space)
x = [x, x.clone().fill_(0)]
# polynomial multiplications | log_res = divide_and_conquer(x, kp, mul=mul) | 0 | 2023-11-12 14:07:34+00:00 | 4k |
Kav-K/Described | discord_service/cogs/image_service_cog.py | [
{
"identifier": "EmbedStatics",
"path": "discord_service/embeds/embed_helper.py",
"snippet": "class EmbedStatics:\n def __init__(self):\n pass\n\n def status_to_string(status):\n if status:\n return \"enabled\"\n else:\n return \"disabled\"\n\n @staticmethod\n def build_status_display_embed(status):\n embed = discord.Embed(\n title=\"Describer\",\n description=f\"The image descriptions status for this server is: `{EmbedStatics.status_to_string(status)}`\",\n color=discord.Color.blurple(),\n )\n embed.set_thumbnail(url=\"https://i.imgur.com/txHhNzL.png\")\n return embed\n\n @staticmethod\n def build_status_change_success_embed(status):\n embed = discord.Embed(\n title=\"Describer\",\n description=f\"Successfully changed image descriptions for this server to the status:\\n`{EmbedStatics.status_to_string(status)}`\",\n color=discord.Color.green(),\n )\n # thumbnail of https://i.imgur.com/I5dIdg6.png\n embed.set_thumbnail(url=\"https://i.imgur.com/I5dIdg6.png\")\n return embed\n\n @staticmethod\n def build_status_set_failure_embed(message):\n embed = discord.Embed(\n title=\"Describer\",\n description=f\"There was an error changing the image descriptions status for this server: \"\n + message,\n color=discord.Color.red(),\n )\n embed.set_thumbnail(url=\"https://i.imgur.com/hbdBZfG.png\")\n return embed\n\n @staticmethod\n def build_image_analysis_failure_embed(message):\n embed = discord.Embed(\n title=\"Describer\",\n description=f\"There was an error describing the image sent: \" + message,\n color=discord.Color.red(),\n )\n embed.set_thumbnail(url=\"https://i.imgur.com/hbdBZfG.png\")\n return embed\n\n @staticmethod\n def build_described_image_embed(\n message: discord.Message, image_url: str, description: str\n ):\n embed = discord.Embed(\n title=f\"{message.author.display_name} sent an image that was automatically described\",\n description=f\"{description}\",\n color=discord.Color.light_gray(),\n )\n embed.set_thumbnail(url=image_url)\n embed.set_author(\n name=message.author.display_name, icon_url=message.author.avatar.url\n )\n embed.set_footer(\n text=f\"Automatically described for an image sent by {message.author.display_name}\",\n icon_url=message.author.avatar.url,\n )\n return embed"
},
{
"identifier": "Check",
"path": "services/check_service.py",
"snippet": "class Check:\n @staticmethod\n def check_admin_roles() -> Callable:\n async def inner(ctx: discord.ApplicationContext):\n admin_roles = EnvService.get_admin_roles()\n if EnvService.get_admin_roles() == [None]:\n admin_roles = [\"admin\"]\n\n if not any(role.name.lower() in admin_roles for role in ctx.user.roles):\n await ctx.defer(ephemeral=True)\n await ctx.respond(\n f\"You don't have permission, list of roles is {admin_roles}\",\n ephemeral=True,\n delete_after=10,\n )\n return False\n return True\n\n return inner"
},
{
"identifier": "EnvService",
"path": "services/environment_service.py",
"snippet": "class EnvService:\n # To be expanded upon later!\n def __init__(self):\n self.env = {}\n\n @staticmethod\n def environment_path_with_fallback(env_name, relative_fallback=None):\n directory = os.getenv(env_name)\n if directory is not None:\n return Path(directory).resolve()\n\n if relative_fallback:\n app_relative = (app_root_path() / relative_fallback).resolve()\n if app_relative.exists():\n return app_relative\n\n return Path.cwd()\n\n @staticmethod\n def save_path():\n share_dir = os.getenv(\"SHARE_DIR\")\n if share_dir is not None:\n return Path(share_dir)\n return app_root_path()\n\n @staticmethod\n def find_shared_file(file_name):\n share_file_paths = []\n share_dir = os.getenv(\"SHARE_DIR\")\n if share_dir is not None:\n share_file_paths.append(Path(share_dir) / file_name)\n\n share_file_paths.extend(\n [\n app_root_path() / \"share\" / file_name,\n app_root_path() / file_name,\n Path(file_name),\n ]\n )\n\n for share_file_path in share_file_paths:\n if share_file_path.exists():\n return share_file_path.resolve()\n\n raise ValueError(f\"Unable to find shared data file {file_name}\")\n\n @staticmethod\n def get_allowed_guilds():\n # ALLOWED_GUILDS is a comma separated list of guild ids\n # It can also just be one guild ID\n # Read these allowed guilds and return as a list of ints\n try:\n allowed_guilds = os.getenv(\"ALLOWED_GUILDS\")\n except Exception:\n allowed_guilds = None\n\n if allowed_guilds is None:\n raise ValueError(\n \"ALLOWED_GUILDS is not defined properly in the environment file!\"\n \"Please copy your server's guild ID and put it into ALLOWED_GUILDS in the .env file.\"\n 'For example a line should look like: `ALLOWED_GUILDS=\"971268468148166697\"`'\n )\n\n allowed_guilds = (\n allowed_guilds.split(\",\") if \",\" in allowed_guilds else [allowed_guilds]\n )\n allowed_guilds = [int(guild) for guild in allowed_guilds]\n return allowed_guilds\n\n @staticmethod\n def get_described_channels():\n # ALLOWED_GUILDS is a comma separated list of guild ids\n # It can also just be one guild ID\n # Read these allowed guilds and return as a list of ints\n try:\n described_channels = os.getenv(\"DESCRIBED_CHANNELS\")\n except Exception:\n described_channels = None\n\n if described_channels is None:\n raise ValueError(\n \"DESCRIBED_CHANNELS is not properly defined in your environment file. 
All channels will be enabled for image descriptions\"\n )\n\n described_channels = (\n described_channels.split(\",\")\n if \",\" in described_channels\n else [described_channels]\n )\n return described_channels\n\n @staticmethod\n def get_discord_token():\n try:\n e2b_key = os.getenv(\"DISCORD_TOKEN\")\n return e2b_key\n except Exception:\n return None\n\n @staticmethod\n def get_openai_api_key():\n try:\n openai_key = os.getenv(\"OPENAI_API_KEY\")\n return openai_key\n except Exception:\n return None\n\n @staticmethod\n def get_admin_roles():\n # ADMIN_ROLES is a comma separated list of string roles\n # It can also just be one role\n # Read these allowed roles and return as a list of strings\n try:\n admin_roles = os.getenv(\"ADMIN_ROLES\")\n except Exception:\n admin_roles = None\n\n if admin_roles is None:\n print(\n \"ADMIN_ROLES is not defined properly in the environment file!\"\n \"Please copy your server's role and put it into ADMIN_ROLES in the .env file.\"\n 'For example a line should look like: `ADMIN_ROLES=\"Admin\"`'\n )\n print(\"Defaulting to allowing all users to use admin commands...\")\n return [None]\n\n admin_roles = (\n admin_roles.lower().split(\",\")\n if \",\" in admin_roles\n else [admin_roles.lower()]\n )\n return admin_roles"
},
{
"identifier": "OpenAIExecutor",
"path": "services/openai_service.py",
"snippet": "class OpenAIExecutor:\n def __init__(self):\n self.openai_api_key = EnvService.get_openai_api_key()\n try:\n self.ANALYSIS_PRETEXT = IMAGE_ANALYSIS_PROMPT\n except Exception:\n traceback.print_exc()\n self.ANALYSIS_PRETEXT = \"Describe this image in as much detail as you can, for the visually impaired.\"\n\n @backoff.on_exception(\n backoff.expo,\n ValueError,\n factor=3,\n base=5,\n max_tries=4,\n on_backoff=backoff_handler_request,\n )\n async def send_image_evaluation_request(\n self,\n image_urls,\n ):\n messages = [{\"role\": \"system\", \"content\": self.ANALYSIS_PRETEXT}]\n for image_url in image_urls:\n messages.append(\n {\n \"role\": \"user\",\n \"content\": [\n {\n \"type\": \"image_url\",\n \"image_url\": {\"url\": image_url, \"detail\": \"high\"},\n }\n ],\n }\n )\n\n async with aiohttp.ClientSession(\n raise_for_status=False, timeout=aiohttp.ClientTimeout(total=300)\n ) as session:\n payload = {\n \"model\": \"gpt-4-vision-preview\",\n \"messages\": messages,\n \"temperature\": 0,\n \"max_tokens\": 2048,\n }\n\n headers = {\"Authorization\": f\"Bearer {self.openai_api_key}\"}\n\n async with session.post(\n \"https://api.openai.com/v1/chat/completions\",\n json=payload,\n headers=headers,\n ) as resp:\n response = await resp.json()\n # print(f\"Payload -> {payload}\")\n # Parse the total tokens used for this request and response pair from the response\n print(f\"Response -> {response}\")\n\n return response[\"choices\"][0][\"message\"][\"content\"]"
}
] | import pickle
import re
import traceback
import aiofiles
import discord
from collections import defaultdict
from pathlib import Path
from discord_service.embeds.embed_helper import EmbedStatics
from services.check_service import Check
from services.environment_service import EnvService
from services.openai_service import OpenAIExecutor | 2,708 |
class ServerInformation:
def __init__(self, status: bool = False):
self.status = status
class ImageService(discord.Cog, name="ImageService"):
"""cog containing the optimizer command"""
async def change_guild_status(self, guild_id, status: bool):
self.server_information[guild_id].status = status
try:
directory_path = Path(EnvService.save_path()) / "pickles"
directory_path.mkdir(parents=True, exist_ok=True)
async with aiofiles.open(
EnvService.save_path() / "pickles" / "server_information.pickle",
"wb",
) as f:
await f.write(pickle.dumps(self.server_information))
return True
except:
traceback.print_exc()
print("Could not save server information to disk after update.")
return False
def __init__(
self,
bot,
):
super().__init__()
self.bot = bot
self.openai_service = OpenAIExecutor()
self.allowed_channels = EnvService.get_described_channels()
try:
with open(
EnvService.save_path() / "pickles" / "server_information.pickle",
"rb",
) as f:
self.server_information = pickle.load(f)
print("Loaded server information pickle.")
except:
self.server_information = defaultdict(ServerInformation)
for guild in self.bot.guilds:
self.server_information[guild.id] = ServerInformation(False)
@discord.slash_command(
name="describe",
description="Turn image descriptions on or off for the server.",
guild_ids=EnvService.get_allowed_guilds(),
|
class ServerInformation:
def __init__(self, status: bool = False):
self.status = status
class ImageService(discord.Cog, name="ImageService"):
"""cog containing the optimizer command"""
async def change_guild_status(self, guild_id, status: bool):
self.server_information[guild_id].status = status
try:
directory_path = Path(EnvService.save_path()) / "pickles"
directory_path.mkdir(parents=True, exist_ok=True)
async with aiofiles.open(
EnvService.save_path() / "pickles" / "server_information.pickle",
"wb",
) as f:
await f.write(pickle.dumps(self.server_information))
return True
except:
traceback.print_exc()
print("Could not save server information to disk after update.")
return False
def __init__(
self,
bot,
):
super().__init__()
self.bot = bot
self.openai_service = OpenAIExecutor()
self.allowed_channels = EnvService.get_described_channels()
try:
with open(
EnvService.save_path() / "pickles" / "server_information.pickle",
"rb",
) as f:
self.server_information = pickle.load(f)
print("Loaded server information pickle.")
except:
self.server_information = defaultdict(ServerInformation)
for guild in self.bot.guilds:
self.server_information[guild.id] = ServerInformation(False)
@discord.slash_command(
name="describe",
description="Turn image descriptions on or off for the server.",
guild_ids=EnvService.get_allowed_guilds(), | checks=[Check.check_admin_roles()], | 1 | 2023-11-14 02:22:13+00:00 | 4k |
juftin/hatch-pip-compile | hatch_pip_compile/plugin.py | [
{
"identifier": "HatchPipCompileError",
"path": "hatch_pip_compile/exceptions.py",
"snippet": "class HatchPipCompileError(Exception):\n \"\"\"\n Base exception for hatch-pip-compile\n \"\"\""
},
{
"identifier": "PipInstaller",
"path": "hatch_pip_compile/installer.py",
"snippet": "class PipInstaller(PluginInstaller):\n \"\"\"\n Plugin Installer for `pip`\n \"\"\"\n\n def install_dependencies(self) -> None:\n \"\"\"\n Install the dependencies with `pip`\n \"\"\"\n with self.environment.safe_activation():\n if not self.environment.piptools_lock_file.exists():\n return\n extra_args = self.environment.config.get(\"pip-compile-install-args\", [])\n args = [*extra_args, \"--requirement\", str(self.environment.piptools_lock_file)]\n install_command = self.environment.construct_pip_install_command(args=args)\n self.environment.plugin_check_command(install_command)"
},
{
"identifier": "PipSyncInstaller",
"path": "hatch_pip_compile/installer.py",
"snippet": "class PipSyncInstaller(PluginInstaller):\n \"\"\"\n Plugin Installer for `pip-sync`\n \"\"\"\n\n def install_dependencies(self) -> None:\n \"\"\"\n Install the dependencies with `pip-sync`\n\n In the event that there are no dependencies, pip-sync will\n uninstall everything in the environment before deleting the\n lockfile.\n \"\"\"\n self.environment.install_pip_tools()\n cmd = [\n self.environment.virtual_env.python_info.executable,\n \"-m\",\n \"piptools\",\n \"sync\",\n \"--verbose\"\n if self.environment.config.get(\"pip-compile-verbose\", None) is True\n else \"--quiet\",\n \"--python-executable\",\n str(self.environment.virtual_env.python_info.executable),\n ]\n if not self.environment.dependencies:\n self.environment.piptools_lock_file.write_text(\"\")\n extra_args = self.environment.config.get(\"pip-compile-install-args\", [])\n cmd.extend(extra_args)\n cmd.append(str(self.environment.piptools_lock_file))\n self.environment.plugin_check_command(cmd)\n if not self.environment.dependencies:\n self.environment.piptools_lock_file.unlink()\n\n def _full_install(self) -> None:\n \"\"\"\n Run the full install process\n\n 1) Run pip-compile (if necessary)\n 2) Run pip-sync\n 3) (re)install project\n \"\"\"\n with self.environment.safe_activation():\n self.environment.run_pip_compile()\n self.install_dependencies()\n if not self.environment.skip_install:\n if self.environment.dev_mode:\n super().install_project_dev_mode()\n else:\n super().install_project()\n\n def sync_dependencies(self):\n \"\"\"\n Sync dependencies\n \"\"\"\n self._full_install()\n\n def install_project(self):\n \"\"\"\n Install the project the first time\n\n The same implementation as `_full_install`\n due to the way `pip-sync` uninstalls our root package\n \"\"\"\n self._full_install()\n\n def install_project_dev_mode(self):\n \"\"\"\n Install the project the first time in dev mode\n\n The same implementation as `_full_install`\n due to the way `pip-sync` uninstalls our root package\n \"\"\"\n self._full_install()"
},
{
"identifier": "PluginInstaller",
"path": "hatch_pip_compile/installer.py",
"snippet": "class PluginInstaller(ABC):\n \"\"\"\n Package Installer for the plugin\n\n This abstract base class is used to define the interface for\n how the plugin should install packages and dependencies.\n \"\"\"\n\n environment: \"PipCompileEnvironment\"\n\n @abstractmethod\n def install_dependencies(self) -> None:\n \"\"\"\n Install the dependencies\n \"\"\"\n\n def sync_dependencies(self) -> None:\n \"\"\"\n Sync the dependencies - same as `install_dependencies`\n \"\"\"\n self.install_dependencies()\n\n def install_project(self) -> None:\n \"\"\"\n Install the project (`--no-deps`)\n \"\"\"\n with self.environment.safe_activation():\n self.environment.plugin_check_command(\n self.environment.construct_pip_install_command(\n args=[\"--no-deps\", str(self.environment.root)]\n )\n )\n\n def install_project_dev_mode(self) -> None:\n \"\"\"\n Install the project in editable mode (`--no-deps`)\n \"\"\"\n with self.environment.safe_activation():\n self.environment.plugin_check_command(\n self.environment.construct_pip_install_command(\n args=[\"--no-deps\", \"--editable\", str(self.environment.root)]\n )\n )"
},
{
"identifier": "PipCompileLock",
"path": "hatch_pip_compile/lock.py",
"snippet": "class PipCompileLock:\n \"\"\"\n Pip Compile Lock File Operations\n \"\"\"\n\n lock_file: pathlib.Path\n dependencies: List[str]\n project_root: pathlib.Path\n constraints_file: Optional[pathlib.Path]\n env_name: str\n project_name: str\n virtualenv: Optional[VirtualEnv] = None\n\n def process_lock(self, lockfile: pathlib.Path) -> None:\n \"\"\"\n Post process lockfile\n \"\"\"\n version = f\"{self.current_python_version.major}.{self.current_python_version.minor}\"\n raw_prefix = f\"\"\"\n #\n # This file is autogenerated by hatch-pip-compile with Python {version}\n #\n \"\"\"\n prefix = dedent(raw_prefix).strip()\n joined_dependencies = \"\\n\".join([f\"# - {dep}\" for dep in self.dependencies])\n lockfile_text = lockfile.read_text()\n cleaned_input_file = re.sub(\n rf\"-r \\S*/{self.env_name}\\.in\",\n f\"hatch.envs.{self.env_name}\",\n lockfile_text,\n )\n if self.constraints_file is not None:\n constraint_sha = hashlib.sha256(self.constraints_file.read_bytes()).hexdigest()\n constraints_path = self.constraints_file.relative_to(self.project_root).as_posix()\n constraints_line = f\"# [constraints] {constraints_path} (SHA256: {constraint_sha})\"\n joined_dependencies = \"\\n\".join([constraints_line, \"#\", joined_dependencies])\n cleaned_input_file = re.sub(\n r\"-c \\S*\",\n lambda _: f\"-c {constraints_path}\",\n cleaned_input_file,\n )\n prefix += \"\\n\" + joined_dependencies + \"\\n#\"\n new_text = prefix + \"\\n\\n\" + cleaned_input_file\n lockfile.write_text(new_text)\n\n def read_header_requirements(self) -> List[Requirement]:\n \"\"\"\n Read requirements from lock file header\n \"\"\"\n lock_file_text = self.lock_file.read_text()\n parsed_requirements = []\n for line in lock_file_text.splitlines():\n if line.startswith(\"# - \"):\n requirement = Requirement(line[4:])\n parsed_requirements.append(requirement)\n elif not line.startswith(\"#\"):\n break\n return parsed_requirements\n\n @property\n def current_python_version(self) -> Version:\n \"\"\"\n Get python version\n\n In the case of running as a hatch plugin, the `virtualenv` will be set,\n otherwise it will be None and the Python version will be read differently.\n \"\"\"\n if self.virtualenv is not None:\n return Version(self.virtualenv.environment[\"python_version\"])\n else:\n msg = \"VirtualEnv is not set\"\n raise NotImplementedError(msg)\n\n @property\n def lock_file_version(self) -> Version:\n \"\"\"\n Get lock file version\n \"\"\"\n lock_file_text = self.lock_file.read_text()\n match = re.search(\n r\"# This file is autogenerated by hatch-pip-compile with Python (.*)\", lock_file_text\n )\n if match is None:\n msg = \"Could not find lock file python version\"\n raise LockFileError(msg)\n return Version(match.group(1))\n\n def compare_python_versions(self, verbose: Optional[bool] = None) -> bool:\n \"\"\"\n Compare python versions\n\n Parameters\n ----------\n verbose : Optional[bool]\n Print warning if python versions are different, by default None\n which will print the warning. 
Used as a plugin flag.\n \"\"\"\n lock_version = self.lock_file_version\n current_version = self.current_python_version\n match = (current_version.major == lock_version.major) and (\n current_version.minor == lock_version.minor\n )\n if match is False and verbose is not False:\n logger.error(\n \"[hatch-pip-compile] Your Python version is different \"\n \"from the lock file, your results may vary.\"\n )\n return lock_version == current_version\n\n def compare_requirements(self, requirements: Iterable[Requirement]) -> bool:\n \"\"\"\n Compare requirements\n\n Parameters\n ----------\n requirements : Iterable[Requirement]\n List of requirements to compare against the lock file\n \"\"\"\n lock_requirements = self.read_header_requirements()\n return set(requirements) == set(lock_requirements)\n\n def compare_constraint_sha(self, sha: str) -> bool:\n \"\"\"\n Compare SHA to the SHA on the lockfile\n \"\"\"\n lock_file_text = self.lock_file.read_text()\n match = re.search(r\"# \\[constraints\\] \\S* \\(SHA256: (.*)\\)\", lock_file_text)\n if match is None:\n return False\n return match.group(1).strip() == sha.strip()\n\n def get_file_content_hash(self) -> str:\n \"\"\"\n Get hash of lock file\n \"\"\"\n return hashlib.sha256(self.lock_file.read_bytes()).hexdigest()\n\n def read_lock_requirements(self) -> List[Requirement]:\n \"\"\"\n Read all requirements from lock file\n \"\"\"\n if not self.dependencies:\n return []\n install_requirements = parse_requirements(\n str(self.lock_file),\n session=PipSession(),\n )\n return [ireq.req for ireq in install_requirements] # type: ignore[misc]"
}
] | import functools
import hashlib
import logging
import os
import pathlib
import shutil
import tempfile
from subprocess import CompletedProcess
from typing import Any, Dict, List, Optional, Union
from hatch.env.virtual import VirtualEnvironment
from hatch.utils.platform import Platform
from hatchling.dep.core import dependencies_in_sync
from packaging.requirements import Requirement
from hatch_pip_compile.exceptions import HatchPipCompileError
from hatch_pip_compile.installer import PipInstaller, PipSyncInstaller, PluginInstaller
from hatch_pip_compile.lock import PipCompileLock | 2,800 | """
hatch-pip-compile plugin
"""
logger = logging.getLogger(__name__)
class PipCompileEnvironment(VirtualEnvironment):
"""
Virtual Environment supported by pip-compile
"""
PLUGIN_NAME = "pip-compile"
default_env_name = "default"
def __repr__(self):
"""
Get representation of PipCompileEnvironment
"""
return f"<{self.__class__.__name__} - {self.name}>"
def __init__(self, *args, **kwargs) -> None:
"""
Initialize PipCompileEnvironment with extra attributes
"""
super().__init__(*args, **kwargs)
lock_filename_config = self.config.get("lock-filename")
if lock_filename_config is None:
if self.name == self.default_env_name:
lock_filename = "requirements.txt"
else:
lock_filename = f"requirements/requirements-{self.name}.txt"
else:
with self.metadata.context.apply_context(self.context):
lock_filename = self.metadata.context.format(lock_filename_config)
self.piptools_lock_file = self.root / lock_filename
self.piptools_lock = PipCompileLock(
lock_file=self.piptools_lock_file,
dependencies=self.dependencies,
virtualenv=self.virtual_env,
constraints_file=self.piptools_constraints_file,
project_root=self.root,
env_name=self.name,
project_name=self.metadata.name,
)
install_method = self.config.get("pip-compile-installer", "pip")
self.installer: PluginInstaller
if install_method == "pip":
self.installer = PipInstaller(environment=self)
elif install_method == "pip-sync":
self.installer = PipSyncInstaller(environment=self)
else:
msg = (
f"Invalid pip-tools install method: {install_method} - "
"must be 'pip' or 'pip-sync'"
)
| """
hatch-pip-compile plugin
"""
logger = logging.getLogger(__name__)
class PipCompileEnvironment(VirtualEnvironment):
"""
Virtual Environment supported by pip-compile
"""
PLUGIN_NAME = "pip-compile"
default_env_name = "default"
def __repr__(self):
"""
Get representation of PipCompileEnvironment
"""
return f"<{self.__class__.__name__} - {self.name}>"
def __init__(self, *args, **kwargs) -> None:
"""
Initialize PipCompileEnvironment with extra attributes
"""
super().__init__(*args, **kwargs)
lock_filename_config = self.config.get("lock-filename")
if lock_filename_config is None:
if self.name == self.default_env_name:
lock_filename = "requirements.txt"
else:
lock_filename = f"requirements/requirements-{self.name}.txt"
else:
with self.metadata.context.apply_context(self.context):
lock_filename = self.metadata.context.format(lock_filename_config)
self.piptools_lock_file = self.root / lock_filename
self.piptools_lock = PipCompileLock(
lock_file=self.piptools_lock_file,
dependencies=self.dependencies,
virtualenv=self.virtual_env,
constraints_file=self.piptools_constraints_file,
project_root=self.root,
env_name=self.name,
project_name=self.metadata.name,
)
install_method = self.config.get("pip-compile-installer", "pip")
self.installer: PluginInstaller
if install_method == "pip":
self.installer = PipInstaller(environment=self)
elif install_method == "pip-sync":
self.installer = PipSyncInstaller(environment=self)
else:
msg = (
f"Invalid pip-tools install method: {install_method} - "
"must be 'pip' or 'pip-sync'"
) | raise HatchPipCompileError(msg) | 0 | 2023-11-10 00:34:00+00:00 | 4k |
google-deepmind/pix2act | pix2act/tasks/webshop/write_tf_examples.py | [
{
"identifier": "env_utils",
"path": "pix2act/common/env_utils.py",
"snippet": "class EnvConfig:\nclass CursorState:\ndef rel_x_y_to_x_y(env_config, x_rel, y_rel):\ndef is_float(element: str) -> bool:\ndef is_valid_coordinate(env_config: EnvConfig, x_str: str, y_str: str) -> bool:\ndef is_valid(env_config, action_str):\ndef filter_predictions(env_config, predictions):\ndef process_action(\n driver,\n env_config,\n cursor_state,\n action_str,\n):\ndef _key_press(driver, key, key_hold=None):\ndef _scroll(driver, delta_y, get_scrollable_element_fn):\ndef _ptr_move(driver, cursor_state, x, y):\ndef _ptr_down(driver, cursor_state):\ndef _ptr_up(driver, cursor_state):\ndef _double_click(driver):\ndef _click(driver, cursor_state, x, y):\ndef create_web_driver(chrome_options):\ndef get_screenshot(driver, env_config, cursor_state):\ndef get_screenshot_as_png(driver, env_config, cursor_state):\ndef get_cursor_type(driver, cursor_state):"
},
{
"identifier": "render_utils",
"path": "pix2act/common/render_utils.py",
"snippet": "def get_cursor(cursor_dir: str, filename: str) -> Image.Image:\ndef image_to_png(image: Image.Image) -> bytes:\ndef png_to_image(image_png: bytes) -> Image.Image:\ndef create_empty_image_of_size(size: Tuple[int, int]) -> Image.Image:\ndef crop(screenshot: Image.Image, width: int, height: int) -> Image.Image:\ndef add_cursor(\n cursor_dir: str, screenshot: Image.Image, cursor_state\n) -> Image.Image:\ndef augment_screenshot(image: Image.Image, render_marker: bool) -> Image.Image:\ndef render_header(\n image: Image.Image, header: str, background_color: str\n) -> Image.Image:\ndef render_action_history(\n image: Image.Image,\n history: str,\n max_action_chars: int,\n):"
},
{
"identifier": "tf_utils",
"path": "pix2act/common/tf_utils.py",
"snippet": "def add_bytes_feature(\n example: tf.train.Example, key: str, value: bytes\n) -> None:\ndef add_text_feature(example: tf.train.Example, key: str, value: str) -> None:\ndef get_bytes_feature(example: tf.train.Example, key: str) -> bytes:\ndef get_text_feature(example: tf.train.Example, key: str) -> str:\ndef _get_hash(key: str) -> int:\ndef _increment_counter(item, counter):\n def __init__(\n self,\n output_dir: str,\n validation_percent: Optional[int] = 10,\n train_file_name: str = \"train.tfr\",\n val_file_name: str = \"val.tfr\",\n ):\n def _partition_index(\n self, example: tf.train.Example, unused_num_partitions: int\n ) -> int:\n def expand(self, pcoll):\nclass SplitAndWriteTFRecords(beam.PTransform):"
},
{
"identifier": "demo_utils",
"path": "pix2act/tasks/webshop/demo_utils.py",
"snippet": "ACTION_BUTTONS = frozenset([\n \"back to search\",\n \"next >\",\n \"< prev\",\n \"description\",\n \"features\",\n \"reviews\",\n \"attributes\",\n \"buy now\",\n])\n_EPISODE_END_TEXT = \"Thank you for shopping with us!\"\nSPLITS = {\"train\": (1500, 12087), \"val\": (500, 1500), \"test\": (0, 500)}\ndef maybe_prepend_instruction(\n instruction_text: str, prepend_instruction: int\n) -> str:\ndef format_action(action: str) -> str:\ndef get_action_history(\n prev_examples,\n num_previous_actions,\n action_key=\"parse\") -> str:\ndef process_goal(state, human_goals):\ndef get_split(goal_idx):\ndef read_goals_file(human_goals_file: str):\ndef read_demos_file(demo_file: str):\ndef get_reward(driver) -> float:\ndef is_episode_done(driver):\ndef get_instruction_text(driver) -> str:\ndef x_y_to_rel_x_y(x: int, y: int) -> Tuple[int, int]:\ndef get_click_coordinates(driver, element) -> Tuple[int, int]:\ndef _normalize_str(x):\ndef get_element(driver, arg, arg_translated):\ndef is_coordinate_not_in_full_view(click_y: int) -> bool:\ndef coordinate_needs_down_scroll(click_y: int) -> bool:\ndef coordinate_needs_up_scroll(click_y: int) -> bool:\ndef is_scrollable(driver, scroll_y: int) -> bool:\ndef convert_action(\n driver, action, action_translated, word_input_to_search\n) -> Iterable[Tuple[str, str]]:"
},
{
"identifier": "webshop_env",
"path": "pix2act/tasks/webshop/webshop_env.py",
"snippet": "WIDTH = 800\nHEIGHT = 600\nNUM_X_BUCKETS = 100\nNUM_Y_BUCKETS = 100\nCENTER_BUCKETS = False\nSCROLLABLE_ELEMENT_FN = \"window\"\ndef get_env_config(cursor_dir):"
}
] | import json
import os
import typing
import tensorflow as tf
from typing import Any, Dict, List
from absl import app
from absl import flags
from pix2act.common import env_utils
from pix2act.common import render_utils
from pix2act.common import tf_utils
from pix2act.tasks.webshop import demo_utils
from pix2act.tasks.webshop import webshop_env
from selenium import webdriver
from selenium.common import exceptions | 2,092 |
r"""Converts demonstrations to tf examples for training, validation, and test.
# pylint:disable=long-too-long
This requires that the Webshop server is running locally. See the official repo
for setup instructions: https://github.com/princeton-nlp/WebShop
Follows split and preprocessing here from the get_data method here:
https://github.com/princeton-nlp/WebShop/blob/master/baseline_models/train_choice_il.py
# pylint:enable=long-too-long
"""
FLAGS = flags.FLAGS
flags.DEFINE_string(
"webshop_url",
"http://localhost:3000/",
"Webshop server URL.",
)
flags.DEFINE_string(
"demo_file",
"",
"File containing high-level demonstrations.",
)
flags.DEFINE_string(
"human_goals_file",
"",
"Human goals file which dictates train/dev/test split.",
)
flags.DEFINE_string(
"processed_dir",
"",
"Processed dir name.",
)
flags.DEFINE_float(
"reward_threshold",
0.1,
"Demonstrations below this threshold will be discarded.",
)
flags.DEFINE_bool(
"do_word_input_search",
True,
"Use word level input for search.",
)
flags.DEFINE_bool(
"skip_test",
True,
"Skips test split if true.",
)
flags.DEFINE_string(
"cursor_dir",
"gs://pix2act-data/cursors/",
"Directory with cursor files.",
)
flags.DEFINE_bool(
"render_action_history",
False,
"Renders action history on the screenshot if true.",
)
flags.DEFINE_integer(
"num_prepend_actions",
5,
"Prepends these many previous actions to parse before current actions.",
)
flags.DEFINE_integer(
"max_action_chars",
200,
(
"Max num of chars which can be rendered on the action section of the"
" input."
),
)
def process_data(driver):
"""Process and split data according to the official WebShop repo."""
env_config = webshop_env.get_env_config(FLAGS.cursor_dir)
demos = demo_utils.read_demos_file(FLAGS.demo_file)
human_goals = demo_utils.read_goals_file(FLAGS.human_goals_file)
split_info = {}
for split in demo_utils.SPLITS.keys():
split_info[split] = {"num_processed": 0, "rewards": {}}
for demo_idx, demo in enumerate(demos):
demo_examples = []
_, goal_idx = demo_utils.process_goal(demo["states"][0], human_goals)
split = demo_utils.get_split(goal_idx)
if FLAGS.skip_test and split == "test":
continue
print("Processing %d out of %d" % (demo_idx, len(demos)))
driver.get(FLAGS.webshop_url + "fixed_%d" % goal_idx)
instruction_text = demo_utils.get_instruction_text(driver)
cursor_state = env_utils.CursorState()
for i, (demo_action, demo_action_translate) in enumerate(
zip(demo["actions"], demo["actions_translate"])
):
for low_level_action, _ in demo_utils.convert_action(
driver, demo_action, demo_action_translate, FLAGS.do_word_input_search
):
parse = low_level_action
history = demo_utils.get_action_history(
demo_examples,
FLAGS.num_prepend_actions,
)
current_frame = env_utils.get_screenshot(
driver, env_config, cursor_state
)
| # Copyright 2023 The pix2act Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Converts demonstrations to tf examples for training, validation, and test.
# pylint:disable=long-too-long
This requires that the Webshop server is running locally. See the official repo
for setup instructions: https://github.com/princeton-nlp/WebShop
Follows split and preprocessing here from the get_data method here:
https://github.com/princeton-nlp/WebShop/blob/master/baseline_models/train_choice_il.py
# pylint:enable=long-too-long
"""
FLAGS = flags.FLAGS
flags.DEFINE_string(
"webshop_url",
"http://localhost:3000/",
"Webshop server URL.",
)
flags.DEFINE_string(
"demo_file",
"",
"File containing high-level demonstrations.",
)
flags.DEFINE_string(
"human_goals_file",
"",
"Human goals file which dictates train/dev/test split.",
)
flags.DEFINE_string(
"processed_dir",
"",
"Processed dir name.",
)
flags.DEFINE_float(
"reward_threshold",
0.1,
"Demonstrations below this threshold will be discarded.",
)
flags.DEFINE_bool(
"do_word_input_search",
True,
"Use word level input for search.",
)
flags.DEFINE_bool(
"skip_test",
True,
"Skips test split if true.",
)
flags.DEFINE_string(
"cursor_dir",
"gs://pix2act-data/cursors/",
"Directory with cursor files.",
)
flags.DEFINE_bool(
"render_action_history",
False,
"Renders action history on the screenshot if true.",
)
flags.DEFINE_integer(
"num_prepend_actions",
5,
"Prepends these many previous actions to parse before current actions.",
)
flags.DEFINE_integer(
"max_action_chars",
200,
(
"Max num of chars which can be rendered on the action section of the"
" input."
),
)
def process_data(driver):
"""Process and split data according to the official WebShop repo."""
env_config = webshop_env.get_env_config(FLAGS.cursor_dir)
demos = demo_utils.read_demos_file(FLAGS.demo_file)
human_goals = demo_utils.read_goals_file(FLAGS.human_goals_file)
split_info = {}
for split in demo_utils.SPLITS.keys():
split_info[split] = {"num_processed": 0, "rewards": {}}
for demo_idx, demo in enumerate(demos):
demo_examples = []
_, goal_idx = demo_utils.process_goal(demo["states"][0], human_goals)
split = demo_utils.get_split(goal_idx)
if FLAGS.skip_test and split == "test":
continue
print("Processing %d out of %d" % (demo_idx, len(demos)))
driver.get(FLAGS.webshop_url + "fixed_%d" % goal_idx)
instruction_text = demo_utils.get_instruction_text(driver)
cursor_state = env_utils.CursorState()
for i, (demo_action, demo_action_translate) in enumerate(
zip(demo["actions"], demo["actions_translate"])
):
for low_level_action, _ in demo_utils.convert_action(
driver, demo_action, demo_action_translate, FLAGS.do_word_input_search
):
parse = low_level_action
history = demo_utils.get_action_history(
demo_examples,
FLAGS.num_prepend_actions,
)
current_frame = env_utils.get_screenshot(
driver, env_config, cursor_state
) | current_frame = render_utils.render_header( | 1 | 2023-11-13 22:50:55+00:00 | 4k |
zhang-tao-whu/DVIS_Plus | mask2former_video/modeling/transformer_decoder/video_mask2former_transformer_decoder.py | [
{
"identifier": "TRANSFORMER_DECODER_REGISTRY",
"path": "mask2former/modeling/transformer_decoder/maskformer_transformer_decoder.py",
"snippet": "TRANSFORMER_DECODER_REGISTRY = Registry(\"TRANSFORMER_MODULE\")"
},
{
"identifier": "PositionEmbeddingSine3D",
"path": "mask2former_video/modeling/transformer_decoder/position_encoding.py",
"snippet": "class PositionEmbeddingSine3D(nn.Module):\n \"\"\"\n This is a more standard version of the position embedding, very similar to the one\n used by the Attention is all you need paper, generalized to work on images.\n \"\"\"\n\n def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None):\n super().__init__()\n self.num_pos_feats = num_pos_feats\n self.temperature = temperature\n self.normalize = normalize\n if scale is not None and normalize is False:\n raise ValueError(\"normalize should be True if scale is passed\")\n if scale is None:\n scale = 2 * math.pi\n self.scale = scale\n\n def forward(self, x, mask=None):\n # b, t, c, h, w\n assert x.dim() == 5, f\"{x.shape} should be a 5-dimensional Tensor, got {x.dim()}-dimensional Tensor instead\"\n if mask is None:\n mask = torch.zeros((x.size(0), x.size(1), x.size(3), x.size(4)), device=x.device, dtype=torch.bool)\n not_mask = ~mask\n z_embed = not_mask.cumsum(1, dtype=torch.float32)\n y_embed = not_mask.cumsum(2, dtype=torch.float32)\n x_embed = not_mask.cumsum(3, dtype=torch.float32)\n if self.normalize:\n eps = 1e-6\n z_embed = z_embed / (z_embed[:, -1:, :, :] + eps) * self.scale\n y_embed = y_embed / (y_embed[:, :, -1:, :] + eps) * self.scale\n x_embed = x_embed / (x_embed[:, :, :, -1:] + eps) * self.scale\n\n dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)\n dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)\n\n dim_t_z = torch.arange((self.num_pos_feats * 2), dtype=torch.float32, device=x.device)\n dim_t_z = self.temperature ** (2 * (dim_t_z // 2) / (self.num_pos_feats * 2))\n\n pos_x = x_embed[:, :, :, :, None] / dim_t\n pos_y = y_embed[:, :, :, :, None] / dim_t\n pos_z = z_embed[:, :, :, :, None] / dim_t_z\n pos_x = torch.stack((pos_x[:, :, :, :, 0::2].sin(), pos_x[:, :, :, :, 1::2].cos()), dim=5).flatten(4)\n pos_y = torch.stack((pos_y[:, :, :, :, 0::2].sin(), pos_y[:, :, :, :, 1::2].cos()), dim=5).flatten(4)\n pos_z = torch.stack((pos_z[:, :, :, :, 0::2].sin(), pos_z[:, :, :, :, 1::2].cos()), dim=5).flatten(4)\n pos = (torch.cat((pos_y, pos_x), dim=4) + pos_z).permute(0, 1, 4, 2, 3) # b, t, c, h, w\n return pos"
}
] | import logging
import fvcore.nn.weight_init as weight_init
import torch
from typing import Optional
from torch import nn, Tensor
from torch.nn import functional as F
from detectron2.config import configurable
from detectron2.layers import Conv2d
from mask2former.modeling.transformer_decoder.maskformer_transformer_decoder import TRANSFORMER_DECODER_REGISTRY
from .position_encoding import PositionEmbeddingSine3D | 2,505 |
self._reset_parameters()
def _reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(self, tgt, memory,
memory_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt, query_pos),
key=self.with_pos_embed(memory, pos),
value=memory, attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask)[0]
tgt = tgt + self.dropout(tgt2)
tgt = self.norm(tgt)
return tgt
def forward_pre(self, tgt, memory,
memory_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
tgt2 = self.norm(tgt)
tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt2, query_pos),
key=self.with_pos_embed(memory, pos),
value=memory, attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask)[0]
tgt = tgt + self.dropout(tgt2)
return tgt
def forward(self, tgt, memory,
memory_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
if self.normalize_before:
return self.forward_pre(tgt, memory, memory_mask,
memory_key_padding_mask, pos, query_pos)
return self.forward_post(tgt, memory, memory_mask,
memory_key_padding_mask, pos, query_pos)
class FFNLayer(nn.Module):
def __init__(self, d_model, dim_feedforward=2048, dropout=0.0,
activation="relu", normalize_before=False):
super().__init__()
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm = nn.LayerNorm(d_model)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
self._reset_parameters()
def _reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(self, tgt):
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
tgt = tgt + self.dropout(tgt2)
tgt = self.norm(tgt)
return tgt
def forward_pre(self, tgt):
tgt2 = self.norm(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
tgt = tgt + self.dropout(tgt2)
return tgt
def forward(self, tgt):
if self.normalize_before:
return self.forward_pre(tgt)
return self.forward_post(tgt)
def _get_activation_fn(activation):
"""Return an activation function given a string"""
if activation == "relu":
return F.relu
if activation == "gelu":
return F.gelu
if activation == "glu":
return F.glu
raise RuntimeError(F"activation should be relu/gelu/glu, not {activation}.")
class MLP(nn.Module):
""" Very simple multi-layer perceptron (also called FFN)"""
def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
super().__init__()
self.num_layers = num_layers
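# build num_layers Linear layers sized input_dim -> hidden_dim -> ... -> hidden_dim -> output_dim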
h = [hidden_dim] * (num_layers - 1)
self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))
def forward(self, x):
for i, layer in enumerate(self.layers):
x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
return x
| # Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from: https://github.com/facebookresearch/detr/blob/master/models/detr.py
class SelfAttentionLayer(nn.Module):
def __init__(self, d_model, nhead, dropout=0.0,
activation="relu", normalize_before=False):
super().__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
self.norm = nn.LayerNorm(d_model)
self.dropout = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
self._reset_parameters()
def _reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(self, tgt,
tgt_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
q = k = self.with_pos_embed(tgt, query_pos)
tgt2 = self.self_attn(q, k, value=tgt, attn_mask=tgt_mask,
key_padding_mask=tgt_key_padding_mask)[0]
tgt = tgt + self.dropout(tgt2)
tgt = self.norm(tgt)
return tgt
def forward_pre(self, tgt,
tgt_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
tgt2 = self.norm(tgt)
q = k = self.with_pos_embed(tgt2, query_pos)
tgt2 = self.self_attn(q, k, value=tgt2, attn_mask=tgt_mask,
key_padding_mask=tgt_key_padding_mask)[0]
tgt = tgt + self.dropout(tgt2)
return tgt
def forward(self, tgt,
tgt_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
if self.normalize_before:
return self.forward_pre(tgt, tgt_mask,
tgt_key_padding_mask, query_pos)
return self.forward_post(tgt, tgt_mask,
tgt_key_padding_mask, query_pos)
class CrossAttentionLayer(nn.Module):
def __init__(self, d_model, nhead, dropout=0.0,
activation="relu", normalize_before=False):
super().__init__()
self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
self.norm = nn.LayerNorm(d_model)
self.dropout = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
self._reset_parameters()
def _reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(self, tgt, memory,
memory_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt, query_pos),
key=self.with_pos_embed(memory, pos),
value=memory, attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask)[0]
tgt = tgt + self.dropout(tgt2)
tgt = self.norm(tgt)
return tgt
def forward_pre(self, tgt, memory,
memory_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
tgt2 = self.norm(tgt)
tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt2, query_pos),
key=self.with_pos_embed(memory, pos),
value=memory, attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask)[0]
tgt = tgt + self.dropout(tgt2)
return tgt
def forward(self, tgt, memory,
memory_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
if self.normalize_before:
return self.forward_pre(tgt, memory, memory_mask,
memory_key_padding_mask, pos, query_pos)
return self.forward_post(tgt, memory, memory_mask,
memory_key_padding_mask, pos, query_pos)
class FFNLayer(nn.Module):
def __init__(self, d_model, dim_feedforward=2048, dropout=0.0,
activation="relu", normalize_before=False):
super().__init__()
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm = nn.LayerNorm(d_model)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
self._reset_parameters()
def _reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(self, tgt):
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
tgt = tgt + self.dropout(tgt2)
tgt = self.norm(tgt)
return tgt
def forward_pre(self, tgt):
tgt2 = self.norm(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
tgt = tgt + self.dropout(tgt2)
return tgt
def forward(self, tgt):
if self.normalize_before:
return self.forward_pre(tgt)
return self.forward_post(tgt)
def _get_activation_fn(activation):
"""Return an activation function given a string"""
if activation == "relu":
return F.relu
if activation == "gelu":
return F.gelu
if activation == "glu":
return F.glu
raise RuntimeError(F"activation should be relu/gelu/glu, not {activation}.")
class MLP(nn.Module):
""" Very simple multi-layer perceptron (also called FFN)"""
def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
super().__init__()
self.num_layers = num_layers
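# build num_layers Linear layers sized input_dim -> hidden_dim -> ... -> hidden_dim -> output_dim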
h = [hidden_dim] * (num_layers - 1)
self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))
def forward(self, x):
for i, layer in enumerate(self.layers):
x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
return x
| @TRANSFORMER_DECODER_REGISTRY.register() | 0 | 2023-11-14 10:55:11+00:00 | 4k |
teamreboott/data-modori | data_modori/config/config.py | [
{
"identifier": "OPERATORS",
"path": "data_modori/ops/base_op.py",
"snippet": "OPERATORS = Registry('Operators')"
},
{
"identifier": "setup_logger",
"path": "data_modori/utils/logger_utils.py",
"snippet": "def setup_logger(save_dir, distributed_rank=0, filename='log.txt', mode='o', redirect=True):\n \"\"\"\n Setup logger for training and testing.\n\n :param save_dir: location to save log file\n :param distributed_rank: device rank when multi-gpu environment\n :param filename: log file name to save\n :param mode: log file write mode, `append` or `override`. default is `o`.\n :param redirect: whether to redirect system output\n :return: logger instance.\n \"\"\"\n global LOGGER_SETUP\n\n if LOGGER_SETUP:\n return\n\n loguru_format = (\n '<green>{time:YYYY-MM-DD HH:mm:ss}</green> | '\n '<level>{level: <8}</level> | '\n '<cyan>{name}</cyan>:<cyan>{line}</cyan> - <level>{message}</level>')\n\n logger.remove()\n save_file = os.path.join(save_dir, filename)\n if mode == 'o' and os.path.exists(save_file):\n os.remove(save_file)\n\n # only keep logger in rank0 process\n if distributed_rank == 0:\n logger.add(\n sys.stderr,\n format=loguru_format,\n level='INFO',\n enqueue=True,\n )\n logger.add(save_file)\n\n # redirect stdout/stderr to loguru\n if redirect:\n redirect_sys_output('INFO')\n LOGGER_SETUP = True"
}
] | import os
import shutil
import time
import tempfile
import pprint
from argparse import ArgumentError
from typing import Dict, List, Tuple, Union
from jsonargparse import (ActionConfigFile, ArgumentParser, dict_to_namespace,
namespace_to_dict)
from jsonargparse.typing import NonNegativeInt, PositiveInt
from loguru import logger
from data_modori.ops.base_op import OPERATORS
from data_modori.utils.logger_utils import setup_logger
from datasets import disable_caching
from datasets import config
from tabulate import tabulate | 2,843 | help='Number of samples extracted by tracer to show the dataset '
'difference before and after an op. Only available when '
'open_tracer is true.')
parser.add_argument(
'--op_fusion',
type=bool,
default=False,
help='Whether to fuse operators that share the same intermediate '
'variables automatically. Op fusion might reduce the memory '
'requirements slightly but speed up the whole process.')
parser.add_argument(
'--process',
type=List[Dict],
help='List of several operators with their arguments, these ops will '
'be applied to dataset in order')
parser.add_argument(
'--save_stats_in_one_file',
type=bool,
default=False,
help='Whether to save all stats to only one file. Only used in '
'Analysis.')
parser.add_argument(
'--ray_address',
type=str,
default='auto',
help='The address of the Ray cluster.'
)
# add all parameters of the registered ops class to the parser,
# and these op parameters can be modified through the command line,
ops_sorted_by_types = sort_op_by_types_and_names(OPERATORS.modules.items())
_collect_config_info_from_class_docs(ops_sorted_by_types, parser)
try:
cfg = parser.parse_args(args=args)
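# collect the op names (text before the first '.') and the full dotted option keys that were passed explicitly on the command line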
option_in_commands = [
''.join(arg.split('--')[1].split('.')[0]) for arg in parser.args
if '--' in arg and 'config' not in arg
]
full_option_in_commands = list(
set([
''.join(arg.split('--')[1].split('=')[0])
for arg in parser.args if '--' in arg and 'config' not in arg
]))
if cfg.process is None:
cfg.process = []
# check and update every op params in `cfg.process`
# e.g.
# `python demo.py --config demo.yaml
# --language_id_score_filter.lang en`
for i, op_in_process in enumerate(cfg.process):
op_in_process_name = list(op_in_process.keys())[0]
temp_cfg = cfg
if op_in_process_name not in option_in_commands:
# update op params to temp cfg if set
if op_in_process[op_in_process_name]:
temp_cfg = parser.merge_config(
dict_to_namespace(op_in_process), cfg)
else:
# args in the command line override the ones in `cfg.process`
for full_option_in_command in full_option_in_commands:
key = full_option_in_command.split('.')[1]
if op_in_process[
op_in_process_name] and key in op_in_process[
op_in_process_name].keys():
op_in_process[op_in_process_name].pop(key)
if op_in_process[op_in_process_name]:
temp_cfg = parser.merge_config(
dict_to_namespace(op_in_process), temp_cfg)
# update op params of cfg.process
internal_op_para = temp_cfg.get(op_in_process_name)
cfg.process[i] = {
op_in_process_name:
None if internal_op_para is None else
namespace_to_dict(internal_op_para)
}
cfg = init_setup_from_cfg(cfg)
# copy the config file into the work directory
config_backup(cfg)
# show the final config tables before the process started
display_config(cfg)
return cfg
except ArgumentError:
logger.error('Config initialization failed')
def init_setup_from_cfg(cfg):
"""
Do some extra setup tasks after parsing config file or command line.
1. create working directory and a log directory
2. update cache directory
3. update checkpoint and `temp_dir` of tempfile
:param cfg: an original cfg
:return: an updated cfg
"""
export_path = cfg.export_path
cfg.work_dir = os.path.dirname(export_path)
log_dir = os.path.join(cfg.work_dir, 'log')
if not os.path.exists(log_dir):
os.makedirs(log_dir, exist_ok=True)
timestamp = time.strftime('%Y%m%d%H%M%S', time.localtime(time.time()))
cfg.timestamp = timestamp
logfile_name = timestamp + '.txt'
|
def init_configs(args=None):
"""
initialize the jsonargparse parser and parse configs from one of:
1. POSIX-style command-line args;
2. config files in yaml (json and jsonnet supersets);
3. environment variables
4. hard-coded defaults
:param args: list of params, e.g., ['--config', 'cfg.yaml'], default None.
:return: a global cfg object used by the Executor or Analyser
"""
parser = ArgumentParser(default_env=True, default_config_files=None)
parser.add_argument(
'--config',
action=ActionConfigFile,
help='Path to a configuration file.',
required=True)
# basic global paras with extended type hints
# e.g., files can be mode include flags
# "fr": "path to a file that exists and is readable")
# "fc": "path to a file that can be created if it does not exist")
# "dw": "path to a directory that exists and is writeable")
# "dc": "path to a directory that can be created if it does not exist")
# "drw": "path to a directory that exists and is readable and writeable")
parser.add_argument(
'--project_name',
type=str,
default='hello_world',
help='Name of your data process project.')
parser.add_argument(
'--executor_type',
type=str,
default='default',
choices=['default', 'ray'],
help='Type of executor, supports "default" or "ray" for now.'
)
parser.add_argument(
'--dataset_path',
type=str,
help='Path to datasets with optional weights(0.0-1.0), 1.0 as '
'default. Accepted format:<w1> dataset1-path <w2> dataset2-path '
'<w3> dataset3-path ...')
parser.add_argument(
'--export_path',
type=str,
default='./outputs/hello_world.jsonl',
help='Path to export and save the output processed dataset. The '
'directory to store the processed dataset will be the work '
'directory of this process.')
parser.add_argument(
'--export_shard_size',
type=NonNegativeInt,
default=0,
help='Shard size of exported dataset in bytes. By default, it\'s 0, '
'which means export the whole dataset into only one file. If '
'it\'s set to a positive number, the exported dataset will be split '
'into several sub-dataset shards, and the max size of each shard '
'won\'t be larger than the export_shard_size')
parser.add_argument(
'--export_in_parallel',
type=bool,
default=False,
help='Whether to export the result dataset in parallel to a single '
'file, which usually takes less time. It only works when '
'export_shard_size is 0, and its default number of processes is '
'the same as the argument np. **Notice**: If it\'s True, '
'sometimes exporting in parallel might require much more time '
'due to the IO blocking, especially for very large datasets. '
'When this happens, False is a better choice, although it takes '
'more time.')
parser.add_argument(
'--np',
type=PositiveInt,
default=4,
help='Number of processes to process dataset.')
parser.add_argument(
'--text_keys',
type=Union[str, List[str]],
default='text',
help='Key name of the field containing the sample texts to be processed, e.g., '
'`text`, `text.instruction`, `text.output`, ... Note: currently, '
'we support specify only ONE key for each op, for cases '
'requiring multiple keys, users can specify the op multiple '
'times. We will only use the first key of `text_keys` when you '
'set multiple keys.')
parser.add_argument(
'--suffixes',
type=Union[str, List[str], Tuple[str]],
default=[],
help='Suffixes of files that will be found and loaded. If not set, we '
'will find all suffix files, and select a suitable formatter '
'with the most files as default.')
parser.add_argument(
'--use_cache',
type=bool,
default=True,
help='Whether to use the cache management of huggingface datasets. It '
'might take up lots of disk space when using cache')
parser.add_argument(
'--ds_cache_dir',
type=str,
default=None,
help='Cache dir for HuggingFace datasets. By default it\'s the same '
'as the environment variable `HF_DATASETS_CACHE`, whose default '
'value is usually "~/.cache/huggingface/datasets". If this '
'argument is set to a valid path by users, it will override the '
'default cache dir.')
parser.add_argument(
'--cache_compress',
type=str,
default=None,
help='The compression method of the cache file, which can be '
'specified in ["gzip", "zstd", "lz4"]. If this parameter is '
'None, the cache file will not be compressed.')
parser.add_argument(
'--use_checkpoint',
type=bool,
default=False,
help='Whether to use the checkpoint management to save the latest '
'version of dataset to work dir when processing. Rerunning the same '
'config will reload the checkpoint and skip ops before it. Cache '
'will be disabled when it is true. If args of ops before the '
'checkpoint are changed, all ops will be rerun from the '
'beginning.')
parser.add_argument(
'--temp_dir',
type=str,
default=None,
help='Path to the temp directory to store intermediate caches when '
'cache is disabled. By default it\'s None, so the temp dir will '
'be specified by the system. NOTICE: you should be cautious when '
'setting this argument because it might cause unexpected program '
'behaviors when this path is set to an unsafe directory.')
parser.add_argument(
'--open_tracer',
type=bool,
default=False,
help='Whether to open the tracer to trace samples changed during '
'processing. It might take more time when opening the tracer.')
parser.add_argument(
'--op_list_to_trace',
type=List[str],
default=[],
help='Which ops will be traced by tracer. If it\'s empty, all ops in '
'cfg.process will be traced. Only available when open_tracer is '
'true.')
parser.add_argument(
'--trace_num',
type=int,
default=10,
help='Number of samples extracted by tracer to show the dataset '
'difference before and after an op. Only available when '
'open_tracer is true.')
parser.add_argument(
'--op_fusion',
type=bool,
default=False,
help='Whether to fuse operators that share the same intermediate '
'variables automatically. Op fusion might reduce the memory '
'requirements slightly but speed up the whole process.')
parser.add_argument(
'--process',
type=List[Dict],
help='List of several operators with their arguments, these ops will '
'be applied to dataset in order')
parser.add_argument(
'--save_stats_in_one_file',
type=bool,
default=False,
help='Whether to save all stats to only one file. Only used in '
'Analysis.')
parser.add_argument(
'--ray_address',
type=str,
default='auto',
help='The address of the Ray cluster.'
)
# add all parameters of the registered ops class to the parser,
# and these op parameters can be modified through the command line,
ops_sorted_by_types = sort_op_by_types_and_names(OPERATORS.modules.items())
_collect_config_info_from_class_docs(ops_sorted_by_types, parser)
try:
cfg = parser.parse_args(args=args)
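# collect the op names (text before the first '.') and the full dotted option keys that were passed explicitly on the command line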
option_in_commands = [
''.join(arg.split('--')[1].split('.')[0]) for arg in parser.args
if '--' in arg and 'config' not in arg
]
full_option_in_commands = list(
set([
''.join(arg.split('--')[1].split('=')[0])
for arg in parser.args if '--' in arg and 'config' not in arg
]))
if cfg.process is None:
cfg.process = []
# check and update every op params in `cfg.process`
# e.g.
# `python demo.py --config demo.yaml
# --language_id_score_filter.lang en`
for i, op_in_process in enumerate(cfg.process):
op_in_process_name = list(op_in_process.keys())[0]
temp_cfg = cfg
if op_in_process_name not in option_in_commands:
# update op params to temp cfg if set
if op_in_process[op_in_process_name]:
temp_cfg = parser.merge_config(
dict_to_namespace(op_in_process), cfg)
else:
# args in the command line override the ones in `cfg.process`
for full_option_in_command in full_option_in_commands:
key = full_option_in_command.split('.')[1]
if op_in_process[
op_in_process_name] and key in op_in_process[
op_in_process_name].keys():
op_in_process[op_in_process_name].pop(key)
if op_in_process[op_in_process_name]:
temp_cfg = parser.merge_config(
dict_to_namespace(op_in_process), temp_cfg)
# update op params of cfg.process
internal_op_para = temp_cfg.get(op_in_process_name)
cfg.process[i] = {
op_in_process_name:
None if internal_op_para is None else
namespace_to_dict(internal_op_para)
}
cfg = init_setup_from_cfg(cfg)
# copy the config file into the work directory
config_backup(cfg)
# show the final config tables before the process started
display_config(cfg)
return cfg
except ArgumentError:
logger.error('Config initialization failed')
def init_setup_from_cfg(cfg):
"""
Do some extra setup tasks after parsing config file or command line.
1. create working directory and a log directory
2. update cache directory
3. update checkpoint and `temp_dir` of tempfile
:param cfg: an original cfg
:return: an updated cfg
"""
export_path = cfg.export_path
cfg.work_dir = os.path.dirname(export_path)
log_dir = os.path.join(cfg.work_dir, 'log')
if not os.path.exists(log_dir):
os.makedirs(log_dir, exist_ok=True)
timestamp = time.strftime('%Y%m%d%H%M%S', time.localtime(time.time()))
cfg.timestamp = timestamp
logfile_name = timestamp + '.txt' | setup_logger(save_dir=log_dir, filename=logfile_name, redirect=cfg.executor_type=='default') | 1 | 2023-11-13 04:52:55+00:00 | 4k |
52phm/pylmkit | pylmkit/core/base.py | [
{
"identifier": "read_yaml",
"path": "pylmkit/utils/data_utils.py",
"snippet": "def read_yaml(filepath):\n try:\n with open(filepath, encoding=\"utf-8\") as fp:\n result = yaml.load(fp, Loader=SafeLoader)\n except Exception as e:\n raise Exception(e)\n return result"
},
{
"identifier": "read_json",
"path": "pylmkit/utils/data_utils.py",
"snippet": "def read_json(filepath, mode='r', encoding='utf-8'):\n with open(filepath, mode, encoding=encoding) as fp:\n data = json.load(fp)\n return data"
},
{
"identifier": "write_yaml",
"path": "pylmkit/utils/data_utils.py",
"snippet": "def write_yaml(data, filepath, mode=\"w\", encoding='utf-8'):\n try:\n with open(filepath, mode=mode, encoding=encoding) as f:\n yaml.dump(data=data, stream=f, allow_unicode=True)\n except Exception as e:\n raise Exception(e)"
},
{
"identifier": "write_json",
"path": "pylmkit/utils/data_utils.py",
"snippet": "def write_json(data, filepath, mode='w', encoding='utf-8', ensure_ascii=False):\n with open(filepath, mode, encoding=encoding) as fp:\n json.dump(data, # 字典数据\n fp=fp, # open 文件\n ensure_ascii=ensure_ascii, # 确保中文无乱码\n )"
},
{
"identifier": "message_as_string",
"path": "pylmkit/utils/data_utils.py",
"snippet": "def message_as_string(memory_messages):\n messages_string = [f\"\\n{message['role']}: {message['content']}\" for message in memory_messages]\n return \"\".join(messages_string)"
},
{
"identifier": "document_as_dict",
"path": "pylmkit/utils/data_utils.py",
"snippet": "def document_as_dict(documents):\n document_dict = [{\"page_content\": doc.page_content, \"metadata\": doc.metadata,\n \"type\": doc.type} for doc in documents]\n return document_dict"
},
{
"identifier": "dict_as_document",
"path": "pylmkit/utils/data_utils.py",
"snippet": "def dict_as_document(doc_dict):\n document_dict = [Document(page_content=doc['page_content'],\n metadata=doc.get('metadata', {}),\n type=doc.get('type', 'Document')) for doc in doc_dict]\n return document_dict"
},
{
"identifier": "BaseLoader",
"path": "pylmkit/perception/directory.py",
"snippet": "FILE_LOADER_TYPE = Union[\n Type[UnstructuredFileLoader], Type[TextLoader], Type[BSHTMLLoader]\n]\ndef _is_visible(p: Path) -> bool:\n def __init__(\n self,\n path: str,\n glob: str = \"**/[!.]*\",\n suffixes: Optional[Sequence[str]] = None, # Modifying\n silent_errors: bool = False,\n load_hidden: bool = False,\n loader_cls=None,\n loader_kwargs: Union[dict, None] = None,\n recursive: bool = False,\n show_progress: bool = False,\n use_multithreading: bool = False,\n max_concurrency: int = 4,\n *,\n sample_size: int = 0,\n randomize_sample: bool = False,\n sample_seed: Union[int, None] = None,\n ):\n def load_file(\n self, item: Path, path: Path, docs: List[Document], pbar: Optional[Any]\n ) -> None:\n def load(self) -> List[Document]:\nclass DirectoryLoader(BaseLoader):"
},
{
"identifier": "text_as_document",
"path": "pylmkit/utils/data_utils.py",
"snippet": "def text_as_document(texts, metadatas=None, types=\"Document\"):\n documents = []\n if metadatas:\n if isinstance(types, str):\n for i, text in enumerate(texts):\n documents.append(Document(page_content=text, metadata=metadatas[i], type=types))\n else: # types is `list` mode\n for i, text in enumerate(texts):\n documents.append(Document(page_content=text, metadata=metadatas[i], type=types[i]))\n else:\n if isinstance(types, str):\n for i, text in enumerate(texts):\n documents.append(Document(page_content=text, type=types))\n else: # types is `list` mode\n for i, text in enumerate(texts):\n documents.append(Document(page_content=text, type=types[i]))\n return documents"
},
{
"identifier": "RecursiveCharacterTextSplitter",
"path": "pylmkit/perception/directory.py",
"snippet": "FILE_LOADER_TYPE = Union[\n Type[UnstructuredFileLoader], Type[TextLoader], Type[BSHTMLLoader]\n]\ndef _is_visible(p: Path) -> bool:\n def __init__(\n self,\n path: str,\n glob: str = \"**/[!.]*\",\n suffixes: Optional[Sequence[str]] = None, # Modifying\n silent_errors: bool = False,\n load_hidden: bool = False,\n loader_cls=None,\n loader_kwargs: Union[dict, None] = None,\n recursive: bool = False,\n show_progress: bool = False,\n use_multithreading: bool = False,\n max_concurrency: int = 4,\n *,\n sample_size: int = 0,\n randomize_sample: bool = False,\n sample_seed: Union[int, None] = None,\n ):\n def load_file(\n self, item: Path, path: Path, docs: List[Document], pbar: Optional[Any]\n ) -> None:\n def load(self) -> List[Document]:\nclass DirectoryLoader(BaseLoader):"
},
{
"identifier": "init_css",
"path": "pylmkit/core/html.py",
"snippet": ""
},
{
"identifier": "_zh",
"path": "pylmkit/core/html.py",
"snippet": ""
}
] | from abc import ABC
from pathlib import Path
from tqdm import tqdm
from pydantic import Field, BaseModel
from pylmkit.utils.data_utils import read_yaml, read_json, write_yaml, write_json
from pylmkit.utils.data_utils import message_as_string, document_as_dict, dict_as_document
from typing import Any, List, Optional, Type, Union, Sequence, Literal
from pylmkit.perception.directory import BaseLoader
from pylmkit.utils.data_utils import text_as_document
from pylmkit.perception.directory import RecursiveCharacterTextSplitter
from functools import partial
from pylmkit.core.html import init_css, init_footer, init_logo
from pylmkit.core.html import _zh, _en
import time
import pandas as pd
import streamlit as st | 2,276 |
class BaseMemory(object):
human_prefix: str = "Human"
ai_prefix: str = "AI"
system_prefix: str = "System"
def __init__(self, init_memory=None, streamlit_web=False):
self.memory_messages = []
self.streamlit_web = streamlit_web
if self.streamlit_web: # streamlit rerun page, so need cache
if "memory" not in st.session_state:
st.session_state["memory"] = self.memory_messages
if isinstance(init_memory, list):
self.memory_messages = init_memory
if self.streamlit_web:
st.session_state['memory'] = self.memory_messages
if self.streamlit_web: # streamlit rerun page, so need cache
self.memory_messages = st.session_state['memory']
def add(self, role, content, refer=''):
""" role,human ai system
"""
if role in ['user', 'User', 'USER', 'human', 'Human', 'HUMAN']:
role = self.human_prefix
elif role in ['ai', 'Ai', 'AI', 'assistant']:
role = self.ai_prefix
elif role in ['sys', 'system', 'System', 'SYS', 'SYSTEM']:
role = self.system_prefix
else:
raise Exception(f"The role `{role}` does not exist")
self.memory_messages.append(
{"role": role, "content": content, "refer": refer, "date": time.strftime('%Y-%m-%d %H:%M:%S')})
if self.streamlit_web: # streamlit rerun page, so need cache
st.session_state['memory'] = self.memory_messages
def to_csv(self, filepath, index=False, **kwargs):
data = self.memory_messages
pd.DataFrame(data).to_csv(filepath, index=index, **kwargs)
def clear(self):
self.memory_messages = []
if self.streamlit_web: # streamlit rerun page, so need cache
st.session_state['memory'] = self.memory_messages
def _get(self, mode='message'):
if mode == 'message':
return self.memory_messages
elif mode == 'string':
return message_as_string(self.memory_messages)
else:
raise Exception(f"There is no such `{mode}` mode. Support modes: message, string")
class BaseKnowledgeBase(object):
def __init__(self, init_documents=None):
self.documents = []
self.splitter_documents = []
if isinstance(init_documents, list):
self.documents = init_documents
@classmethod
def load(cls, filepath, is_return=True, return_mode="doc", extend=True):
if filepath.endswith('.json'):
data = read_json(filepath)
elif filepath.endswith('.yaml') or filepath.endswith('yml'):
data = read_yaml(filepath) # data=[{},{}]
else:
raise Exception(f"The file type is not supported")
data_dict_as_document = dict_as_document(data)
result = cls()._base(documents=data_dict_as_document, return_mode=return_mode, is_return=is_return,
extend=extend)
if is_return:
return result
@classmethod
def add(cls, texts, metadatas=None, is_return=True, return_mode="doc", extend=True, types="Document"):
data_dict_as_document = text_as_document(texts=texts, metadatas=metadatas, types=types)
result = cls()._base(documents=data_dict_as_document, return_mode=return_mode, is_return=is_return,
extend=extend)
if is_return:
return result
def split(self, splitter=None, chunk_size=500, chunk_overlap=100, return_mode='doc', **kwargs):
if splitter is None:
splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap, **kwargs)
else:
splitter = splitter
self.splitter_documents = splitter.split_documents(self.documents)
if return_mode == 'doc':
return self.splitter_documents
else:
|
class BaseMemory(object):
human_prefix: str = "Human"
ai_prefix: str = "AI"
system_prefix: str = "System"
def __init__(self, init_memory=None, streamlit_web=False):
self.memory_messages = []
self.streamlit_web = streamlit_web
if self.streamlit_web: # streamlit rerun page, so need cache
if "memory" not in st.session_state:
st.session_state["memory"] = self.memory_messages
if isinstance(init_memory, list):
self.memory_messages = init_memory
if self.streamlit_web:
st.session_state['memory'] = self.memory_messages
if self.streamlit_web: # streamlit rerun page, so need cache
self.memory_messages = st.session_state['memory']
def add(self, role, content, refer=''):
""" role,human ai system
"""
if role in ['user', 'User', 'USER', 'human', 'Human', 'HUMAN']:
role = self.human_prefix
elif role in ['ai', 'Ai', 'AI', 'assistant']:
role = self.ai_prefix
elif role in ['sys', 'system', 'System', 'SYS', 'SYSTEM']:
role = self.system_prefix
else:
raise Exception(f"The role `{role}` does not exist")
self.memory_messages.append(
{"role": role, "content": content, "refer": refer, "date": time.strftime('%Y-%m-%d %H:%M:%S')})
if self.streamlit_web: # streamlit rerun page, so need cache
st.session_state['memory'] = self.memory_messages
def to_csv(self, filepath, index=False, **kwargs):
data = self.memory_messages
pd.DataFrame(data).to_csv(filepath, index=index, **kwargs)
def clear(self):
self.memory_messages = []
if self.streamlit_web: # streamlit rerun page, so need cache
st.session_state['memory'] = self.memory_messages
def _get(self, mode='message'):
if mode == 'message':
return self.memory_messages
elif mode == 'string':
return message_as_string(self.memory_messages)
else:
raise Exception(f"There is no such `{mode}` mode. Support modes: message, string")
class BaseKnowledgeBase(object):
def __init__(self, init_documents=None):
self.documents = []
self.splitter_documents = []
if isinstance(init_documents, list):
self.documents = init_documents
@classmethod
def load(cls, filepath, is_return=True, return_mode="doc", extend=True):
if filepath.endswith('.json'):
data = read_json(filepath)
elif filepath.endswith('.yaml') or filepath.endswith('yml'):
data = read_yaml(filepath) # data=[{},{}]
else:
raise Exception(f"The file type is not supported")
data_dict_as_document = dict_as_document(data)
result = cls()._base(documents=data_dict_as_document, return_mode=return_mode, is_return=is_return,
extend=extend)
if is_return:
return result
@classmethod
def add(cls, texts, metadatas=None, is_return=True, return_mode="doc", extend=True, types="Document"):
data_dict_as_document = text_as_document(texts=texts, metadatas=metadatas, types=types)
result = cls()._base(documents=data_dict_as_document, return_mode=return_mode, is_return=is_return,
extend=extend)
if is_return:
return result
def split(self, splitter=None, chunk_size=500, chunk_overlap=100, return_mode='doc', **kwargs):
if splitter is None:
splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap, **kwargs)
else:
splitter = splitter
self.splitter_documents = splitter.split_documents(self.documents)
if return_mode == 'doc':
return self.splitter_documents
else: | return document_as_dict(self.splitter_documents) | 5 | 2023-11-18 10:31:58+00:00 | 4k |
PufferAI/pokegym | pokegym/environment.py | [
{
"identifier": "ACTIONS",
"path": "pokegym/pyboy_binding.py",
"snippet": "ACTIONS = (Down, Left, Right, Up, A, B, Start, Select)"
},
{
"identifier": "make_env",
"path": "pokegym/pyboy_binding.py",
"snippet": "def make_env(gb_path, headless=True, quiet=False, **kwargs):\n gb_path='pokemon_red.gb'\n game = PyBoy(\n gb_path,\n debugging=False,\n window_type='headless' if headless else 'SDL2',\n hide_window=quiet,\n **kwargs,\n )\n\n screen = game.botsupport_manager().screen()\n\n if not headless:\n game.set_emulation_speed(6)\n\n return game, screen"
},
{
"identifier": "open_state_file",
"path": "pokegym/pyboy_binding.py",
"snippet": "def open_state_file(path):\n '''Load state file with BytesIO so we can cache it'''\n with open(path, 'rb') as f:\n initial_state = BytesIO(f.read())\n\n return initial_state"
},
{
"identifier": "load_pyboy_state",
"path": "pokegym/pyboy_binding.py",
"snippet": "def load_pyboy_state(pyboy, state):\n '''Reset state stream and load it into PyBoy'''\n state.seek(0)\n pyboy.load_state(state)"
},
{
"identifier": "run_action_on_emulator",
"path": "pokegym/pyboy_binding.py",
"snippet": "def run_action_on_emulator(pyboy, screen, action,\n headless=True, fast_video=True, frame_skip=24):\n '''Sends actions to PyBoy'''\n press, release = action.PRESS, action.RELEASE\n pyboy.send_input(press)\n\n if headless or fast_video:\n pyboy._rendering(False)\n\n frames = []\n for i in range(frame_skip):\n if i == 8: # Release button after 8 frames\n pyboy.send_input(release)\n if not fast_video: # Save every frame\n frames.append(screen.screen_ndarray())\n if i == frame_skip - 1:\n pyboy._rendering(True)\n pyboy.tick()\n\n if fast_video: # Save only the last frame\n frames.append(screen.screen_ndarray())"
},
{
"identifier": "ram_map",
"path": "pokegym/ram_map.py",
"snippet": "HP_ADDR = [0xD16C, 0xD198, 0xD1C4, 0xD1F0, 0xD21C, 0xD248]\nMAX_HP_ADDR = [0xD18D, 0xD1B9, 0xD1E5, 0xD211, 0xD23D, 0xD269]\nPARTY_SIZE_ADDR = 0xD163\nPARTY_ADDR = [0xD164, 0xD165, 0xD166, 0xD167, 0xD168, 0xD169]\nPARTY_LEVEL_ADDR = [0xD18C, 0xD1B8, 0xD1E4, 0xD210, 0xD23C, 0xD268]\nPOKE_XP_ADDR = [0xD179, 0xD1A5, 0xD1D1, 0xD1FD, 0xD229, 0xD255]\nCAUGHT_POKE_ADDR = range(0xD2F7, 0xD309)\nSEEN_POKE_ADDR = range(0xD30A, 0xD31D)\nOPPONENT_LEVEL_ADDR = [0xD8C5, 0xD8F1, 0xD91D, 0xD949, 0xD975, 0xD9A1]\nX_POS_ADDR = 0xD362\nY_POS_ADDR = 0xD361\nMAP_N_ADDR = 0xD35E\nBADGE_1_ADDR = 0xD356\nOAK_PARCEL_ADDR = 0xD74E\nOAK_POKEDEX_ADDR = 0xD74B\nOPPONENT_LEVEL = 0xCFF3\nENEMY_POKE_COUNT = 0xD89C\nEVENT_FLAGS_START_ADDR = 0xD747\nEVENT_FLAGS_END_ADDR = 0xD761\nMUSEUM_TICKET_ADDR = 0xD754\nMONEY_ADDR_1 = 0xD347\nMONEY_ADDR_100 = 0xD348\nMONEY_ADDR_10000 = 0xD349\ndef bcd(num):\ndef bit_count(bits):\ndef read_bit(game, addr, bit) -> bool:\ndef read_uint16(game, start_addr):\ndef position(game):\ndef party(game):\ndef opponent(game):\ndef oak_parcel(game):\ndef pokedex_obtained(game):\ndef pokemon_seen(game):\ndef pokemon_caught(game):\ndef hp(game):\ndef money(game):\ndef badges(game):\ndef events(game):"
},
{
"identifier": "game_map",
"path": "pokegym/game_map.py",
"snippet": "MAP_PATH = __file__.rstrip('game_map.py') + 'map_data.json'\nMAP_DATA = json.load(open(MAP_PATH, 'r'))['regions']\nMAP_DATA = {int(e['id']): e for e in MAP_DATA}\ndef local_to_global(r, c, map_n):"
}
] | from pdb import set_trace as T
from gymnasium import Env, spaces
from pokegym.pyboy_binding import (ACTIONS, make_env, open_state_file,
load_pyboy_state, run_action_on_emulator)
from pokegym import ram_map, game_map
import numpy as np
import os | 1,604 |
def play():
'''Creates an environment and plays it'''
env = Environment(rom_path='pokemon_red.gb', state_path=None, headless=False,
disable_input=False, sound=False, sound_emulated=False, verbose=True
)
env.reset()
env.game.set_emulation_speed(1)
# Display available actions
print("Available actions:")
for idx, action in enumerate(ACTIONS):
print(f"{idx}: {action}")
# Create a mapping from WindowEvent to action index
window_event_to_action = {
'PRESS_ARROW_DOWN': 0,
'PRESS_ARROW_LEFT': 1,
'PRESS_ARROW_RIGHT': 2,
'PRESS_ARROW_UP': 3,
'PRESS_BUTTON_A': 4,
'PRESS_BUTTON_B': 5,
'PRESS_BUTTON_START': 6,
'PRESS_BUTTON_SELECT': 7,
# Add more mappings if necessary
}
while True:
# Get input from pyboy's get_input method
input_events = env.game.get_input()
env.game.tick()
env.render()
if len(input_events) == 0:
continue
for event in input_events:
event_str = str(event)
if event_str in window_event_to_action:
action_index = window_event_to_action[event_str]
observation, reward, done, _, info = env.step(
action_index, fast_video=False)
# Check for game over
if done:
print(f"{done}")
break
# Additional game logic or information display can go here
print(f"new Reward: {reward}\n")
class Base:
def __init__(self, rom_path='pokemon_red.gb',
state_path=None, headless=True, quiet=False, **kwargs):
'''Creates a PokemonRed environment'''
if state_path is None:
state_path = __file__.rstrip('environment.py') + 'has_pokedex_nballs.state'
|
def play():
'''Creates an environment and plays it'''
env = Environment(rom_path='pokemon_red.gb', state_path=None, headless=False,
disable_input=False, sound=False, sound_emulated=False, verbose=True
)
env.reset()
env.game.set_emulation_speed(1)
# Display available actions
print("Available actions:")
for idx, action in enumerate(ACTIONS):
print(f"{idx}: {action}")
# Create a mapping from WindowEvent to action index
window_event_to_action = {
'PRESS_ARROW_DOWN': 0,
'PRESS_ARROW_LEFT': 1,
'PRESS_ARROW_RIGHT': 2,
'PRESS_ARROW_UP': 3,
'PRESS_BUTTON_A': 4,
'PRESS_BUTTON_B': 5,
'PRESS_BUTTON_START': 6,
'PRESS_BUTTON_SELECT': 7,
# Add more mappings if necessary
}
while True:
# Get input from pyboy's get_input method
input_events = env.game.get_input()
env.game.tick()
env.render()
if len(input_events) == 0:
continue
for event in input_events:
event_str = str(event)
if event_str in window_event_to_action:
action_index = window_event_to_action[event_str]
observation, reward, done, _, info = env.step(
action_index, fast_video=False)
# Check for game over
if done:
print(f"{done}")
break
# Additional game logic or information display can go here
print(f"new Reward: {reward}\n")
class Base:
def __init__(self, rom_path='pokemon_red.gb',
state_path=None, headless=True, quiet=False, **kwargs):
'''Creates a PokemonRed environment'''
if state_path is None:
state_path = __file__.rstrip('environment.py') + 'has_pokedex_nballs.state'
| self.game, self.screen = make_env( | 1 | 2023-11-16 18:34:28+00:00 | 4k |
AlexandrErohin/home-assistant-flightradar24 | custom_components/flightradar24/sensor.py | [
{
"identifier": "DOMAIN",
"path": "custom_components/flightradar24/const.py",
"snippet": "DOMAIN = \"flightradar24\""
},
{
"identifier": "FlightRadar24Coordinator",
"path": "custom_components/flightradar24/coordinator.py",
"snippet": "class FlightRadar24Coordinator(DataUpdateCoordinator[int]):\n\n def __init__(\n self,\n hass: HomeAssistant,\n bound: BoundingBox,\n client: FlightRadar24API,\n update_interval: int,\n logger: Logger,\n ) -> None:\n\n self._bound = bound\n self._client = client\n self._logger = logger\n self.tracked: dict[int, dict[str, Any]] | None = None\n self.entered = {}\n self.exited = {}\n self.device_info = DeviceInfo(\n configuration_url=URL,\n identifiers={(DOMAIN, DEFAULT_NAME)},\n manufacturer=DEFAULT_NAME,\n name=DEFAULT_NAME,\n )\n\n super().__init__(\n hass,\n logger,\n name=DOMAIN,\n update_interval=timedelta(seconds=update_interval),\n )\n\n @staticmethod\n def get_bounding_box(\n latitude: float,\n longitude: float,\n radius: float,\n ) -> BoundingBox:\n \"\"\"Get bounding box from radius and a point.\"\"\"\n half_side_in_km = abs(radius) / 1000\n\n lat = math.radians(latitude)\n lon = math.radians(longitude)\n\n approx_earth_radius = 6371\n hypotenuse_distance = math.sqrt(2 * (math.pow(half_side_in_km, 2)))\n\n lat_min = math.asin(\n math.sin(lat) * math.cos(hypotenuse_distance / approx_earth_radius)\n + math.cos(lat)\n * math.sin(hypotenuse_distance / approx_earth_radius)\n * math.cos(225 * (math.pi / 180)),\n )\n lon_min = lon + math.atan2(\n math.sin(225 * (math.pi / 180))\n * math.sin(hypotenuse_distance / approx_earth_radius)\n * math.cos(lat),\n math.cos(hypotenuse_distance / approx_earth_radius)\n - math.sin(lat) * math.sin(lat_min),\n )\n\n lat_max = math.asin(\n math.sin(lat) * math.cos(hypotenuse_distance / approx_earth_radius)\n + math.cos(lat)\n * math.sin(hypotenuse_distance / approx_earth_radius)\n * math.cos(45 * (math.pi / 180)),\n )\n lon_max = lon + math.atan2(\n math.sin(45 * (math.pi / 180))\n * math.sin(hypotenuse_distance / approx_earth_radius)\n * math.cos(lat),\n math.cos(hypotenuse_distance / approx_earth_radius)\n - math.sin(lat) * math.sin(lat_max),\n )\n\n rad2deg = math.degrees\n\n return BoundingBox(\n min_latitude=rad2deg(lat_min),\n max_latitude=rad2deg(lat_max),\n min_longitude=rad2deg(lon_min),\n max_longitude=rad2deg(lon_max),\n )\n\n async def _async_update_data(self):\n self.entered = {}\n self.exited = {}\n try:\n flights = await self.hass.async_add_executor_job(\n self._client.get_flights, None, self._bound.get_string()\n )\n current: dict[int, dict[str, Any]] = {}\n for obj in flights:\n if self.tracked is not None and obj.id in self.tracked and self._is_valid(self.tracked[obj.id]):\n flight = self.tracked[obj.id]\n else:\n data = await self.hass.async_add_executor_job(\n self._client.get_flight_details, obj\n )\n flight = self._get_flight_data(data)\n if flight is not None:\n current[flight['id']] = flight\n flight['latitude'] = obj.latitude\n flight['longitude'] = obj.longitude\n flight['altitude'] = obj.altitude\n flight['heading'] = obj.heading\n flight['ground_speed'] = obj.ground_speed\n flight['squawk'] = obj.squawk\n\n if self.tracked is not None:\n entries = current.keys() - self.tracked.keys()\n self.entered = [current[x] for x in entries]\n exits = self.tracked.keys() - current.keys()\n self.exited = [self.tracked[x] for x in exits]\n self._handle_boundary(EVENT_FLIGHTRADAR24_ENTRY, self.entered)\n self._handle_boundary(EVENT_FLIGHTRADAR24_EXIT, self.exited)\n self.tracked = current\n\n except Exception as e:\n self._logger.error(e)\n\n def _handle_boundary(self, event: str, flights: list[dict[str, Any]]) -> None:\n for flight in flights:\n self.hass.bus.fire(event, flight)\n\n @staticmethod\n def _get_value(dictionary: dict, 
keys: list) -> Any | None:\n nested_dict = dictionary\n\n for key in keys:\n try:\n nested_dict = nested_dict[key]\n except Exception:\n return None\n return nested_dict\n\n @staticmethod\n def _is_valid(flight: dict) -> bool:\n return flight['flight_number'] is not None\n\n @staticmethod\n def _get_country_code(code: None | str) -> None | str:\n if code is None or len(code) == 2:\n return code\n country = pycountry.countries.get(alpha_3=code)\n\n return country.alpha_2 if country is not None else code\n\n @staticmethod\n def _get_flight_data(flight: dict) -> dict[str, Any] | None:\n flight_id = FlightRadar24Coordinator._get_value(flight, ['identification', 'id'])\n if flight_id is None:\n return None\n\n return {\n 'id': flight_id,\n 'flight_number': FlightRadar24Coordinator._get_value(flight, ['identification', 'number', 'default']),\n 'callsign': FlightRadar24Coordinator._get_value(flight, ['identification', 'callsign']),\n 'aircraft_registration': FlightRadar24Coordinator._get_value(flight, ['aircraft', 'registration']),\n 'aircraft_photo_small': FlightRadar24Coordinator._get_value(flight,\n ['aircraft', 'images', 'thumbnails', 0, 'src']),\n 'aircraft_model': FlightRadar24Coordinator._get_value(flight, ['aircraft', 'model', 'text']),\n 'aircraft_code': FlightRadar24Coordinator._get_value(flight, ['aircraft', 'model', 'code']),\n 'airline': FlightRadar24Coordinator._get_value(flight, ['airline', 'name']),\n 'airline_short': FlightRadar24Coordinator._get_value(flight, ['airline', 'short']),\n 'airline_iata': FlightRadar24Coordinator._get_value(flight, ['airline', 'code', 'iata']),\n 'airline_icao': FlightRadar24Coordinator._get_value(flight, ['airline', 'code', 'icao']),\n 'airport_origin_name': FlightRadar24Coordinator._get_value(flight, ['airport', 'origin', 'name']),\n 'airport_origin_code_iata': FlightRadar24Coordinator._get_value(flight,\n ['airport', 'origin', 'code', 'iata']),\n 'airport_origin_code_icao': FlightRadar24Coordinator._get_value(flight,\n ['airport', 'origin', 'code', 'icao']),\n 'airport_origin_country_name': FlightRadar24Coordinator._get_value(flight,\n ['airport', 'origin', 'position',\n 'country', 'name']),\n 'airport_origin_country_code': FlightRadar24Coordinator._get_country_code(\n FlightRadar24Coordinator._get_value(flight,['airport', 'origin', 'position', 'country', 'code'])),\n 'airport_origin_city': FlightRadar24Coordinator._get_value(flight,\n ['airport', 'origin', 'position', 'region',\n 'city']),\n 'airport_destination_name': FlightRadar24Coordinator._get_value(flight, ['airport', 'destination', 'name']),\n 'airport_destination_code_iata': FlightRadar24Coordinator._get_value(flight,\n ['airport', 'destination', 'code',\n 'iata']),\n 'airport_destination_code_icao': FlightRadar24Coordinator._get_value(flight,\n ['airport', 'destination', 'code',\n 'icao']),\n 'airport_destination_country_name': FlightRadar24Coordinator._get_value(flight,\n ['airport', 'destination',\n 'position', 'country',\n 'name']),\n 'airport_destination_country_code': FlightRadar24Coordinator._get_country_code(\n FlightRadar24Coordinator._get_value(flight,\n ['airport', 'destination', 'position', 'country', 'code'])),\n 'airport_destination_city': FlightRadar24Coordinator._get_value(flight,\n ['airport', 'destination', 'position',\n 'region', 'city']),\n 'time_scheduled_departure': FlightRadar24Coordinator._get_value(flight, ['time', 'scheduled', 'departure']),\n 'time_scheduled_arrival': FlightRadar24Coordinator._get_value(flight, ['time', 'scheduled', 'arrival']),\n 
'time_real_departure': FlightRadar24Coordinator._get_value(flight, ['time', 'real', 'departure']),\n 'time_real_arrival': FlightRadar24Coordinator._get_value(flight, ['time', 'real', 'arrival']),\n }"
}
] | from dataclasses import dataclass
from collections.abc import Callable
from typing import Any
from homeassistant.components.sensor import (
SensorStateClass,
SensorEntity,
SensorEntityDescription,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.device_registry import DeviceInfo
from .const import DOMAIN
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .coordinator import FlightRadar24Coordinator | 2,721 |
@dataclass
class FlightRadar24SensorRequiredKeysMixin:
value: Callable[[FlightRadar24Coordinator], Any]
attributes: Callable[[FlightRadar24Coordinator], Any]
@dataclass
class TFlightRadar24SensorEntityDescription(SensorEntityDescription, FlightRadar24SensorRequiredKeysMixin):
"""A class that describes sensor entities."""
SENSOR_TYPES: tuple[TFlightRadar24SensorEntityDescription, ...] = (
TFlightRadar24SensorEntityDescription(
key="in_area",
name="Current in area",
icon="mdi:airplane",
state_class=SensorStateClass.TOTAL,
value=lambda coord: len(coord.tracked) if coord.tracked is not None else 0,
attributes=lambda coord: {'flights': [coord.tracked[x] for x in coord.tracked]},
),
TFlightRadar24SensorEntityDescription(
key="entered",
name="Entered area",
icon="mdi:airplane",
state_class=SensorStateClass.TOTAL,
value=lambda coord: len(coord.entered),
attributes=lambda coord: {'flights': coord.entered},
),
TFlightRadar24SensorEntityDescription(
key="exited",
name="Exited area",
icon="mdi:airplane",
state_class=SensorStateClass.TOTAL,
value=lambda coord: len(coord.exited),
attributes=lambda coord: {'flights': coord.exited},
),
)
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
|
@dataclass
class FlightRadar24SensorRequiredKeysMixin:
value: Callable[[FlightRadar24Coordinator], Any]
attributes: Callable[[FlightRadar24Coordinator], Any]
@dataclass
class TFlightRadar24SensorEntityDescription(SensorEntityDescription, FlightRadar24SensorRequiredKeysMixin):
"""A class that describes sensor entities."""
SENSOR_TYPES: tuple[TFlightRadar24SensorEntityDescription, ...] = (
TFlightRadar24SensorEntityDescription(
key="in_area",
name="Current in area",
icon="mdi:airplane",
state_class=SensorStateClass.TOTAL,
value=lambda coord: len(coord.tracked) if coord.tracked is not None else 0,
attributes=lambda coord: {'flights': [coord.tracked[x] for x in coord.tracked]},
),
TFlightRadar24SensorEntityDescription(
key="entered",
name="Entered area",
icon="mdi:airplane",
state_class=SensorStateClass.TOTAL,
value=lambda coord: len(coord.entered),
attributes=lambda coord: {'flights': coord.entered},
),
TFlightRadar24SensorEntityDescription(
key="exited",
name="Exited area",
icon="mdi:airplane",
state_class=SensorStateClass.TOTAL,
value=lambda coord: len(coord.exited),
attributes=lambda coord: {'flights': coord.exited},
),
)
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None: | coordinator = hass.data[DOMAIN][entry.entry_id] | 0 | 2023-11-16 10:51:24+00:00 | 4k |
ej0cl6/TextEE | TextEE/models/EEQA/EDtrainer.py | [
{
"identifier": "BasicTrainer",
"path": "TextEE/models/trainer.py",
"snippet": "class BasicTrainer(object):\n def __init__(self, config, type_set=None):\n self.config = config\n self.type_set = type_set\n \n @classmethod\n def add_extra_info_fn(cls, instances, raw_data, config):\n for instance in instances:\n instance[\"extra_info\"] = None\n return instances\n \n def load_model(self, checkpoint=None):\n pass\n \n def train(self, train_data, dev_data, **kwargs):\n pass\n \n def predict(self, data, **kwargs):\n pass"
},
{
"identifier": "EEQAEDModel",
"path": "TextEE/models/EEQA/EDmodel.py",
"snippet": "class EEQAEDModel(nn.Module):\n def __init__(self, config, tokenizer, type_set):\n super().__init__()\n self.config = config\n self.tokenizer = tokenizer\n self.type_set = type_set\n self.generate_tagging_vocab()\n \n # base encoder\n if self.config.pretrained_model_name.startswith('bert-'):\n self.tokenizer.bos_token = self.tokenizer.cls_token\n self.tokenizer.eos_token = self.tokenizer.sep_token\n self.base_config = BertConfig.from_pretrained(self.config.pretrained_model_name, \n cache_dir=self.config.cache_dir)\n self.base_model = BertModel.from_pretrained(self.config.pretrained_model_name, \n cache_dir=self.config.cache_dir, \n output_hidden_states=True)\n elif self.config.pretrained_model_name.startswith('roberta-'):\n self.base_config = RobertaConfig.from_pretrained(self.config.pretrained_model_name, \n cache_dir=self.config.cache_dir)\n self.base_model = RobertaModel.from_pretrained(self.config.pretrained_model_name, \n cache_dir=self.config.cache_dir, \n output_hidden_states=True)\n elif self.config.pretrained_model_name.startswith('xlm-'):\n self.base_config = XLMRobertaConfig.from_pretrained(self.config.pretrained_model_name, \n cache_dir=self.config.cache_dir)\n self.base_model = XLMRobertaModel.from_pretrained(self.config.pretrained_model_name, \n cache_dir=self.config.cache_dir, \n output_hidden_states=True)\n else:\n raise ValueError(f\"pretrained_model_name is not supported.\")\n \n self.base_model.resize_token_embeddings(len(self.tokenizer))\n self.base_model_dim = self.base_config.hidden_size\n self.base_model_dropout = nn.Dropout(p=self.config.base_model_dropout)\n \n # local classifiers\n self.dropout = nn.Dropout(p=self.config.linear_dropout)\n feature_dim = self.base_model_dim\n\n self.trigger_label_ffn = Linears([feature_dim, len(self.type_stoi[\"trigger\"])],\n dropout_prob=self.config.linear_dropout, \n bias=self.config.linear_bias, \n activation=self.config.linear_activation)\n \n def generate_tagging_vocab(self):\n trigger_type_itos = ['O'] + [t for t in sorted(self.type_set[\"trigger\"])]\n trigger_type_stoi = {t: i for i, t in enumerate(trigger_type_itos)}\n self.type_itos = {\"trigger\": trigger_type_itos}\n self.type_stoi = {\"trigger\": trigger_type_stoi}\n \n def token_lens_to_offsets(self, token_lens):\n \"\"\"Map token lengths to first word piece indices, used by the sentence\n encoder.\n :param token_lens (list): token lengths (word piece numbers)\n :return (list): first word piece indices (offsets)\n \"\"\"\n max_token_num = max([len(x) for x in token_lens])\n offsets = []\n for seq_token_lens in token_lens:\n seq_offsets = [0]\n for l in seq_token_lens[:-1]:\n seq_offsets.append(seq_offsets[-1] + l)\n offsets.append(seq_offsets + [-1] * (max_token_num - len(seq_offsets)))\n return offsets\n \n def token_lens_to_idxs(self, token_lens):\n \"\"\"Map token lengths to a word piece index matrix (for torch.gather) and a\n mask tensor.\n For example (only show a sequence instead of a batch):\n token lengths: [1,1,1,3,1]\n =>\n indices: [[0,0,0], [1,0,0], [2,0,0], [3,4,5], [6,0,0]]\n masks: [[1.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 0.0, 0.0],\n [0.33, 0.33, 0.33], [1.0, 0.0, 0.0]]\n Next, we use torch.gather() to select vectors of word pieces for each token,\n and average them as follows (incomplete code):\n outputs = torch.gather(bert_outputs, 1, indices) * masks\n outputs = bert_outputs.view(batch_size, seq_len, -1, self.bert_dim)\n outputs = bert_outputs.sum(2)\n :param token_lens (list): token lengths.\n :return: a index matrix and a mask 
tensor.\n \"\"\"\n max_token_num = max([len(x) for x in token_lens])\n max_token_len = max([max(x) for x in token_lens])\n idxs, masks = [], []\n for seq_token_lens in token_lens:\n seq_idxs, seq_masks = [], []\n offset = 0\n for token_len in seq_token_lens:\n seq_idxs.extend([i + offset for i in range(token_len)]\n + [-1] * (max_token_len - token_len))\n seq_masks.extend([1.0 / token_len] * token_len\n + [0.0] * (max_token_len - token_len))\n offset += token_len\n seq_idxs.extend([-1] * max_token_len * (max_token_num - len(seq_token_lens)))\n seq_masks.extend([0.0] * max_token_len * (max_token_num - len(seq_token_lens)))\n idxs.append(seq_idxs)\n masks.append(seq_masks)\n return idxs, masks, max_token_num, max_token_len\n \n def process_data(self, batch):\n enc_idxs = []\n enc_attn = []\n trigger_seqidxs = []\n token_lens = []\n token_nums = []\n max_token_num = max(batch.batch_token_num)\n \n for tokens, pieces, triggers, token_len, token_num in zip(batch.batch_tokens, batch.batch_pieces, batch.batch_triggers, \n batch.batch_token_lens, batch.batch_token_num):\n\n piece_id = self.tokenizer.convert_tokens_to_ids(pieces)\n \n if self.config.question_type == \"verb\":\n question = \"verb\"\n question_pieces = self.tokenizer.tokenize(question, is_split_into_words=True)\n question_idx = self.tokenizer.convert_tokens_to_ids(question_pieces)\n \n enc_idx = [self.tokenizer.convert_tokens_to_ids(self.tokenizer.bos_token)] + question_idx + \\\n [self.tokenizer.convert_tokens_to_ids(self.tokenizer.sep_token)] + piece_id + [self.tokenizer.convert_tokens_to_ids(self.tokenizer.eos_token)]\n \n enc_idxs.append(enc_idx)\n enc_attn.append([1]*len(enc_idx))\n self.question_offset = len(question_idx) + 2\n \n token_lens.append(token_len)\n token_nums.append(token_num)\n \n labels = ['O'] * token_num\n for trigger in triggers:\n if labels[trigger[0]] == 'O':\n labels[trigger[0]] = trigger[2]\n \n trigger_seqidxs.append([self.type_stoi[\"trigger\"][s] for s in labels] + [-100] * (max_token_num-len(tokens)))\n\n max_len = max([len(enc_idx) for enc_idx in enc_idxs])\n enc_idxs = torch.LongTensor([enc_idx + [self.tokenizer.convert_tokens_to_ids(self.tokenizer.pad_token)]*(max_len-len(enc_idx)) for enc_idx in enc_idxs])\n enc_attn = torch.LongTensor([enc_att + [0]*(max_len-len(enc_att)) for enc_att in enc_attn])\n enc_idxs = enc_idxs.cuda()\n enc_attn = enc_attn.cuda()\n trigger_seqidxs = torch.cuda.LongTensor(trigger_seqidxs)\n return enc_idxs, enc_attn, trigger_seqidxs, token_lens, torch.cuda.LongTensor(token_nums)\n \n def encode(self, piece_idxs, attention_masks, token_lens):\n \"\"\"Encode input sequences with BERT\n :param piece_idxs (LongTensor): word pieces indices\n :param attention_masks (FloatTensor): attention mask\n :param token_lens (list): token lengths\n \"\"\"\n batch_size, _ = piece_idxs.size()\n all_base_model_outputs = self.base_model(piece_idxs, attention_mask=attention_masks)\n base_model_outputs = all_base_model_outputs[0]\n if self.config.multi_piece_strategy == 'first':\n # select the first piece for multi-piece words\n offsets = token_lens_to_offsets(token_lens)\n offsets = piece_idxs.new(offsets) # batch x max_token_num\n # + 1 because the first vector is for [CLS]\n offsets = offsets.unsqueeze(-1).expand(batch_size, -1, self.bert_dim) + self.question_offset\n base_model_outputs = torch.gather(base_model_outputs, 1, offsets)\n elif self.config.multi_piece_strategy == 'average':\n # average all pieces for multi-piece words\n idxs, masks, token_num, token_len = 
self.token_lens_to_idxs(token_lens)\n idxs = piece_idxs.new(idxs).unsqueeze(-1).expand(batch_size, -1, self.base_model_dim) + self.question_offset\n masks = base_model_outputs.new(masks).unsqueeze(-1)\n base_model_outputs = torch.gather(base_model_outputs, 1, idxs) * masks\n base_model_outputs = base_model_outputs.view(batch_size, token_num, token_len, self.base_model_dim)\n base_model_outputs = base_model_outputs.sum(2)\n else:\n raise ValueError(f'Unknown multi-piece token handling strategy: {self.config.multi_piece_strategy}')\n base_model_outputs = self.base_model_dropout(base_model_outputs)\n return base_model_outputs\n\n def forward(self, batch):\n # process data\n enc_idxs, enc_attn, trigger_seqidxs, token_lens, token_nums = self.process_data(batch)\n \n # encoding\n base_model_outputs = self.encode(enc_idxs, enc_attn, token_lens)\n logits = self.trigger_label_ffn(base_model_outputs)\n loss = F.cross_entropy(logits.view(-1, logits.size(-1)), trigger_seqidxs.view(-1))\n \n return loss\n \n def predict(self, batch):\n self.eval()\n with torch.no_grad():\n # process data\n enc_idxs, enc_attn, _, token_lens, token_nums = self.process_data(batch)\n \n # encoding\n base_model_outputs = self.encode(enc_idxs, enc_attn, token_lens)\n logits = self.trigger_label_ffn(base_model_outputs)\n preds = logits.argmax(dim=-1)\n \n preds = preds.cpu().numpy()\n pred_triggers = []\n for pred, token_num in zip(preds, token_nums):\n pred_trigger = []\n for i, t in enumerate(pred):\n if i >= token_num:\n break\n if t == 0:\n continue\n pred_trigger.append((i, i+1, self.type_itos[\"trigger\"][t]))\n pred_triggers.append(pred_trigger)\n self.train()\n return pred_triggers"
}
] | import os, sys, logging, tqdm, pprint
import torch
import numpy as np
import ipdb
from collections import namedtuple
from transformers import RobertaTokenizer, AutoTokenizer, get_linear_schedule_with_warmup
from torch.utils.data import DataLoader
from torch.optim import AdamW
from ..trainer import BasicTrainer
from .EDmodel import EEQAEDModel
from scorer import compute_ED_scores, print_scores | 3,002 |
logger = logging.getLogger(__name__)
EDBatch_fields = ['batch_doc_id', 'batch_wnd_id', 'batch_tokens', 'batch_pieces', 'batch_token_lens', 'batch_token_num', 'batch_text', 'batch_triggers']
EDBatch = namedtuple('EDBatch', field_names=EDBatch_fields, defaults=[None] * len(EDBatch_fields))
def ED_collate_fn(batch):
return EDBatch(
batch_doc_id=[instance["doc_id"] for instance in batch],
batch_wnd_id=[instance["wnd_id"] for instance in batch],
batch_tokens=[instance["tokens"] for instance in batch],
batch_pieces=[instance["pieces"] for instance in batch],
batch_token_lens=[instance["token_lens"] for instance in batch],
batch_token_num=[instance["token_num"] for instance in batch],
batch_text=[instance["text"] for instance in batch],
batch_triggers=[instance["triggers"] for instance in batch],
)
|
logger = logging.getLogger(__name__)
EDBatch_fields = ['batch_doc_id', 'batch_wnd_id', 'batch_tokens', 'batch_pieces', 'batch_token_lens', 'batch_token_num', 'batch_text', 'batch_triggers']
EDBatch = namedtuple('EDBatch', field_names=EDBatch_fields, defaults=[None] * len(EDBatch_fields))
def ED_collate_fn(batch):
return EDBatch(
batch_doc_id=[instance["doc_id"] for instance in batch],
batch_wnd_id=[instance["wnd_id"] for instance in batch],
batch_tokens=[instance["tokens"] for instance in batch],
batch_pieces=[instance["pieces"] for instance in batch],
batch_token_lens=[instance["token_lens"] for instance in batch],
batch_token_num=[instance["token_num"] for instance in batch],
batch_text=[instance["text"] for instance in batch],
batch_triggers=[instance["triggers"] for instance in batch],
)
| class EEQAEDTrainer(BasicTrainer): | 0 | 2023-11-15 21:32:56+00:00 | 4k |
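A minimal standalone sketch of the word-piece averaging that the token_lens_to_idxs docstring above describes, assuming toy tensor shapes; the average_pieces helper and its zero-index padding are illustrative simplifications, not part of the EEQAEDModel record.

import torch

def average_pieces(piece_vecs, token_lens):
    # piece_vecs: [num_pieces, dim] encoder outputs for one sequence (assumed layout).
    # Build gather indices and averaging masks per token, as in token_lens_to_idxs;
    # padded positions reuse index 0 with a zero mask instead of the -1 padding above.
    max_len = max(token_lens)
    dim = piece_vecs.size(-1)
    idxs, masks, offset = [], [], 0
    for tl in token_lens:
        idxs.extend([offset + i for i in range(tl)] + [0] * (max_len - tl))
        masks.extend([1.0 / tl] * tl + [0.0] * (max_len - tl))
        offset += tl
    idxs = torch.tensor(idxs).unsqueeze(-1).expand(-1, dim)   # [num_tokens * max_len, dim]
    masks = torch.tensor(masks).unsqueeze(-1)                 # [num_tokens * max_len, 1]
    gathered = torch.gather(piece_vecs, 0, idxs) * masks
    return gathered.view(len(token_lens), max_len, dim).sum(1)  # [num_tokens, dim]

# Toy check: token lengths [1, 3] over 4 word pieces; the second output row is the
# mean of word pieces 1..3.
vecs = torch.arange(8, dtype=torch.float32).view(4, 2)
print(average_pieces(vecs, [1, 3]))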
isce-framework/snaphu-py | src/snaphu/_unwrap.py | [
{
"identifier": "run_snaphu",
"path": "src/snaphu/_snaphu.py",
"snippet": "def run_snaphu(config_file: str | os.PathLike[str]) -> None:\n \"\"\"\n Run SNAPHU with the specified config file.\n\n Parameters\n ----------\n config_file : path-like\n The file path of a text file storing configuration parameters to pass to SNAPHU.\n \"\"\"\n if not Path(config_file).is_file():\n errmsg = f\"config file not found: {config_file}\"\n raise FileNotFoundError(errmsg)\n\n with get_snaphu_executable() as snaphu:\n args = [os.fspath(snaphu), \"-f\", os.fspath(config_file)]\n try:\n subprocess.run(args, stderr=subprocess.PIPE, check=True, text=True)\n except subprocess.CalledProcessError as e:\n errmsg = e.stderr.strip()\n raise RuntimeError(errmsg) from e"
},
{
"identifier": "BlockIterator",
"path": "src/snaphu/_util.py",
"snippet": "class BlockIterator(Iterable[tuple[slice, ...]]):\n \"\"\"\n An iterable over chunks of an N-dimensional array.\n\n `BlockIterator` represents a partitioning of a multidimensional array into\n regularly-sized non-overlapping blocks. Each block is represented by an index\n expression (i.e. a tuple of `slice` objects) that can be used to access the\n corresponding block of data from the partitioned array. The full set of blocks spans\n the entire array.\n\n Iterating over a `BlockIterator` object yields each block in unspecified order.\n \"\"\"\n\n shape: tuple[int, ...]\n \"\"\"tuple of int : The shape of the array to be partitioned into blocks.\"\"\"\n chunks: tuple[int, ...]\n \"\"\"\n tuple of int : The shape of a typical block. The last block along each axis may be\n smaller.\n \"\"\"\n\n def __init__(self, shape: int | Iterable[int], chunks: int | Iterable[int]):\n \"\"\"\n Construct a new `BlockIterator` object.\n\n Parameters\n ----------\n shape : int or iterable of int\n The shape of the array to be partitioned into blocks. Each dimension must be\n > 0.\n chunks : int or iterable of int\n The shape of a typical block. Must be the same length as `shape`. Each chunk\n dimension must be > 0.\n \"\"\"\n # Normalize `shape` and `chunks` into tuples of ints.\n shape = as_tuple_of_int(shape)\n chunks = as_tuple_of_int(chunks)\n\n if len(chunks) != len(shape):\n errmsg = (\n \"size mismatch: shape and chunks must have the same number of elements,\"\n f\" instead got len(shape) != len(chunks) ({len(shape)} !=\"\n f\" {len(chunks)})\"\n )\n raise ValueError(errmsg)\n\n if not all(n > 0 for n in shape):\n errmsg = f\"shape elements must all be > 0, instead got {shape}\"\n raise ValueError(errmsg)\n if any(n <= 0 for n in chunks):\n errmsg = f\"chunk elements must all be > 0, instead got {chunks}\"\n raise ValueError(errmsg)\n\n # XXX Workaround for `frozen=True`.\n object.__setattr__(self, \"shape\", shape)\n object.__setattr__(self, \"chunks\", chunks)\n\n def __iter__(self) -> Iterator[tuple[slice, ...]]:\n \"\"\"\n Iterate over blocks in unspecified order.\n\n Yields\n ------\n block : tuple of slice\n A tuple of slices that can be used to access the corresponding block of data\n from an array.\n \"\"\"\n # Number of blocks along each array axis.\n nblocks = ceil_divide(self.shape, self.chunks)\n\n # Iterate over blocks.\n for block_ind in itertools.product(*[range(n) for n in nblocks]):\n # Get the lower & upper index bounds for the current block.\n start = np.multiply(block_ind, self.chunks)\n stop = np.minimum(start + self.chunks, self.shape)\n\n # Yield a tuple of slice objects.\n yield tuple(itertools.starmap(slice, zip(start, stop)))"
},
{
"identifier": "scratch_directory",
"path": "src/snaphu/_util.py",
"snippet": "@contextmanager\ndef scratch_directory(\n dir_: str | os.PathLike[str] | None = None, *, delete: bool = True\n) -> Generator[Path, None, None]:\n \"\"\"\n Context manager that creates a (possibly temporary) file system directory.\n\n If `dir_` is a path-like object, a directory will be created at the specified\n file system path if it did not already exist. Otherwise, if `dir_` is None, a\n temporary directory will instead be created as though by ``tempfile.mkdtemp()``.\n\n The directory may be automatically removed from the file system upon exiting the\n context manager.\n\n Parameters\n ----------\n dir_ : path-like or None, optional\n Scratch directory path. If None, a temporary directory will be created. Defaults\n to None.\n delete : bool, optional\n If True, the directory and its contents are recursively removed from the\n file system upon exiting the context manager. Defaults to True.\n\n Yields\n ------\n pathlib.Path\n Scratch directory path. If `delete` was True, the directory will be removed from\n the file system upon exiting the context manager scope.\n \"\"\"\n if dir_ is None:\n scratchdir = Path(mkdtemp())\n else:\n scratchdir = Path(dir_)\n scratchdir.mkdir(parents=True, exist_ok=True)\n\n yield scratchdir\n\n if delete:\n shutil.rmtree(scratchdir)"
},
{
"identifier": "InputDataset",
"path": "src/snaphu/io/_dataset.py",
"snippet": "class InputDataset(Protocol):\n \"\"\"\n An array-like interface for reading input datasets.\n\n `InputDataset` defines the abstract interface that types must conform to in order\n to be valid inputs to the ``snaphu.unwrap()`` function. Such objects must export\n NumPy-like `dtype`, `shape`, and `ndim` attributes and must support NumPy-style\n slice-based indexing.\n\n See Also\n --------\n OutputDataset\n \"\"\"\n\n @property\n def dtype(self) -> np.dtype:\n \"\"\"numpy.dtype : Data-type of the array's elements.\"\"\"\n\n @property\n def shape(self) -> tuple[int, ...]:\n \"\"\"tuple of int : Tuple of array dimensions.\"\"\" # noqa: D403\n\n @property\n def ndim(self) -> int:\n \"\"\"int : Number of array dimensions.\"\"\" # noqa: D403\n\n def __getitem__(self, key: slice | tuple[slice, ...], /) -> ArrayLike:\n \"\"\"Read a block of data.\"\"\""
},
{
"identifier": "OutputDataset",
"path": "src/snaphu/io/_dataset.py",
"snippet": "class OutputDataset(Protocol):\n \"\"\"\n An array-like interface for writing output datasets.\n\n `OutputDataset` defines the abstract interface that types must conform to in order\n to be valid outputs of the ``snaphu.unwrap()`` function. Such objects must export\n NumPy-like `dtype`, `shape`, and `ndim` attributes and must support NumPy-style\n slice-based indexing.\n\n See Also\n --------\n InputDataset\n \"\"\"\n\n @property\n def dtype(self) -> np.dtype:\n \"\"\"numpy.dtype : Data-type of the array's elements.\"\"\"\n\n @property\n def shape(self) -> tuple[int, ...]:\n \"\"\"tuple of int : Tuple of array dimensions.\"\"\" # noqa: D403\n\n @property\n def ndim(self) -> int:\n \"\"\"int : Number of array dimensions.\"\"\" # noqa: D403\n\n def __setitem__(self, key: slice | tuple[slice, ...], value: np.ndarray, /) -> None:\n \"\"\"Write a block of data.\"\"\""
}
] | import io
import os
import textwrap
import numpy as np
from dataclasses import dataclass
from pathlib import Path
from tempfile import mkstemp
from typing import cast, overload
from ._snaphu import run_snaphu
from ._util import BlockIterator, scratch_directory
from .io import InputDataset, OutputDataset | 3,261 | class SnaphuConfig:
"""
SNAPHU configuration parameters.
Parameters
----------
infile : path-like
The input interferogram file path.
corrfile : path-like
The input coherence file path.
outfile : path-like
The output unwrapped phase file path.
conncompfile : path-like
The output connected component labels file path.
linelength : int
The line length, in samples, of the input interferogram data array.
ncorrlooks : float
The equivalent number of independent looks used to form the coherence data.
statcostmode : str
The statistical cost mode.
initmethod : str
The algorithm used for initializing the network solver routine.
bytemaskfile : path-like or None, optional
An optional file path of a byte mask file. If None, no mask is applied. Defaults
to None.
tiling_params : TilingParams or None, optional
Optional additional configuration parameters affecting scene tiling and parallel
processing. Defaults to None.
"""
infile: str | os.PathLike[str]
corrfile: str | os.PathLike[str]
outfile: str | os.PathLike[str]
conncompfile: str | os.PathLike[str]
linelength: int
ncorrlooks: float
statcostmode: str
initmethod: str
bytemaskfile: str | os.PathLike[str] | None = None
tiling_params: TilingParams | None = None
def to_string(self) -> str:
"""
Write SNAPHU configuration parameters to a string.
Creates a multi-line string in SNAPHU configuration file format.
Returns
-------
str
The output string.
"""
config = textwrap.dedent(f"""\
INFILE {os.fspath(self.infile)}
INFILEFORMAT COMPLEX_DATA
CORRFILE {os.fspath(self.corrfile)}
CORRFILEFORMAT FLOAT_DATA
OUTFILE {os.fspath(self.outfile)}
OUTFILEFORMAT FLOAT_DATA
CONNCOMPFILE {os.fspath(self.conncompfile)}
CONNCOMPOUTTYPE UINT
LINELENGTH {self.linelength}
NCORRLOOKS {self.ncorrlooks}
STATCOSTMODE {self.statcostmode.upper()}
INITMETHOD {self.initmethod.upper()}
""")
if self.bytemaskfile is not None:
config += f"BYTEMASKFILE {os.fspath(self.bytemaskfile)}\n"
if self.tiling_params is not None:
config += self.tiling_params.to_string()
return config
def _to_file_textio(self, file_: io.TextIOBase, /) -> None:
# Write config params to file.
s = self.to_string()
count = file_.write(s)
# Check that the full text was successfully written to the file.
if count != len(s):
errmsg = "failed to write config params to file"
raise RuntimeError(errmsg)
def _to_file_pathlike(self, file_: str | os.PathLike[str], /) -> None:
# Create the file's parent directory(ies) if they didn't already exist.
p = Path(file_)
p.parent.mkdir(parents=True, exist_ok=True)
# Write config params to file.
s = self.to_string()
p.write_text(s)
def to_file(self, file_: str | os.PathLike[str] | io.TextIOBase, /) -> None:
"""
Write SNAPHU configuration parameters to a file.
The resulting file is suitable for passing to the SNAPHU executable as a
configuration file.
Parameters
----------
file_ : path-like or file-like
The output file. May be an open text file or a file path. If the file
and any of its parent directories do not exist, they will be created. If the
path to an existing file is specified, the file will be overwritten.
"""
if isinstance(file_, io.TextIOBase):
self._to_file_textio(file_)
elif isinstance(file_, (str, os.PathLike)):
self._to_file_pathlike(file_)
else:
errmsg = (
"to_file argument must be a path-like or file-like object, instead got"
f" type={type(file_)}"
)
raise TypeError(errmsg)
def check_shapes(
| from __future__ import annotations
__all__ = [
"unwrap",
]
@dataclass(frozen=True)
class TilingParams:
"""
SNAPHU configuration parameters affecting scene tiling and parallel processing.
Parameters
----------
ntilerow, ntilecol : int, optional
Number of tiles along the row/column directions. If `ntilerow` and `ntilecol`
are both 1 (the default), the interferogram will be unwrapped as a single tile.
rowovrlp, colovrlp : int, optional
Overlap, in number of rows/columns, between neighboring tiles. Defaults to 0.
nproc : int, optional
Maximum number of child processes to spawn for parallel tile unwrapping.
Defaults to 1.
"""
ntilerow: int = 1
ntilecol: int = 1
rowovrlp: int = 0
colovrlp: int = 0
nproc: int = 1
def to_string(self) -> str:
"""
Write SNAPHU tiling parameters to a string.
Creates a multi-line string in SNAPHU configuration file format.
Returns
-------
str
The output string.
"""
return textwrap.dedent(f"""\
NTILEROW {self.ntilerow}
NTILECOL {self.ntilecol}
ROWOVRLP {self.rowovrlp}
COLOVRLP {self.colovrlp}
NPROC {self.nproc}
""")
@dataclass(frozen=True)
class SnaphuConfig:
"""
SNAPHU configuration parameters.
Parameters
----------
infile : path-like
The input interferogram file path.
corrfile : path-like
The input coherence file path.
outfile : path-like
The output unwrapped phase file path.
conncompfile : path-like
The output connected component labels file path.
linelength : int
The line length, in samples, of the input interferogram data array.
ncorrlooks : float
The equivalent number of independent looks used to form the coherence data.
statcostmode : str
The statistical cost mode.
initmethod : str
The algorithm used for initializing the network solver routine.
bytemaskfile : path-like or None, optional
An optional file path of a byte mask file. If None, no mask is applied. Defaults
to None.
tiling_params : TilingParams or None, optional
Optional additional configuration parameters affecting scene tiling and parallel
processing. Defaults to None.
"""
infile: str | os.PathLike[str]
corrfile: str | os.PathLike[str]
outfile: str | os.PathLike[str]
conncompfile: str | os.PathLike[str]
linelength: int
ncorrlooks: float
statcostmode: str
initmethod: str
bytemaskfile: str | os.PathLike[str] | None = None
tiling_params: TilingParams | None = None
def to_string(self) -> str:
"""
Write SNAPHU configuration parameters to a string.
Creates a multi-line string in SNAPHU configuration file format.
Returns
-------
str
The output string.
"""
config = textwrap.dedent(f"""\
INFILE {os.fspath(self.infile)}
INFILEFORMAT COMPLEX_DATA
CORRFILE {os.fspath(self.corrfile)}
CORRFILEFORMAT FLOAT_DATA
OUTFILE {os.fspath(self.outfile)}
OUTFILEFORMAT FLOAT_DATA
CONNCOMPFILE {os.fspath(self.conncompfile)}
CONNCOMPOUTTYPE UINT
LINELENGTH {self.linelength}
NCORRLOOKS {self.ncorrlooks}
STATCOSTMODE {self.statcostmode.upper()}
INITMETHOD {self.initmethod.upper()}
""")
if self.bytemaskfile is not None:
config += f"BYTEMASKFILE {os.fspath(self.bytemaskfile)}\n"
if self.tiling_params is not None:
config += self.tiling_params.to_string()
return config
def _to_file_textio(self, file_: io.TextIOBase, /) -> None:
# Write config params to file.
s = self.to_string()
count = file_.write(s)
# Check that the full text was successfully written to the file.
if count != len(s):
errmsg = "failed to write config params to file"
raise RuntimeError(errmsg)
def _to_file_pathlike(self, file_: str | os.PathLike[str], /) -> None:
# Create the file's parent directory(ies) if they didn't already exist.
p = Path(file_)
p.parent.mkdir(parents=True, exist_ok=True)
# Write config params to file.
s = self.to_string()
p.write_text(s)
def to_file(self, file_: str | os.PathLike[str] | io.TextIOBase, /) -> None:
"""
Write SNAPHU configuration parameters to a file.
The resulting file is suitable for passing to the SNAPHU executable as a
configuration file.
Parameters
----------
file_ : path-like or file-like
The output file. May be an open text file or a file path. If the file
and any of its parent directories do not exist, they will be created. If the
path to an existing file is specified, the file will be overwritten.
"""
if isinstance(file_, io.TextIOBase):
self._to_file_textio(file_)
elif isinstance(file_, (str, os.PathLike)):
self._to_file_pathlike(file_)
else:
errmsg = (
"to_file argument must be a path-like or file-like object, instead got"
f" type={type(file_)}"
)
raise TypeError(errmsg)
def check_shapes( | igram: InputDataset, | 3 | 2023-11-16 21:48:58+00:00 | 4k |
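A small self-contained sketch, under assumed array and chunk sizes, of the tiling idea documented for BlockIterator above: non-overlapping slice tuples that cover an array exactly once. The iter_blocks name is illustrative only.

import itertools
import numpy as np

def iter_blocks(shape, chunks):
    # Ceil-divide the shape by the chunk size to get the number of blocks per axis.
    nblocks = -(-np.array(shape) // np.array(chunks))
    for block_ind in itertools.product(*[range(int(n)) for n in nblocks]):
        start = np.multiply(block_ind, chunks)
        stop = np.minimum(start + chunks, shape)
        yield tuple(itertools.starmap(slice, zip(start, stop)))

arr = np.zeros((5, 7))
for block in iter_blocks(arr.shape, (2, 3)):
    arr[block] += 1            # each element is visited by exactly one block
assert np.all(arr == 1)        # the blocks tile the array with no gaps or overlap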
fofr/cog-sdxl-multi-controlnet-lora | predict.py | [
{
"identifier": "WeightsDownloader",
"path": "weights_downloader.py",
"snippet": "class WeightsDownloader:\n @staticmethod\n def download_if_not_exists(url, dest):\n if not os.path.exists(dest):\n WeightsDownloader.download(url, dest)\n\n @staticmethod\n def download(url, dest):\n start = time.time()\n print(\"downloading url: \", url)\n print(\"downloading to: \", dest)\n subprocess.check_call([\"pget\", \"-x\", url, dest], close_fds=False)\n print(\"downloading took: \", time.time() - start)"
},
{
"identifier": "WeightsManager",
"path": "weights_manager.py",
"snippet": "class WeightsManager:\n def __init__(self, predictor):\n self.predictor = predictor\n self.weights_cache = WeightsDownloadCache()\n\n def load_trained_weights(self, weights, pipe):\n from no_init import no_init_or_tensor\n\n # weights can be a URLPath, which behaves in unexpected ways\n weights = str(weights)\n if self.predictor.tuned_weights == weights:\n print(\"skipping loading .. weights already loaded\")\n return\n\n self.predictor.tuned_weights = weights\n\n local_weights_cache = self.weights_cache.ensure(weights)\n\n # load UNET\n print(\"Loading fine-tuned model\")\n self.predictor.is_lora = False\n\n maybe_unet_path = os.path.join(local_weights_cache, \"unet.safetensors\")\n if not os.path.exists(maybe_unet_path):\n print(\"Does not have Unet. assume we are using LoRA\")\n self.predictor.is_lora = True\n\n if not self.predictor.is_lora:\n print(\"Loading Unet\")\n\n new_unet_params = load_file(\n os.path.join(local_weights_cache, \"unet.safetensors\")\n )\n # this should return _IncompatibleKeys(missing_keys=[...], unexpected_keys=[])\n pipe.unet.load_state_dict(new_unet_params, strict=False)\n\n else:\n print(\"Loading Unet LoRA\")\n\n unet = pipe.unet\n\n tensors = load_file(os.path.join(local_weights_cache, \"lora.safetensors\"))\n\n unet_lora_attn_procs = {}\n name_rank_map = {}\n for tk, tv in tensors.items():\n # up is N, d\n if tk.endswith(\"up.weight\"):\n proc_name = \".\".join(tk.split(\".\")[:-3])\n r = tv.shape[1]\n name_rank_map[proc_name] = r\n\n for name, attn_processor in unet.attn_processors.items():\n cross_attention_dim = (\n None\n if name.endswith(\"attn1.processor\")\n else unet.config.cross_attention_dim\n )\n if name.startswith(\"mid_block\"):\n hidden_size = unet.config.block_out_channels[-1]\n elif name.startswith(\"up_blocks\"):\n block_id = int(name[len(\"up_blocks.\")])\n hidden_size = list(reversed(unet.config.block_out_channels))[\n block_id\n ]\n elif name.startswith(\"down_blocks\"):\n block_id = int(name[len(\"down_blocks.\")])\n hidden_size = unet.config.block_out_channels[block_id]\n with no_init_or_tensor():\n module = LoRAAttnProcessor2_0(\n hidden_size=hidden_size,\n cross_attention_dim=cross_attention_dim,\n rank=name_rank_map[name],\n )\n unet_lora_attn_procs[name] = module.to(\"cuda\", non_blocking=True)\n\n unet.set_attn_processor(unet_lora_attn_procs)\n unet.load_state_dict(tensors, strict=False)\n\n # load text\n handler = TokenEmbeddingsHandler(\n [pipe.text_encoder, pipe.text_encoder_2], [pipe.tokenizer, pipe.tokenizer_2]\n )\n handler.load_embeddings(os.path.join(local_weights_cache, \"embeddings.pti\"))\n\n # load params\n with open(os.path.join(local_weights_cache, \"special_params.json\"), \"r\") as f:\n params = json.load(f)\n self.predictor.token_map = params\n\n self.predictor.tuned_model = True"
},
{
"identifier": "ControlNet",
"path": "controlnet.py",
"snippet": "class ControlNet:\n CONTROLNET_MODELS = [\n \"none\",\n \"edge_canny\",\n \"illusion\",\n \"depth_leres\",\n \"depth_midas\",\n \"soft_edge_pidi\",\n \"soft_edge_hed\",\n \"lineart\",\n \"lineart_anime\",\n \"openpose\",\n # Preprocessors without an XL model yet\n # \"straight_edge_mlsd\",\n # \"face_detector\",\n # \"content_shuffle\",\n # \"normal_bae\",\n # \"segementation_sam\",\n ]\n\n def __init__(self, predictor):\n WeightsDownloader.download_if_not_exists(CONTROLNET_URL, CONTROLNET_MODEL_CACHE)\n self.predictor = predictor\n self.controlnet_preprocessor = None\n self.models = {}\n\n def initialize_controlnet(self, model_name):\n print(\"Initializing\", model_name)\n return ControlNetModel.from_pretrained(\n model_name, cache_dir=CONTROLNET_MODEL_CACHE, torch_dtype=torch.float16\n )\n\n def get_model(self, controlnet_name):\n if controlnet_name not in self.models:\n if controlnet_name.startswith(\"edge_\"):\n self.models[controlnet_name] = self.initialize_controlnet(\"diffusers/controlnet-canny-sdxl-1.0\")\n elif controlnet_name.startswith(\"depth_\"):\n self.models[controlnet_name] = self.initialize_controlnet(\"diffusers/controlnet-depth-sdxl-1.0-small\")\n elif controlnet_name.startswith(\"soft_edge\") or controlnet_name.startswith(\"lineart\"):\n self.models[controlnet_name] = self.initialize_controlnet(\"SargeZT/controlnet-sd-xl-1.0-softedge-dexined\")\n elif controlnet_name == \"openpose\":\n self.models[controlnet_name] = self.initialize_controlnet(\"thibaud/controlnet-openpose-sdxl-1.0\")\n elif controlnet_name == \"illusion\":\n self.models[controlnet_name] = self.initialize_controlnet(\"monster-labs/control_v1p_sdxl_qrcode_monster\")\n return self.models.get(controlnet_name)\n\n def get_models(self, controlnet_names):\n models = [\n self.get_model(controlnet_name) for controlnet_name in controlnet_names\n ]\n return list(filter(None, models))\n\n def preprocess(self, image, controlnet_name):\n # Illusion model needs no preprocessing\n if controlnet_name == \"illusion\" or controlnet_name == \"none\":\n return image\n\n if self.controlnet_preprocessor is None:\n self.controlnet_preprocessor = ControlNetPreprocessor(self.predictor)\n\n return self.controlnet_preprocessor.process_image(image, controlnet_name)\n\n @staticmethod\n def get_controlnet_names():\n return ControlNet.CONTROLNET_MODELS"
},
{
"identifier": "SizingStrategy",
"path": "sizing_strategy.py",
"snippet": "class SizingStrategy:\n def __init__(self):\n pass\n\n def get_dimensions(self, image):\n original_width, original_height = image.size\n print(\n f\"Original dimensions: Width: {original_width}, Height: {original_height}\"\n )\n resized_width, resized_height = self.get_resized_dimensions(\n original_width, original_height\n )\n print(\n f\"Dimensions to resize to: Width: {resized_width}, Height: {resized_height}\"\n )\n return resized_width, resized_height\n\n def get_allowed_dimensions(self, base=LOWEST_DIMENSION, max_dim=MAX_DIMENSION):\n \"\"\"\n Function to generate allowed dimensions optimized around a base up to a max\n \"\"\"\n allowed_dimensions = []\n for i in range(base, max_dim + 1, 64):\n for j in range(base, max_dim + 1, 64):\n allowed_dimensions.append((i, j))\n return allowed_dimensions\n\n def get_resized_dimensions(self, width, height):\n allowed_dimensions = self.get_allowed_dimensions()\n aspect_ratio = width / height\n print(f\"Aspect Ratio: {aspect_ratio:.2f}\")\n # Find the closest allowed dimensions that maintain the aspect ratio\n # and are closest to the optimum dimension\n closest_dimensions = min(\n allowed_dimensions,\n key=lambda dim: abs(dim[0] / dim[1] - aspect_ratio)\n + abs(dim[0] - OPTIMUM_DIMENSION),\n )\n return closest_dimensions\n\n def resize_images(self, images, width, height):\n return [\n img.resize((width, height)) if img is not None else None for img in images\n ]\n\n def open_image(self, image_path):\n return Image.open(str(image_path)) if image_path is not None else None\n\n def apply(\n self,\n sizing_strategy,\n width,\n height,\n image=None,\n mask=None,\n control_1_image=None,\n control_2_image=None,\n control_3_image=None,\n ):\n image_keys = [\n \"input_image\",\n \"mask_image\",\n \"controlnet_1_image\",\n \"controlnet_2_image\",\n \"controlnet_3_image\",\n ]\n image_values = [image, mask, control_1_image, control_2_image, control_3_image]\n image_dict = {\n key: self.open_image(value).convert(\"RGB\") if value is not None else None\n for key, value in zip(image_keys, image_values)\n }\n\n if sizing_strategy in image_dict:\n print(f\"Resizing based on {sizing_strategy}\")\n width, height = self.get_dimensions(image_dict[sizing_strategy])\n else:\n print(\"Using given dimensions\")\n\n resized_images = self.resize_images(\n list(image_dict.values()),\n width,\n height,\n )\n\n return width, height, resized_images"
}
] | import os
import time
import numpy as np
import torch
from typing import List, Optional
from cog import BasePredictor, Input, Path
from diffusers import (
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
PNDMScheduler,
StableDiffusionXLImg2ImgPipeline,
StableDiffusionXLInpaintPipeline,
StableDiffusionXLControlNetPipeline,
StableDiffusionXLControlNetInpaintPipeline,
StableDiffusionXLControlNetImg2ImgPipeline,
)
from diffusers.pipelines.stable_diffusion.safety_checker import (
StableDiffusionSafetyChecker,
)
from transformers import CLIPImageProcessor
from weights_downloader import WeightsDownloader
from weights_manager import WeightsManager
from controlnet import ControlNet
from sizing_strategy import SizingStrategy | 3,039 |
SDXL_MODEL_CACHE = "./sdxl-cache"
REFINER_MODEL_CACHE = "./refiner-cache"
SAFETY_CACHE = "./safety-cache"
FEATURE_EXTRACTOR = "./feature-extractor"
SDXL_URL = "https://weights.replicate.delivery/default/sdxl/sdxl-vae-upcast-fix.tar"
REFINER_URL = (
"https://weights.replicate.delivery/default/sdxl/refiner-no-vae-no-encoder-1.0.tar"
)
SAFETY_URL = "https://weights.replicate.delivery/default/sdxl/safety-1.0.tar"
class KarrasDPM:
def from_config(config):
return DPMSolverMultistepScheduler.from_config(config, use_karras_sigmas=True)
SCHEDULERS = {
"DDIM": DDIMScheduler,
"DPMSolverMultistep": DPMSolverMultistepScheduler,
"HeunDiscrete": HeunDiscreteScheduler,
"KarrasDPM": KarrasDPM,
"K_EULER_ANCESTRAL": EulerAncestralDiscreteScheduler,
"K_EULER": EulerDiscreteScheduler,
"PNDM": PNDMScheduler,
}
class Predictor(BasePredictor):
def load_trained_weights(self, weights, pipe):
self.weights_manager.load_trained_weights(weights, pipe)
def build_controlnet_pipeline(self, pipeline_class, controlnet_models):
pipe = pipeline_class.from_pretrained(
SDXL_MODEL_CACHE,
torch_dtype=torch.float16,
use_safetensors=True,
variant="fp16",
vae=self.txt2img_pipe.vae,
text_encoder=self.txt2img_pipe.text_encoder,
text_encoder_2=self.txt2img_pipe.text_encoder_2,
tokenizer=self.txt2img_pipe.tokenizer,
tokenizer_2=self.txt2img_pipe.tokenizer_2,
unet=self.txt2img_pipe.unet,
scheduler=self.txt2img_pipe.scheduler,
controlnet=self.controlnet.get_models(controlnet_models),
)
pipe.to("cuda")
return pipe
def setup(self, weights: Optional[Path] = None):
"""Load the model into memory to make running multiple predictions efficient"""
start = time.time()
|
SDXL_MODEL_CACHE = "./sdxl-cache"
REFINER_MODEL_CACHE = "./refiner-cache"
SAFETY_CACHE = "./safety-cache"
FEATURE_EXTRACTOR = "./feature-extractor"
SDXL_URL = "https://weights.replicate.delivery/default/sdxl/sdxl-vae-upcast-fix.tar"
REFINER_URL = (
"https://weights.replicate.delivery/default/sdxl/refiner-no-vae-no-encoder-1.0.tar"
)
SAFETY_URL = "https://weights.replicate.delivery/default/sdxl/safety-1.0.tar"
class KarrasDPM:
def from_config(config):
return DPMSolverMultistepScheduler.from_config(config, use_karras_sigmas=True)
SCHEDULERS = {
"DDIM": DDIMScheduler,
"DPMSolverMultistep": DPMSolverMultistepScheduler,
"HeunDiscrete": HeunDiscreteScheduler,
"KarrasDPM": KarrasDPM,
"K_EULER_ANCESTRAL": EulerAncestralDiscreteScheduler,
"K_EULER": EulerDiscreteScheduler,
"PNDM": PNDMScheduler,
}
class Predictor(BasePredictor):
def load_trained_weights(self, weights, pipe):
self.weights_manager.load_trained_weights(weights, pipe)
def build_controlnet_pipeline(self, pipeline_class, controlnet_models):
pipe = pipeline_class.from_pretrained(
SDXL_MODEL_CACHE,
torch_dtype=torch.float16,
use_safetensors=True,
variant="fp16",
vae=self.txt2img_pipe.vae,
text_encoder=self.txt2img_pipe.text_encoder,
text_encoder_2=self.txt2img_pipe.text_encoder_2,
tokenizer=self.txt2img_pipe.tokenizer,
tokenizer_2=self.txt2img_pipe.tokenizer_2,
unet=self.txt2img_pipe.unet,
scheduler=self.txt2img_pipe.scheduler,
controlnet=self.controlnet.get_models(controlnet_models),
)
pipe.to("cuda")
return pipe
def setup(self, weights: Optional[Path] = None):
"""Load the model into memory to make running multiple predictions efficient"""
start = time.time() | self.sizing_strategy = SizingStrategy() | 3 | 2023-11-13 13:04:41+00:00 | 4k |
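A hedged sketch of the dimension selection that the SizingStrategy snippet above performs; the numeric bounds and optimum below are placeholder assumptions rather than the repository's actual constants.

# Assumed stand-ins for LOWEST_DIMENSION, MAX_DIMENSION and OPTIMUM_DIMENSION.
LOWEST_DIMENSION, MAX_DIMENSION, OPTIMUM_DIMENSION = 768, 1280, 1024

def closest_allowed(width, height):
    # Candidate sizes are multiples of 64 inside the allowed range; the chosen pair
    # balances closeness to the input aspect ratio and to the optimum width,
    # mirroring the key used in get_resized_dimensions above.
    aspect = width / height
    allowed = [(w, h)
               for w in range(LOWEST_DIMENSION, MAX_DIMENSION + 1, 64)
               for h in range(LOWEST_DIMENSION, MAX_DIMENSION + 1, 64)]
    return min(allowed,
               key=lambda d: abs(d[0] / d[1] - aspect) + abs(d[0] - OPTIMUM_DIMENSION))

print(closest_allowed(1920, 1080))  # snaps an arbitrary input size to an allowed pair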
ahayler/s4c | datasets/kitti_raw/kitti_raw_dataset.py | [
{
"identifier": "apply_crop",
"path": "utils/array_operations.py",
"snippet": "def apply_crop(array, crop):\n return array[crop[0]:crop[0] + crop[2], crop[1]:crop[1] + crop[3]]"
},
{
"identifier": "get_color_aug_fn",
"path": "utils/augmentation.py",
"snippet": "def get_color_aug_fn(params):\n fn_idx, brightness_factor, contrast_factor, saturation_factor, hue_factor = params\n def color_aug_fn(img):\n for fn_id in fn_idx:\n if fn_id == 0 and brightness_factor is not None:\n img = F.adjust_brightness(img, brightness_factor)\n elif fn_id == 1 and contrast_factor is not None:\n img = F.adjust_contrast(img, contrast_factor)\n elif fn_id == 2 and saturation_factor is not None:\n img = F.adjust_saturation(img, saturation_factor)\n elif fn_id == 3 and hue_factor is not None:\n img = F.adjust_hue(img, hue_factor)\n\n return img\n\n return color_aug_fn"
}
] | import os
import time
import cv2
import numpy as np
import torch
from collections import Counter
from pathlib import Path
from torch.utils.data import Dataset
from torchvision.transforms import ColorJitter
from utils.array_operations import apply_crop
from utils.augmentation import get_color_aug_fn | 2,545 |
R_rect = np.eye(4, dtype=np.float32)
R_rect[:3, :3] = cam_calib_file_data['R_rect_00'].reshape(3, 3)
T_v2c = np.hstack((velo_calib_file_data['R'].reshape(3, 3), velo_calib_file_data['T'][..., np.newaxis]))
T_v2c = np.vstack((T_v2c, np.array([0, 0, 0, 1.0], dtype=np.float32)))
P_v2cl = P_rect_l @ R_rect @ T_v2c
P_v2cr = P_rect_r @ R_rect @ T_v2c
# Compute the rectified extrinsics from cam0 to camN
T_l = np.eye(4, dtype=np.float32)
T_l[0, 3] = P_rect_l[0, 3] / P_rect_l[0, 0]
T_r = np.eye(4, dtype=np.float32)
T_r[0, 3] = P_rect_r[0, 3] / P_rect_r[0, 0]
K = P_rect_l[:3, :3]
if keep_aspect_ratio:
r_orig = im_size[0] / im_size[1]
r_target = target_image_size[0] / target_image_size[1]
if r_orig >= r_target:
new_height = r_target * im_size[1]
crop_height = im_size[0] - ((im_size[0] - new_height) // 2) * 2
box = ((im_size[0] - new_height) // 2, 0, crop_height, int(im_size[1]))
c_x = K[0, 2] / im_size[1]
c_y = (K[1, 2] - (im_size[0] - new_height) / 2) / new_height
rescale = im_size[1] / target_image_size[1]
else:
new_width = im_size[0] / r_target
crop_width = im_size[1] - ((im_size[1] - new_width) // 2) * 2
box = (0, (im_size[1] - new_width) // 2, im_size[0], crop_width)
c_x = (K[0, 2] - (im_size[1] - new_width) / 2) / new_width
c_y = K[1, 2] / im_size[0]
rescale = im_size[0] / target_image_size[0]
f_x = (K[0, 0] / target_image_size[1]) / rescale
f_y = (K[1, 1] / target_image_size[0]) / rescale
box = tuple([int(x) for x in box])
else:
f_x = K[0, 0] / im_size[1]
f_y = K[1, 1] / im_size[0]
c_x = K[0, 2] / im_size[1]
c_y = K[1, 2] / im_size[0]
box = None
# Replace old K with new K
K[0, 0] = f_x * 2.
K[1, 1] = f_y * 2.
K[0, 2] = c_x * 2 - 1
K[1, 2] = c_y * 2 - 1
# Invert to get camera to center transformation, not center to camera
T_r = np.linalg.inv(T_r)
T_l = np.linalg.inv(T_l)
calibs[day] = {
"K": K,
"T_l": T_l,
"T_r": T_r,
"P_v2cl": P_v2cl,
"P_v2cr": P_v2cr,
"crop": box
}
return calibs
@staticmethod
def _load_poses(pose_path, sequences):
poses = {}
for day, seq, _ in sequences:
pose_file = Path(pose_path) / day / f"{seq}.txt"
poses_seq = []
try:
with open(pose_file, 'r') as f:
lines = f.readlines()
for line in lines:
T_w_cam0 = np.fromstring(line, dtype=float, sep=' ')
T_w_cam0 = T_w_cam0.reshape(3, 4)
T_w_cam0 = np.vstack((T_w_cam0, [0, 0, 0, 1]))
poses_seq.append(T_w_cam0)
except FileNotFoundError:
                print(f'Ground truth poses are not available for sequence {seq}.')
poses_seq = np.array(poses_seq, dtype=np.float32)
poses[(day, seq)] = poses_seq
return poses
def load_images(self, day, seq, ids, load_left, load_right):
imgs_left = []
imgs_right = []
for id in ids:
if load_left:
img = cv2.cvtColor(cv2.imread(os.path.join(self.data_path, day, seq, "image_02", "data", f"{id:010d}.jpg")), cv2.COLOR_BGR2RGB).astype(np.float32) / 255
imgs_left += [img]
if load_right:
img = cv2.cvtColor(cv2.imread(os.path.join(self.data_path, day, seq, "image_03", "data", f"{id:010d}.jpg")), cv2.COLOR_BGR2RGB).astype(np.float32) / 255
imgs_right += [img]
return imgs_left, imgs_right
def process_img(self, img: np.array, crop_box=None, color_aug_fn=None):
if crop_box:
|
# This could also be retrieved from
BASE_SIZES = {
"2011_09_26": (375, 1242),
"2011_09_28": (370, 1224),
"2011_09_29": (374, 1238),
"2011_09_30": (370, 1226),
"2011_10_03": (376, 1241),
}
class KittiRawDataset(Dataset):
def __init__(self,
data_path: str,
pose_path: str,
split_path: str,
target_image_size=(192, 640),
return_stereo=False,
return_depth=False,
frame_count=2,
keyframe_offset=0,
dilation=1,
keep_aspect_ratio=False,
eigen_depth=True,
color_aug=False
):
self.data_path = data_path
self.pose_path = pose_path
self.split_path = split_path
self.target_image_size = target_image_size
self.return_stereo = return_stereo
self.return_depth = return_depth
self.frame_count = frame_count
self.dilation = dilation
self.keyframe_offset = keyframe_offset
self.keep_aspect_ratio = keep_aspect_ratio
self.eigen_depth = eigen_depth
self.color_aug = color_aug
self._sequences = self._get_sequences(self.data_path)
self._seq_lengths = {(day, seq): length for day, seq, length in self._sequences}
self._calibs = self._load_calibs(self.data_path, self.target_image_size, keep_aspect_ratio)
self._poses = self._load_poses(self.pose_path, self._sequences)
self._datapoints = self._load_split(self.split_path)
self._left_offset = ((self.frame_count - 1) // 2 + self.keyframe_offset) * self.dilation
self._skip = 0
self.length = len(self._datapoints)
@staticmethod
def _get_sequences(data_path):
all_sequences = []
data_path = Path(data_path)
for day in data_path.iterdir():
if not day.is_dir():
continue
day_sequences = [seq for seq in day.iterdir() if seq.is_dir()]
lengths = [len(list((seq / "image_02" / "data").iterdir())) for seq in day_sequences]
day_sequences = [(day.name, seq.name, length) for seq, length in zip(day_sequences, lengths)]
all_sequences.extend(day_sequences)
return all_sequences
@staticmethod
def _load_split(split_path):
with open(split_path, "r") as f:
lines = f.readlines()
def split_line(l):
segments = l.split(" ")
day, sequence = segments[0].split("/")
# (day, sequence, id, is_right)
return day, sequence, int(segments[1]), segments[2] == "r"
return list(map(split_line, lines))
@staticmethod
def _load_calibs(data_path, target_image_size, keep_aspect_ratio):
calibs = {}
for day in BASE_SIZES.keys():
day_folder = Path(data_path) / day
cam_calib_file = day_folder / "calib_cam_to_cam.txt"
velo_calib_file = day_folder / "calib_velo_to_cam.txt"
cam_calib_file_data = {}
with open(cam_calib_file, 'r') as f:
for line in f.readlines():
key, value = line.split(':', 1)
try:
cam_calib_file_data[key] = np.array([float(x) for x in value.split()], dtype=np.float32)
except ValueError:
pass
velo_calib_file_data = {}
with open(velo_calib_file, 'r') as f:
for line in f.readlines():
key, value = line.split(':', 1)
try:
velo_calib_file_data[key] = np.array([float(x) for x in value.split()], dtype=np.float32)
except ValueError:
pass
im_size = BASE_SIZES[day]
# Create 3x4 projection matrices
P_rect_l = np.reshape(cam_calib_file_data['P_rect_02'], (3, 4))
P_rect_r = np.reshape(cam_calib_file_data['P_rect_03'], (3, 4))
R_rect = np.eye(4, dtype=np.float32)
R_rect[:3, :3] = cam_calib_file_data['R_rect_00'].reshape(3, 3)
T_v2c = np.hstack((velo_calib_file_data['R'].reshape(3, 3), velo_calib_file_data['T'][..., np.newaxis]))
T_v2c = np.vstack((T_v2c, np.array([0, 0, 0, 1.0], dtype=np.float32)))
P_v2cl = P_rect_l @ R_rect @ T_v2c
P_v2cr = P_rect_r @ R_rect @ T_v2c
# Compute the rectified extrinsics from cam0 to camN
T_l = np.eye(4, dtype=np.float32)
T_l[0, 3] = P_rect_l[0, 3] / P_rect_l[0, 0]
T_r = np.eye(4, dtype=np.float32)
T_r[0, 3] = P_rect_r[0, 3] / P_rect_r[0, 0]
K = P_rect_l[:3, :3]
if keep_aspect_ratio:
r_orig = im_size[0] / im_size[1]
r_target = target_image_size[0] / target_image_size[1]
if r_orig >= r_target:
new_height = r_target * im_size[1]
crop_height = im_size[0] - ((im_size[0] - new_height) // 2) * 2
box = ((im_size[0] - new_height) // 2, 0, crop_height, int(im_size[1]))
c_x = K[0, 2] / im_size[1]
c_y = (K[1, 2] - (im_size[0] - new_height) / 2) / new_height
rescale = im_size[1] / target_image_size[1]
else:
new_width = im_size[0] / r_target
crop_width = im_size[1] - ((im_size[1] - new_width) // 2) * 2
box = (0, (im_size[1] - new_width) // 2, im_size[0], crop_width)
c_x = (K[0, 2] - (im_size[1] - new_width) / 2) / new_width
c_y = K[1, 2] / im_size[0]
rescale = im_size[0] / target_image_size[0]
f_x = (K[0, 0] / target_image_size[1]) / rescale
f_y = (K[1, 1] / target_image_size[0]) / rescale
box = tuple([int(x) for x in box])
else:
f_x = K[0, 0] / im_size[1]
f_y = K[1, 1] / im_size[0]
c_x = K[0, 2] / im_size[1]
c_y = K[1, 2] / im_size[0]
box = None
# Replace old K with new K
K[0, 0] = f_x * 2.
K[1, 1] = f_y * 2.
K[0, 2] = c_x * 2 - 1
K[1, 2] = c_y * 2 - 1
# Invert to get camera to center transformation, not center to camera
T_r = np.linalg.inv(T_r)
T_l = np.linalg.inv(T_l)
calibs[day] = {
"K": K,
"T_l": T_l,
"T_r": T_r,
"P_v2cl": P_v2cl,
"P_v2cr": P_v2cr,
"crop": box
}
return calibs
@staticmethod
def _load_poses(pose_path, sequences):
poses = {}
for day, seq, _ in sequences:
pose_file = Path(pose_path) / day / f"{seq}.txt"
poses_seq = []
try:
with open(pose_file, 'r') as f:
lines = f.readlines()
for line in lines:
T_w_cam0 = np.fromstring(line, dtype=float, sep=' ')
T_w_cam0 = T_w_cam0.reshape(3, 4)
T_w_cam0 = np.vstack((T_w_cam0, [0, 0, 0, 1]))
poses_seq.append(T_w_cam0)
except FileNotFoundError:
                print(f'Ground truth poses are not available for sequence {seq}.')
poses_seq = np.array(poses_seq, dtype=np.float32)
poses[(day, seq)] = poses_seq
return poses
def load_images(self, day, seq, ids, load_left, load_right):
imgs_left = []
imgs_right = []
for id in ids:
if load_left:
img = cv2.cvtColor(cv2.imread(os.path.join(self.data_path, day, seq, "image_02", "data", f"{id:010d}.jpg")), cv2.COLOR_BGR2RGB).astype(np.float32) / 255
imgs_left += [img]
if load_right:
img = cv2.cvtColor(cv2.imread(os.path.join(self.data_path, day, seq, "image_03", "data", f"{id:010d}.jpg")), cv2.COLOR_BGR2RGB).astype(np.float32) / 255
imgs_right += [img]
return imgs_left, imgs_right
def process_img(self, img: np.array, crop_box=None, color_aug_fn=None):
if crop_box: | img = apply_crop(img, crop_box) | 0 | 2023-11-12 21:53:27+00:00 | 4k |
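A short sketch of the intrinsics normalisation carried out in _load_calibs above for the no-crop case: focal lengths and principal point are rescaled so pixel coordinates map into [-1, 1]. The sample K values are made up for illustration.

import numpy as np

def normalize_intrinsics(K, width, height):
    # Divide by the image size, then rescale to the [-1, 1] convention used above
    # (f * 2 and c * 2 - 1).
    Kn = K.copy().astype(np.float32)
    Kn[0, 0] = (K[0, 0] / width) * 2.0
    Kn[1, 1] = (K[1, 1] / height) * 2.0
    Kn[0, 2] = (K[0, 2] / width) * 2.0 - 1.0
    Kn[1, 2] = (K[1, 2] / height) * 2.0 - 1.0
    return Kn

K = np.array([[721.5, 0.0, 609.6],
              [0.0, 721.5, 172.9],
              [0.0, 0.0, 1.0]])
print(normalize_intrinsics(K, width=1242, height=375))  # principal point lands near (0, 0)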
TimbreWatermarking/TimbreWatermarking | watermarking_model/model/modules.py | [
{
"identifier": "FCBlock",
"path": "watermarking_model/model/blocks.py",
"snippet": "class FCBlock(nn.Module):\n \"\"\" Fully Connected Block \"\"\"\n\n def __init__(self, in_features, out_features, activation=None, bias=False, dropout=None, spectral_norm=False):\n super(FCBlock, self).__init__()\n self.fc_layer = nn.Sequential()\n self.fc_layer.add_module(\n \"fc_layer\",\n LinearNorm(\n in_features,\n out_features,\n bias,\n spectral_norm,\n ),\n )\n if activation is not None:\n self.fc_layer.add_module(\"activ\", activation)\n self.dropout = dropout\n\n def forward(self, x):\n x = self.fc_layer(x)\n if self.dropout is not None:\n x = F.dropout(x, self.dropout, self.training)\n return x"
},
{
"identifier": "PositionalEncoding",
"path": "watermarking_model/model/blocks.py",
"snippet": "class PositionalEncoding(nn.Module):\n def __init__(self, d_model, dropout=0.0, max_len=10000):\n super(PositionalEncoding, self).__init__()\n self.dropout = nn.Dropout(p=dropout)\n\n pe = torch.zeros(max_len, d_model)\n position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)\n div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))\n pe[:, 0::2] = torch.sin(position * div_term)\n pe[:, 1::2] = torch.cos(position * div_term)\n # pe = pe.unsqueeze(0).transpose(0, 1)\n self.register_buffer('pe', pe)\n\n def forward(self, x):\n x = x + self.pe[:x.size(1), :] # [WORD_NUM, BATCH, DIM]\n return self.dropout(x)"
},
{
"identifier": "Mish",
"path": "watermarking_model/model/blocks.py",
"snippet": "class Mish(nn.Module):\n def forward(self, x):\n return x * torch.tanh(F.softplus(x))"
},
{
"identifier": "Conv1DBlock",
"path": "watermarking_model/model/blocks.py",
"snippet": "class Conv1DBlock(nn.Module):\n \"\"\" 1D Convolutional Block \"\"\"\n\n def __init__(self, in_channels, out_channels, kernel_size, activation=None, dropout=None, spectral_norm=False):\n super(Conv1DBlock, self).__init__()\n\n self.conv_layer = nn.Sequential()\n self.conv_layer.add_module(\n \"conv_layer\",\n ConvNorm(\n in_channels,\n out_channels,\n kernel_size=kernel_size,\n stride=1,\n padding=int((kernel_size - 1) / 2),\n dilation=1,\n w_init_gain=\"tanh\",\n spectral_norm=spectral_norm,\n ),\n )\n if activation is not None:\n self.conv_layer.add_module(\"activ\", activation)\n self.dropout = dropout\n\n def forward(self, x, mask=None):\n # x = x.contiguous().transpose(1, 2)\n x = self.conv_layer(x)\n\n if self.dropout is not None:\n x = F.dropout(x, self.dropout, self.training)\n\n # x = x.contiguous().transpose(1, 2)\n if mask is not None:\n x = x.masked_fill(mask.unsqueeze(-1), 0)\n\n return x"
}
] | from base64 import encode
from torch.nn import LeakyReLU
from .blocks import FCBlock, PositionalEncoding, Mish, Conv1DBlock
import torch
import torch.nn as nn | 1,743 |
class Encoder(nn.Module):
def __init__(self, model_config, msg_length, win_dim, embedding_dim, nlayers_encoder=6, transformer_drop=0.1, attention_heads=8):
super(Encoder, self).__init__()
self.encoder_layer = nn.TransformerEncoderLayer(d_model=embedding_dim, nhead=attention_heads, dropout=transformer_drop)
self.dec_encoder_layer = nn.TransformerDecoderLayer(d_model=embedding_dim, nhead=attention_heads, dropout=transformer_drop)
self.encoder = nn.TransformerEncoder(self.encoder_layer, nlayers_encoder)
self.decoder = nn.TransformerDecoder(self.dec_encoder_layer, nlayers_encoder)
#MLP for the input audio waveform
self.wav_linear_in = FCBlock(win_dim, embedding_dim, activation=LeakyReLU(inplace=True))
self.wav_linear_out = FCBlock(embedding_dim, win_dim)
#MLP for the input wm
self.msg_linear_in = FCBlock(msg_length, embedding_dim, activation=LeakyReLU(inplace=True))
#position encoding
self.pos_encoder = PositionalEncoding(d_model=embedding_dim, dropout=transformer_drop)
def forward_encode_msg(self, x, w):
x_embedding = self.wav_linear_in(x)
p_x = self.pos_encoder(x_embedding)
encoder_out = self.encoder(p_x.transpose(0,1)).transpose(0,1) # tgt_len, bsz, embed_dim = query.size()
# Temporal Average Pooling
wav_feature = torch.mean(encoder_out, dim=1, keepdim=True) # [B, 1, H]
msg_feature = self.msg_linear_in(w)
encoded_msg = wav_feature.add(msg_feature)
return encoded_msg, encoder_out, p_x
def forward_decode_wav(self, encoded_msg, encoder_out, p_x):
# B, _, D = encoded_msg.shape
encode_msg_repeat = encoded_msg.repeat(1, p_x.size(1), 1)
embeded = self.decoder((encode_msg_repeat + p_x).transpose(0,1), memory=encoder_out.transpose(0,1)).transpose(0,1)
wav_out = self.wav_linear_out(embeded)
return wav_out
def forward(self, x, w):
encoded_msg, encoder_out, p_x = self.forward_encode_msg(x, w)
wav_out = self.forward_decode_wav(encoded_msg, encoder_out, p_x)
return wav_out
class Decoder(nn.Module):
def __init__(self, model_config, msg_length, win_dim, embedding_dim, nlayers_decoder=6, transformer_drop=0.1, attention_heads=8):
super(Decoder, self).__init__()
self.msg_decoder_layer = nn.TransformerEncoderLayer(d_model=embedding_dim, nhead=attention_heads, dropout=transformer_drop)
self.msg_decoder = nn.TransformerEncoder(self.msg_decoder_layer, nlayers_decoder)
self.msg_linear_out = FCBlock(embedding_dim, msg_length)
#MLP for the input audio waveform
self.wav_linear_in = FCBlock(win_dim, embedding_dim, activation=LeakyReLU(inplace=True))
#position encoding
self.pos_encoder = PositionalEncoding(d_model=embedding_dim, dropout=transformer_drop)
def forward(self, x):
x_embedding = self.wav_linear_in(x)
p_x = self.pos_encoder(x_embedding)
encoder_out = self.msg_decoder(p_x.transpose(0,1)).transpose(0,1)
# Temporal Average Pooling
wav_feature = torch.mean(encoder_out, dim=1, keepdim=True) # [B, 1, H]
out_msg = self.msg_linear_out(wav_feature)
return out_msg
class Discriminator(nn.Module):
def __init__(self, msg_length, win_dim, embedding_dim, nlayers_decoder=6, transformer_drop=0.1, attention_heads=8):
        super(Discriminator, self).__init__()
self.msg_decoder_layer = nn.TransformerEncoderLayer(d_model=embedding_dim, nhead=attention_heads, dropout=transformer_drop)
self.msg_decoder = nn.TransformerEncoder(self.msg_decoder_layer, nlayers_decoder)
self.msg_linear_out = FCBlock(embedding_dim, msg_length)
#MLP for the input audio waveform
|
class Encoder(nn.Module):
def __init__(self, model_config, msg_length, win_dim, embedding_dim, nlayers_encoder=6, transformer_drop=0.1, attention_heads=8):
super(Encoder, self).__init__()
self.encoder_layer = nn.TransformerEncoderLayer(d_model=embedding_dim, nhead=attention_heads, dropout=transformer_drop)
self.dec_encoder_layer = nn.TransformerDecoderLayer(d_model=embedding_dim, nhead=attention_heads, dropout=transformer_drop)
self.encoder = nn.TransformerEncoder(self.encoder_layer, nlayers_encoder)
self.decoder = nn.TransformerDecoder(self.dec_encoder_layer, nlayers_encoder)
#MLP for the input audio waveform
self.wav_linear_in = FCBlock(win_dim, embedding_dim, activation=LeakyReLU(inplace=True))
self.wav_linear_out = FCBlock(embedding_dim, win_dim)
#MLP for the input wm
self.msg_linear_in = FCBlock(msg_length, embedding_dim, activation=LeakyReLU(inplace=True))
#position encoding
self.pos_encoder = PositionalEncoding(d_model=embedding_dim, dropout=transformer_drop)
def forward_encode_msg(self, x, w):
x_embedding = self.wav_linear_in(x)
p_x = self.pos_encoder(x_embedding)
encoder_out = self.encoder(p_x.transpose(0,1)).transpose(0,1) # tgt_len, bsz, embed_dim = query.size()
# Temporal Average Pooling
wav_feature = torch.mean(encoder_out, dim=1, keepdim=True) # [B, 1, H]
msg_feature = self.msg_linear_in(w)
encoded_msg = wav_feature.add(msg_feature)
return encoded_msg, encoder_out, p_x
def forward_decode_wav(self, encoded_msg, encoder_out, p_x):
# B, _, D = encoded_msg.shape
encode_msg_repeat = encoded_msg.repeat(1, p_x.size(1), 1)
embeded = self.decoder((encode_msg_repeat + p_x).transpose(0,1), memory=encoder_out.transpose(0,1)).transpose(0,1)
wav_out = self.wav_linear_out(embeded)
return wav_out
def forward(self, x, w):
encoded_msg, encoder_out, p_x = self.forward_encode_msg(x, w)
wav_out = self.forward_decode_wav(encoded_msg, encoder_out, p_x)
return wav_out
class Decoder(nn.Module):
def __init__(self, model_config, msg_length, win_dim, embedding_dim, nlayers_decoder=6, transformer_drop=0.1, attention_heads=8):
super(Decoder, self).__init__()
self.msg_decoder_layer = nn.TransformerEncoderLayer(d_model=embedding_dim, nhead=attention_heads, dropout=transformer_drop)
self.msg_decoder = nn.TransformerEncoder(self.msg_decoder_layer, nlayers_decoder)
self.msg_linear_out = FCBlock(embedding_dim, msg_length)
#MLP for the input audio waveform
self.wav_linear_in = FCBlock(win_dim, embedding_dim, activation=LeakyReLU(inplace=True))
#position encoding
self.pos_encoder = PositionalEncoding(d_model=embedding_dim, dropout=transformer_drop)
def forward(self, x):
x_embedding = self.wav_linear_in(x)
p_x = self.pos_encoder(x_embedding)
encoder_out = self.msg_decoder(p_x.transpose(0,1)).transpose(0,1)
# Temporal Average Pooling
wav_feature = torch.mean(encoder_out, dim=1, keepdim=True) # [B, 1, H]
out_msg = self.msg_linear_out(wav_feature)
return out_msg
class Discriminator(nn.Module):
def __init__(self, msg_length, win_dim, embedding_dim, nlayers_decoder=6, transformer_drop=0.1, attention_heads=8):
        super(Discriminator, self).__init__()
self.msg_decoder_layer = nn.TransformerEncoderLayer(d_model=embedding_dim, nhead=attention_heads, dropout=transformer_drop)
self.msg_decoder = nn.TransformerEncoder(self.msg_decoder_layer, nlayers_decoder)
self.msg_linear_out = FCBlock(embedding_dim, msg_length)
#MLP for the input audio waveform | self.wav_linear_in = FCBlock(win_dim, embedding_dim, activation=Mish()) | 2 | 2023-11-13 01:40:03+00:00 | 4k |
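A toy sketch of the fusion step in the Encoder above: temporal average pooling of the windowed features, addition of the message embedding, and broadcasting back over time; all shapes here are assumptions.

import torch

B, T, D = 2, 5, 8                       # batch, windows, embedding dim (made up)
encoder_out = torch.randn(B, T, D)      # stand-in for the transformer encoder output
msg_feature = torch.randn(B, 1, D)      # stand-in for msg_linear_in(w)

wav_feature = encoder_out.mean(dim=1, keepdim=True)   # temporal average pooling -> [B, 1, D]
encoded_msg = wav_feature + msg_feature               # fuse the watermark with the global audio feature
encode_msg_repeat = encoded_msg.repeat(1, T, 1)       # broadcast over every window -> [B, T, D]
assert encode_msg_repeat.shape == (B, T, D)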
joseph-crowley/tool-creator | tool_user.py | [
{
"identifier": "AssistantConfig",
"path": "user_config.py",
"snippet": "class AssistantConfig:\n def __init__(self, tools_to_use=None):\n self.tools_to_use = tools_to_use or []\n self.instructions_for_assistant = 'Use the tools to accomplish the task'\n self.files_for_assistant = [] # Local file paths\n self.assistant_details = self._build_assistant_details()\n\n def _build_assistant_details(self):\n assistant_details = {\n 'build_params': {\n 'model': \"gpt-4-1106-preview\",\n 'name': \"Tool User\",\n 'description': \"Assistant to use tools made by the tool creator.\",\n 'instructions': self.instructions_for_assistant,\n 'tools': [{\"type\": \"code_interpreter\"}], # Tools will be added in the loop below\n 'file_ids': [],\n 'metadata': {},\n },\n 'file_paths': self.files_for_assistant,\n 'functions': {}, # Functions will be added in the loop below\n }\n\n # Load tools and their details\n os.makedirs('tools', exist_ok=True)\n if not self.tools_to_use:\n self.tools_to_use = [tool.split('.')[0] for tool in os.listdir('tools') if tool.endswith('.py')]\n for tool in self.tools_to_use:\n with open(f'tools/{tool}.json') as f:\n tool_details = json.load(f)\n\n with open(f'tools/{tool}.py') as f:\n tool_code = f.read()\n\n assistant_details['build_params']['tools'].append({\n \"type\": \"function\",\n \"function\": {\n \"name\": tool_details['name'],\n \"description\": tool_details['description'],\n \"parameters\": eval(tool_details['parameters']),\n },\n })\n assistant_details['functions'][tool_details['name']] = tool_code\n assistant_details['dependencies'] = assistant_details.get('dependencies', []) + tool_details['dependencies'].split()\n\n return assistant_details"
},
{
"identifier": "chat",
"path": "utils.py",
"snippet": "def chat(client, thread, assistant, functions):\n while True:\n user_message = input(\"You: \")\n\n # add user message to thread\n thread_message = client.beta.threads.messages.create(\n thread.id,\n role=\"user\",\n content=user_message,\n ) \n\n # get assistant response in thread\n run = client.beta.threads.runs.create(\n thread_id=thread.id,\n assistant_id=assistant.id,\n )\n\n # wait for run to complete\n wait_time = 0\n while True:\n run = client.beta.threads.runs.retrieve(\n thread_id=thread.id,\n run_id=run.id,\n )\n\n if run.status == \"completed\":\n break\n elif run.status == \"in_progress\":\n continue\n elif run.status == \"queued\":\n continue\n elif run.status == \"requires_action\":\n if run.required_action.type == 'submit_tool_outputs':\n tool_calls = run.required_action.submit_tool_outputs.tool_calls\n\n tool_outputs = []\n for tc in tool_calls:\n function_to_call = functions.get(tc.function.name)\n if not function_to_call:\n raise ValueError(f\"Function {tc.function.name} not found in execution environment\")\n\n # safely parse function arguments and call function\n try:\n function_args = json.loads(tc.function.arguments or {})\n function_response = function_to_call(**function_args)\n except Exception as e:\n exception_message = f\"Exception in function {tc.function.name}: {e}\"\n print(exception_message, flush=True)\n function_response = exception_message\n\n print(f\"\\nCalling function {tc.function.name} with args {function_args}\", flush=True)\n tool_outputs.append({\n \"tool_call_id\": tc.id,\n \"output\": json.dumps(function_response, default=numpy_json_serializer),\n })\n\n print(f\"Submitting tool outputs\\n {json.dumps(tool_outputs,indent=4)}\\n\\n\", flush=True)\n run = client.beta.threads.runs.submit_tool_outputs(\n thread_id=thread.id,\n run_id=run.id,\n tool_outputs=tool_outputs\n )\n else:\n input(f'Run status: {run.status}. 
press enter to continue, or ctrl+c to quit')\n\n if wait_time % 5 == 0:\n print(f\"waiting for run to complete...\", flush=True)\n wait_time += 1\n time.sleep(1)\n\n\n # get most recent message from thread\n thread_message = client.beta.threads.messages.list(thread.id, limit=1, order='desc').data[0]\n\n # get assistant response from message\n try:\n assistant_response = ''\n for content in thread_message.content:\n if content.type == 'text':\n assistant_response += content.text.value\n elif content.type == 'image_file':\n # get the file id\n file_id = content.image_file.file_id\n message_file = client.beta.threads.messages.files.retrieve(\n thread_id=thread.id,\n message_id=thread_message.id,\n file_id=file_id,\n )\n\n # get the image data\n image_data = client.files.retrieve_content(message_file.id)\n image_data_bytes = image_data.data\n \n # # debug output\n # print(f\"File id: {file_id}\", flush=True)\n # print(f'Message file: {message_file}\\n\\n{dir(message_file)}', flush=True)\n # print(f\"Image data: {image_data}\\n\\n{dir(image_data)}\", flush=True)\n # print(f\"Image data bytes: {image_data_bytes}\", flush=True)\n\n os.makedirs('images', exist_ok=True)\n with open(f\"images/{file_id}.png\", \"wb\") as file:\n file.write(image_data_bytes)\n print(f\"Saved image to images/{file_id}.png\", flush=True)\n\n assistant_response += f\"\\n\"\n assistant_response += '\\n'\n\n except Exception as e:\n print(f\"Exception getting assistant response: {e}\", flush=True)\n\n\n print(f\"\\n\\nBot: {assistant_response}\\n\\n\", flush=True)\n\n # continue?\n try:\n input(\"Press enter to continue chatting, or ctrl+c to stop chat\\n\")\n except KeyboardInterrupt:\n print(f\"Stopping chat\\n\" + 90*\"-\" + \"\\n\\n\", flush=True)\n break\n\n # Store information about the conversation\n thread_creation_time = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(thread.created_at))\n conversation = {\n 'start_date': thread_creation_time, \n 'end_date': time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()),\n 'assistant': assistant.name, \n 'bot_id': assistant.id,\n 'thread_id': thread.id,\n }\n\n # Check that chat_history.json exists and append the conversation\n chat_history_path = Path('assistants/chat_history.json')\n chat_history = []\n\n if chat_history_path.exists():\n with chat_history_path.open('r+') as f:\n try:\n chat_history = json.load(f)\n except JSONDecodeError:\n pass # File is empty or invalid, will overwrite with new content\n\n chat_history.append(conversation)\n\n with chat_history_path.open('w') as f:\n json.dump(chat_history, f, indent=4)"
}
] | import os
import json
from user_config import AssistantConfig as UserConfig
from utils import chat as chat_loop
from openai import OpenAI | 2,411 | """
Create an assistant using the tools from tool_creator using the assistant creation API
"""
client = OpenAI() # be sure to set your OPENAI_API_KEY environment variable
def create_tool_user(assistant_details):
# create the assistant
tool_user = client.beta.assistants.create(**assistant_details["build_params"])
print(f"Created assistant {tool_user.id} to use tools\n\n" + 90*"-" + "\n\n", flush=True)
# save the assistant info to a json file
info_to_export = {
"assistant_id": tool_user.id,
"assistant_details": assistant_details,
}
os.makedirs('assistants', exist_ok=True)
with open('assistants/tool_user.json', 'w') as f:
json.dump(info_to_export, f, indent=4)
return tool_user
def talk_to_tool_user(assistant_details):
"""
talk to the assistant to use the tools
"""
# check if json file exists
try:
os.makedirs('assistants', exist_ok=True)
with open('assistants/tool_user.json') as f:
create_new = input(f'Assistant details found in tool_user.json. Create a new assistant? [y/N]')
if create_new == 'y':
raise Exception("User wants a new assistant")
assistant_from_json = json.load(f)
tool_user = client.beta.assistants.retrieve(assistant_from_json['assistant_id'])
print(f"Loaded assistant details from tool_user.json\n\n" + 90*"-" + "\n\n", flush=True)
print(f'Assistant {tool_user.id}:\n')
assistant_details = assistant_from_json["assistant_details"]
except:
# create the assistant first
tool_user = create_tool_user(assistant_details)
# gather the dependencies
dependencies = assistant_details["dependencies"]
if dependencies:
print(f"Installing dependencies...", flush=True)
for d in dependencies:
os.system(f"pip install {d}")
print(f"Installed dependencies\n\n" + 90*"-" + "\n\n", flush=True)
# exec the functions from the py files
os.makedirs('tools', exist_ok=True)
functions = assistant_details["functions"]
for func in functions:
print(f"Loading function {func} into execution environment", flush=True)
try:
with open('tools/' + func + '.py') as f:
exec(f.read(), globals())
functions.update({func: eval(func)})
except Exception as e:
print(f"Exception loading function {func}: {e}", flush=True)
print(f"Continuing without {func}...", flush=True)
print(f"Loaded functions\n\n" + 90*"-" + "\n\n", flush=True)
# Create thread
thread = client.beta.threads.create()
# chat with the assistant
| """
Create an assistant using the tools from tool_creator using the assistant creation API
"""
client = OpenAI() # be sure to set your OPENAI_API_KEY environment variable
def create_tool_user(assistant_details):
# create the assistant
tool_user = client.beta.assistants.create(**assistant_details["build_params"])
print(f"Created assistant {tool_user.id} to use tools\n\n" + 90*"-" + "\n\n", flush=True)
# save the assistant info to a json file
info_to_export = {
"assistant_id": tool_user.id,
"assistant_details": assistant_details,
}
os.makedirs('assistants', exist_ok=True)
with open('assistants/tool_user.json', 'w') as f:
json.dump(info_to_export, f, indent=4)
return tool_user
def talk_to_tool_user(assistant_details):
"""
talk to the assistant to use the tools
"""
# check if json file exists
try:
os.makedirs('assistants', exist_ok=True)
with open('assistants/tool_user.json') as f:
create_new = input(f'Assistant details found in tool_user.json. Create a new assistant? [y/N]')
if create_new == 'y':
raise Exception("User wants a new assistant")
assistant_from_json = json.load(f)
tool_user = client.beta.assistants.retrieve(assistant_from_json['assistant_id'])
print(f"Loaded assistant details from tool_user.json\n\n" + 90*"-" + "\n\n", flush=True)
print(f'Assistant {tool_user.id}:\n')
assistant_details = assistant_from_json["assistant_details"]
except:
# create the assistant first
tool_user = create_tool_user(assistant_details)
# gather the dependencies
dependencies = assistant_details["dependencies"]
if dependencies:
print(f"Installing dependencies...", flush=True)
for d in dependencies:
os.system(f"pip install {d}")
print(f"Installed dependencies\n\n" + 90*"-" + "\n\n", flush=True)
# exec the functions from the py files
os.makedirs('tools', exist_ok=True)
functions = assistant_details["functions"]
for func in functions:
print(f"Loading function {func} into execution environment", flush=True)
try:
with open('tools/' + func + '.py') as f:
exec(f.read(), globals())
functions.update({func: eval(func)})
except Exception as e:
print(f"Exception loading function {func}: {e}", flush=True)
print(f"Continuing without {func}...", flush=True)
print(f"Loaded functions\n\n" + 90*"-" + "\n\n", flush=True)
# Create thread
thread = client.beta.threads.create()
# chat with the assistant | chat_loop(client, thread, tool_user, functions) | 0 | 2023-11-10 03:02:32+00:00 | 4k |
nillion-oss/tinysig | src/tinysig/tecdsa.py | [
{
"identifier": "add",
"path": "src/tinysig/utils.py",
"snippet": "def add(values: list[int], size: int) -> int:\ndef add_ec(points: list[EccPoint]) -> int:\ndef generate_additive_shares(secret: int, n: int, size: int) -> list[int]:\ndef multiply(values: list[int], size: int) -> int:\ndef egcd(a: int, p: int) -> int:\ndef hash(message: str, q: int):\ndef verify_dsa_signature(message: int, r: int, s: int, y: int, p: int, q: int, g: int) -> None:\ndef verify_ecdsa_signature(message: int, r: int, s: int, Y: EccPoint, q: int, G: EccPoint) -> None:\n def __init__(self, message):\n def setUp(self): \n def test_add(self):\n def test_generate_additive_shares(self):\n def test_multiply(self):\n V = u1 * G + u2 * Y\nclass VerifySignatureError(Exception):\nclass TestUtils(unittest.TestCase):"
},
{
"identifier": "DSASetup",
"path": "src/tinysig/setup.py",
"snippet": "class DSASetup:\n \"\"\"\n Dataclass representing a DSA (Digital Signature Algorithm) setup.\n\n Example:\n setup = DSASetup.generate_dsa_setup()\n \"\"\"\n\n p: int\n \"\"\"The DSA modulus.\"\"\"\n q: int\n \"\"\"The order of the subgroup.\"\"\"\n g: int\n \"\"\"A generator of the subgroup.\"\"\"\n h: int\n \"\"\"A generator of the field :math:`\\mathbb{Z}_q`.\"\"\"\n\n def generate_dsa_setup():\n \"\"\"Generate a DSA setup based on system parameters.\"\"\"\n key = DSA.generate(2048)\n g = int(key._key['g'])\n p = int(key._key['p'])\n q = int(key._key['q']) \n h = get_generator(q)\n return DSASetup(p, q, g, h)"
},
{
"identifier": "ECDSASetup",
"path": "src/tinysig/setup.py",
"snippet": "class ECDSASetup:\n \"\"\"\n Dataclass representing an ECDSA (Elliptic Curve Digital Signature Algorithm) setup.\n\n Example:\n setup = ECDSASetup.generate_ecdsa_setup()\n \"\"\"\n\n curve: str\n \"\"\"The name of the elliptic curve.\"\"\"\n p: Optional[int] = None\n \"\"\"The finite field of the elliptic curve.\"\"\"\n q: Optional[int] = None\n \"\"\"The order of the elliptic curve group.\"\"\"\n G: Optional[EccPoint] = None\n \"\"\"A base point on the elliptic curve.\"\"\"\n h: Optional[int] = None\n \"\"\"A generator of field :math:`\\mathbb{Z}_q`.\"\"\"\n\n def generate_ecdsa_setup(self):\n \"\"\"\n Generate an ECDSA setup for the specified elliptic curve.\n\n Returns:\n ECDSASetup: An instance of ECDSASetup with generated parameters.\n\n Raises:\n ValueError: If the specified curve is not supported.\n\n Example:\n >>> setup = ECDSASetup(curve='P-256').generate_ecdsa_setup()\n \"\"\"\n\n supported_curves = self.supported_curves()\n curve = self.curve\n if curve not in supported_curves:\n raise ValueError(\"{} is not one of the specified curves. \\\n Please choose one of the following curves:\\n \\\n ['P-192', 'P-224', 'P-256', 'P-384', 'P-521']\".format(curve))\n p = int(ECC._curves[curve].p)\n q = int(ECC._curves[curve].order)\n G = ECC._curves[curve].G\n h = get_generator(int(q))\n return ECDSASetup(curve, p, q, G, h)\n \n @staticmethod\n def supported_curves():\n \"\"\"\n Get a list of supported elliptic curves.\n\n Returns:\n List[str]: A list of supported elliptic curve names.\n\n Example:\n >>> supported_curves = ECDSASetup.supported_curves()\n >>> print(supported_curves)\n ['P-192', 'P-224', 'P-256', 'P-384', 'P-521']\n \"\"\"\n \n return ['P-192', 'P-224', 'P-256', 'P-384', 'P-521']\n \n def print_supported_curves(self):\n \"\"\"\n Print the list of supported elliptic curves.\n \"\"\"\n\n supported_curves = self.supported_curves()\n print(\"Supported Elliptic Curves: \", supported_curves)"
},
{
"identifier": "Network",
"path": "src/tinysig/network.py",
"snippet": "class Network:\n \"\"\"Represents a network of nodes and clients.\n \n Manages the interactions and cryptographic operations within the network, \n including sharing secrets, broadcasting values, and reconstructing shared values.\n \"\"\"\n\n nodes: List[Node]\n \"\"\"List of nodes in the network.\"\"\"\n clients: List[Client]\n \"\"\"List of clients in the network.\"\"\"\n q: int\n \"\"\"Prime field.\"\"\"\n h: int \n \"\"\"Multiplicative field generator.\"\"\"\n\n def __init__(self, N, q, h=2, C=1):\n \"\"\"\n Initialize the network with 'N' nodes, prime field 'q', field generator 'h', and 'C' clients.\n \n Parameters:\n N (int): Number of nodes in the network.\n q (int): Prime field.\n h (int): Multiplicative field generator (default is 2).\n C (int): Number of clients in the network (default is 1).\n \"\"\"\n self.nodes = [Node(i+1) for i in range(N)]\n self.clients = [Client(i+1) for i in range(C)]\n self.N = N\n self.q = q\n self.h = h\n\n def print(self):\n \"\"\"Print a readable representation of the network, including nodes and clients with their databases.\"\"\"\n print(f\"Network(N={len(self.nodes)}, q={self.q},\")\n print(\" nodes=[\")\n for node in self.nodes:\n print(f\" Node(id={node.id},\")\n print(\" shares_db={\")\n for key, value in node.shares_db.items():\n print(f\" {key}: {value},\")\n print(\" },\")\n print(\" public_keys={\")\n for key, value in node.he_public_keys.items():\n print(f\" {key}: {value},\")\n print(\" },\")\n print(\" open_db={\")\n for key, value in node.open_db.items():\n print(f\" {key}: {value},\")\n print(\" }\")\n print(\" )\")\n print(\" ]\\n)\")\n print(\" clients=[\")\n for client in self.clients:\n print(f\" Client(id={client.id},\")\n print(\" shares_db={\")\n for key, value in client.shares_db.items():\n print(f\" {key}: {value},\")\n print(\" },\")\n print(\" public_keys={\")\n for key, value in client.he_public_keys.items():\n print(f\" {key}: {value},\")\n print(\" },\")\n print(f\" private_keys={client.he_private_key},\")\n print(\" open_db={\")\n for key, value in client.open_db.items():\n print(f\" {key}: {value},\")\n print(\" }\")\n print(\" )\")\n print(\" ]\\n)\")\n\n def reconstruct_local(self, type_share: str, get_label: str, save_label: str, party: Union[Client, Node]) -> None:\n \"\"\"Locally reconstruct exponent share ('exp') or base ('base') shared value.\"\"\"\n \n type_label = \"_sh_exp\" if type_share == \"exp\" else \"_sh_base\"\n p = (self.q - 1) if type_share == \"exp\" else self.q\n shares = [party.get_share(get_label+type_label+\"_node_\"+str(node.id)) for node in self.nodes]\n reconstructed = add(shares, p)\n party.set_share(reconstructed, save_label)\n\n def broadcast(self, element: int, label: str) -> None:\n \"\"\"Send element to all nodes.\"\"\"\n\n for node in self.nodes:\n node.open_db[label] = element\n\n def send(self, type_share: str, label: str, party: Union[Client, Node], delete=False) -> None:\n \"\"\"Send exponent ('exp') or base ('base') share to party.\"\"\"\n \n type_label = \"_sh_exp\" if type_share == \"exp\" else \"_sh_base\"\n for node in self.nodes:\n sh_node = node.get_share(label+type_label)\n sh_label = label+type_label+\"_node_\"+str(node.id)\n party.set_share(sh_node, sh_label)\n node.delete_share(label+type_label) if delete else None\n\n def share(self, secret: int, size: int, label: str) -> None:\n \"\"\"Share secret value with all\"\"\"\n\n shares = generate_additive_shares(secret, self.N, size)\n for node in self.nodes:\n node.set_share(shares[node.id - 1], label)\n\n def 
reveal(self, type_share: str, get_label: str, save_label: str, party: Union[Client, Node]) -> None:\n \"\"\"Send exponent ('exp') or base ('base') share to party.\"\"\" \n \n self.send(type_share, get_label, party)\n self.reconstruct_local(type_share, get_label, save_label, party)"
},
{
"identifier": "Client",
"path": "src/tinysig/network.py",
"snippet": "class Client(Node):\n \"\"\"Represents a client node in the network, inheriting from the 'Node' class.\"\"\"\n he_private_key: int = field(default=0)"
}
] | from Crypto.Hash import SHA256
from phe import paillier
from typing import List
from .utils import add, add_ec, multiply, rand, egcd, verify_dsa_signature, verify_ecdsa_signature
from .setup import DSASetup, ECDSASetup
from .network import Network, Client | 3,566 | raise TypeError("Invalid type provided. "
"Please use either 'DSASetup' or 'ECDSASetup' types."
)
# Generate public and private keys for the paillier homomorphic encryption scheme
for i in range(C):
pub_key, priv_key = paillier.generate_paillier_keypair()
self.clients[i].he_private_key = priv_key
for node in self.nodes:
node.he_public_keys[i] = pub_key
for client in self.clients:
client.he_public_keys[i] = pub_key
def get_lambda(self, labels: list[str]) -> None:
"""
Emulates the generation of LAMBDA pairs :math:`([h^{\gamma}], [\gamma])` between all nodes.
Parameters:
labels (list[str]): A list of labels for which lambda values will be generated
and stored.
Returns:
None
"""
n = len(labels)
h = self.h
q = self.q
q_minus_one = q - 1
for l in range(n):
# Locally generate lambda
alpha = rand(q_minus_one)
h_alpha = pow(h, alpha, q)
self.share(alpha, q_minus_one, labels[l]+"_lambda_sh_exp")
self.share(h_alpha, q, labels[l]+"_lambda_sh_base")
def rss_protocol(self, size: int, label: str) -> None:
"""
Random Secret Sharing (RSS) Protocol.
This function implements a one-round RSS protocol. The goal is to share a random
secret value among a group of nodes using a specific label for the shares.
Parameters:
size (int): The maximum size of the random secret to be generated and shared.
label (str): A label to identify the shared secrets and their associated operations.
Returns:
None
"""
# Round 1
for node in self.nodes:
# Step 1: locally generate random secret
random_element = rand(size)
# Step 2: share random secret with all nodes
self.share(random_element, size, label+"sh_node_"+str(node.id))
# All local
for node in self.nodes:
# DB management
list_of_shares = [
node.get_share(label + "sh_node_" + str(other_node.id))
for other_node in self.nodes
]
# Step 3: add locally all shares
random_sum = add(list_of_shares, size)
# DB management
sh_label = label+"_sh_exp"
node.set_share(random_sum, sh_label)
if not self.debug:
[node.delete_share(label + "sh_node_" + str(other_node.id))
for other_node in self.nodes]
def pow_share_protocol(self, base_type: str, get_label: str, save_label: str) -> None:
"""
Compute a power-sharing protocol among a group of nodes.
This function implements a one-round protocol to securely compute :math:`b^{s}` where
the exponent is a secret shared element between the nodes.
Parameters:
base_type (str): The type of base used: 'exp', when base to be used is self.h;
'base', when the base to be used is self.dsa.g. Note: 'base'
                option can only be used for the DSA setup.
get_label (str): The label to retrieve shares of 's' from nodes.
save_label (str): The label to save the final result to.
Returns:
None
"""
if base_type not in ["exp", "base"]:
raise ValueError("{} is not one of the specified base types.\
Please choose one of the following:\n \
['exp', 'base']".format(base_type))
prime = self.q if base_type == "exp" else self.dsa.p
# Round 1
for node in self.nodes:
# DB management
exponent = node.get_share(get_label+"_sh_"+base_type)
# Step 1: compute base^share
if base_type == "exp":
h_exp = pow(self.h, exponent, prime)
else:
h_exp = pow(self.dsa.g, exponent, prime)
# Step 2: Broadcast base^share to nodes
self.broadcast(h_exp, "pow_share_node_"+str(node.id))
# All local
for node in self.nodes:
# DB management
base_exps = [
node.get_open("pow_share_node_"+str(other_node.id))
for other_node in self.nodes
]
# Step 3: multiply locally all powers of shares
|
class ThresholdSignature(Network):
clients: List[Client]
def __init__(self, N, C, setup=None, debug=False):
self.debug = debug
if setup is None:
self.dsa = DSASetup.generate_dsa_setup()
self.setup = DSASetup
super().__init__(N, self.dsa.q, self.dsa.h)
elif type(setup) == DSASetup:
self.dsa = setup
self.setup = DSASetup
super().__init__(N, self.dsa.q, self.dsa.h)
elif type(setup) == ECDSASetup:
self.ecdsa = setup.generate_ecdsa_setup()
self.setup = ECDSASetup
super().__init__(N, self.ecdsa.q, self.ecdsa.h)
else:
raise TypeError("Invalid type provided. "
"Please use either 'DSASetup' or 'ECDSASetup' types."
)
# Generate public and private keys for the paillier homomorphic encryption scheme
for i in range(C):
pub_key, priv_key = paillier.generate_paillier_keypair()
self.clients[i].he_private_key = priv_key
for node in self.nodes:
node.he_public_keys[i] = pub_key
for client in self.clients:
client.he_public_keys[i] = pub_key
def get_lambda(self, labels: list[str]) -> None:
"""
Emulates the generation of LAMBDA pairs :math:`([h^{\gamma}], [\gamma])` between all nodes.
Parameters:
labels (list[str]): A list of labels for which lambda values will be generated
and stored.
Returns:
None
"""
n = len(labels)
h = self.h
q = self.q
q_minus_one = q - 1
for l in range(n):
# Locally generate lambda
alpha = rand(q_minus_one)
h_alpha = pow(h, alpha, q)
self.share(alpha, q_minus_one, labels[l]+"_lambda_sh_exp")
self.share(h_alpha, q, labels[l]+"_lambda_sh_base")
def rss_protocol(self, size: int, label: str) -> None:
"""
Random Secret Sharing (RSS) Protocol.
This function implements a one-round RSS protocol. The goal is to share a random
secret value among a group of nodes using a specific label for the shares.
Parameters:
size (int): The maximum size of the random secret to be generated and shared.
label (str): A label to identify the shared secrets and their associated operations.
Returns:
None
"""
# Round 1
for node in self.nodes:
# Step 1: locally generate random secret
random_element = rand(size)
# Step 2: share random secret with all nodes
self.share(random_element, size, label+"sh_node_"+str(node.id))
# All local
for node in self.nodes:
# DB management
list_of_shares = [
node.get_share(label + "sh_node_" + str(other_node.id))
for other_node in self.nodes
]
# Step 3: add locally all shares
random_sum = add(list_of_shares, size)
# DB management
sh_label = label+"_sh_exp"
node.set_share(random_sum, sh_label)
if not self.debug:
[node.delete_share(label + "sh_node_" + str(other_node.id))
for other_node in self.nodes]
def pow_share_protocol(self, base_type: str, get_label: str, save_label: str) -> None:
"""
Compute a power-sharing protocol among a group of nodes.
This function implements a one-round protocol to securely compute :math:`b^{s}` where
the exponent is a secret shared element between the nodes.
Parameters:
base_type (str): The type of base used: 'exp', when base to be used is self.h;
'base', when the base to be used is self.dsa.g. Note: 'base'
                option can only be used for the DSA setup.
get_label (str): The label to retrieve shares of 's' from nodes.
save_label (str): The label to save the final result to.
Returns:
None
"""
if base_type not in ["exp", "base"]:
raise ValueError("{} is not one of the specified base types.\
Please choose one of the following:\n \
['exp', 'base']".format(base_type))
prime = self.q if base_type == "exp" else self.dsa.p
# Round 1
for node in self.nodes:
# DB management
exponent = node.get_share(get_label+"_sh_"+base_type)
# Step 1: compute base^share
if base_type == "exp":
h_exp = pow(self.h, exponent, prime)
else:
h_exp = pow(self.dsa.g, exponent, prime)
# Step 2: Broadcast base^share to nodes
self.broadcast(h_exp, "pow_share_node_"+str(node.id))
# All local
for node in self.nodes:
# DB management
base_exps = [
node.get_open("pow_share_node_"+str(other_node.id))
for other_node in self.nodes
]
# Step 3: multiply locally all powers of shares | val = multiply(base_exps, prime) | 0 | 2023-11-14 13:55:41+00:00 | 4k |
Exscientia/physicsml | src/physicsml/models/mace/modules/blocks.py | [
{
"identifier": "Activation",
"path": "src/physicsml/models/mace/modules/_activation.py",
"snippet": "class Activation(torch.nn.Module):\n r\"\"\"Scalar activation function.\n\n Odd scalar inputs require activation functions with a defined parity (odd or even).\n\n Parameters\n ----------\n irreps_in : `e3nn.o3.Irreps`\n representation of the input\n\n acts : list of function or None\n list of activation functions, `None` if non-scalar or identity\n\n Examples\n --------\n\n >>> a = Activation(\"256x0o\", [torch.abs])\n >>> a.irreps_out\n 256x0e\n\n >>> a = Activation(\"256x0o+16x1e\", [None, None])\n >>> a.irreps_out\n 256x0o+16x1e\n \"\"\"\n\n def __init__(self, irreps_in: o3.Irreps, acts: List[Optional[torch.nn.Module]]):\n super().__init__()\n irreps_in = o3.Irreps(irreps_in)\n if len(irreps_in) != len(acts):\n raise ValueError(\n f\"Irreps in and number of activation functions does not match: {len(acts), (irreps_in, acts)}\",\n )\n\n # normalize the second moment\n acts = [normalize2mom(act) if act is not None else None for act in acts]\n\n from e3nn.util._argtools import _get_device\n\n irreps_out = []\n for (mul, (l_in, p_in)), act in zip(irreps_in, acts):\n if act is not None:\n if l_in != 0:\n raise ValueError(\n \"Activation: cannot apply an activation function to a non-scalar input.\",\n )\n\n x = torch.linspace(0, 10, 256, device=_get_device(act))\n\n a1, a2 = act(x), act(-x)\n if (a1 - a2).abs().max() < 1e-5:\n p_act = 1\n elif (a1 + a2).abs().max() < 1e-5:\n p_act = -1\n else:\n p_act = 0\n\n p_out = p_act if p_in == -1 else p_in\n irreps_out.append((mul, (0, p_out)))\n\n if p_out == 0:\n raise ValueError(\n \"Activation: the parity is violated! The input scalar is odd but the activation is neither \"\n \"even nor odd.\",\n )\n else:\n irreps_out.append((mul, (l_in, p_in)))\n\n self.irreps_in = irreps_in\n self.irreps_out = o3.Irreps(irreps_out)\n self.acts = torch.nn.ModuleList(acts) # type: ignore\n assert len(self.irreps_in) == len(self.acts)\n\n self.ir_dims: List[int] = [ir.dim for _, ir in self.irreps_in]\n\n def __repr__(self) -> str:\n acts = \"\".join([\"x\" if a is not None else \" \" for a in self.acts])\n return f\"{self.__class__.__name__} [{acts}] ({self.irreps_in} -> {self.irreps_out})\"\n\n def forward(self, features: torch.Tensor, dim: int = -1) -> torch.Tensor:\n \"\"\"evaluate\n\n Parameters\n ----------\n features : `torch.Tensor`\n tensor of shape ``(...)``\n\n Returns\n -------\n `torch.Tensor`\n tensor of shape the same shape as the input\n \"\"\"\n # - PROFILER - with torch.autograd.profiler.record_function(repr(self)):\n output = []\n index = 0\n\n for i, act in enumerate(self.acts):\n ir_dim = self.ir_dims[i]\n mul, ir = self.irreps_in[i]\n\n if act is not None:\n output.append(act(features.narrow(dim, index, mul)))\n else:\n output.append(features.narrow(dim, index, mul * ir_dim))\n index += mul * ir_dim\n\n if len(output) > 1:\n return torch.cat(output, dim=dim)\n elif len(output) == 1:\n out: torch.Tensor = output[0]\n return out\n else:\n return torch.zeros_like(features)"
},
{
"identifier": "reshape_irreps",
"path": "src/physicsml/models/mace/modules/irreps_tools.py",
"snippet": "class reshape_irreps(torch.nn.Module):\n def __init__(self, irreps: o3.Irreps) -> None:\n super().__init__()\n self.irreps = o3.Irreps(irreps)\n self.dims = []\n self.muls = []\n for mul, ir in self.irreps:\n d = ir.dim\n self.dims.append(d)\n self.muls.append(mul)\n\n def forward(self, tensor: torch.Tensor) -> torch.Tensor:\n ix = 0\n out = []\n batch, _ = tensor.shape\n for mul, d in zip(self.muls, self.dims):\n field = tensor[:, ix : ix + mul * d] # [batch, sample, mul * repr]\n ix += mul * d\n field = field.reshape(batch, mul, d)\n out.append(field)\n return torch.cat(out, dim=-1)"
},
{
"identifier": "tp_out_irreps_with_instructions",
"path": "src/physicsml/models/mace/modules/irreps_tools.py",
"snippet": "def tp_out_irreps_with_instructions(\n irreps1: o3.Irreps,\n irreps2: o3.Irreps,\n target_irreps: o3.Irreps,\n) -> Tuple[o3.Irreps, List]:\n trainable = True\n\n # Collect possible irreps and their instructions\n irreps_out_list: List[Tuple[int, o3.Irreps]] = []\n instructions = []\n for i, (mul, ir_in) in enumerate(irreps1):\n for j, (_, ir_edge) in enumerate(irreps2):\n for ir_out in ir_in * ir_edge: # | l1 - l2 | <= l <= l1 + l2\n if ir_out in target_irreps:\n k = len(irreps_out_list) # instruction index\n irreps_out_list.append((mul, ir_out))\n instructions.append((i, j, k, \"uvu\", trainable))\n\n # We sort the output irreps of the tensor product so that we can simplify them\n # when they are provided to the second o3.Linear\n irreps_out = o3.Irreps(irreps_out_list)\n irreps_out, permut, _ = irreps_out.sort()\n\n # Permute the output indexes of the instructions to match the sorted irreps:\n instructions = [\n (i_in1, i_in2, permut[i_out], mode, train)\n for i_in1, i_in2, i_out, mode, train in instructions\n ]\n\n return irreps_out, instructions"
},
{
"identifier": "BesselBasis",
"path": "src/physicsml/models/mace/modules/radial.py",
"snippet": "class BesselBasis(torch.nn.Module):\n def __init__(\n self,\n r_max: float,\n num_basis: int = 8,\n trainable: bool = False,\n ) -> None:\n super().__init__()\n\n bessel_weights = torch.pi * torch.arange(1, num_basis + 1) / r_max\n\n if trainable:\n self.bessel_weights = torch.nn.Parameter(bessel_weights)\n else:\n self.register_buffer(\"bessel_weights\", bessel_weights)\n\n self.r_max = r_max\n self.prefactor = sqrt(2.0 / r_max)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor: # [..., 1]\n bessel_funcs = self.prefactor * (\n torch.sin(self.bessel_weights * x) / x\n ) # [..., num_basis]\n return bessel_funcs"
},
{
"identifier": "PolynomialCutoff",
"path": "src/physicsml/models/mace/modules/radial.py",
"snippet": "class PolynomialCutoff(torch.nn.Module):\n def __init__(self, r_max: float, p: int = 6) -> None:\n super().__init__()\n\n self.r_max = r_max\n self.p = p\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n envelope = (\n 1.0\n - ((self.p + 1.0) * (self.p + 2.0) / 2.0)\n * torch.pow(x / self.r_max, self.p)\n + self.p * (self.p + 2.0) * torch.pow(x / self.r_max, self.p + 1)\n - (self.p * (self.p + 1.0) / 2) * torch.pow(x / self.r_max, self.p + 2)\n )\n\n poly: torch.Tensor = envelope * (x < self.r_max)\n return poly"
},
{
"identifier": "SymmetricContraction",
"path": "src/physicsml/models/mace/modules/symmetric_contraction.py",
"snippet": "class SymmetricContraction(CodeGenMixin, torch.nn.Module):\n def __init__(\n self,\n irreps_in: o3.Irreps,\n irreps_out: o3.Irreps,\n correlation: Union[int, Dict[str, int]],\n irrep_normalization: Literal[\"component\", \"norm\"] = \"component\",\n path_normalization: Literal[\"element\", \"path\"] = \"element\",\n internal_weights: Optional[bool] = None,\n shared_weights: Optional[torch.Tensor] = None,\n num_elements: Optional[int] = None,\n ) -> None:\n super().__init__()\n\n assert irrep_normalization in [\"component\", \"norm\", \"none\"]\n assert path_normalization in [\"element\", \"path\", \"none\"]\n\n self.irreps_in = o3.Irreps(irreps_in)\n self.irreps_out = o3.Irreps(irreps_out)\n\n del irreps_in, irreps_out\n\n if not isinstance(correlation, dict):\n corr = correlation\n correlation = {}\n for irrep_out in self.irreps_out:\n correlation[irrep_out] = corr\n\n assert shared_weights or not internal_weights\n\n if internal_weights is None:\n internal_weights = True\n\n self.internal_weights = internal_weights\n self.shared_weights = shared_weights\n\n del internal_weights, shared_weights\n\n self.contractions = torch.nn.ModuleList()\n for irrep_out in self.irreps_out:\n self.contractions.append(\n Contraction(\n irreps_in=self.irreps_in,\n irrep_out=o3.Irreps(str(irrep_out.ir)),\n correlation=correlation[irrep_out],\n internal_weights=self.internal_weights,\n num_elements=num_elements,\n weights=self.shared_weights,\n ),\n )\n\n def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:\n outs = [contraction(x, y) for contraction in self.contractions]\n return torch.cat(outs, dim=-1)"
}
] | from typing import Optional
from e3nn import nn, o3
from torch_geometric.utils.scatter import scatter
from ._activation import Activation
from .irreps_tools import reshape_irreps, tp_out_irreps_with_instructions
from .radial import BesselBasis, PolynomialCutoff
from .symmetric_contraction import SymmetricContraction
import torch | 3,534 |
class NonLinearReadoutBlock(torch.nn.Module):
def __init__(
self,
irreps_in: o3.Irreps,
MLP_irreps: o3.Irreps,
irreps_out: o3.Irreps,
) -> None:
super().__init__()
self.linear_1 = o3.Linear(irreps_in=irreps_in, irreps_out=MLP_irreps)
self.non_linearity = Activation(irreps_in=MLP_irreps, acts=[torch.nn.SiLU()])
self.linear_2 = o3.Linear(irreps_in=MLP_irreps, irreps_out=irreps_out)
def forward(self, x: torch.Tensor) -> torch.Tensor: # [n_nodes, irreps] # [..., ]
x = self.linear_1(x)
x = self.non_linearity(x)
x = self.linear_2(x)
return x
class RadialEmbeddingBlock(torch.nn.Module):
def __init__(
self,
r_max: float,
num_bessel: int,
num_polynomial_cutoff: int,
) -> None:
super().__init__()
self.bessel_fn = BesselBasis(r_max=r_max, num_basis=num_bessel)
self.cutoff_fn = PolynomialCutoff(r_max=r_max, p=num_polynomial_cutoff)
self.out_dim = num_bessel
def forward(
self,
edge_lengths: torch.Tensor, # [n_edges, 1]
) -> torch.Tensor:
bessel = self.bessel_fn(edge_lengths) # [n_edges, n_basis]
cutoff = self.cutoff_fn(edge_lengths) # [n_edges, 1]
output: torch.Tensor = bessel * cutoff # [n_edges, n_basis]
return output
class NodeUpdateBlock(torch.nn.Module):
def __init__(
self,
node_attrs_irreps: o3.Irreps,
node_feats_irreps: o3.Irreps,
hidden_irreps: o3.Irreps,
residual_connection: bool,
) -> None:
super().__init__()
# net to compute W m_i
self.linear = o3.Linear(
hidden_irreps,
hidden_irreps,
internal_weights=True,
shared_weights=True,
)
if residual_connection:
# residual connection from original node attrs and node features
self.residual_connection_layer = o3.FullyConnectedTensorProduct(
node_feats_irreps,
node_attrs_irreps,
hidden_irreps,
)
else:
self.residual_connection_layer = None
def forward(
self,
m_i: torch.Tensor,
node_feats: torch.Tensor,
node_attrs: torch.Tensor,
) -> torch.Tensor:
if self.residual_connection_layer is not None:
node_feats = self.linear(m_i) + self.residual_connection_layer(
node_feats,
node_attrs,
)
else:
node_feats = self.linear(m_i)
return node_feats
class MessageBlock(torch.nn.Module):
def __init__(
self,
interaction_irreps: o3.Irreps,
node_attrs_irreps: o3.Irreps,
hidden_irreps: o3.Irreps,
correlation: int,
) -> None:
super().__init__()
# symmetric contraction to make A_i into messages m_i = W B_i
|
class NonLinearReadoutBlock(torch.nn.Module):
def __init__(
self,
irreps_in: o3.Irreps,
MLP_irreps: o3.Irreps,
irreps_out: o3.Irreps,
) -> None:
super().__init__()
self.linear_1 = o3.Linear(irreps_in=irreps_in, irreps_out=MLP_irreps)
self.non_linearity = Activation(irreps_in=MLP_irreps, acts=[torch.nn.SiLU()])
self.linear_2 = o3.Linear(irreps_in=MLP_irreps, irreps_out=irreps_out)
def forward(self, x: torch.Tensor) -> torch.Tensor: # [n_nodes, irreps] # [..., ]
x = self.linear_1(x)
x = self.non_linearity(x)
x = self.linear_2(x)
return x
class RadialEmbeddingBlock(torch.nn.Module):
def __init__(
self,
r_max: float,
num_bessel: int,
num_polynomial_cutoff: int,
) -> None:
super().__init__()
self.bessel_fn = BesselBasis(r_max=r_max, num_basis=num_bessel)
self.cutoff_fn = PolynomialCutoff(r_max=r_max, p=num_polynomial_cutoff)
self.out_dim = num_bessel
def forward(
self,
edge_lengths: torch.Tensor, # [n_edges, 1]
) -> torch.Tensor:
bessel = self.bessel_fn(edge_lengths) # [n_edges, n_basis]
cutoff = self.cutoff_fn(edge_lengths) # [n_edges, 1]
output: torch.Tensor = bessel * cutoff # [n_edges, n_basis]
return output
class NodeUpdateBlock(torch.nn.Module):
def __init__(
self,
node_attrs_irreps: o3.Irreps,
node_feats_irreps: o3.Irreps,
hidden_irreps: o3.Irreps,
residual_connection: bool,
) -> None:
super().__init__()
# net to compute W m_i
self.linear = o3.Linear(
hidden_irreps,
hidden_irreps,
internal_weights=True,
shared_weights=True,
)
if residual_connection:
# residual connection from original node attrs and node features
self.residual_connection_layer = o3.FullyConnectedTensorProduct(
node_feats_irreps,
node_attrs_irreps,
hidden_irreps,
)
else:
self.residual_connection_layer = None
def forward(
self,
m_i: torch.Tensor,
node_feats: torch.Tensor,
node_attrs: torch.Tensor,
) -> torch.Tensor:
if self.residual_connection_layer is not None:
node_feats = self.linear(m_i) + self.residual_connection_layer(
node_feats,
node_attrs,
)
else:
node_feats = self.linear(m_i)
return node_feats
class MessageBlock(torch.nn.Module):
def __init__(
self,
interaction_irreps: o3.Irreps,
node_attrs_irreps: o3.Irreps,
hidden_irreps: o3.Irreps,
correlation: int,
) -> None:
super().__init__()
# symmetric contraction to make A_i into messages m_i = W B_i | self.symmetric_contractions = SymmetricContraction( | 5 | 2023-11-10 13:54:53+00:00 | 4k |
naver-ai/scob | utils/config_manager.py | [
{
"identifier": "misc",
"path": "utils/misc.py",
"snippet": "def get_node_rank():\ndef get_local_rank():\ndef is_rank_zero():\ndef cpu_count():\ndef get_file(dataset_path, prefix, postfix, ext):\ndef is_otor(task_name, or_oracle=False, oracle=False):"
},
{
"identifier": "AVAILABLE_TASKS",
"path": "utils/constants.py",
"snippet": "AVAILABLE_TASKS = {\n DecoderTypes.TRANSFORMER: {\n Tasks.OCR_READ,\n Tasks.TEXT_READ,\n Tasks.DONUT_KIE,\n Tasks.OCR_READ_2HEAD,\n Tasks.OCR_READ_TEXTINSTANCEPADDING,\n Tasks.TABLE_PARSING,\n Tasks.OTOR,\n Tasks.OTOR_ORACLE,\n },\n}"
},
{
"identifier": "DecoderTypes",
"path": "utils/constants.py",
"snippet": "class DecoderTypes:\n TRANSFORMER = \"transformer_decoder\""
},
{
"identifier": "HeadTypes",
"path": "utils/constants.py",
"snippet": "class HeadTypes:\n TWO_HEAD = \"2head\"\n BASE = \"base\""
},
{
"identifier": "Seperators",
"path": "utils/constants.py",
"snippet": "class Seperators:\n DTD = \"||\""
},
{
"identifier": "Tasks",
"path": "utils/constants.py",
"snippet": "class Tasks:\n # base head tasks\n OCR_READ = \"ocr_read\"\n TEXT_READ = \"text_read\"\n DONUT_KIE = \"donut_kie\"\n OCR_READ_TEXTINSTANCEPADDING = \"ocr_read_TextInstancePadding\"\n TABLE_PARSING = \"table_parsing\"\n OTOR = \"otor\"\n OTOR_ORACLE = \"otor_oracle\"\n\n # 2head tasks\n OCR_READ_2HEAD = \"ocr_read_2head\""
},
{
"identifier": "cpu_count",
"path": "utils/misc.py",
"snippet": "def cpu_count():\n \"\"\"Get number of cpu\n os.cpu_count() has a problem with docker container.\n For example, we have 72 cpus. os.cpu_count() always return 72\n even if we allocate only 4 cpus for container.\n \"\"\"\n return int(subprocess.check_output(\"nproc\").decode().strip())"
},
{
"identifier": "is_otor",
"path": "utils/misc.py",
"snippet": "def is_otor(task_name, or_oracle=False, oracle=False):\n if or_oracle:\n if task_name in [\"otor\", \"otor_oracle\"]:\n return True\n elif oracle:\n if task_name == \"otor_oracle\":\n return True\n else:\n if task_name == \"otor\":\n return True\n return False"
},
{
"identifier": "Singleton",
"path": "utils/singleton.py",
"snippet": "class Singleton(type):\n _instances: Dict = {}\n\n def __call__(cls, *args, **kwargs):\n if cls not in cls._instances:\n cls._instances[cls] = super().__call__(*args, **kwargs)\n return cls._instances[cls]\n\n def deallocate_instance(cls):\n try:\n del Singleton._instances[cls]\n except KeyError:\n pass"
}
] | import enum
import os
import pickle
import time
import torch
import torch.distributed as dist
from datetime import timedelta
from omegaconf import OmegaConf
from omegaconf.dictconfig import DictConfig
from utils import misc
from utils.constants import AVAILABLE_TASKS, DecoderTypes, HeadTypes, Seperators, Tasks
from utils.misc import cpu_count, is_otor
from utils.singleton import Singleton
| 2,971 | "This configuration should be added"
" automatically in runtime."
)
for mode in ["train", "val", "test"]:
num_devices = torch.cuda.device_count() * self.__config.train.num_nodes
if self.__config[mode].batch_size % num_devices != 0:
raise ValueError(
f"{mode} batch-size should be a multiple"
" of the number of gpu devices"
)
# check decoder_names
decoder_name_set_from_dataset_items = set()
for dataset_item in self.__config.dataset_items:
for task in dataset_item.tasks:
decoder_name_set_from_dataset_items.add(task.decoder)
decoder_name_set_from_decoders = set()
for decoder_name, decoder_cfg in self.__config.model.decoders.items():
decoder_name_set_from_decoders.add(decoder_name)
assert decoder_name.startswith(decoder_cfg.type)
if decoder_name_set_from_dataset_items != decoder_name_set_from_decoders:
raise ValueError(
"Please match decoder-names.\n"
f"dec-names from dataset_items: {decoder_name_set_from_dataset_items}\n"
f"dec-names from decoders: {decoder_name_set_from_decoders}"
)
# Check available tasks
for dataset_item in self.__config.dataset_items:
for task in dataset_item.tasks:
decoder_cfg = self.__config.model.decoders[task.decoder]
available_tasks = AVAILABLE_TASKS[decoder_cfg.type]
if task.name not in available_tasks:
raise ValueError(
f"Unavailable task {task.name} for decoder {task.decoder}"
)
if decoder_cfg.type == DecoderTypes.TRANSFORMER:
assert not (
(decoder_cfg.head_type == HeadTypes.TWO_HEAD)
^ task.name.endswith(HeadTypes.TWO_HEAD)
), "Two head model should solve two head task."
# Check image_normalize type in PATCH_CLS with grayscale label
if (
hasattr(decoder_cfg, "task")
and decoder_cfg.task == Tasks.PATCH_CLS
and decoder_cfg.kwargs.classification_type == "grayscale"
):
transforms_dict = task.transforms_dict
if isinstance(transforms_dict, str):
if transforms_dict not in self.__config.custom_transforms_dict:
raise ValueError(
f"{transforms_dict} is not in cfg.custom_transforms_dict"
)
transforms_dict = self.__config.custom_transforms_dict[
transforms_dict
]
assert transforms_dict["image_normalize"] == "imagenet_default"
def __change_config(self):
"""Change config like path"""
cfg = self.__config # get reference of __config
# -------------------------------------------
        # for convenience (only used for evaluate.py)
if cfg.eval.dataset_name is not None and cfg.eval.task_name is not None:
cfg.dataset_items = [get_eval_dataset_item(cfg.eval)]
# -------------------------------------------
workspace = cfg.workspace_name
if workspace is None:
workspace = os.getcwd()
cfg.workspace = workspace
self.__change_data_paths()
cfg.model.resume_model_path = self.__change_weight_path(
workspace, cfg.model.resume_model_path
)
self.__change_log_dirs()
num_devices = torch.cuda.device_count() * cfg.train.num_nodes
for mode in ["train", "val", "test"]:
# set per-gpu num_workers
if cfg.debug:
cfg[mode].num_workers = 0
else:
num_workers = cfg[mode].num_workers
if num_workers == -1:
num_workers = cpu_count() * cfg.train.num_nodes
cfg[mode].num_workers = max(num_workers // num_devices, 1)
# set per-gpu batch size
new_batch_size = cfg[mode].batch_size // num_devices
cfg[mode].batch_size = new_batch_size
if mode == "train" and is_otor(cfg.dataset_items[0].tasks[0].name):
if cfg[mode].batch_size % 2 > 0:
assert (
cfg[mode].batch_size % 2 == 0
), "when use otor, batch size should be even number."
cfg[mode].batch_size = cfg[mode].batch_size // 2
if cfg.reproduce.seed == -1:
# To avoid each rank have different random_seed, we use TCPStore.
if misc.is_rank_zero():
random_seed = int(time.time()) % 10000
self.__tcp_store_server.set("random_seed", str(random_seed))
else:
random_seed = int(self.__tcp_store_cli.get("random_seed").decode())
cfg.reproduce.seed = random_seed
# Make DTD (Dataset-name, Task-name, Decoder-name) configs
dtd_dict = {}
for dataset_item in cfg.dataset_items:
dataset_name = dataset_item.name
for task in dataset_item.tasks:
| """
SCOB
Copyright (c) 2023-present NAVER Cloud Corp.
MIT license
config parser with omegaconf
"""
class FileType(enum.Enum): # pylint: disable=missing-class-docstring
YAML = 1
PICKLE = 2
class ConfigManager(metaclass=Singleton):
"""Singleton ConfigManager for project
Notes:
Do not call ConfigManager.get_instance() inside the model class.
When creating a model instance, all necessary arguments must be received as constructor arguments.
SHOULD_NOT_USE_CONFIGS (List[str]): Keys that should not be included in the configuration.
Because the corresponding configurations are applied at runtime,
        They should not be given through default.yaml, user config, or CLI.
"""
SHOULD_NOT_USE_CONFIGS = [
"workspace",
"save_weight_dir",
"tensorboard_dir",
"dtd_dict",
]
def __init__(
self,
conf_path=None,
default_conf_path="./configs/default.yaml",
conf_type=FileType.YAML,
default_conf_type=FileType.YAML,
use_cli=True,
):
self.__config = None
self.__tcp_store_server = None
self.__tcp_store_cli = None
self.default_conf_path = default_conf_path
self.conf_path = conf_path
self.__load(default_conf_type, conf_type, use_cli)
@property
def cfg(self):
"""Return path config"""
return self.__config
@property
def model_cfg(self):
"""Return model config"""
return self.__config.model
def __load(self, default_conf_type, conf_type, use_cli):
"""load config from file"""
# Load configuration parameters
# Step 1. Default config
self.__config = self.__read_conf(self.default_conf_path, default_conf_type)
# Step 2. Config specified by __init__()
if self.conf_path:
cfg = self.__read_conf(self.conf_path, conf_type)
self.__merge_config(cfg)
if use_cli:
# Step 3. Config specified CLI's --config parameter
cfg_cli = self.__get_config_from_cli()
if "config" in cfg_cli:
cfg = OmegaConf.load(cfg_cli.config)
self.__merge_config(cfg)
# Step 4. Config specified CLI's --XYZ parameters
self.__merge_config(cfg_cli)
self.__init_tcp_store()
# Validate config content and apply a few changes
self.__check_config()
self.__change_config()
# Finalize config
OmegaConf.set_readonly(self.__config, True)
OmegaConf.set_struct(self.__config, True)
if misc.is_rank_zero():
print(OmegaConf.to_yaml(self.__config))
@staticmethod
def __read_conf(path, file_type=FileType.YAML):
if file_type is FileType.PICKLE:
with open(path, "rb") as fp:
cfg = pickle.load(fp)
elif file_type is FileType.YAML:
cfg = OmegaConf.load(path)
else:
raise ValueError("[FileType Enum]: Invalid value!")
return cfg
def save(self, path, file_type):
"""Save config file to path"""
with open(path, "wb") as fp:
if file_type is FileType.PICKLE:
pickle.dump(self.__config, fp)
fp.flush()
elif file_type is FileType.YAML:
OmegaConf.save(config=self.__config, f=fp.name)
else:
raise ValueError("[FileType Enum]: Invalid value!")
def __merge_config(self, cfg_for_overwrite):
"""omegaconf merge_config
Notes:
            When merging dataset configs, the dictionaries would get appended to each other, so this prevents that.
"""
self.__config = OmegaConf.merge(self.__config, cfg_for_overwrite)
if "dataset_items" in cfg_for_overwrite:
self.__config.dataset_items = cfg_for_overwrite.dataset_items
if (
"model" in cfg_for_overwrite
and "decoders" in cfg_for_overwrite.model
and "type" in list(cfg_for_overwrite.model.decoders.values())[0]
and "kwargs" in list(cfg_for_overwrite.model.decoders.values())[0]
and "loss_func" in list(cfg_for_overwrite.model.decoders.values())[0]
):
self.__config.model.decoders = cfg_for_overwrite.model.decoders
@staticmethod
def __get_config_from_cli():
"""Get config from cli.
This function can also cover arguments with '--'
"""
cfg_cli = OmegaConf.from_cli()
cli_keys = list(cfg_cli.keys())
for cli_key in cli_keys:
if "--" in cli_key:
cfg_cli[cli_key.replace("--", "")] = cfg_cli[cli_key]
del cfg_cli[cli_key]
return cfg_cli
def __init_tcp_store(self):
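        # Rank 0 hosts the TCPStore server; all other ranks connect to it as clients (used later to share the runtime random seed).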
ip = self.__config.tcp_store_ip
port = self.__config.tcp_store_port
time_delta = timedelta(seconds=300)
if misc.is_rank_zero():
self.__tcp_store_server = dist.TCPStore(ip, port, -1, True, time_delta)
else:
self.__tcp_store_cli = dist.TCPStore(ip, port, -1, False, time_delta)
def __check_config(self):
"""Check config"""
for key in self.SHOULD_NOT_USE_CONFIGS:
if key in self.__config.keys():
raise ValueError(
f"Do not use {key} as a configuration. \n"
"This configuration should be added"
" automatically in runtime."
)
for mode in ["train", "val", "test"]:
num_devices = torch.cuda.device_count() * self.__config.train.num_nodes
if self.__config[mode].batch_size % num_devices != 0:
raise ValueError(
f"{mode} batch-size should be a multiple"
" of the number of gpu devices"
)
# check decoder_names
decoder_name_set_from_dataset_items = set()
for dataset_item in self.__config.dataset_items:
for task in dataset_item.tasks:
decoder_name_set_from_dataset_items.add(task.decoder)
decoder_name_set_from_decoders = set()
for decoder_name, decoder_cfg in self.__config.model.decoders.items():
decoder_name_set_from_decoders.add(decoder_name)
assert decoder_name.startswith(decoder_cfg.type)
if decoder_name_set_from_dataset_items != decoder_name_set_from_decoders:
raise ValueError(
"Please match decoder-names.\n"
f"dec-names from dataset_items: {decoder_name_set_from_dataset_items}\n"
f"dec-names from decoders: {decoder_name_set_from_decoders}"
)
# Check available tasks
for dataset_item in self.__config.dataset_items:
for task in dataset_item.tasks:
decoder_cfg = self.__config.model.decoders[task.decoder]
available_tasks = AVAILABLE_TASKS[decoder_cfg.type]
if task.name not in available_tasks:
raise ValueError(
f"Unavailable task {task.name} for decoder {task.decoder}"
)
if decoder_cfg.type == DecoderTypes.TRANSFORMER:
assert not (
(decoder_cfg.head_type == HeadTypes.TWO_HEAD)
^ task.name.endswith(HeadTypes.TWO_HEAD)
), "Two head model should solve two head task."
# Check image_normalize type in PATCH_CLS with grayscale label
if (
hasattr(decoder_cfg, "task")
and decoder_cfg.task == Tasks.PATCH_CLS
and decoder_cfg.kwargs.classification_type == "grayscale"
):
transforms_dict = task.transforms_dict
if isinstance(transforms_dict, str):
if transforms_dict not in self.__config.custom_transforms_dict:
raise ValueError(
f"{transforms_dict} is not in cfg.custom_transforms_dict"
)
transforms_dict = self.__config.custom_transforms_dict[
transforms_dict
]
assert transforms_dict["image_normalize"] == "imagenet_default"
def __change_config(self):
"""Change config like path"""
cfg = self.__config # get reference of __config
# -------------------------------------------
    # for convenience (only used for evaluate.py)
if cfg.eval.dataset_name is not None and cfg.eval.task_name is not None:
cfg.dataset_items = [get_eval_dataset_item(cfg.eval)]
# -------------------------------------------
workspace = cfg.workspace_name
if workspace is None:
workspace = os.getcwd()
cfg.workspace = workspace
self.__change_data_paths()
cfg.model.resume_model_path = self.__change_weight_path(
workspace, cfg.model.resume_model_path
)
self.__change_log_dirs()
num_devices = torch.cuda.device_count() * cfg.train.num_nodes
for mode in ["train", "val", "test"]:
# set per-gpu num_workers
if cfg.debug:
cfg[mode].num_workers = 0
else:
num_workers = cfg[mode].num_workers
if num_workers == -1:
num_workers = cpu_count() * cfg.train.num_nodes
cfg[mode].num_workers = max(num_workers // num_devices, 1)
# set per-gpu batch size
new_batch_size = cfg[mode].batch_size // num_devices
cfg[mode].batch_size = new_batch_size
if mode == "train" and is_otor(cfg.dataset_items[0].tasks[0].name):
if cfg[mode].batch_size % 2 > 0:
assert (
cfg[mode].batch_size % 2 == 0
), "when use otor, batch size should be even number."
cfg[mode].batch_size = cfg[mode].batch_size // 2
if cfg.reproduce.seed == -1:
# To avoid each rank have different random_seed, we use TCPStore.
if misc.is_rank_zero():
random_seed = int(time.time()) % 10000
self.__tcp_store_server.set("random_seed", str(random_seed))
else:
random_seed = int(self.__tcp_store_cli.get("random_seed").decode())
cfg.reproduce.seed = random_seed
# Make DTD (Dataset-name, Task-name, Decoder-name) configs
dtd_dict = {}
for dataset_item in cfg.dataset_items:
dataset_name = dataset_item.name
for task in dataset_item.tasks:
| dtd_key_str = Seperators.DTD.join(
| 4 | 2023-11-15 00:40:08+00:00 | 4k |
speckai/speck | src/python/speck/chat/entities.py | [
{
"identifier": "ChatLogger",
"path": "src/python/speck/chat/logger.py",
"snippet": "class ChatLogger:\n @staticmethod\n def log(log_config: \"LogConfig\", prompt: Any, model: str, response: Any, **kwargs):\n if kwargs.get(\"config\", {}).get(\"_log\", True):\n universal_format_log(\n log_config=log_config,\n prompt=prompt,\n model=model,\n response=response,\n **kwargs,\n )"
},
{
"identifier": "run_debug_websocket",
"path": "src/python/speck/debug/_debug_socket.py",
"snippet": "def run_debug_websocket(client, connector, prompt, config):\n data = asyncio.run(_websocket_client(client, connector, prompt, config))\n return data"
}
] | from abc import ABC, abstractmethod
from typing import Any, Callable, Iterator, Literal, Optional, Tuple, Union
from openai._types import NotGiven
from pydantic import BaseModel, Extra
from ..chat.logger import ChatLogger
from ..debug._debug_socket import run_debug_websocket | 2,229 | "stream": self.stream,
"_log": self._log,
"temperature": self._convert_optional(self.temperature),
"max_tokens": self._convert_optional(self.max_tokens),
"top_p": self._convert_optional(self.top_p),
"frequency_penalty": self._convert_optional(self.frequency_penalty),
"presence_penalty": self._convert_optional(self.presence_penalty),
"chat_args": self.chat_args,
}
def _convert_optional(self, value):
return None if isinstance(value, NotGiven) else value
@classmethod
def create(cls, config: ChatConfigTypes, kwargs: dict = None) -> "ChatConfig":
if isinstance(config, cls):
if kwargs is not None:
return cls(**{**config.__dict__, **kwargs})
else:
return config
elif isinstance(config, dict):
return cls(**config)
elif kwargs:
return cls(**kwargs)
else:
raise NotImplementedError
def get(self, key: str, default: Any = None) -> Any:
return getattr(self, key, default)
def convert(self, provider: str = "speck") -> "ChatConfig":
"""
Convert to another config format
"""
if provider == "openai":
return OpenAIChatConfig(
model=self.model,
stream=self.stream,
_log=self._log,
temperature=self.temperature,
max_tokens=self.max_tokens,
top_p=self.top_p,
frequency_penalty=self.frequency_penalty,
presence_penalty=self.presence_penalty,
**self._kwargs,
)
return self
def log_chat(
self,
*,
log_config: LogConfig,
prompt: Prompt,
response: Response,
provider: str = "speck",
):
config = self.convert()
ChatLogger.log(
log_config=log_config,
provider=provider,
model=str(config.model),
prompt=prompt,
response=response,
**config.chat_args,
)
def encode(self, encoding: str = "utf-8"):
return self.__str__().encode(encoding)
def __str__(self):
return f"ChatConfig(provider={self.provider}, model={self.model}, stream={self.stream}, _log={self._log}, temperature={self.temperature}, max_tokens={self.max_tokens}, top_p={self.top_p}, frequency_penalty={self.frequency_penalty}, presence_penalty={self.presence_penalty}, _kwargs={self._kwargs})"
class OpenAIChatConfig(ChatConfig):
def __init__(
self,
model: OpenAIModel,
stream: bool = False,
_log: bool = True,
temperature: Union[Optional[float], NotGiven] = NOT_GIVEN,
max_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN,
top_p: Union[Optional[float], NotGiven] = NOT_GIVEN,
frequency_penalty: Union[Optional[float], NotGiven] = NOT_GIVEN,
presence_penalty: Union[Optional[float], NotGiven] = NOT_GIVEN,
**config_kwargs,
):
self.model = model
self.stream = stream
self._log = _log
self.temperature = temperature
self.max_tokens = max_tokens
self.top_p = top_p
self.frequency_penalty = frequency_penalty
self.presence_penalty = presence_penalty
self._kwargs = config_kwargs
def convert(self, provider: str = "speck") -> ChatConfig:
"""
Maps config to universal format then converts to another config format
"""
universal_config = ChatConfig(
model=self.model,
stream=self.stream,
_log=self._log,
temperature=self.temperature,
max_tokens=self.max_tokens,
top_p=self.top_p,
frequency_penalty=self.frequency_penalty,
presence_penalty=self.presence_penalty,
**self._kwargs,
)
return universal_config.convert(provider=provider)
class IChatClient(ABC):
def debug_chat(
self, prompt: "Prompt", config: "ChatConfig"
) -> ("Prompt", "ChatConfig"):
| from __future__ import annotations
# from dataclasses import dataclass
NOT_GIVEN = None
class Message(BaseModel):
role: MessageRole
content: str
class SafeDict(dict):
def __missing__(self, key):
return "{" + key + "}" # Returns the key in curly braces as a string
class Prompt(str):
messages: list[Message]
variables: Union[dict[str, str], None] = None
def to_dict(self):
return {
"messages": self.messages,
"variables": self.variables,
}
def __init__(
self,
messages: PromptTypes,
variables: Union[dict[str, str], None] = None,
**kwargs,
):
if isinstance(messages, str):
messages = [Message(role="user", content=messages)]
elif isinstance(messages, Message):
messages = [messages]
elif isinstance(messages, list):
if all(isinstance(message, Message) for message in messages):
pass
elif all(isinstance(message, dict) for message in messages):
messages = [
Message(role=message["role"], content=message["content"])
for message in messages
]
else:
raise ValueError(
f"Invalid type for messages: {type(messages)}\n{messages}"
)
self.messages = messages
self.variables = variables
super().__init__()
@classmethod
def create(
cls, messages: PromptTypes, variables: dict[str, str] = None
) -> "Prompt":
if isinstance(messages, cls):
# Todo: clone object and add variables
return messages
return cls(messages=messages, variables=variables)
@classmethod
def _read(cls, lines: str) -> "Prompt":
# Todo: add config parsing
config = {}
messages = []
current_min_spaces = 0
current_section = None
current_message = []
def add_message():
nonlocal current_message, current_min_spaces
if current_message:
messages.append(
Message(
role=current_section,
content="\n".join(
[m[current_min_spaces:] for m in current_message]
),
)
)
current_message = []
current_min_spaces = 0
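        # Parse <system>/<user>/<assistant> sections: a "<...>" line switches sections, <config> lines hold key=value pairs, and indented lines accumulate into the current message (tracking the minimum indent for dedenting).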
for line in lines.split("\n"):
line = line.rstrip("\r")
if line.startswith("<"):
line = line.strip()
add_message()
current_section = line[1:-1].lower()
elif current_section == "config" and "=" in line:
key, value = line.split("=", 1)
config[key.strip()] = value.strip()
elif current_section in ["system", "user", "assistant"]:
min_spaces = len(line) - len(line.lstrip())
if 0 < min_spaces < current_min_spaces or current_min_spaces == 0:
current_min_spaces = min_spaces
current_message.append(line)
add_message()
return cls(messages=messages)
@classmethod
def read(cls, path: str, name: Union[str, None] = None) -> "Prompt":
with open(path, "r") as f:
if name is not None:
prompts = cls.read_all(path)
return prompts[name]
else:
return cls._read(f.read())
@classmethod
def read_all(cls, path: str) -> dict[str, "Prompt"]:
with open(path, "r") as f:
prompts = {}
lines = []
current_prompt_name = None
current_min_spaces = -1
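            # Collect top-level <prompt name> ... </prompt> blocks; each block body is dedented and parsed with cls._read().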
for line in f:
line = line.rstrip("\n").rstrip("\r")
if line.lstrip().startswith("<"):
min_spaces = len(line) - len(line.lstrip())
stripped_line = line.strip()
if stripped_line.startswith("<prompt") and min_spaces == 0:
if current_prompt_name:
prompts[current_prompt_name] = cls._read(
"\n".join([m[current_min_spaces:] for m in lines])
)
current_prompt_name = stripped_line[8:-1].strip()
current_min_spaces = -1
lines = []
elif stripped_line.startswith("</prompt>") and min_spaces == 0:
prompts[current_prompt_name] = cls._read(
"\n".join([m[current_min_spaces:] for m in lines])
)
current_prompt_name = None
current_min_spaces = -1
lines = []
else:
lines.append(line)
if current_min_spaces == -1 or min_spaces < current_min_spaces:
current_min_spaces = min_spaces
else:
lines.append(line)
return prompts
def _file(self):
file = []
for message in self.messages:
file.append(f"<{message.role.lower()}>")
for line in message.content.split("\n"):
file.append(" " * 4 + line)
return "\n".join(file)
@classmethod
def write(cls, prompt: Union["Prompt", dict[str, "Prompt"]], path: str):
with open(path, "w") as f:
if isinstance(prompt, dict):
content = ""
for name, prompt in prompt.items():
content += f"<prompt {name}>\n"
content += "\n".join(
[" " * 4 + line for line in prompt._file().split("\n")]
)
content += "\n</prompt>\n\n"
f.write(content.strip())
else:
f.write(prompt._file())
def __new__(
cls,
messages: PromptTypes,
**kwargs,
):
# Todo: Handle string, Message, and list[Message]
instance = super(Prompt, cls).__new__(cls, str(messages))
return instance
@classmethod
def from_openai(cls, messages: list[dict[str, str]]):
return cls(
messages=[
Message(role=message["role"], content=message["content"])
for message in messages
]
)
def to_list(self):
return [
{
"role": message.role,
"content": message.content.format_map(SafeDict(self.variables or {})),
}
for message in self.messages
]
def to_dict(self):
return {
"messages": [
{"role": message.role, "content": message.content}
for message in self.messages
],
"variables": self.variables or {},
}
@staticmethod
def _apply_variables(
messages: list[Message], variables: dict[str, str]
) -> list[Message]:
return [
Message(
role=message.role,
content=message.content.format_map(SafeDict(variables or {})),
)
for message in messages
]
def _check_duplicate_keys(self, other_variables: dict[str, str]) -> dict[str, str]:
duplicate_keys = set((self.variables or {}).keys()).intersection(
set((other_variables or {}).keys())
)
return {
key: self.variables[key]
for key in duplicate_keys
if self.variables[key] != other_variables[key]
}
def _remove_duplicate_keys_from_messages(
self, other_variables: dict[str, str]
) -> list[Message]:
messages = self.messages
applied_variables = self._check_duplicate_keys(other_variables)
if len(applied_variables) > 0:
messages = self._apply_variables(self.messages, applied_variables)
return messages
def format(self, *args, **kwargs):
# return self.__class__(
# messages=[
# Message(
# role=message.role, content=message.content.format(*args, **kwargs)
# )
# for message in self.messages
# ]
# )
messages = self._remove_duplicate_keys_from_messages(kwargs)
return self.__class__(
messages=[
Message(role=message.role, content=message.content)
for message in messages
],
variables={**SafeDict(self.variables or {}), **kwargs},
)
def __add__(self, other):
if isinstance(other, Message):
return self.__class__(
messages=self.messages + [other], variables={**(self.variables or {})}
)
elif isinstance(other, Prompt):
# Check if there are duplicate keys
messages = self._remove_duplicate_keys_from_messages(other.variables or {})
return self.__class__(
messages=messages + other.messages,
variables={
**SafeDict(self.variables or {}),
**SafeDict(other.variables or {}),
},
)
else:
raise NotImplementedError
def __str__(self):
return (
"\n".join(
[f"{message.role}: {message.content}" for message in self.messages]
)
+ "\n"
+ str(self.variables or {})
)
class Response(BaseModel):
content: str
prompt_tokens: Union[int, None] = None
completion_tokens: Union[int, None] = None
raw: Union[dict, None] = None
def __init__(
self,
content: str,
closed: bool = False,
prompt_tokens: Union[int, None] = None,
completion_tokens: Union[int, None] = None,
raw: Union[dict, None] = None,
**kwargs,
):
super().__init__(
content=content,
prompt_tokens=prompt_tokens,
completion_tokens=completion_tokens,
raw=raw,
)
for key, value in kwargs.items():
setattr(self, key, value)
@classmethod
def create(cls, response: ResponseTypes) -> "Response":
if isinstance(response, cls):
return response
elif isinstance(response, str):
return cls(content=response)
else:
raise NotImplementedError
def __str__(self):
return f"Response({self.content}, raw={self.raw})"
class MessageChunk(BaseModel):
content: Union[str, None]
def encode(self, encoding: str = "utf-8"):
content = self.content or ""
return content.encode(encoding)
class Stream:
# processor that has lambda which returns MessageDelta
def __init__(
self,
client: "Speck",
iterator: Iterator[Any],
kwargs: dict,
log_config: "LogConfig",
processor: Callable[[Any], MessageChunk],
):
self._client = client
self.message: str = ""
self.tokens: int = 0
self._iterator = iterator
self._kwargs = kwargs
self._processor = processor
self._has_logged = False
self._closed = False
self._log_config = log_config
def _log(self):
if not self._has_logged:
self._has_logged = True
kwargs = self._kwargs
kwargs["prompt"] = self._kwargs.get("prompt", [])
kwargs["temperature"] = self._kwargs.get("temperature", "N/A")
kwargs["model"] = self._kwargs.get("model", "N/A")
kwargs["response"] = Response(
content=self.message, raw={}, closed=True, completion_tokens=self.tokens
)
# Todo: add prompt_tokens using tiktoken
ChatLogger.log(log_config=self._log_config, **kwargs)
def _process(self, item) -> MessageChunk:
return self._processor(item)
def __next__(self) -> MessageChunk:
try:
if self._closed:
raise StopIteration
# next_item = None
# while next_item is None:
next_item = next(self._iterator)
item: MessageChunk = self._process(next_item)
if item.content:
self.message += item.content
self.tokens += 1
return item
except StopIteration:
self._log()
raise
def __iter__(self) -> Iterator[MessageChunk]:
return self
def close(self):
try:
self._closed = True
# todo: make this work for packages other than openai
self._iterator.response.close()
except AttributeError:
pass
class LogConfig(BaseModel):
api_key: str
endpoint: str = "https://api.getspeck.ai"
class Config:
extra = "allow"
class ChatConfig:
# Todo: add typed params here
# Todo: Create conversions to other formats
def __init__(
self,
*,
provider: str = None,
model: OpenAIModel,
stream: bool = False,
_log: bool = True,
temperature: Union[Optional[float], NotGiven] = NOT_GIVEN,
max_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN,
top_p: Union[Optional[float], NotGiven] = NOT_GIVEN,
frequency_penalty: Union[Optional[float], NotGiven] = NOT_GIVEN,
presence_penalty: Union[Optional[float], NotGiven] = NOT_GIVEN,
**config_kwargs,
):
if "log_config" in config_kwargs:
del config_kwargs["log_config"]
self.provider = provider
self.model = model
self.stream = stream
self._log = _log
self.temperature = temperature
self.max_tokens = max_tokens
self.top_p = top_p
self.frequency_penalty = frequency_penalty
self.presence_penalty = presence_penalty
self.chat_args = config_kwargs
# If this is modified, update to_dict too
def to_dict(self):
return {
"provider": self.provider,
"model": str(self.model), # Assuming model can be represented as a string
"stream": self.stream,
"_log": self._log,
"temperature": self._convert_optional(self.temperature),
"max_tokens": self._convert_optional(self.max_tokens),
"top_p": self._convert_optional(self.top_p),
"frequency_penalty": self._convert_optional(self.frequency_penalty),
"presence_penalty": self._convert_optional(self.presence_penalty),
"chat_args": self.chat_args,
}
def _convert_optional(self, value):
return None if isinstance(value, NotGiven) else value
@classmethod
def create(cls, config: ChatConfigTypes, kwargs: dict = None) -> "ChatConfig":
if isinstance(config, cls):
if kwargs is not None:
return cls(**{**config.__dict__, **kwargs})
else:
return config
elif isinstance(config, dict):
return cls(**config)
elif kwargs:
return cls(**kwargs)
else:
raise NotImplementedError
def get(self, key: str, default: Any = None) -> Any:
return getattr(self, key, default)
def convert(self, provider: str = "speck") -> "ChatConfig":
"""
Convert to another config format
"""
if provider == "openai":
return OpenAIChatConfig(
model=self.model,
stream=self.stream,
_log=self._log,
temperature=self.temperature,
max_tokens=self.max_tokens,
top_p=self.top_p,
frequency_penalty=self.frequency_penalty,
presence_penalty=self.presence_penalty,
**self.chat_args,
)
return self
def log_chat(
self,
*,
log_config: LogConfig,
prompt: Prompt,
response: Response,
provider: str = "speck",
):
config = self.convert()
ChatLogger.log(
log_config=log_config,
provider=provider,
model=str(config.model),
prompt=prompt,
response=response,
**config.chat_args,
)
def encode(self, encoding: str = "utf-8"):
return self.__str__().encode(encoding)
def __str__(self):
return f"ChatConfig(provider={self.provider}, model={self.model}, stream={self.stream}, _log={self._log}, temperature={self.temperature}, max_tokens={self.max_tokens}, top_p={self.top_p}, frequency_penalty={self.frequency_penalty}, presence_penalty={self.presence_penalty}, _kwargs={self._kwargs})"
class OpenAIChatConfig(ChatConfig):
def __init__(
self,
model: OpenAIModel,
stream: bool = False,
_log: bool = True,
temperature: Union[Optional[float], NotGiven] = NOT_GIVEN,
max_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN,
top_p: Union[Optional[float], NotGiven] = NOT_GIVEN,
frequency_penalty: Union[Optional[float], NotGiven] = NOT_GIVEN,
presence_penalty: Union[Optional[float], NotGiven] = NOT_GIVEN,
**config_kwargs,
):
self.model = model
self.stream = stream
self._log = _log
self.temperature = temperature
self.max_tokens = max_tokens
self.top_p = top_p
self.frequency_penalty = frequency_penalty
self.presence_penalty = presence_penalty
self._kwargs = config_kwargs
def convert(self, provider: str = "speck") -> ChatConfig:
"""
Maps config to universal format then converts to another config format
"""
universal_config = ChatConfig(
model=self.model,
stream=self.stream,
_log=self._log,
temperature=self.temperature,
max_tokens=self.max_tokens,
top_p=self.top_p,
frequency_penalty=self.frequency_penalty,
presence_penalty=self.presence_penalty,
**self._kwargs,
)
return universal_config.convert(provider=provider)
class IChatClient(ABC):
def debug_chat(
self, prompt: "Prompt", config: "ChatConfig"
) -> ("Prompt", "ChatConfig"): | data = run_debug_websocket(self._client, self, prompt, config) | 1 | 2023-11-15 05:46:05+00:00 | 4k |
hahnyuan/ASVD4LLM | binary_search.py | [
{
"identifier": "evaluate_model",
"path": "evaluate.py",
"snippet": "@torch.no_grad()\ndef evaluate_model(\n model,\n tokenizer,\n model_name,\n tasks,\n eval_ppl=\"\",\n num_fewshot=0,\n limit=-1,\n batch_size=1,\n):\n \"\"\"\n model: model name\n limit: number of test samples for debug, set to -1 is no limit\n tasks: str tasks are split by ,\n num_fewshot: Number of examples in few-shot context\n eval_ppl: str datasets are split by , such as 'wikitext2,ptb,c4'\n \"\"\"\n lm = EvalLM(model, tokenizer, batch_size=batch_size)\n results = {}\n if eval_ppl:\n for dataset in eval_ppl.split(\",\"):\n cache_testloader = (\n f\"/tmp/{dataset}_testloader_{model_name.replace('/', '_')}_all.cache\"\n )\n if os.path.exists(cache_testloader):\n testloader = torch.load(cache_testloader)\n # print(f\"load calibration from {cache_testloader}\")\n else:\n testloader = get_eval_loaders(dataset, tokenizer)\n torch.save(testloader, cache_testloader)\n # print(dataset)\n testenc = testloader.input_ids\n nsamples = testenc.numel() // lm.seqlen\n use_cache = lm.model.config.use_cache\n lm.model.config.use_cache = False\n lm.model.eval()\n nlls = []\n\n for i in tqdm(range(nsamples)):\n batch = testenc[:, (i * lm.seqlen) : ((i + 1) * lm.seqlen)].to(\n lm.device\n )\n outputs = lm.model.model(batch)\n hidden_states = outputs[0] # .to(lm.model.lm_head.weight.device)\n logits = lm.model.lm_head(hidden_states) # .contiguous()\n shift_logits = logits[:, :-1, :] # .contiguous()\n shift_labels = testenc[:, (i * lm.seqlen) : ((i + 1) * lm.seqlen)][\n :, 1:\n ].to(lm.device)\n loss_fct = nn.CrossEntropyLoss()\n loss = loss_fct(\n shift_logits.view(-1, shift_logits.size(-1)),\n shift_labels.view(-1),\n )\n neg_log_likelihood = loss.float() * lm.seqlen\n nlls.append(neg_log_likelihood)\n if i == limit:\n break\n # if i == 1:\n # print(\n # \"memory_allocated\",\n # i,\n # torch.cuda.memory_allocated() / 1024 / 1024,\n # \"max memory_allocated\",\n # torch.cuda.max_memory_allocated() / 1024**2,\n # )\n\n ppl = torch.exp(torch.stack(nlls).sum() / (len(nlls) * lm.seqlen))\n print(dataset, ppl.item())\n lm.model.config.use_cache = use_cache\n # pprint(model)\n results[dataset] = ppl.item()\n if tasks == \"mmlu\":\n tasks = 
\"hendrycksTest-abstract_algebra,hendrycksTest-anatomy,hendrycksTest-astronomy,hendrycksTest-business_ethics,hendrycksTest-clinical_knowledge,hendrycksTest-college_biology,hendrycksTest-college_chemistry,hendrycksTest-college_computer_science,hendrycksTest-college_mathematics,hendrycksTest-college_medicine,hendrycksTest-college_physics,hendrycksTest-computer_security,hendrycksTest-conceptual_physics,hendrycksTest-econometrics,hendrycksTest-electrical_engineering,hendrycksTest-elementary_mathematics,hendrycksTest-formal_logic,hendrycksTest-global_facts,hendrycksTest-high_school_biology,hendrycksTest-high_school_chemistry,hendrycksTest-high_school_computer_science,hendrycksTest-high_school_european_history,hendrycksTest-high_school_geography,hendrycksTest-high_school_government_and_politics,hendrycksTest-high_school_macroeconomics,hendrycksTest-high_school_mathematics,hendrycksTest-high_school_microeconomics,hendrycksTest-high_school_physics,hendrycksTest-high_school_psychology,hendrycksTest-high_school_statistics,hendrycksTest-high_school_us_history,hendrycksTest-high_school_world_history,hendrycksTest-human_aging,hendrycksTest-human_sexuality,hendrycksTest-international_law,hendrycksTest-jurisprudence,hendrycksTest-logical_fallacies,hendrycksTest-machine_learning,hendrycksTest-management,hendrycksTest-marketing,hendrycksTest-medical_genetics,hendrycksTest-miscellaneous,hendrycksTest-moral_disputes,hendrycksTest-moral_scenarios,hendrycksTest-nutrition,hendrycksTest-philosophy,hendrycksTest-prehistory,hendrycksTest-professional_accounting,hendrycksTest-professional_law,hendrycksTest-professional_medicine,hendrycksTest-professional_psychology,hendrycksTest-public_relations,hendrycksTest-security_studies,hendrycksTest-sociology,hendrycksTest-us_foreign_policy,hendrycksTest-virology,hendrycksTest-world_religions\"\n if tasks == \"llmqat\":\n # tasks = \"boolq,piqa,hellaswag,winogrande,arc_easy,arc_challenge,openbookqa\"\n tasks = \"lambada_openai,openbookqa\"\n if tasks != \"\":\n t_results = evaluator.simple_evaluate(\n lm,\n tasks=tasks.split(\",\"),\n batch_size=batch_size,\n num_fewshot=num_fewshot,\n limit=None if limit == -1 else limit,\n no_cache=True,\n )\n t_results = t_results[\"results\"]\n acc_list = [\n t_results[key][\"acc\"] for key in t_results.keys() if \"acc\" in t_results[key]\n ]\n t_results[\"mean\"] = sum(acc_list) / len(acc_list)\n results.update(t_results)\n print(results)\n # print mean\n print(f\"\\n\\n===== mean acc: {sum(acc_list)/len(acc_list)} =====\\n\\n\")\n\n return results"
},
{
"identifier": "evaluate_perplexity",
"path": "evaluate.py",
"snippet": "@torch.no_grad()\ndef evaluate_perplexity(model, dataset, limit):\n \"\"\"\n dataset: input ids tensor of shape [batch, sequence length]\n \"\"\"\n nsamples, seqlen = dataset.size()\n\n nlls = []\n\n for i in range(nsamples):\n if i == limit:\n break\n input_ids = dataset[i:i+1,:-1].to(model.device)\n labels = dataset[i:i+1,1:].contiguous()\n logits = model(input_ids=input_ids)[0]\n shift_logits = logits[:, :, :]\n shift_labels = labels.to(model.device)\n loss_fct = nn.CrossEntropyLoss()\n loss = loss_fct(\n shift_logits.view(-1, shift_logits.size(-1)),\n shift_labels.view(-1),\n )\n neg_log_likelihood = loss.float() * seqlen\n nlls.append(neg_log_likelihood)\n ppl = torch.exp(torch.stack(nlls).sum() / (len(nlls) * seqlen))\n return ppl.item()"
},
{
"identifier": "SVDLinear",
"path": "modules/svd_linear.py",
"snippet": "class SVDLinear(nn.Module):\n def __init__(self, U, S, V, bias=None,sigma_fuse='UV') -> None:\n super().__init__()\n self.ALinear = nn.Linear(U.size(1), U.size(0), bias=bias is not None)\n \n if bias is not None:\n self.ALinear.bias.data = bias\n self.BLinear = nn.Linear(V.size(1), V.size(0), bias=False)\n self.truncation_rank=S.size(0)\n if sigma_fuse == 'UV':\n self.ALinear.weight.data = U.mul(S.sqrt()).contiguous()\n self.BLinear.weight.data = V.t().mul(S.sqrt().view(-1, 1)).contiguous()\n elif sigma_fuse == 'U':\n self.ALinear.weight.data = U.mul(S).contiguous()\n self.BLinear.weight.data = V.t().contiguous()\n elif sigma_fuse == 'V':\n self.ALinear.weight.data = U.contiguous()\n self.BLinear.weight.data = V.t().mul(S.view(-1, 1)).contiguous()\n \n\n @staticmethod\n def from_linear(\n linear: nn.Linear,\n param_ratio: float,\n act_aware=False,\n ic_split=1,\n oc_split=1,\n alpha=1,\n sigma_fuse=\"UV\"\n ):\n if param_ratio >= 1:\n return linear\n n_params = linear.weight.numel()\n compressed_params = int(n_params * param_ratio)\n assert ic_split == 1 or oc_split == 1\n rank = compressed_params // (linear.in_features + linear.out_features)\n # print(\"rank\", rank)\n w = linear.weight.data.float()\n if act_aware:\n scaling_diag_matrix = 1 # avoid zero division\n if hasattr(linear, \"scaling_diag_matrix\"):\n # print(\"WARNING: scaling_diag_matrix is used\")\n scaling_diag_matrix *= linear.scaling_diag_matrix**alpha\n # scaling_diag_matrix *= linear.scaling_diag_matrix**0.5\n if hasattr(linear, \"fisher_info\"):\n scaling_diag_matrix *= linear.fisher_info**alpha\n # scaling_diag_matrix *= linear.fisher_info**1\n # if not (scaling_diag_matrix == scaling_diag_matrix).all():\n # breakpoint()\n scaling_diag_matrix += 1e-6 # avoid zero division\n w = w * scaling_diag_matrix.view(1, -1)\n Us = []\n Ss = []\n Vs = []\n try:\n U, S, V = torch.svd_lowrank(w, q=rank)\n except:\n print(f\"svd failed for {linear}, disable act_aware\")\n return (\n nn.Linear(linear.in_features, linear.out_features)\n .to(linear.weight.dtype)\n .to(linear.weight.device)\n )\n if act_aware:\n V = V / scaling_diag_matrix.view(-1, 1)\n Us = [U]\n Ss = [S]\n Vs = [V]\n\n if linear.bias is not None:\n bias = linear.bias.data\n else:\n bias = None\n\n # nan or inf check\n for S in Ss:\n if (S!=S).any():\n print(\"nan in S\")\n return (\n nn.Linear(linear.in_features, linear.out_features)\n .to(linear.weight.dtype)\n .to(linear.weight.device)\n )\n for U in Us:\n if (U!=U).any():\n print(\"nan in U\")\n return (\n nn.Linear(linear.in_features, linear.out_features)\n .to(linear.weight.dtype)\n .to(linear.weight.device)\n )\n for V in Vs:\n if (V!=V).any():\n print(\"nan in V\")\n return (\n nn.Linear(linear.in_features, linear.out_features)\n .to(linear.weight.dtype)\n .to(linear.weight.device)\n )\n\n assert len(Us) == len(Ss) == len(Vs) == 1\n new_linear = SVDLinear(Us[0], Ss[0], Vs[0], bias,sigma_fuse)\n return new_linear.to(linear.weight.dtype)\n\n def forward(self, inp):\n # compute USV^Tx + b\n y = self.BLinear(inp)\n y = self.ALinear(y)\n return y"
}
] | import os
import torch
import torch.nn as nn
from evaluate import evaluate_model, evaluate_perplexity
from modules.svd_linear import SVDLinear
from tqdm import tqdm | 3,314 |
def binary_search_truncation_rank(model, sensitivity_dict, calib_loader, args):
module_dict = {name: module for name, module in model.named_modules()}
full_name_dict = {module: name for name, module in model.named_modules()}
linear_info = {}
modules = [model]
while len(modules) > 0:
submodule = modules.pop()
for name, raw_linear in submodule.named_children():
if isinstance(raw_linear, nn.Linear):
full_name = full_name_dict[raw_linear]
linear_info[raw_linear] = {
"father": submodule,
"name": name,
"full_name": full_name,
}
else:
modules.append(raw_linear)
sensitivity_list = []
for layername, v in sensitivity_dict.items():
for ratio, ppl in v.items():
sensitivity_list.append((layername, ratio, ppl))
sorted_sensitive_list = sorted(sensitivity_list, key=lambda x: -x[2])
# binary search
high = len(sorted_sensitive_list) - 1
low = 0
assert args.ppl_target > 0 or args.param_ratio_target > 0
input_ids = torch.cat([_["input_ids"] for _ in calib_loader], 0)
while low < high:
mid = (low + high) // 2
layers_min_ratio = {layername: 1 for layername in sensitivity_dict.keys()}
for layername, ratio, ppl in sorted_sensitive_list[mid:]:
layers_min_ratio[layername] = min(layers_min_ratio[layername], ratio)
tot_params = 0
compress_params = 0
if args.ppl_target > 0:
for layername, ratio in layers_min_ratio.items():
raw_linear = module_dict[layername]
info = linear_info[raw_linear]
svd_linear = SVDLinear.from_linear(
raw_linear,
param_ratio=ratio,
alpha=args.alpha,
act_aware=args.act_aware,
sigma_fuse=args.sigma_fuse,
)
setattr(info["father"], info["name"], svd_linear)
tot_params += raw_linear.weight.numel()
compress_params += raw_linear.weight.numel() * ratio
|
def binary_search_truncation_rank(model, sensitivity_dict, calib_loader, args):
module_dict = {name: module for name, module in model.named_modules()}
full_name_dict = {module: name for name, module in model.named_modules()}
linear_info = {}
modules = [model]
while len(modules) > 0:
submodule = modules.pop()
for name, raw_linear in submodule.named_children():
if isinstance(raw_linear, nn.Linear):
full_name = full_name_dict[raw_linear]
linear_info[raw_linear] = {
"father": submodule,
"name": name,
"full_name": full_name,
}
else:
modules.append(raw_linear)
sensitivity_list = []
for layername, v in sensitivity_dict.items():
for ratio, ppl in v.items():
sensitivity_list.append((layername, ratio, ppl))
sorted_sensitive_list = sorted(sensitivity_list, key=lambda x: -x[2])
# binary search
high = len(sorted_sensitive_list) - 1
low = 0
assert args.ppl_target > 0 or args.param_ratio_target > 0
input_ids = torch.cat([_["input_ids"] for _ in calib_loader], 0)
while low < high:
mid = (low + high) // 2
layers_min_ratio = {layername: 1 for layername in sensitivity_dict.keys()}
for layername, ratio, ppl in sorted_sensitive_list[mid:]:
layers_min_ratio[layername] = min(layers_min_ratio[layername], ratio)
tot_params = 0
compress_params = 0
if args.ppl_target > 0:
for layername, ratio in layers_min_ratio.items():
raw_linear = module_dict[layername]
info = linear_info[raw_linear]
svd_linear = SVDLinear.from_linear(
raw_linear,
param_ratio=ratio,
alpha=args.alpha,
act_aware=args.act_aware,
sigma_fuse=args.sigma_fuse,
)
setattr(info["father"], info["name"], svd_linear)
tot_params += raw_linear.weight.numel()
compress_params += raw_linear.weight.numel() * ratio | ppl = evaluate_perplexity(model, input_ids, args.n_calib_samples) | 1 | 2023-11-10 02:18:36+00:00 | 4k |
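binary_search_truncation_rank in the record above sorts (layer, ratio, ppl) triples by sensitivity and binary-searches for an index mid such that applying every ratio from mid onward satisfies the target; the cropped code stops before the low/high update, so the update direction below is an assumption. A minimal self-contained sketch of the search with toy data in place of a real model:

def binary_search_threshold(sorted_items, meets_target):
    # sorted_items: (layer, ratio, score) triples, most sensitive first.
    # meets_target: callable taking a {layer: ratio} dict, returns True when
    # the quality constraint (e.g. a perplexity target) is satisfied.
    low, high = 0, len(sorted_items) - 1
    while low < high:
        mid = (low + high) // 2
        ratios = {}
        # Apply every compression at or beyond mid, keeping the smallest ratio per layer.
        for layer, ratio, _score in sorted_items[mid:]:
            ratios[layer] = min(ratios.get(layer, 1.0), ratio)
        if meets_target(ratios):
            high = mid      # constraint holds: try a smaller mid (apply more compression)
        else:
            low = mid + 1   # constraint violated: back off
    return low

items = [("layer_a", 0.2, 9.0), ("layer_b", 0.2, 8.0), ("layer_a", 0.6, 2.0), ("layer_b", 0.7, 1.0)]
print(binary_search_threshold(items, lambda r: all(v >= 0.5 for v in r.values())))  # -> 2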
chaiNNer-org/spandrel | src/spandrel/__helpers/loader.py | [
{
"identifier": "canonicalize_state_dict",
"path": "src/spandrel/__helpers/canonicalize.py",
"snippet": "def canonicalize_state_dict(state_dict: StateDict) -> StateDict:\n \"\"\"\n Canonicalize a state dict.\n\n This function is used to canonicalize a state dict, so that it can be\n used for architecture detection and loading.\n\n This function is not intended to be used in production code.\n \"\"\"\n\n # the real state dict might be inside a dict with a known key\n unwrap_keys = [\"state_dict\", \"params_ema\", \"params-ema\", \"params\", \"model\", \"net\"]\n for unwrap_key in unwrap_keys:\n if unwrap_key in state_dict and isinstance(state_dict[unwrap_key], dict):\n state_dict = state_dict[unwrap_key]\n break\n\n # remove known common prefixes\n state_dict = remove_common_prefix(state_dict, [\"module.\", \"netG.\"])\n\n return state_dict"
},
{
"identifier": "MAIN_REGISTRY",
"path": "src/spandrel/__helpers/main_registry.py",
"snippet": "MAIN_REGISTRY = ArchRegistry()"
},
{
"identifier": "ModelDescriptor",
"path": "src/spandrel/__helpers/model_descriptor.py",
"snippet": "T = TypeVar(\"T\", bound=torch.nn.Module, covariant=True)\n SUPPORTED = 1\n DISCOURAGED = 2\n INTERNAL = 3\nclass SizeRequirements:\nclass ModelTiling(Enum):\nclass ModelBase(ABC, Generic[T]):\nclass ImageModelDescriptor(ModelBase[T], Generic[T]):\nclass MaskedImageModelDescriptor(ModelBase[T], Generic[T]):\n def none(self) -> bool:\n def check(self, width: int, height: int) -> bool:\n def __init__(\n self,\n model: T,\n state_dict: StateDict,\n architecture: str,\n tags: list[str],\n supports_half: bool,\n supports_bfloat16: bool,\n scale: int,\n input_channels: int,\n output_channels: int,\n size_requirements: SizeRequirements | None = None,\n tiling: ModelTiling = ModelTiling.SUPPORTED,\n ):\n def purpose(self) -> Purpose:\n def device(self) -> torch.device:\n def to(self, device: str | torch.device):\n def eval(self):\n def train(self, mode: bool = True):\n def __init__(\n self,\n model: T,\n state_dict: StateDict,\n architecture: str,\n purpose: Literal[\"SR\", \"FaceSR\", \"Restoration\"],\n tags: list[str],\n supports_half: bool,\n supports_bfloat16: bool,\n scale: int,\n input_channels: int,\n output_channels: int,\n size_requirements: SizeRequirements | None = None,\n tiling: ModelTiling = ModelTiling.SUPPORTED,\n call_fn: Callable[[T, Tensor], Tensor] | None = None,\n ):\n def purpose(self) -> Literal[\"SR\", \"FaceSR\", \"Restoration\"]:\n def __call__(self, image: Tensor) -> Tensor:\n def __init__(\n self,\n model: T,\n state_dict: StateDict,\n architecture: str,\n purpose: Literal[\"Inpainting\"],\n tags: list[str],\n supports_half: bool,\n supports_bfloat16: bool,\n input_channels: int,\n output_channels: int,\n size_requirements: SizeRequirements | None = None,\n tiling: ModelTiling = ModelTiling.SUPPORTED,\n call_fn: Callable[[T, Tensor, Tensor], Tensor] | None = None,\n ):\n def purpose(self) -> Literal[\"Inpainting\"]:\n def __call__(self, image: Tensor, mask: Tensor) -> Tensor:"
},
{
"identifier": "ArchRegistry",
"path": "src/spandrel/__helpers/registry.py",
"snippet": "class ArchRegistry:\n \"\"\"\n A registry of architectures.\n\n Architectures are detected/loaded in insertion order unless `before` is specified.\n \"\"\"\n\n def __init__(self):\n self._architectures: list[ArchSupport] = []\n self._ordered: list[ArchSupport] = []\n self._by_id: dict[str, ArchSupport] = {}\n\n def copy(self) -> ArchRegistry:\n \"\"\"\n Returns a copy of the registry.\n \"\"\"\n new = ArchRegistry()\n new._architectures = self._architectures.copy()\n new._ordered = self._ordered.copy()\n new._by_id = self._by_id.copy()\n return new\n\n def __contains__(self, id: str) -> bool:\n return id in self._by_id\n\n def __getitem__(self, id: str) -> ArchSupport:\n return self._by_id[id]\n\n def get(self, id: str) -> ArchSupport | None:\n return self._by_id.get(id, None)\n\n def architectures(\n self,\n order: Literal[\"insertion\", \"detection\"] = \"insertion\",\n ) -> list[ArchSupport]:\n \"\"\"\n Returns a new list with all architectures in the registry.\n\n The order of architectures in the list is either insertion order or the order in which architectures are detected.\n \"\"\"\n if order == \"insertion\":\n return list(self._architectures)\n elif order == \"detection\":\n return list(self._ordered)\n else:\n raise ValueError(f\"Invalid order: {order}\")\n\n def add(self, *architectures: ArchSupport):\n \"\"\"\n Adds the given architectures to the registry.\n\n Throws an error if an architecture with the same ID already exists.\n Throws an error if a circular dependency of `before` references is detected.\n\n If an error is thrown, the registry is left unchanged.\n \"\"\"\n\n new_architectures = self._architectures.copy()\n new_by_id = self._by_id.copy()\n for arch in architectures:\n if arch.id in new_by_id:\n raise ValueError(f\"Duplicate architecture ID: {arch.id}\")\n\n new_architectures.append(arch)\n new_by_id[arch.id] = arch\n\n new_ordered = ArchRegistry._get_ordered(new_architectures)\n\n self._architectures = new_architectures\n self._ordered = new_ordered\n self._by_id = new_by_id\n\n @staticmethod\n def _get_ordered(architectures: list[ArchSupport]) -> list[ArchSupport]:\n inv_before: dict[str, list[str]] = {}\n by_id: dict[str, ArchSupport] = {}\n for arch in architectures:\n by_id[arch.id] = arch\n for before in arch.before:\n if before not in inv_before:\n inv_before[before] = []\n inv_before[before].append(arch.id)\n\n ordered: list[ArchSupport] = []\n seen: set[ArchSupport] = set()\n stack: list[str] = []\n\n def visit(arch: ArchSupport):\n if arch.id in stack:\n raise ValueError(\n f\"Circular dependency in architecture detection: {' -> '.join([*stack, arch.id])}\"\n )\n if arch in seen:\n return\n seen.add(arch)\n stack.append(arch.id)\n\n for before in inv_before.get(arch.id, []):\n visit(by_id[before])\n\n ordered.append(arch)\n stack.pop()\n\n for arch in architectures:\n visit(arch)\n\n return ordered\n\n def load(self, state_dict: StateDict) -> ModelDescriptor:\n \"\"\"\n Detects the architecture of the given state dict and loads it.\n\n This will canonicalize the state dict if it isn't already.\n\n Throws an `UnsupportedModelError` if the model architecture is not supported.\n \"\"\"\n\n state_dict = canonicalize_state_dict(state_dict)\n\n for arch in self._ordered:\n if arch.detect(state_dict):\n return arch.load(state_dict)\n\n raise UnsupportedModelError"
},
{
"identifier": "RestrictedUnpickle",
"path": "src/spandrel/__helpers/unpickler.py",
"snippet": "class RestrictedUnpickler(pickle.Unpickler):\n def find_class(self, module: str, name: str):"
}
] | import os
import torch
from pathlib import Path
from safetensors.torch import load_file
from .canonicalize import canonicalize_state_dict
from .main_registry import MAIN_REGISTRY
from .model_descriptor import ModelDescriptor, StateDict
from .registry import ArchRegistry
from .unpickler import RestrictedUnpickle | 2,515 | from __future__ import annotations
class ModelLoader:
"""Class for automatically loading a pth file into any architecture"""
def __init__(
self,
device: str | torch.device | None = None,
registry: ArchRegistry = MAIN_REGISTRY,
):
if isinstance(device, str):
device = torch.device(device)
self.device: torch.device = device or torch.device("cpu")
self.registry: ArchRegistry = registry
"""
The architecture registry to use for loading models.
*Note:* Unless initialized with a custom registry, this is the global main registry (`MAIN_REGISTRY`).
Modifying this registry will affect all `ModelLoader` instances without a custom registry.
"""
def load_from_file(self, path: str | Path) -> ModelDescriptor:
"""
Load a model from the given file path.
Throws a `ValueError` if the file extension is not supported.
Throws an `UnsupportedModelError` if the model architecture is not supported.
"""
state_dict = self.load_state_dict_from_file(path)
return self.load_from_state_dict(state_dict)
def load_state_dict_from_file(self, path: str | Path) -> StateDict:
"""
Load the state dict of a model from the given file path.
State dicts are typically only useful to pass them into the `load`
function of a specific architecture.
Throws a `ValueError` if the file extension is not supported.
"""
extension = os.path.splitext(path)[1].lower()
state_dict: StateDict
if extension == ".pt":
state_dict = self._load_torchscript(path)
elif extension == ".pth":
state_dict = self._load_pth(path)
elif extension == ".ckpt":
state_dict = self._load_ckpt(path)
elif extension == ".safetensors":
state_dict = self._load_safetensors(path)
else:
raise ValueError(
f"Unsupported model file extension {extension}. Please try a supported model type."
)
return canonicalize_state_dict(state_dict)
def load_from_state_dict(self, state_dict: StateDict) -> ModelDescriptor:
"""
Load a model from the given state dict.
Throws an `UnsupportedModelError` if the model architecture is not supported.
"""
return self.registry.load(state_dict).to(self.device)
def _load_pth(self, path: str | Path) -> StateDict:
return torch.load(
path,
map_location=self.device,
| from __future__ import annotations
class ModelLoader:
"""Class for automatically loading a pth file into any architecture"""
def __init__(
self,
device: str | torch.device | None = None,
registry: ArchRegistry = MAIN_REGISTRY,
):
if isinstance(device, str):
device = torch.device(device)
self.device: torch.device = device or torch.device("cpu")
self.registry: ArchRegistry = registry
"""
The architecture registry to use for loading models.
*Note:* Unless initialized with a custom registry, this is the global main registry (`MAIN_REGISTRY`).
Modifying this registry will affect all `ModelLoader` instances without a custom registry.
"""
def load_from_file(self, path: str | Path) -> ModelDescriptor:
"""
Load a model from the given file path.
Throws a `ValueError` if the file extension is not supported.
Throws an `UnsupportedModelError` if the model architecture is not supported.
"""
state_dict = self.load_state_dict_from_file(path)
return self.load_from_state_dict(state_dict)
def load_state_dict_from_file(self, path: str | Path) -> StateDict:
"""
Load the state dict of a model from the given file path.
State dicts are typically only useful to pass them into the `load`
function of a specific architecture.
Throws a `ValueError` if the file extension is not supported.
"""
extension = os.path.splitext(path)[1].lower()
state_dict: StateDict
if extension == ".pt":
state_dict = self._load_torchscript(path)
elif extension == ".pth":
state_dict = self._load_pth(path)
elif extension == ".ckpt":
state_dict = self._load_ckpt(path)
elif extension == ".safetensors":
state_dict = self._load_safetensors(path)
else:
raise ValueError(
f"Unsupported model file extension {extension}. Please try a supported model type."
)
return canonicalize_state_dict(state_dict)
def load_from_state_dict(self, state_dict: StateDict) -> ModelDescriptor:
"""
Load a model from the given state dict.
Throws an `UnsupportedModelError` if the model architecture is not supported.
"""
return self.registry.load(state_dict).to(self.device)
def _load_pth(self, path: str | Path) -> StateDict:
return torch.load(
path,
map_location=self.device, | pickle_module=RestrictedUnpickle, # type: ignore | 4 | 2023-11-17 01:11:47+00:00 | 4k |
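load_state_dict_from_file in the record above always passes the loaded dict through canonicalize_state_dict, which (per the context snippet) unwraps nested keys such as "state_dict" or "params_ema" and strips common prefixes such as "module.". A toy, dict-only sketch of that idea, handling only the "module." prefix and not meant as the spandrel implementation:

def canonicalize(sd: dict) -> dict:
    # Unwrap a nested container key if present.
    for key in ("state_dict", "params_ema", "params"):
        if key in sd and isinstance(sd[key], dict):
            sd = sd[key]
            break
    # Strip the "module." prefix when every key carries it.
    if sd and all(k.startswith("module.") for k in sd):
        sd = {k[len("module."):]: v for k, v in sd.items()}
    return sd

raw = {"state_dict": {"module.conv.weight": 1, "module.conv.bias": 2}}
print(canonicalize(raw))  # {'conv.weight': 1, 'conv.bias': 2}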
ottoweiss/pdf-to-audiobook | main.py | [
{
"identifier": "get_audiobook",
"path": "src/audiobook.py",
"snippet": "def get_audiobook(json_file, book_title=\"audiobook\", voice=\"onyx\", speed=\"1.0\"):\n book_directory = f\"{book_title}\"\n atexit.register(save_full, book_title)\n\n if not os.path.exists(book_directory):\n os.makedirs(book_directory)\n\n with open(json_file, \"r\") as j:\n vals = json.load(j)\n\n for text_index, text_list in tqdm(enumerate(vals), total=len(vals)):\n audio_section_file = f\"{book_directory}/part{text_index}.mp3\"\n if not os.path.exists(audio_section_file):\n with ThreadPoolExecutor(max_workers=len(text_list)) as executor:\n futures = [executor.submit(play_split_and_combine, i, text, voice, speed) for i, text in enumerate(text_list)]\n combined_audio = AudioSegment.silent(duration=0)\n for future in futures:\n try:\n ind, audio = future.result()\n combined_audio += audio\n except Exception as e:\n print(f\"Error processing segment {ind}: {e}\")\n combined_audio.export(audio_section_file, format=\"mp3\")\n save_full(book_title)"
},
{
"identifier": "get_rewrite",
"path": "src/clean_pdf.py",
"snippet": "def get_rewrite(text_json: str, output_json: str):\n with open(text_json, \"r\") as fj:\n text_json: dict[str, list] = json.load(fj)\n if not os.path.exists(output_json):\n with open(output_json, \"w\") as fj:\n json.dump({}, fj)\n with open(output_json, \"r\") as fj:\n current_texts: dict[str, list] = json.load(fj)\n if isinstance(current_texts, list):\n current_texts = {}\n new_texts_json = {}\n for section, chunks in tqdm(text_json.items(), total=len(text_json.keys())):\n if section in current_texts:\n new_texts_json[section] = current_texts[section]\n else:\n with concurrent.futures.ThreadPoolExecutor(max_workers=os.cpu_count() - 1) as executor:\n new_chunks = list(executor.map(gpt_rewrite, chunks))\n new_texts_json[section] = new_chunks\n with open(output_json, \"w\") as js:\n json.dump(new_texts_json, js, indent=2)\n with open(output_json, \"w\") as js:\n final_reformatted_list = finalize_list(new_texts_json)\n json.dump(final_reformatted_list, js, indent=2)"
},
{
"identifier": "extract_text_from_pdf",
"path": "src/pdf_to_json.py",
"snippet": "def extract_text_from_pdf(pdf_path, sections, page_difference=0):\n \"\"\"\n Extracts the text from specified sections of a PDF file.\n\n Parameters:\n pdf_path (str): The file path to the PDF file.\n sections (dict): Dictionary specifying sections and their corresponding page ranges.\n\n Returns:\n dict: Dictionary where keys are section titles and values are extracted text for each section.\n \"\"\"\n doc = fitz.open(pdf_path)\n section_ranges = [(title, (s + page_difference, e + page_difference)) for title, (s, e) in sections.items()]\n section_texts = {}\n for page_num in tqdm(range(1, len(doc) + 1)):\n page = doc[page_num - 1]\n cleaned_text = page.get_text()\n # Dictionary to store the extracted text for each section\n for title, (start, end) in section_ranges:\n if start <= page_num <= end:\n cleaned_text = cleaned_text.replace('–', '-').replace('—', '-') # Replace en-dash and em-dash with hyphen\n cleaned_text = cleaned_text.replace(\"\\x0c\", \"\") # Replace line breaks with spaces\n\n cleaned_text = spellcheck(cleaned_text).text\n \n # Append the processed text to the respective section\n if title in section_texts:\n section_texts[title] += cleaned_text\n else:\n section_texts[title] = cleaned_text\n break\n section_chunks = {}\n overall_count = 0\n total_text = \"\"\n for title, text in section_texts.items():\n total_text += text\n chunks = split_into_chunks(text)\n current_chunks = list()\n for i, chunk in enumerate(chunks):\n current_chunks.append(chunk)\n if i % 3 == 0 and i != 0:\n section_chunks[overall_count] = current_chunks\n overall_count += 1\n current_chunks = list()\n section_chunks[overall_count] = current_chunks\n\n encoding = tiktoken.encoding_for_model('gpt-3.5-turbo')\n total_tokens = len(encoding.encode(total_text))\n rewrite_cost = (total_tokens/1000) * 0.0030\n audio_cost = ((len(total_text) * 4/5)/1000) * 0.015\n audio_time = (len(total_text) * 4/5) * 0.0014\n rewrite_time = (0.0145) * total_tokens\n total_time = audio_time + rewrite_time\n total_time_secs = str(int(total_time % 60))\n total_time_mins = str(int(total_time // 60))\n total_time_str = f\"{total_time_mins} mins {total_time_secs} secs\"\n total_cost = round(rewrite_cost + audio_cost, 2)\n total_cost_str = f\"{total_cost} $\"\n with open(pdf_path.replace(\".pdf\", \".json\"), \"w\", encoding=\"utf-8\") as sections_json:\n json.dump(section_chunks, sections_json, indent=2)\n return total_cost_str, total_time_str"
}
] | from src.audiobook import get_audiobook
from src.clean_pdf import get_rewrite
from src.pdf_to_json import extract_text_from_pdf
from colorama import Fore, Style
import os
import time | 1,712 |
def input_q(text):
print(Fore.YELLOW + text, end="")
inp = input()
print(Style.RESET_ALL, end="")
if inp == ":q":
exit()
return inp
def print_info(message):
print(Fore.CYAN + message + Style.RESET_ALL)
def print_error(message):
print(Fore.RED + "ERROR: " + message + Style.RESET_ALL)
def print_success(message):
print(Fore.GREEN + message + Style.RESET_ALL)
if __name__ == "__main__":
print_info("Enter :q to quit at any time.\n")
print_info("Before Continuing, ensure your Openai API key is in the config.py file.\n")
file_included = False
while not file_included:
pdf_file_q = input_q("Have you added the pdf file to this folder? (y/[n]): ")
if pdf_file_q.lower() == "y":
correct_pdf_file_name = False
while not correct_pdf_file_name:
pdf_file_name = input_q("Enter pdf file name: ")
if os.path.exists(pdf_file_name):
correct_pdf_file_name = True
else:
print_error("File not in folder. Please try again.")
file_included = True
else:
print_info("\nDownload File Here then Add to Folder: https://singlelogin.re/\n")
time.sleep(3)
correct_page_range = False
while not correct_page_range:
try:
page_start = int(input_q("What page should the audiobook start?: ").strip())
page_end = int(input_q("What page should the audiobook end?: ").strip())
title = input_q("Enter Your Book Title: ").strip()
|
def input_q(text):
print(Fore.YELLOW + text, end="")
inp = input()
print(Style.RESET_ALL, end="")
if inp == ":q":
exit()
return inp
def print_info(message):
print(Fore.CYAN + message + Style.RESET_ALL)
def print_error(message):
print(Fore.RED + "ERROR: " + message + Style.RESET_ALL)
def print_success(message):
print(Fore.GREEN + message + Style.RESET_ALL)
if __name__ == "__main__":
print_info("Enter :q to quit at any time.\n")
print_info("Before Continuing, ensure your Openai API key is in the config.py file.\n")
file_included = False
while not file_included:
pdf_file_q = input_q("Have you added the pdf file to this folder? (y/[n]): ")
if pdf_file_q.lower() == "y":
correct_pdf_file_name = False
while not correct_pdf_file_name:
pdf_file_name = input_q("Enter pdf file name: ")
if os.path.exists(pdf_file_name):
correct_pdf_file_name = True
else:
print_error("File not in folder. Please try again.")
file_included = True
else:
print_info("\nDownload File Here then Add to Folder: https://singlelogin.re/\n")
time.sleep(3)
correct_page_range = False
while not correct_page_range:
try:
page_start = int(input_q("What page should the audiobook start?: ").strip())
page_end = int(input_q("What page should the audiobook end?: ").strip())
title = input_q("Enter Your Book Title: ").strip() | cost, e_time = extract_text_from_pdf(pdf_file_name, {title: (page_start, page_end)}) | 2 | 2023-11-16 20:37:24+00:00 | 4k |
GoldenThrust/Virtual-Bank | api/transactions/views.py | [
{
"identifier": "Transaction",
"path": "api/transactions/models.py",
"snippet": "class Transaction(models.Model):\n TRANSACTION_TYPES = [\n ('DEPOSIT', 'Deposit'),\n ('TRANSFER', 'Transfer'),\n ('DEBIT_CARD', 'Debit Card'),\n ('PAYMENT', 'Payment'),\n ]\n\n account = models.ForeignKey(Account, on_delete=models.CASCADE, default=11)\n account_number = models.BigIntegerField()\n transaction_type = models.CharField(max_length=20, choices=TRANSACTION_TYPES, default=\"DEPOSIT\")\n amount = models.DecimalField(max_digits=15, decimal_places=2)\n identifier = models.UUIDField(default=uuid.uuid4, editable=False, unique=True)\n date = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return f\"Transaction ID: {self.pk} - Type: {self.get_transaction_type_display()} - Amount: {self.amount}\""
},
{
"identifier": "TransactionSerializer",
"path": "api/transactions/serializers.py",
"snippet": "class TransactionSerializer(serializers.ModelSerializer):\n date = serializers.DateTimeField(read_only=True)\n account = AccountSerializer(read_only=True)\n account_number = serializers.CharField(write_only=True)\n\n class Meta:\n model = Transaction\n fields = [\n \"id\",\n \"account\",\n \"account_number\",\n \"transaction_type\",\n \"amount\",\n \"identifier\",\n \"date\",\n ]\n\n extra_kwargs = {\n \"account\": {\"read_only\": True},\n }"
},
{
"identifier": "TransferTransactionSerializer",
"path": "api/transactions/serializers.py",
"snippet": "class TransferTransactionSerializer(serializers.ModelSerializer):\n date = serializers.DateTimeField(read_only=True)\n transaction_partner_account_number = serializers.CharField(write_only=True)\n account = AccountSerializer(read_only=True)\n account_number = serializers.CharField(write_only=True)\n\n class Meta:\n model = Transaction\n fields = [\n \"account\",\n \"account_number\",\n \"transaction_partner_account_number\",\n \"transaction_type\",\n \"amount\",\n \"identifier\",\n \"date\",\n ]\n\n extra_kwargs = {\n \"transaction_type\": {\"read_only\": True},\n }"
},
{
"identifier": "DebitCardPaymentSerializer",
"path": "api/transactions/serializers.py",
"snippet": "class DebitCardPaymentSerializer(serializers.ModelSerializer):\n account = AccountSerializer(read_only=True)\n cvv = serializers.CharField(write_only=True)\n card_number = serializers.CharField(write_only=True)\n expiry_date = serializers.CharField(write_only=True)\n cvv = serializers.CharField(write_only=True)\n\n\n class Meta:\n model = Transaction\n fields = [\n \"account\",\n \"account_number\",\n \"card_number\",\n \"transaction_type\",\n \"expiry_date\",\n \"cvv\",\n \"amount\",\n \"identifier\",\n \"date\",\n ]"
},
{
"identifier": "TransactionHistorySerializer",
"path": "api/transactions/serializers.py",
"snippet": "class TransactionHistorySerializer(serializers.ModelSerializer):\n class Meta:\n model = Transaction\n fields = [\"transaction_type\"]\n\n # Custom representation method to handle serialization based on transaction type\n def to_representation(self, instance):\n from transfers.serializers import TransferSerializer\n from debit_cards.serializers import TransactionDebitCardSerializer\n from deposits.serializers import DepositSerializer\n\n # Fetching transaction type, current user, and URL name from context\n transaction_type = instance.transaction_type\n user = self.context['request'].user\n url_name = self.context['request'].resolver_match.url_name\n \n # Initializing serializer and related_instance variables\n serializer = None\n related_instance = None\n \n # Checking transaction type and assigning related instance accordingly\n if transaction_type == 'TRANSFER':\n related_instance = instance.transfer\n elif transaction_type == 'DEBIT_CARD':\n related_instance = instance.debit_card\n elif transaction_type == 'DEPOSIT':\n related_instance = instance.deposit\n \n if related_instance:\n # Checking permissions based on transaction type and user access\n if transaction_type == 'DEPOSIT' or (related_instance.transaction.account.user == user or url_name == 'transactions_detail'):\n # Initializing the appropriate serializer based on transaction type\n if transaction_type == 'TRANSFER':\n serializer = TransferSerializer(instance=related_instance, context={'request': self.context['request']})\n elif transaction_type == 'DEBIT_CARD':\n serializer = TransactionDebitCardSerializer(instance=related_instance, context={'request': self.context['request']})\n elif transaction_type == 'DEPOSIT':\n serializer = DepositSerializer(instance=related_instance, context={'request': self.context['request']})\n\n # Returning serialized data if serializer exists and has a 'data' attribute, otherwise None\n return serializer.data if (serializer and hasattr(serializer, 'data')) else serializer"
}
] | from rest_framework import generics
from datetime import datetime
from notifications.utils import process_notifications
from django.utils.timezone import localtime
from debit_cards.utils import luhn_checksum
from .models import Transaction
from accounts.models import Account
from deposits.models import Deposit
from transfers.models import Transfer
from .serializers import (
TransactionSerializer,
TransferTransactionSerializer,
DebitCardPaymentSerializer,
TransactionHistorySerializer,
)
from debit_cards.models import DebitCardTransaction, DebitCard
from rest_framework import permissions, status
from rest_framework.response import Response
from rest_framework import exceptions | 3,237 | expiry_date = serializer.validated_data.pop("expiry_date")
transaction_amount = serializer.validated_data.get("amount")
# Validation of expiry date
try:
month, year = expiry_date.split("/")
month = int(month)
year = int(year)
current_year = int(str(datetime.utcnow().year)[2:])
current_month = datetime.utcnow().month
if not (1 <= month <= 12):
raise DateError("Invalid month")
elif year < current_year or (
year == current_year and month < current_month
):
raise DateError("Card has expired")
elif not (year <= 99):
raise DateError("Invalid year")
except DateError as e:
raise exceptions.PermissionDenied(str(e))
except ValueError:
raise exceptions.PermissionDenied("Invalid expiry date")
# validate card number using luhn algorithm
if luhn_checksum(card_number) != 0:
raise exceptions.PermissionDenied("Invalid card number")
card = DebitCard.objects.filter(
card_number=card_number,
cvv=cvv,
expiration_date__year=f"20{year}",
expiration_date__month=month,
).first()
if not card:
print(card)
raise exceptions.PermissionDenied("Invalid card")
if int(account_number) == int(card.account.number):
#notification
notification_message = "The debit card transaction could not be completed."
process_notifications(
self.request.user, "transaction_notification", notification_message
)
raise exceptions.PermissionDenied(
"Sender and transaction partner accounts cannot be the same"
)
self.transaction_partner_account_number = card.account.number
if card.account.balance >= transaction_amount:
transaction_partner_account_name = f'{card.account.user.first_name} {card.account.user.last_name}'
serializer.save(transaction_type="DEBIT_CARD", account=account)
# Update Account Balances
card.account.balance -= transaction_amount
account.balance += transaction_amount
card.account.save()
account.save()
# Create Transfer
transfer = DebitCardTransaction.objects.create(
transaction=serializer.instance,
transaction_partner_account=card.account,
)
# notification
notification_message = f"You've successfully initiated a debit card transaction. {transaction_amount} was debited from your account and sent to {user_name}'s account."
process_notifications(
card.account.user, "transaction_notification", notification_message
)
# notification
notification_message = f"You've received {transaction_amount} from {transaction_partner_account_name} through a debit card transaction."
process_notifications(
self.request.user, "transaction_notification", notification_message
)
else:
# notification
notification_message = f"The debit card transaction from {user_name} could not be completed due to insufficient funds in their account."
process_notifications(
self.request.user, "transaction_notification", notification_message
)
# notification
notification_message = "Your debit card transaction couldn't be completed due to insufficient funds."
process_notifications(
card.account.user, "transaction_notification", notification_message
)
raise exceptions.PermissionDenied("Insufficient funds")
def create(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
self.perform_create(serializer)
serialized_data = serializer.data
user = Account.objects.get(number=self.transaction_partner_account_number).user
serialized_data[
"transaction_partner_name"
] = f"{user.first_name} {user.last_name}"
serialized_data["transaction_partner_account_number"] = int(
self.transaction_partner_account_number
)
headers = self.get_success_headers(serializer.data)
return Response(
serialized_data, status=status.HTTP_201_CREATED, headers=headers
)
class TransactionHistory(generics.ListAPIView):
queryset = Transaction.objects.all()
|
# Models and Serializers
# from payments.models import Payment
class DateError(Exception):
pass
class TransactionList(generics.ListCreateAPIView):
queryset = Transaction.objects.all()
serializer_class = TransactionSerializer
permission_classes = [permissions.IsAdminUser]
class TransactionDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = Transaction.objects.all()
serializer_class = TransactionSerializer
permission_classes = [permissions.IsAdminUser]
class TransactionDepositCreate(generics.CreateAPIView):
queryset = Transaction.objects.all()
serializer_class = TransactionSerializer
permission_classes = [permissions.IsAuthenticated]
def perform_create(self, serializer):
account_number = serializer.validated_data.get("account_number")
account = Account.objects.filter(number=account_number).first()
if not account:
raise exceptions.NotFound("Account not found")
if account.user != self.request.user:
raise exceptions.PermissionDenied("Account does not belong to this user")
serializer.save(transaction_type="DEPOSIT", account=account)
# Update Account Balance
transaction_amount = serializer.validated_data.get("amount")
account.balance += transaction_amount
account.save()
# Create Deposit
deposit = Deposit.objects.create(transaction=serializer.instance)
# notification
notification_message = f"A deposit of {transaction_amount} has been credited to your account ({account_number})."
process_notifications(
self.request.user, "transaction_notification", notification_message
)
class TransactionTransferCreate(generics.CreateAPIView):
queryset = Transaction.objects.all()
serializer_class = TransferTransactionSerializer
permission_classes = [permissions.IsAuthenticated]
transaction_partner_account_number = None
def perform_create(self, serializer):
account_number = serializer.validated_data.get("account_number")
account = Account.objects.filter(number=account_number).first()
user = self.request.user
user_name = f"{user.first_name} {user.last_name}"
if not account:
raise exceptions.NotFound("Account not found")
if account.user != user:
# account.user.is_active = False
# account.user.save()
# notification
notification_message = f"{user_name} attempted a transfer using your account ({account.number}). For security purposes, the action has been flagged."
process_notifications(account.user, "security_notification", notification_message)
raise exceptions.PermissionDenied("Account does not belong to this user")
transaction_amount = serializer.validated_data.get("amount")
self.transaction_partner_account_number = serializer.validated_data.pop(
"transaction_partner_account_number"
)
if int(account_number) == int(self.transaction_partner_account_number):
# notification
notification_message = "The transfer could not be completed."
process_notifications(
self.request.user, "transaction_notification", notification_message
)
raise exceptions.PermissionDenied(
"Sender and transaction partner accounts cannot be identical."
)
transaction_partner_account = Account.objects.filter(
number=self.transaction_partner_account_number
).first()
if not transaction_partner_account:
# notification
notification_message = "The transfer could not be completed due to an invalid transaction partner account number."
process_notifications(
self.request.user, "transaction_notification", notification_message
)
raise exceptions.NotFound("Transaction partner Account not found")
if account.balance >= transaction_amount:
transaction_partner_account_name = f'{transaction_partner_account.user.first_name} {transaction_partner_account.user.last_name}'
serializer.save(transaction_type="TRANSFER", account=account)
# Update Account Balances
account.balance -= transaction_amount
transaction_partner_account.balance += transaction_amount
account.save()
transaction_partner_account.save()
# Create Transfer
transfer = Transfer.objects.create(
transaction=serializer.instance,
transaction_partner_account=transaction_partner_account,
)
# notification
notification_message = f"The transfer of {transaction_amount} to {transaction_partner_account_name}'s account was successful."
process_notifications(
self.request.user, "transaction_notification", notification_message
)
# notification
notification_message = f"{user_name} has sent {transaction_amount} to your account ({transaction_partner_account.number})."
process_notifications(
transaction_partner_account.user, "transaction_notification", notification_message
)
else:
# notification
notification_message = "The transfer could not be completed due to insufficient funds."
process_notifications(
self.request.user, "transaction_notification", notification_message
)
raise exceptions.PermissionDenied("Insufficient funds")
def create(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
self.perform_create(serializer)
serialized_data = serializer.data
user = Account.objects.get(number=self.transaction_partner_account_number).user
serialized_data[
"transaction_partner_name"
] = f"{user.first_name} {user.last_name}"
serialized_data["transaction_partner_account_number"] = int(
self.transaction_partner_account_number
)
headers = self.get_success_headers(serializer.data)
return Response(
serialized_data, status=status.HTTP_201_CREATED, headers=headers
)
class TransactionDebitCardCreate(generics.CreateAPIView):
queryset = Transaction.objects.all()
serializer_class = DebitCardPaymentSerializer
permission_classes = [permissions.IsAuthenticated]
card_owner = None
transaction_partner_account_number = None
def perform_create(self, serializer):
account_number = serializer.validated_data.get("account_number")
account = Account.objects.filter(number=account_number).first()
user = self.request.user
user_name = f"{user.first_name} {user.last_name}"
if not account:
raise exceptions.NotFound("Account not found")
if account.user != self.request.user:
raise exceptions.PermissionDenied("Account does not belong to this user")
transaction_amount = serializer.validated_data.get("amount")
card_number = serializer.validated_data.pop("card_number")
cvv = serializer.validated_data.pop("cvv")
expiry_date = serializer.validated_data.pop("expiry_date")
transaction_amount = serializer.validated_data.get("amount")
# Validation of expiry date
try:
month, year = expiry_date.split("/")
month = int(month)
year = int(year)
current_year = int(str(datetime.utcnow().year)[2:])
current_month = datetime.utcnow().month
if not (1 <= month <= 12):
raise DateError("Invalid month")
elif year < current_year or (
year == current_year and month < current_month
):
raise DateError("Card has expired")
elif not (year <= 99):
raise DateError("Invalid year")
except DateError as e:
raise exceptions.PermissionDenied(str(e))
except ValueError:
raise exceptions.PermissionDenied("Invalid expiry date")
# validate card number using luhn algorithm
if luhn_checksum(card_number) != 0:
raise exceptions.PermissionDenied("Invalid card number")
card = DebitCard.objects.filter(
card_number=card_number,
cvv=cvv,
expiration_date__year=f"20{year}",
expiration_date__month=month,
).first()
if not card:
print(card)
raise exceptions.PermissionDenied("Invalid card")
if int(account_number) == int(card.account.number):
#notification
notification_message = "The debit card transaction could not be completed."
process_notifications(
self.request.user, "transaction_notification", notification_message
)
raise exceptions.PermissionDenied(
"Sender and transaction partner accounts cannot be the same"
)
self.transaction_partner_account_number = card.account.number
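        # Funds flow from the card's linked account into the authenticated user's account.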
if card.account.balance >= transaction_amount:
            transaction_partner_account_name = f'{card.account.user.first_name} {card.account.user.last_name}'
serializer.save(transaction_type="DEBIT_CARD", account=account)
# Update Account Balances
card.account.balance -= transaction_amount
account.balance += transaction_amount
card.account.save()
account.save()
# Create Transfer
transfer = DebitCardTransaction.objects.create(
transaction=serializer.instance,
transaction_partner_account=card.account,
)
# notification
notification_message = f"You've successfully initiated a debit card transaction. {transaction_amount} was debited from your account and sent to {user_name}'s account."
process_notifications(
card.account.user, "transaction_notification", notification_message
)
# notification
notification_message = f"You've received {transaction_amount} from {transaction_partner_account_name} through a debit card transaction."
process_notifications(
self.request.user, "transaction_notification", notification_message
)
else:
# notification
notification_message = f"The debit card transaction from {user_name} could not be completed due to insufficient funds in their account."
process_notifications(
self.request.user, "transaction_notification", notification_message
)
# notification
notification_message = "Your debit card transaction couldn't be completed due to insufficient funds."
process_notifications(
card.account.user, "transaction_notification", notification_message
)
raise exceptions.PermissionDenied("Insufficient funds")
def create(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
self.perform_create(serializer)
serialized_data = serializer.data
user = Account.objects.get(number=self.transaction_partner_account_number).user
serialized_data[
"transaction_partner_name"
] = f"{user.first_name} {user.last_name}"
serialized_data["transaction_partner_account_number"] = int(
self.transaction_partner_account_number
)
headers = self.get_success_headers(serializer.data)
return Response(
serialized_data, status=status.HTTP_201_CREATED, headers=headers
)
class TransactionHistory(generics.ListAPIView):
queryset = Transaction.objects.all() | serializer_class = TransactionHistorySerializer | 4 | 2023-11-10 12:39:38+00:00 | 4k |
Mj23978/OpenServer | openserver/server/chat/completions.py | [
{
"identifier": "logger",
"path": "openserver/core/utils/utils.py",
"snippet": "def trim_string(string, count: int) -> str:\ndef run_with_time(func):"
},
{
"identifier": "extract_json_from_string",
"path": "openserver/core/utils/json.py",
"snippet": "def extract_json_from_string(text: str):\n json_str = parse_json_markdown(text, parser=parse_partial_json)\n if json_str is None:\n json_str = extract_json(text, parser=parse_partial_json)\n if isinstance(json_str, list) and len(json_str) > 0:\n json_str = json_str[0]\n return json_str"
},
{
"identifier": "base_messages_to_default",
"path": "openserver/core/utils/langchain.py",
"snippet": "def base_messages_to_default(messages: List[BaseMessage]):\n new_messages = []\n for message in messages:\n if message.type.lower() in [\"system\"]:\n new_messages.append(SystemMessage(content=message.content))\n elif message.type.lower() in [\"human\", \"user\"]:\n new_messages.append(HumanMessage(content=message.content))\n elif message.type.lower() in [\"assisstant\", \"ai\", \"chat\"]:\n new_messages.append(AIMessage(content=message.content))\n else:\n new_messages.append(message)\n return new_messages "
},
{
"identifier": "ChatConfig",
"path": "openserver/core/config/chat_config.py",
"snippet": "class ChatConfig(BaseConfig):\n\n def __init__(self, with_envs: bool = False):\n configs = init_config(\"configs/chat_config.yaml\", with_envs)\n self.configs = configs\n self.chat_providers: ConfigProvidersIn = self.parse(configs.get_configs().get(\n \"chat_providers\") or {})\n\n def get_chat_providers(self, model: str, functions: bool):\n providers = self.functions_available(functions)\n providers = self.chat_providers.find_model(providers, model)\n provider = self.chat_providers.choose_model(providers, model)\n return provider\n\n def functions_available(self, functions: bool):\n if functions is True:\n return list(filter(lambda x: x.args.get(\"functions\") is True, self.chat_providers.providers))\n return self.chat_providers.providers"
},
{
"identifier": "PromptConfig",
"path": "openserver/core/config/prompt_config.py",
"snippet": "class PromptConfig:\n\n def __init__(self):\n configs = init_config(\"configs/prompts_config.yaml\")\n self.configs = configs\n self.prompts: Dict[str, Any] = configs.get_configs().get(\n \"prompts\") # type: ignore\n\n def prompt_template(self, type=\"function_call\", **kwargs):\n prompt_template = \"\"\n if self.prompts.get(type) == None:\n raise ValueError(f\"There is no prompt template name : {type}\")\n if self.prompts[type].get(\"content\") == None:\n self.add_prompt_to_config()\n prompt_template = self.prompts[type][\"content\"]\n return prompt_template\n\n def add_prompt_to_config(self):\n ROOT_DIR: str = os.path.dirname(Path(__file__).parent.parent.parent)\n for prompt in self.prompts:\n prompt_path = self.prompts[prompt][\"file\"]\n prompt_file = ROOT_DIR + \"/\" + prompt_path\n\n if os.path.exists(prompt_file):\n with open(prompt_file, \"r\") as file:\n self.prompts[prompt][\"content\"] = file.read()\n\n def extract_text(self, text: str, **kwargs):\n pattern = r'<<(.*?)>>(.*?)<</\\1>>'\n matches = re.findall(pattern, text, re.DOTALL)\n messages = [BaseMessage(type=tag, content=content.format(\n **kwargs)) for tag, content in matches]\n return base_messages_to_default(messages)"
},
{
"identifier": "LLmInputInterface",
"path": "openserver/core/llm_models/base.py",
"snippet": "class LLmInputInterface:\n def __init__(self, model: str, api_key: str | None = None, stop: List[str] = [\"### Humen:\", \"### Instruction:\", \"### Assistant:\", \"\\nQuestion:\"], max_tokens=4196, repeat_penalty=0.2,\n responses: List[str] | None = None, top_k=30, top_p=0.95, streaming: bool = False, temperature=0.2, cache=True, verbose=True, max_retries=10, n_ctx: int = 2048, f16_kv=True, \n n_gpu_layers: int = 50, n_threads=4, metadata: Dict[str, Any] | None = None, callbacks: Callbacks | None = None, grammer: str | LlamaGrammar | None = None, \n grammer_path: str | Path | None = None, model_kwargs={}, base_url: str | None = None):\n self.api_key: str | None = api_key\n self.model_name: str = model\n self.model_kwargs: Dict[str, Any] = model_kwargs\n self.stop: List[str] = stop\n self.max_tokens: int = max_tokens\n self.repeat_penalty: float = repeat_penalty\n self.top_k: int = top_k\n self.top_p: float = top_p\n self.temperature: float = temperature\n self.cache: bool = cache\n self.verbose: bool = verbose\n self.max_retries: int = max_retries\n self.responses: List[str] | None = responses\n self.stream: bool = streaming\n self.n_ctx: int = n_ctx\n self.f16_kv: bool = f16_kv\n self.n_gpu_layers: int = n_gpu_layers\n self.n_threads: int = n_threads\n self.grammer: str | LlamaGrammar | None = grammer\n self.grammer_path: str | Path | None = grammer_path\n self.callbacks: Callbacks = callbacks\n self.metadata = metadata\n self.base_url = base_url"
},
{
"identifier": "LLMFactory",
"path": "openserver/core/llm_models/llm_model_factory.py",
"snippet": "class LLMFactory:\n\n @classmethod\n def get_model(cls, input: LLmInputInterface, provider_name: LLMType | str = 'free'):\n if isinstance(provider_name, str):\n provider_name = LLMType.get_type(provider_name.lower())\n\n if provider_name == LLMType.OPENAI:\n return OpenAIModel(input)\n\n elif provider_name == LLMType.COHERE:\n return CohereModel(input)\n\n elif provider_name == LLMType.LLAMACPP:\n return LlamaCppModel(input)\n\n elif provider_name == LLMType.FAKE:\n return FakeModel(input)\n\n elif provider_name == LLMType.AI21:\n return AI21Model(input)\n\n elif provider_name == LLMType.FIREWORKS:\n return FireworksModel(input)\n\n elif provider_name == LLMType.PALM:\n return GooglePalmModel(input)\n\n elif provider_name == LLMType.TOGETHER:\n return TogetherModel(input)\n\n else:\n return G4FModel(input)\n\n @classmethod\n def get_chat_model(cls, input: LLmInputInterface, provider_name: LLMType | str = 'free'):\n if isinstance(provider_name, str):\n provider_name = LLMType.get_type(provider_name.lower())\n\n if provider_name == LLMType.OPENAI:\n return ChatOpenAIModel(input)\n\n elif provider_name == LLMType.PALM:\n return ChatGooglePalmModel(input)\n\n elif provider_name == LLMType.COHERE:\n return ChatCohereModel(input)\n\n elif provider_name == LLMType.FAKE:\n return FakeChatModel(input)\n\n elif provider_name == LLMType.FIREWORKS:\n return ChatFireworksModel(input)\n\n elif provider_name == LLMType.TOGETHER:\n # else:\n return ChatTogetherModel(input)\n\n else:\n return ChatG4FModel(input)"
},
{
"identifier": "completion_price_calculator",
"path": "openserver/core/utils/cost.py",
"snippet": "def completion_price_calculator(cost_prompt: float, cost_completion: float, prompt_token: int, completion_token: int):\n cost_inp = cost_prompt / 1000000 * prompt_token\n cost_out = cost_completion / 1000000 * completion_token\n return round(cost_inp + cost_out, 7)"
},
{
"identifier": "app",
"path": "openserver/server/app.py",
"snippet": ""
},
{
"identifier": "llm_result_to_str",
"path": "openserver/server/utils.py",
"snippet": "def llm_result_to_str(response: LLMResult):\n final_result = \"\"\n\n for generation in response.generations:\n for item in generation:\n final_result += item.text\n return final_result"
},
{
"identifier": "num_tokens_from_string",
"path": "openserver/server/utils.py",
"snippet": "def num_tokens_from_string(string: str, encoding_name: str = \"cl100k_base\") -> int:\n \"\"\"Returns the number of tokens in a text string.\"\"\"\n encoding = tiktoken.get_encoding(encoding_name)\n num_tokens = len(encoding.encode(string))\n return num_tokens"
}
] | import json
import os
import random
import string
import time
from pathlib import Path
from typing import Any, Dict, List
from flask import Request, jsonify, request
from langchain.schema import BaseMessage
from openserver.core.utils import extract_json_from_string, base_messages_to_default, logger
from openserver.core.config import ChatConfig, PromptConfig
from openserver.core.llm_models.base import LLmInputInterface
from openserver.core.llm_models.llm_model_factory import LLMFactory
from openserver.core.utils.cost import completion_price_calculator
from openserver.server.app import app
from openserver.server.utils import llm_result_to_str, num_tokens_from_string | 3,038 |
class ChatCompletionsRequest:
def __init__(self, request: Request):
try:
self.model: str = request.get_json().get("model", "gpt-3.5-turbo")
self.stream: bool = request.get_json().get("stream", False)
self.api_key: str | None = request.get_json().get("api_key") or (request.authorization.token if request.authorization is not None else None)
self.messages: List[Dict[str, Any]
] = request.get_json().get("messages")
self.functions = request.get_json().get("functions")
self.n_gpu_layers: int = request.get_json().get("n_gpu_layers", 99)
self.temperature: float = request.get_json().get("temperature", 0.4)
self.max_tokens: int = request.get_json().get("max_tokens", 1000)
self.top_p: int = request.get_json().get("top_p", 1)
self.cache: bool = request.get_json().get("cache", False)
self.n_ctx: int = request.get_json().get("n_ctx", 8196)
except Exception as e:
return jsonify({'reason': "request data error", 'error': str(e)}), 500
@app.route("/chat/completions", methods=["POST"])
def chat_completions():
try:
request_data = ChatCompletionsRequest(request)
available_functions = False
if "functions" in request.get_json():
available_functions = True
configs = ChatConfig(with_envs=True)
provider = configs.get_chat_providers(
request_data.model, available_functions)
logger.info(provider)
chat_input = LLmInputInterface(
api_key=request_data.api_key or provider.args.get("api_key_name"),
model=provider.key or provider.name,
model_kwargs={
"chat_format": "mistral",
},
streaming=request_data.stream,
n_gpu_layers=request_data.n_gpu_layers,
temperature=request_data.temperature,
max_tokens=request_data.max_tokens,
top_p=request_data.top_p,
cache=request_data.cache,
n_ctx=request_data.n_ctx,
base_url=provider.args.get("base_url")
)
messages = [BaseMessage(
type=message["role"], content=message["content"]) for message in request_data.messages]
messages = base_messages_to_default(messages)
if available_functions is True:
configs = PromptConfig()
new_messages = configs.extract_text(configs.prompt_template(
), prompt=messages[-1].content, functions=request_data.functions)
messages.pop()
messages = messages + new_messages
ROOT_DIR: str = os.path.dirname(Path(__file__).parent.parent.parent)
chat_input.grammer_path = ROOT_DIR + "/docs/json.gbnf"
chat_input.f16_kv = True
chatProvider = LLMFactory.get_chat_model(
input=chat_input, provider_name=provider.provider)
response = chatProvider.compelete(
prompts=[messages])
response_str = llm_result_to_str(response)
completion_id = "".join(random.choices(
string.ascii_letters + string.digits, k=28))
completion_timestamp = int(time.time())
if not request_data.stream:
inp_token = num_tokens_from_string(
"".join([message.content for message in messages]))
out_token = num_tokens_from_string(response_str)
function_out = None
if available_functions is True:
function_out = extract_json_from_string(response_str)
res = {
"id": f"chatcmpl-{completion_id}",
"object": "chat.completion",
"created": completion_timestamp,
"model": provider.name,
"choices": [
{
"index": 0,
"message": {
"role": "assistant",
"content": response_str,
},
"finish_reason": "stop",
}
],
"usage": {
"prompt_tokens": inp_token,
"completion_tokens": out_token,
"total_tokens": inp_token + out_token,
|
class ChatCompletionsRequest:
def __init__(self, request: Request):
try:
self.model: str = request.get_json().get("model", "gpt-3.5-turbo")
self.stream: bool = request.get_json().get("stream", False)
self.api_key: str | None = request.get_json().get("api_key") or (request.authorization.token if request.authorization is not None else None)
self.messages: List[Dict[str, Any]
] = request.get_json().get("messages")
self.functions = request.get_json().get("functions")
self.n_gpu_layers: int = request.get_json().get("n_gpu_layers", 99)
self.temperature: float = request.get_json().get("temperature", 0.4)
self.max_tokens: int = request.get_json().get("max_tokens", 1000)
self.top_p: int = request.get_json().get("top_p", 1)
self.cache: bool = request.get_json().get("cache", False)
self.n_ctx: int = request.get_json().get("n_ctx", 8196)
except Exception as e:
return jsonify({'reason': "request data error", 'error': str(e)}), 500
@app.route("/chat/completions", methods=["POST"])
def chat_completions():
try:
request_data = ChatCompletionsRequest(request)
available_functions = False
if "functions" in request.get_json():
available_functions = True
configs = ChatConfig(with_envs=True)
provider = configs.get_chat_providers(
request_data.model, available_functions)
logger.info(provider)
chat_input = LLmInputInterface(
api_key=request_data.api_key or provider.args.get("api_key_name"),
model=provider.key or provider.name,
model_kwargs={
"chat_format": "mistral",
},
streaming=request_data.stream,
n_gpu_layers=request_data.n_gpu_layers,
temperature=request_data.temperature,
max_tokens=request_data.max_tokens,
top_p=request_data.top_p,
cache=request_data.cache,
n_ctx=request_data.n_ctx,
base_url=provider.args.get("base_url")
)
messages = [BaseMessage(
type=message["role"], content=message["content"]) for message in request_data.messages]
messages = base_messages_to_default(messages)
if available_functions is True:
configs = PromptConfig()
new_messages = configs.extract_text(configs.prompt_template(
), prompt=messages[-1].content, functions=request_data.functions)
messages.pop()
messages = messages + new_messages
ROOT_DIR: str = os.path.dirname(Path(__file__).parent.parent.parent)
chat_input.grammer_path = ROOT_DIR + "/docs/json.gbnf"
chat_input.f16_kv = True
chatProvider = LLMFactory.get_chat_model(
input=chat_input, provider_name=provider.provider)
response = chatProvider.compelete(
prompts=[messages])
response_str = llm_result_to_str(response)
completion_id = "".join(random.choices(
string.ascii_letters + string.digits, k=28))
completion_timestamp = int(time.time())
if not request_data.stream:
inp_token = num_tokens_from_string(
"".join([message.content for message in messages]))
out_token = num_tokens_from_string(response_str)
function_out = None
if available_functions is True:
function_out = extract_json_from_string(response_str)
res = {
"id": f"chatcmpl-{completion_id}",
"object": "chat.completion",
"created": completion_timestamp,
"model": provider.name,
"choices": [
{
"index": 0,
"message": {
"role": "assistant",
"content": response_str,
},
"finish_reason": "stop",
}
],
"usage": {
"prompt_tokens": inp_token,
"completion_tokens": out_token,
"total_tokens": inp_token + out_token, | "cost": "{:.6f}".format(completion_price_calculator(provider.cost.input, provider.cost.output, inp_token, out_token)) | 7 | 2023-11-11 00:32:31+00:00 | 4k |
AI-sandbox/HyperFast | hyperfast/hyperfast.py | [
{
"identifier": "config",
"path": "hyperfast/config.py",
"snippet": ""
},
{
"identifier": "seed_everything",
"path": "hyperfast/utils.py",
"snippet": "def seed_everything(seed: int):\n random.seed(seed)\n os.environ[\"PYTHONHASHSEED\"] = str(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n torch.set_num_threads(1)"
},
{
"identifier": "transform_data_for_main_network",
"path": "hyperfast/utils.py",
"snippet": "def transform_data_for_main_network(X, cfg, rf, pca):\n with torch.no_grad():\n X = rf(X)\n if cfg.torch_pca:\n X = pca.transform(X)\n else:\n X = torch.from_numpy(pca.transform(X.cpu().numpy())).to(cfg.device)\n X = torch.clamp(X, -cfg.clip_data_value, cfg.clip_data_value)\n return X"
},
{
"identifier": "forward_main_network",
"path": "hyperfast/utils.py",
"snippet": "def forward_main_network(x, main_network):\n for n, layer in enumerate(main_network):\n if n % 2 == 0:\n residual_connection = x\n matrix, bias = layer\n x = torch.mm(x, matrix) + bias\n if n % 2 == 1 and n != len(main_network) - 1:\n x = x + residual_connection\n\n if n != len(main_network) - 1:\n x = F.relu(x)\n if n == len(main_network) - 2:\n intermediate_activations = x\n return x, intermediate_activations"
},
{
"identifier": "nn_bias_logits",
"path": "hyperfast/utils.py",
"snippet": "def nn_bias_logits(\n test_logits, test_samples, train_samples, train_labels, bias_param, n_classes\n):\n with torch.no_grad():\n nn = NN(train_samples, train_labels)\n preds = nn.predict(test_samples)\n preds_onehot = F.one_hot(preds, n_classes)\n test_logits[preds_onehot.bool()] += bias_param\n return test_logits"
},
{
"identifier": "fine_tune_main_network",
"path": "hyperfast/utils.py",
"snippet": "def fine_tune_main_network(\n cfg,\n X,\n y,\n n_classes,\n rf,\n pca,\n main_network_layers,\n nn_bias,\n device,\n optimize_steps,\n batch_size,\n):\n main_model = MainNetworkTrainable(\n cfg, n_classes, rf, pca, main_network_layers, nn_bias\n ).to(device)\n dataset = TensorDataset(X, y)\n dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)\n criterion = nn.CrossEntropyLoss()\n optimizer = optim.AdamW(main_model.parameters(), lr=cfg.lr)\n scheduler = optim.lr_scheduler.ReduceLROnPlateau(\n optimizer, mode=\"min\", factor=0.1, patience=10, verbose=True\n )\n\n for step in range(optimize_steps):\n for inputs, targets in dataloader:\n optimizer.zero_grad()\n outputs = main_model(inputs, targets)\n loss = criterion(outputs, targets)\n loss.backward()\n optimizer.step()\n # print(f\"Step: [{step+1}/{optimize_steps}], Loss: {loss.item()}\")\n\n if scheduler is not None:\n if isinstance(scheduler, optim.lr_scheduler.ReduceLROnPlateau):\n scheduler.step(loss.item())\n else:\n scheduler.step()\n return main_model.get_main_network_parts()"
},
{
"identifier": "HyperFast",
"path": "hyperfast/model.py",
"snippet": "class HyperFast(nn.Module):\n def __init__(self, cfg):\n super().__init__()\n self.n_dims = cfg.n_dims\n self.max_categories = cfg.max_categories\n self.rf_size = cfg.rf_size\n self.torch_pca = cfg.torch_pca\n self.clip_data_value = cfg.clip_data_value\n self.hn_n_layers = cfg.hn_n_layers\n self.hn_hidden_size = cfg.hn_hidden_size\n self.main_n_layers = cfg.main_n_layers\n\n middle_layers = []\n for n in range(self.hn_n_layers - 2):\n middle_layers.append(nn.Linear(self.hn_hidden_size, self.hn_hidden_size))\n middle_layers.append(nn.ReLU())\n self.num_input_features_hn = self.n_dims + self.max_categories\n\n self.hypernetworks = nn.ModuleList()\n self.hn_emb_to_weights = nn.ModuleList()\n\n for n in range(self.main_n_layers - 1):\n if n > 0:\n self.num_input_features_hn = self.n_dims * 2 + self.max_categories\n num_input_features_hn = self.num_input_features_hn + self.n_dims * 2\n\n hn_layers = []\n hn_layers.append(nn.Linear(num_input_features_hn, self.hn_hidden_size))\n hn_layers.append(nn.ReLU())\n hn_layers = hn_layers + middle_layers\n\n self.hypernetworks.append(nn.Sequential(*hn_layers))\n self.output_size_hn = (self.n_dims + 1) * self.n_dims\n self.hn_emb_to_weights.append(\n nn.Linear(self.hn_hidden_size, self.output_size_hn)\n )\n\n hn_layers = []\n last_hn_output_size = self.n_dims + 1\n self.num_input_features_hn += self.n_dims * 2\n\n hn_layers.append(nn.Linear(self.num_input_features_hn, self.hn_hidden_size))\n hn_layers.append(nn.ReLU())\n hn_layers = hn_layers + middle_layers\n hn_layers.append(nn.Linear(self.hn_hidden_size, last_hn_output_size))\n self.hypernetworks.append(nn.Sequential(*hn_layers))\n self.nn_bias = nn.Parameter(torch.ones(2))\n\n def forward(self, X, y, n_classes):\n X = X.flatten(start_dim=1)\n rf_linear = nn.Linear(X.shape[1], self.rf_size, bias=False)\n nn.init.kaiming_normal_(rf_linear.weight, mode=\"fan_out\", nonlinearity=\"relu\")\n rf_linear.weight.requires_grad = False\n rf = nn.Sequential(rf_linear, nn.ReLU()).to(X.device)\n with torch.no_grad():\n X = rf(X)\n if self.torch_pca:\n self.pca = TorchPCA(n_components=self.n_dims)\n else:\n self.pca = PCA(n_components=self.n_dims)\n if self.torch_pca:\n X = self.pca.fit_transform(X)\n else:\n X = torch.from_numpy(self.pca.fit_transform(X.cpu().numpy())).to(X.device)\n X = torch.clamp(X, -self.clip_data_value, self.clip_data_value)\n\n out = X\n pca_global_mean = torch.mean(out, axis=0)\n pca_perclass_mean = []\n for lab in range(n_classes):\n if torch.sum((y == lab)) > 0:\n class_mean = torch.mean(out[y == lab], dim=0, keepdim=True)\n else:\n class_mean = torch.mean(out, dim=0, keepdim=True)\n pca_perclass_mean.append(class_mean)\n pca_perclass_mean = torch.cat(pca_perclass_mean)\n\n pca_concat = []\n for ii, lab in enumerate(y):\n if pca_perclass_mean.ndim == 1:\n pca_perclass_mean = pca_perclass_mean.unsqueeze(0)\n if out.ndim == 1:\n out = out.unsqueeze(0)\n\n lab_index = lab.item() if torch.is_tensor(lab) else lab\n lab_index = min(lab_index, pca_perclass_mean.size(0) - 1)\n\n row = torch.cat((out[ii], pca_global_mean, pca_perclass_mean[lab_index]))\n pca_concat.append(row)\n pca_output = torch.vstack(pca_concat)\n y_onehot = F.one_hot(y, self.max_categories)\n\n main_network = []\n for n in range(self.main_n_layers - 1):\n if n > 0:\n data = torch.cat((out, pca_output, y_onehot), dim=1)\n else:\n data = torch.cat((pca_output, y_onehot), dim=1)\n if n % 2 == 0:\n residual_connection = out\n\n weights = get_main_weights(\n data, self.hypernetworks[n], self.hn_emb_to_weights[n]\n )\n 
out, main_linear_layer = forward_linear_layer(out, weights, self.n_dims)\n if n % 2 == 0:\n out = F.relu(out)\n else:\n out = out + residual_connection\n out = F.relu(out)\n main_network.append(main_linear_layer)\n data = torch.cat((out, pca_output, y_onehot), dim=1)\n weights_per_sample = get_main_weights(data, self.hypernetworks[-1])\n\n weights = []\n last_input_mean = []\n for lab in range(n_classes):\n if torch.sum((y == lab)) > 0:\n w = torch.mean(weights_per_sample[y == lab], dim=0, keepdim=True)\n input_mean = torch.mean(out[y == lab], dim=0, keepdim=True)\n else:\n w = torch.mean(weights_per_sample, dim=0, keepdim=True)\n input_mean = torch.mean(out, dim=0, keepdim=True)\n weights.append(w)\n last_input_mean.append(input_mean)\n weights = torch.cat(weights)\n last_input_mean = torch.cat(last_input_mean)\n weights[:, :-1] = weights[:, :-1] + last_input_mean\n weights = weights.T\n out, last_linear_layer = forward_linear_layer(out, weights, n_classes)\n main_network.append(last_linear_layer)\n\n return rf, self.pca, main_network"
}
] | import os
import math
import torch
import requests
import numpy as np
import pandas as pd
import torch.nn.functional as F
from torch import Tensor
from types import SimpleNamespace
from .config import config
from sklearn.base import BaseEstimator
from sklearn.utils.validation import check_X_y, check_array, check_is_fitted
from sklearn.impute import SimpleImputer
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from .utils import (
seed_everything,
transform_data_for_main_network,
forward_main_network,
nn_bias_logits,
fine_tune_main_network,
)
from .model import HyperFast | 2,694 |
class HyperFastClassifier(BaseEstimator):
"""
A scikit-learn-like interface for the HyperFast model.
Attributes:
device (str): Device to run the model on.
n_ensemble (int): Number of ensemble models to use.
batch_size (int): Size of the batch for weight prediction and ensembling.
nn_bias (bool): Whether to use nearest neighbor bias.
optimization (str): Strategy for optimization, can be None, 'optimize', or 'ensemble_optimize'.
optimize_steps (int): Number of optimization steps.
torch_pca (bool): Whether to use PyTorch-based PCA optimized for GPU (fast) or scikit-learn PCA (slower).
seed (int): Random seed for reproducibility.
"""
def __init__(
self,
device="cuda:0",
n_ensemble=16,
batch_size=2048,
nn_bias=False,
optimization="ensemble_optimize",
optimize_steps=64,
torch_pca=True,
seed=3,
):
self.device = device
self.n_ensemble = n_ensemble
self.batch_size = batch_size
self.nn_bias = nn_bias
self.optimization = optimization
self.optimize_steps = optimize_steps
self.torch_pca = torch_pca
self.seed = seed
seed_everything(self.seed)
|
class HyperFastClassifier(BaseEstimator):
"""
A scikit-learn-like interface for the HyperFast model.
Attributes:
device (str): Device to run the model on.
n_ensemble (int): Number of ensemble models to use.
batch_size (int): Size of the batch for weight prediction and ensembling.
nn_bias (bool): Whether to use nearest neighbor bias.
optimization (str): Strategy for optimization, can be None, 'optimize', or 'ensemble_optimize'.
optimize_steps (int): Number of optimization steps.
torch_pca (bool): Whether to use PyTorch-based PCA optimized for GPU (fast) or scikit-learn PCA (slower).
seed (int): Random seed for reproducibility.
"""
def __init__(
self,
device="cuda:0",
n_ensemble=16,
batch_size=2048,
nn_bias=False,
optimization="ensemble_optimize",
optimize_steps=64,
torch_pca=True,
seed=3,
):
self.device = device
self.n_ensemble = n_ensemble
self.batch_size = batch_size
self.nn_bias = nn_bias
self.optimization = optimization
self.optimize_steps = optimize_steps
self.torch_pca = torch_pca
self.seed = seed
seed_everything(self.seed) | self._cfg = self._load_config(config, self.device, self.torch_pca, self.nn_bias) | 0 | 2023-11-14 05:56:47+00:00 | 4k |
TCLResearchEurope/torch-dag | node_api_conversion/convert_cell_to_dag_module.py | [
{
"identifier": "from_nd_converter",
"path": "node_api_conversion/from_nd_converter.py",
"snippet": "def adjust_padding(padding, kernel_size):\ndef convert_node(node: nd.nodes, inst: nd.nodes.NodeInstance = None) -> torch.nn.Module:\ndef _(node: nd.cells.Cell, inst: nd.nodes.NodeInstance = None) -> torch.nn.Module:\ndef _(node: nd.ops.Conv2D, inst: nd.nodes.NodeInstance = None):\ndef _(node: nd.ops.DepthwiseConv, inst: nd.nodes.NodeInstance = None):\ndef _(node: nd.ops.Conv2DTranspose, inst: nd.nodes.NodeInstance = None):\ndef _(node: nd.ops.Dense, inst: nd.nodes.NodeInstance = None):\ndef _(node: nd.ops.BatchNorm, inst: nd.nodes.NodeInstance):\ndef _(node: nd.ops.EfficientAttention, inst: nd.nodes.NodeInstance):\ndef _(node: nd.ops.LayerNormalization, inst: nd.nodes.NodeInstance):\ndef _(node: nd.ops.Activation, inst: nd.nodes.NodeInstance = None):\ndef _(node: nd.ops.ZeroPadding2D, inst: nd.nodes.NodeInstance = None):\ndef _(node: nd.ops.MaxPool2D, inst: nd.nodes.NodeInstance = None):\ndef _(node: nd.ops.GlobalMeanPool2D, inst: nd.nodes.NodeInstance = None):\ndef _(node: nd.ops.Flatten, inst: nd.nodes.NodeInstance = None):\ndef _(node: nd.ops.Sum, inst: nd.nodes.NodeInstance = None):\ndef _(node: nd.ops.Sub, inst: nd.nodes.NodeInstance = None):\ndef _(node: nd.ops.Mul, inst: nd.nodes.NodeInstance = None):\ndef _(node: nd.ops.ChannelAffine, inst: nd.nodes.NodeInstance = None):\ndef _(node: LambdaOpNode, inst: nd.nodes.NodeInstance = None):\ndef _(node: nd.ops.MeanPool2D, inst: nd.nodes.NodeInstance = None):\ndef _(node: nd.ops.TokenizeImage, inst: nd.nodes.NodeInstance = None):\ndef _(node: nd.ops.DetokenizeImage, inst: nd.nodes.NodeInstance = None):\ndef _(node: nd.ops.MatMul, inst: nd.nodes.NodeInstance = None):\ndef _(node: nd.ops.SubpixelDownSampling, inst: nd.nodes.NodeInstance = None):\ndef _(node: nd.ops.Concat, inst: nd.nodes.NodeInstance = None):\ndef _(node: nd.ops.TfImageResize, inst: nd.nodes.NodeInstance = None):\ndef _(node: nd.ops.Reshape, inst: nd.nodes.NodeInstance = None):\ndef _(node: nd.ops.Dropout, inst: nd.nodes.NodeInstance = None):\ndef _(node: nd.ops.TensorMerger, inst: nd.nodes.NodeInstance = None):\ndef _(node: nd.ops.TensorExtractor, inst: nd.nodes.NodeInstance = None):\ndef _(node: nd.ops.BilinearUpSampling, inst: nd.nodes.NodeInstance = None):\ndef _(node: nd.ops.Reduce, inst: nd.nodes.NodeInstance = None):\ndef _(node: nd.ops.Split, inst: nd.nodes.NodeInstance = None):\ndef _(node: nd.ops.SubpixelDownSampling, inst: nd.nodes.NodeInstance = None):\ndef _(node: nd.ops.SubpixelUpSampling, inst: nd.nodes.NodeInstance = None):\ndef get_torch_tensor(tensor: nd.backend.VARIABLE_AND_TENSOR_TYPE):\ndef find_instance(\n icn: nd.cells.InnerCellNode,\n cell_instance: nd.cells.CellInstance,\n) -> nd.nodes.NodeInstance:\ndef build_from_cell(\n cell: nd.cells.Cell,\n cell_instance: nd.cells.CellInstance,\n) -> dag_module.DagModule:\ndef convert_cell_to_torch_dag_module(\n cell: nd.cells.Cell,\n input_shape_without_batch: Tuple[int, ...],\n batch_size_for_verification: int = 4,\n) -> Tuple[dag_module.DagModule, float]:"
},
{
"identifier": "DagVisualizer",
"path": "torch_dag/visualization/visualize_dag.py",
"snippet": "class DagVisualizer:\n\n def __init__(self, dag: DagModule):\n self.dag = dag\n self.dag.cache_forward_dict = True\n self.flops_list = None\n\n @staticmethod\n def get_name(namescope_index: str, index: str):\n if namescope_index is None:\n return f\"{index}\"\n else:\n return f\"{namescope_index}_{index}\"\n\n def visualize(\n self,\n max_depth: int = 0,\n input_shape: Tuple[int, ...] = None,\n saving_path: Optional[str] = None,\n saving_format: str = \"pdf\",\n ):\n if input_shape is not None:\n self.dag.eval()\n _ = self.dag(torch.ones(size=input_shape))\n if max_depth == 0:\n self.flops_list = build_full_flops_list(\n dag=self.dag, input_shape_without_batch=input_shape[1:], normalize=True)\n\n graph, input_node_names, output_node_names = self._visualize(\n dag=self.dag,\n max_depth=max_depth,\n )\n if saving_path is not None:\n graph.render(saving_path, format=saving_format)\n\n return graph\n\n # def get_weights_stats(self, node: nd.nodes.Node):\n # if isinstance(node, (nd.ops.Conv2D, nd.ops.DepthwiseConv)):\n # return self.compute_tensor_stats(node.filters)\n # elif isinstance(node, nd.ops.Dense):\n # return self.compute_tensor_stats(node.kernel)\n # else:\n # return None\n\n def get_module_meta(self, module: nn.Module) -> Dict:\n meta = {}\n if isinstance(module, nn.Conv2d):\n meta['kernel_size'] = module.kernel_size\n meta['in_channels'] = module.in_channels\n meta['out_channels'] = module.out_channels\n meta['groups'] = module.groups\n elif isinstance(module, smodules.ACTIVATION_MODULES):\n meta['activation_function'] = module.__class__.__name__\n # weights_stats = self.get_weights_stats(node)\n # if weights_stats is not None:\n # mean, std, maximum, minimum = weights_stats\n # meta['weights_mean_std_max_min'] = f'{mean:.3f}, {std:.3f}, {maximum:.3f}, {minimum:.3f}'\n\n return meta\n\n def add_nested_dag_as_subgraph(\n self,\n g: graphviz.Digraph,\n name: str,\n dag: DagModule,\n depth: int,\n max_depth: int,\n ) -> Tuple[graphviz.Digraph, List[str], List[str]]:\n with g.subgraph(name=f'cluster_{dag.name}') as s:\n fillcolor = self.get_depth_fill_color(depth)\n s.attr(\n label=f'{dag.name}',\n style='filled',\n fillcolor=fillcolor,\n )\n return self._visualize(\n dag=dag,\n graph=s,\n namescope_index=name,\n max_depth=max_depth,\n depth=depth,\n )\n\n def get_depth_fill_color(self, depth: int):\n if depth == 1:\n return DEPTH_1_FILL_COLOR\n elif depth == 2:\n return DEPTH_2_FILL_COLOR\n\n def compute_tensor_stats(self, tensor: torch.Tensor):\n mean = tensor.mean()\n std = torch.std(tensor)\n maximum = tensor.max()\n minimum = tensor.min()\n return mean, std, maximum, minimum\n\n def _visualize(\n self,\n dag: DagModule,\n graph: graphviz.Digraph = None,\n namescope_index: str = None,\n max_depth: int = 0,\n depth: int = 0,\n ) -> Tuple[graphviz.Digraph, List[str], List[str]]:\n if graph is None:\n g = graphviz.Digraph('model')\n else:\n g = graph\n g.node_attr.update(style='filled', shape='box')\n go_deeper = True if max_depth > 0 else False\n names = {}\n input_vertex_names = []\n output_vertex_names = []\n for k, vertex in enumerate(dag.vertices):\n name = self.get_name(namescope_index, str(k))\n names[k] = name\n if isinstance(vertex, InputVertex):\n label = f'input_{k}'\n g.node(\n name,\n label=label,\n color=NODE_SHAPE_COLOR,\n fillcolor=INPUT_COLOR,\n shape=INPUT_SHAPE,\n )\n input_vertex_names.append(name)\n\n else:\n predecessors_indices = [dag.vertices.index(pd) for pd in vertex.predecessors]\n if isinstance(vertex.module, DagModule) and go_deeper:\n sgraph, 
inputs, outputs = self.add_nested_dag_as_subgraph(\n g=g,\n name=name,\n dag=vertex.module,\n depth=depth + 1,\n max_depth=max_depth - 1\n )\n\n for l, pd in enumerate(predecessors_indices):\n edge = names[pd], inputs[l]\n g.edge(edge[0], edge[1])\n names[k] = outputs[0]\n if vertex == dag.output_vertex:\n output_vertex_names = [name]\n\n else:\n module = vertex.module\n fillcolor = get_vertex_color(module)\n color = get_vertex_color(module)\n\n label = f'{vertex.name}'\n label += f'_idx_{dag.vertices.index(vertex)}'\n if max_depth == 0 and depth == 0 and self.flops_list is not None:\n flops = self.flops_list[self.dag.inner_vertices.index(vertex)]\n label += f' \\n kmapp: {flops}'\n if vertex.orbit is not None:\n label += f' \\n orbit: {vertex.orbit}'\n\n if len(self.get_module_meta(module).keys()) > 0:\n label += f' \\n ----------'\n\n # add meta node info\n for key, value in self.get_module_meta(module).items():\n label += f' \\n {key}: {value}'\n\n # add output shape visualization\n if dag.forward_dict is not None:\n if isinstance(vertex.module, smodules.ArgModule):\n pass\n else:\n label += f' \\n ----------'\n tensors = dag.forward_dict[vertex]\n if not isinstance(dag.forward_dict[vertex], List):\n tensors = [tensors]\n shapes = []\n for el in tensors:\n if isinstance(el, torch.Tensor):\n shapes.append(tuple([int(e) for e in el.shape]))\n else:\n shapes.append(tuple())\n\n for tensor, shape in zip(tensors, shapes):\n label += f' \\n {shape}'\n if isinstance(tensor, torch.Tensor) and tensor.dtype == torch.float:\n mean, std, maximum, minimum = self.compute_tensor_stats(tensor)\n label += f' \\n mean: {mean:.3f}, std: {std:.3f}, max: {maximum:.3f}, min: {minimum:.3f}'\n\n if vertex == dag.output_vertex:\n shape = OUTPUT_SHAPE\n fillcolor = f'{OUTPUT_COLOR}:{fillcolor}'\n output_vertex_names = [name]\n else:\n shape = NODE_SHAPE\n\n g.node(\n name,\n label=label,\n shape=shape,\n color=NODE_SHAPE_COLOR,\n fillcolor=fillcolor,\n )\n for pd in predecessors_indices:\n edge = names[pd], names[k]\n g.edge(edge[0], edge[1])\n\n return g, input_vertex_names, output_vertex_names"
},
{
"identifier": "log_cell_characteristics",
"path": "node_api_conversion/utils.py",
"snippet": "def log_cell_characteristics(\n cell: nd.cells.Cell,\n input_shape_without_batch: Tuple[int, ...],\n):\n if len(input_shape_without_batch) < 2:\n logger.warning(f'One cannot compute `kmapp` for cell: {cell.name}, since the input_shape_without_batch '\n f'has length less than 2.')\n return\n cell.predict()\n static_kmapp = compute_static_kmapp(cell, input_shape_without_batch)\n static_multiadds = compute_static_kmapp(cell, input_shape_without_batch, normalized=False)\n x = tf.ones(shape=(1,) + input_shape_without_batch)\n result = cell(x)\n logger.info(f'static_kmapp: {static_kmapp}')\n logger.info(f'static_multiadds (M): {static_multiadds / 1e6}')\n num_params = get_num_params(cell) / 1e6\n logger.info(f'number params (M): {num_params}')\n logger.info(f'number of output tensors: {len(result.output_tensors)}')\n for k, tensor in enumerate(result.output_tensors):\n logger.info(f'output shape of output tensor {k}: {tensor.shape}')"
}
] | import argparse
import logging
import node_api as nd
import modelhub_client as mh
from node_api_conversion import from_nd_converter
from torch_dag.visualization.visualize_dag import DagVisualizer
from node_api_conversion.utils import log_cell_characteristics | 3,475 | #
# Copyright © TCL Research Europe. All rights reserved.
#
logger = logging.getLogger(__name__)
def find_icns_to_remove(cell: nd.cells.Cell):
result = []
for icn in cell.inner_cell_nodes:
if isinstance(icn.node, nd.ops.Activation):
if icn.node.activation_name in (None, 'none', 'identity'):
result.append(icn)
if isinstance(icn.node, nd.ops.Sum) and len(icn.predecessors) == 1:
result.append(icn)
if isinstance(icn.node, nd.ops.Concat) and len(icn.predecessors) == 1:
result.append(icn)
if isinstance(icn.node, nd.ops.Mul) and len(icn.predecessors) == 1:
result.append(icn)
if isinstance(icn.node, nd.cells.Cell):
result.extend(find_icns_to_remove(icn.node))
return result
def clean_up(cell: nd.cells.Cell):
to_remove = find_icns_to_remove(cell)
for icn in to_remove:
icn.cell.remove_node(icn)
logger.info(f'Removing {icn} with class {icn.node.__class__.__name__}')
def parse_args():
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('--model_path')
arg_parser.add_argument('--hub_name')
arg_parser.add_argument('--saving_path')
arg_parser.add_argument(
'--input_shape',
type=int,
nargs='+',
default=(1, 320, 320, 3),
)
args = arg_parser.parse_args()
return args
def main():
args = parse_args()
if args.hub_name:
model = mh.api.Model.get(args.hub_name)
cell, _ = model.load_cell()
else:
cell, _ = nd.io.load_cell(args.model_path)
input_shape = tuple(args.input_shape)
cell = cell.flatten()
cell.predict()
clean_up(cell)
nd.cells_utils.fuse_padding_nodes(cell, input_size=input_shape)
| #
# Copyright © TCL Research Europe. All rights reserved.
#
logger = logging.getLogger(__name__)
def find_icns_to_remove(cell: nd.cells.Cell):
result = []
for icn in cell.inner_cell_nodes:
if isinstance(icn.node, nd.ops.Activation):
if icn.node.activation_name in (None, 'none', 'identity'):
result.append(icn)
if isinstance(icn.node, nd.ops.Sum) and len(icn.predecessors) == 1:
result.append(icn)
if isinstance(icn.node, nd.ops.Concat) and len(icn.predecessors) == 1:
result.append(icn)
if isinstance(icn.node, nd.ops.Mul) and len(icn.predecessors) == 1:
result.append(icn)
if isinstance(icn.node, nd.cells.Cell):
result.extend(find_icns_to_remove(icn.node))
return result
def clean_up(cell: nd.cells.Cell):
to_remove = find_icns_to_remove(cell)
for icn in to_remove:
icn.cell.remove_node(icn)
logger.info(f'Removing {icn} with class {icn.node.__class__.__name__}')
def parse_args():
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('--model_path')
arg_parser.add_argument('--hub_name')
arg_parser.add_argument('--saving_path')
arg_parser.add_argument(
'--input_shape',
type=int,
nargs='+',
default=(1, 320, 320, 3),
)
args = arg_parser.parse_args()
return args
def main():
args = parse_args()
if args.hub_name:
model = mh.api.Model.get(args.hub_name)
cell, _ = model.load_cell()
else:
cell, _ = nd.io.load_cell(args.model_path)
input_shape = tuple(args.input_shape)
cell = cell.flatten()
cell.predict()
clean_up(cell)
nd.cells_utils.fuse_padding_nodes(cell, input_size=input_shape) | log_cell_characteristics(cell, input_shape[1:]) | 2 | 2023-11-17 15:36:44+00:00 | 4k |
timlrx/simple-ai-agents | simple_ai_agents/cli.py | [
{
"identifier": "ChatAgent",
"path": "simple_ai_agents/chat_agent.py",
"snippet": "class ChatAgent(BaseModel):\n \"\"\"\n A chatbot class that provides additional functionality\n for creating and managing chat sessions.\n\n Args:\n character (str, optional): The name of the chatbot for console display.\n Defaults to None.\n system (str, optional): System prompt for chatbot message.\n If None is provided it defaults to \"You are a helpful assistant.\"\n id (Union[str, UUID], optional): Initial session ID of the chatbot.\n Defaults to uuid4().\n prime (bool, optional): Whether to prime the chatbot with initial messages.\n Defaults to True.\n default_session (bool, optional): Whether to create a default chat session.\n Defaults to True.\n console (bool, optional): Whether to enable interactive console mode.\n Defaults to False.\n **kwargs: Additional options to pass to the `new_session` method.\n To customize GPT options, pass a `llm_options` dictionary.\n\n Attributes:\n sessions (dict): A dictionary of chat sessions,\n where the keys are the session IDs and the values\n are the corresponding `ChatSession` objects.\n default_session (ChatSession): The default chat session.\n character (str): The name of the chatbot for console display.\n ai_text_color (str): Print color of the chatbot's messages.\n\n Methods:\n new_session: Creates a new chat session.\n interactive_console: Starts an interactive console for the chatbot.\n \"\"\"\n\n default_session: Optional[ChatLLMSession]\n sessions: Dict[Union[str, UUID], ChatLLMSession] = {}\n character: Optional[str] = \"Chat Agent\"\n ai_text_color: str = \"bright_magenta\"\n\n def __init__(\n self,\n character: Optional[str] = None,\n system: Optional[str] = None,\n id: Union[str, UUID] = uuid4(),\n prime: bool = True,\n default_session: bool = True,\n console: bool = False,\n ai_text_color: Optional[str] = None,\n display_names: bool = True,\n **kwargs,\n ):\n \"\"\"\n Initialize a chatbot agent.\n\n Args:\n character (str, optional): The name of the chatbot for console display.\n Defaults to None.\n system (str, optional): System prompt for chatbot message.\n If None is provided it defaults to \"You are a helpful assistant.\"\n id (Union[str, UUID], optional): Initial session ID of the chatbot.\n Defaults to uuid4().\n prime (bool, optional): Whether to prime the chatbot with initial messages.\n Defaults to True.\n default_session (bool, optional): Whether to create a default chat session.\n Defaults to True.\n console (bool, optional): Whether to enable interactive console mode.\n Defaults to False.\n ai_text_color (str, optional): Print color of the chatbot's messages.\n display_names (bool, optional):\n Whether to display character names in the console.\n **kwargs: Additional options to pass to the `new_session` method.\n To customize GPT options, pass a `llm_options` dictionary.\n \"\"\"\n system_format = self.build_system(system)\n sessions = {}\n new_default_session = None\n super().__init__(default_session=new_default_session, sessions=sessions)\n\n if character:\n self.character = character\n if ai_text_color:\n self.ai_text_color = ai_text_color\n if default_session:\n new_default_session = self.new_session(\n set_default=True, system=system_format, id=id, **kwargs\n )\n if console:\n if not new_default_session:\n raise ValueError(\n \"A default session needs to exists to run in interactive mode.\"\n )\n new_default_session.title = character\n # print(kwargs)\n self.interactive_console(\n character=self.character,\n prime=prime,\n display_names=display_names,\n prompt=kwargs[\"prompt\"] if \"prompt\" in kwargs 
else None,\n )\n\n def new_session(\n self,\n set_default: bool = True,\n **kwargs,\n ) -> ChatLLMSession:\n \"\"\"\n Create a new chat session.\n\n Args:\n set_default (bool, optional): Whether to set the new session as the default.\n Defaults to True.\n **kwargs: Additional options to pass to the `ChatLLMSession` constructor.\n \"\"\"\n sess = ChatLLMSession(**kwargs)\n self.sessions[sess.id] = sess\n if set_default:\n self.default_session = sess\n return sess\n\n def get_session(self, id: Optional[Union[str, UUID]] = None) -> ChatLLMSession:\n \"\"\"\n Get a chat session by ID. If no ID is provided, return the default session.\n \"\"\"\n try:\n sess = self.sessions[id] if id else self.default_session\n except KeyError:\n raise KeyError(\"No session by that key exists.\")\n if not sess:\n raise ValueError(\"No default session exists.\")\n return sess\n\n def list_sessions(self) -> list[Union[str, UUID]]:\n \"\"\"\n List all session IDs.\n \"\"\"\n return list(self.sessions.keys())\n\n def reset_session(self, id: Optional[Union[str, UUID]] = None) -> None:\n \"\"\"\n Reset a chat session by ID.\n \"\"\"\n sess = self.get_session(id)\n sess.messages = []\n\n def delete_session(self, id: Optional[Union[str, UUID]] = None) -> None:\n \"\"\"\n Delete a chat session by ID.\n \"\"\"\n sess = self.get_session(id)\n if self.default_session:\n if sess.id == self.default_session.id:\n self.default_session = None\n del self.sessions[sess.id]\n del sess\n\n @contextmanager\n def session(self, **kwargs):\n sess = self.new_session(set_default=True, **kwargs)\n try:\n yield sess\n finally:\n self.delete_session(sess.id)\n\n def __call__(\n self,\n prompt: Union[str, Any],\n id: Optional[Union[str, UUID]] = None,\n system: Optional[str] = None,\n save_messages: Optional[bool] = None,\n llm_options: Optional[LLMOptions] = None,\n console_output: bool = False,\n ) -> str:\n \"\"\"\n Generate a response from the AI.\n \"\"\"\n sess = self.get_session(id)\n if console_output:\n console = Console(highlight=False, force_jupyter=False)\n ai_text_color = self.ai_text_color\n console.print(f\"[b]{self.character}[/b]: \", end=\"\", style=ai_text_color)\n stream = sess.stream(\n prompt,\n system=system,\n save_messages=save_messages,\n llm_options=llm_options,\n )\n for chunk in stream:\n console.print(chunk[\"delta\"], end=\"\", style=ai_text_color)\n console.print()\n return chunk[\"response\"] # type: ignore\n else:\n return sess.gen(\n prompt,\n system=system,\n save_messages=save_messages,\n llm_options=llm_options,\n )\n\n def stream(\n self,\n prompt: Union[str, Any],\n id: Optional[Union[str, UUID]] = None,\n system: Optional[str] = None,\n save_messages: Optional[bool] = None,\n llm_options: Optional[LLMOptions] = None,\n ):\n \"\"\"\n Generate a response from the AI.\n \"\"\"\n sess = self.get_session(id)\n return sess.stream(\n prompt,\n system=system,\n save_messages=save_messages,\n llm_options=llm_options,\n )\n\n def gen_model(\n self,\n prompt: Union[str, Any],\n response_model: Type[T],\n id: Optional[Union[str, UUID]] = None,\n system: Optional[str] = None,\n llm_options: Optional[LLMOptions] = None,\n ):\n \"\"\"\n Generate a pydantic typed model from the AI.\n \"\"\"\n sess = self.get_session(id)\n return sess.gen_model(\n prompt,\n response_model,\n system=system,\n llm_options=llm_options,\n )\n\n def build_system(self, system: Optional[str] = None) -> str:\n default = \"You are a helpful assistant.\"\n if system:\n return system\n else:\n return default\n\n def interactive_console(\n self,\n 
character: Optional[str] = None,\n prime: bool = True,\n prompt: Optional[str] = None,\n display_names: bool = True,\n ) -> None:\n \"\"\"\n Start an interactive console for the chatbot.\n \"\"\"\n console = Console(highlight=False, force_jupyter=False)\n sess = self.default_session\n ai_text_color = self.ai_text_color\n user_prompt_suffix = \"[b]You:[/b]\" if display_names else \"> \"\n agent_prompt_suffix = (\n f\"[b]{character}[/b]: \" if display_names and character else \"\"\n )\n\n if not sess:\n raise ValueError(\"No default session exists.\")\n\n # prime with a unique starting response to the user\n if prime:\n console.print(agent_prompt_suffix, end=\"\", style=ai_text_color)\n for chunk in sess.stream(\"Hello!\"):\n console.print(chunk[\"delta\"], end=\"\", style=ai_text_color)\n\n start = True\n while True:\n console.print()\n try:\n user_input = (\n prompt\n if start and prompt\n else console.input(user_prompt_suffix).strip()\n )\n start = False\n if not user_input:\n break\n\n console.print(agent_prompt_suffix, end=\"\", style=ai_text_color)\n for chunk in sess.stream(user_input):\n console.print(chunk[\"delta\"], end=\"\", style=ai_text_color)\n except KeyboardInterrupt:\n break\n\n def __str__(self) -> str | None:\n if self.default_session:\n return self.default_session.model_dump_json(exclude_none=True, indent=2)\n\n def __repr__(self) -> str:\n return \"\"\n\n def save_session(\n self,\n output_path: Optional[str] = None,\n id: Optional[Union[str, UUID]] = None,\n format: str = \"csv\",\n ):\n sess = self.get_session(id)\n sess_dict = sess.model_dump(\n exclude_none=True,\n )\n output_path = output_path or f\"chat_session.{format}\"\n if format == \"csv\":\n with open(output_path, \"w\", encoding=\"utf-8\") as f:\n fields = [\n \"role\",\n \"content\",\n \"received_at\",\n \"prompt_length\",\n \"completion_length\",\n \"total_length\",\n \"finish_reason\",\n ]\n w = csv.DictWriter(f, fieldnames=fields)\n w.writeheader()\n for message in sess_dict[\"messages\"]:\n # datetime must be in common format to be loaded into spreadsheet\n # for human-readability, the timezone is set to local machine\n local_datetime = message[\"received_at\"].astimezone()\n message[\"received_at\"] = local_datetime.strftime(\n \"%Y-%m-%d %H:%M:%S\"\n )\n w.writerow(message)\n elif format == \"json\":\n with open(output_path, \"w\") as f:\n f.write(sess.model_dump_json(exclude_none=True))\n\n def load_session(self, input_path: str, id: Union[str, UUID] = uuid4(), **kwargs):\n assert input_path.endswith(\".csv\") or input_path.endswith(\n \".json\"\n ), \"Only CSV and JSON imports are accepted.\"\n\n if input_path.endswith(\".csv\"):\n with open(input_path, \"r\", encoding=\"utf-8\") as f:\n r = csv.DictReader(f)\n messages = []\n for row in r:\n # need to convert the datetime back to UTC\n local_datetime = datetime.datetime.strptime(\n row[\"received_at\"], \"%Y-%m-%d %H:%M:%S\"\n ).replace(tzinfo=tz.tzlocal())\n row[\"received_at\"] = local_datetime.astimezone(\n datetime.timezone.utc\n )\n # https://stackoverflow.com/a/68305271\n row = {k: (None if v == \"\" else v) for k, v in row.items()}\n messages.append(ChatMessage(**row)) # type: ignore\n\n sess = self.new_session(id=id, **kwargs)\n sess.messages = messages\n return sess\n\n if input_path.endswith(\".json\"):\n with open(input_path, \"r\") as f:\n sess_dict = json.loads(f.read())\n # update session with info not loaded, e.g. 
auth/api_url\n for arg in kwargs:\n sess_dict[arg] = kwargs[arg]\n sess = self.new_session(**sess_dict)\n return sess\n\n # Tabulators for returning total token counts\n def message_totals(self, attr: str, id: Optional[Union[str, UUID]] = None) -> int:\n sess = self.get_session(id)\n return getattr(sess, attr)\n\n @property\n def total_prompt_length(self, id: Optional[Union[str, UUID]] = None) -> int:\n return self.message_totals(\"total_prompt_length\", id)\n\n @property\n def total_completion_length(self, id: Optional[Union[str, UUID]] = None) -> int:\n return self.message_totals(\"total_completion_length\", id)\n\n @property\n def total_length(self, id: Optional[Union[str, UUID]] = None) -> int:\n return self.message_totals(\"total_length\", id)\n\n # alias total_tokens to total_length for common use\n @property\n def total_tokens(self, id: Optional[Union[str, UUID]] = None) -> int:\n return self.total_length(id) # type: ignore"
},
{
"identifier": "SYSTEM_PROMPT",
"path": "simple_ai_agents/prompts.py",
"snippet": "SYSTEM_PROMPT = \"You are a helpful assistant.\""
}
] | import sys
import click
from dotenv import load_dotenv
from simple_ai_agents.chat_agent import ChatAgent
from simple_ai_agents.prompts import SYSTEM_PROMPT | 3,551 |
load_dotenv()
@click.command()
@click.option("--character", default=None, help="Specify the character")
@click.option("--prime/--no-prime", default=False, help="Enable priming")
@click.option(
"-m",
"--model",
default="gpt-3.5-turbo",
help="""Specify the LLM model e.g. gpt-3.5-turbo, ollama/mistral.
Uses gpt-3.5-turbo by default.""",
)
@click.option("--temperature", default=0.7, help="LLM temperature. Default is 0.7.")
|
load_dotenv()
@click.command()
@click.option("--character", default=None, help="Specify the character")
@click.option("--prime/--no-prime", default=False, help="Enable priming")
@click.option(
"-m",
"--model",
default="gpt-3.5-turbo",
help="""Specify the LLM model e.g. gpt-3.5-turbo, ollama/mistral.
Uses gpt-3.5-turbo by default.""",
)
@click.option("--temperature", default=0.7, help="LLM temperature. Default is 0.7.") | @click.option("-s", "--system", default=SYSTEM_PROMPT, help="System prompt") | 1 | 2023-11-10 06:01:25+00:00 | 4k |
DIAGNijmegen/HoVer-UNet | train/apply_postprocessing.py | [
{
"identifier": "DatasetPannuke",
"path": "data/pannuke_distillation_dataset.py",
"snippet": "class DatasetPannuke(Dataset):\n \"\"\"\n Distillaton pannuke dataset\n \"\"\"\n\n def __init__(self, path: str, mode: str = 'train', true_labels: bool = False,\n hovernet_predictions: bool = True):\n \"\"\"\n :param path: path of processed pannuke dataset, h5 file\n :param mode: train or infer\n :param true_labels: load ground truth\n :param hovernet_predictions: load hovernet predictions\n \"\"\"\n assert isinstance(path, str), \"path have be instance of string\"\n assert isinstance(mode, str) and mode in ['train', 'infer'], \"mode must be either train or infer\"\n assert isinstance(hovernet_predictions, bool) and isinstance(true_labels, bool) and (\n hovernet_predictions or true_labels), \\\n \"hovernet_predictions and true_labels must be boolean, and at least one must be true\"\n\n self.path = path\n self.input_shape = (256, 256)\n self.output_shape = (256, 256)\n self.nr_types = 6\n self.mode = mode\n self.true_labels = true_labels\n self.hovernet_predictions = hovernet_predictions\n data = h5py.File(path, 'r')\n self.images = data['images']\n if mode == 'infer':\n self.types = data['types']\n if true_labels:\n self.labels = data['true_labels']\n if hovernet_predictions:\n self.hovernet = data['hovernet_predictions']\n\n def __len__(self):\n return len(self.images)\n\n def __getitem__(self, idx: int):\n outputs = ((self.images[idx] / 255).astype('float32'),)\n if self.mode == 'train':\n if self.true_labels:\n outputs += (self.labels[idx].astype('float32'),)\n if self.hovernet_predictions:\n outputs += (self.hovernet[idx].astype('float32'),)\n if len(outputs) == 3:\n outputs = (outputs[0], np.concatenate(outputs[1:], axis=-1))\n elif self.mode == 'infer':\n outputs += ('%s_%s' % (idx, self.types[idx].decode('utf8')),)\n\n return outputs"
},
{
"identifier": "process",
"path": "models/HoVerNet/post_proc.py",
"snippet": "def process(pred_map, nr_types=None, return_centroids=False):\n \"\"\"Post processing script for image tiles.\n\n Args:\n pred_map: commbined output of tp, np and hv branches, in the same order\n nr_types: number of types considered at output of nc branch\n overlaid_img: img to overlay the predicted instances upon, `None` means no\n type_colour (dict) : `None` to use random, else overlay instances of a type to colour in the dict\n output_dtype: data type of output\n \n Returns:\n pred_inst: pixel-wise nuclear instance segmentation prediction\n pred_type_out: pixel-wise nuclear type prediction \n\n \"\"\"\n if nr_types is not None:\n pred_type = pred_map[..., :1]\n pred_inst = pred_map[..., 1:]\n pred_type = pred_type.astype(np.int32)\n else:\n pred_inst = pred_map\n\n pred_inst = np.squeeze(pred_inst)\n pred_inst = __proc_np_hv(pred_inst)\n\n inst_info_dict = None\n if return_centroids or nr_types is not None:\n inst_id_list = np.unique(pred_inst)[1:] # exlcude background\n inst_info_dict = {}\n for inst_id in inst_id_list:\n inst_map = pred_inst == inst_id\n # TODO: chane format of bbox output\n rmin, rmax, cmin, cmax = get_bounding_box(inst_map)\n inst_bbox = np.array([[rmin, cmin], [rmax, cmax]])\n inst_map = inst_map[\n inst_bbox[0][0]: inst_bbox[1][0], inst_bbox[0][1]: inst_bbox[1][1]\n ]\n inst_map = inst_map.astype(np.uint8)\n inst_moment = cv2.moments(inst_map)\n inst_contour = cv2.findContours(\n inst_map, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE\n )\n # * opencv protocol format may break\n inst_contour = np.squeeze(inst_contour[0][0].astype(\"int32\"))\n # < 3 points dont make a contour, so skip, likely artifact too\n # as the contours obtained via approximation => too small or sthg\n if inst_contour.shape[0] < 3:\n continue\n if len(inst_contour.shape) != 2:\n continue # ! check for trickery shape\n inst_centroid = [\n (inst_moment[\"m10\"] / inst_moment[\"m00\"]),\n (inst_moment[\"m01\"] / inst_moment[\"m00\"]),\n ]\n inst_centroid = np.array(inst_centroid)\n inst_contour[:, 0] += inst_bbox[0][1] # X\n inst_contour[:, 1] += inst_bbox[0][0] # Y\n inst_centroid[0] += inst_bbox[0][1] # X\n inst_centroid[1] += inst_bbox[0][0] # Y\n inst_info_dict[inst_id] = { # inst_id should start at 1\n \"bbox\": inst_bbox,\n \"centroid\": inst_centroid,\n \"contour\": inst_contour,\n \"type_prob\": None,\n \"type\": None,\n }\n\n if nr_types is not None:\n #### * Get class of each instance id, stored at index id-1\n for inst_id in list(inst_info_dict.keys()):\n rmin, cmin, rmax, cmax = (inst_info_dict[inst_id][\"bbox\"]).flatten()\n inst_map_crop = pred_inst[rmin:rmax, cmin:cmax]\n inst_type_crop = pred_type[rmin:rmax, cmin:cmax]\n inst_map_crop = (\n inst_map_crop == inst_id\n ) # TODO: duplicated operation, may be expensive\n inst_type = inst_type_crop[inst_map_crop]\n type_list, type_pixels = np.unique(inst_type, return_counts=True)\n type_list = list(zip(type_list, type_pixels))\n type_list = sorted(type_list, key=lambda x: x[1], reverse=True)\n inst_type = type_list[0][0]\n if inst_type == 0: # ! pick the 2nd most dominant if exist\n if len(type_list) > 1:\n inst_type = type_list[1][0]\n type_dict = {v[0]: v[1] for v in type_list}\n type_prob = type_dict[inst_type] / (np.sum(inst_map_crop) + 1.0e-6)\n inst_info_dict[inst_id][\"type\"] = int(inst_type)\n if inst_type not in [0,1,2,3,4,5]:\n pass\n inst_info_dict[inst_id][\"type_prob\"] = float(type_prob)\n\n # print('here')\n # ! 
WARNING: ID MAY NOT BE CONTIGUOUS\n # inst_id in the dict maps to the same value in the `pred_inst`\n return pred_inst, inst_info_dict"
}
] | import colorsys
import json
import os
import random
import cv2
import numpy as np
import segmentation_models_pytorch as smp
import torch
import torch.nn.functional as F
from multiprocessing import Pool
from time import time
from torch.utils.data import DataLoader
from tqdm import tqdm
from data.pannuke_distillation_dataset import DatasetPannuke
from models.HoVerNet.post_proc import process | 2,460 |
def random_colors(N, bright=True):
"""Generate random colors.
To get visually distinct colors, generate them in HSV space then
convert to RGB.
"""
brightness = 1.0 if bright else 0.7
hsv = [(i / N, 1, brightness) for i in range(N)]
colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))
random.shuffle(colors)
return colors
def visualize_instances_dict(
input_image, inst_dict, draw_dot=False, type_colour=None, line_thickness=2
):
"""Overlays segmentation results (dictionary) on image as contours.
Args:
input_image: input image
inst_dict: dict of output prediction, defined as in this library
draw_dot: to draw a dot for each centroid
type_colour: a dict of {type_id : (type_name, colour)} ,
`type_id` is from 0-N and `colour` is a tuple of (R, G, B)
line_thickness: line thickness of contours
"""
# overlay = np.copy((input_image))
overlay = np.zeros(input_image.shape)
inst_rng_colors = random_colors(len(inst_dict))
inst_rng_colors = np.array(inst_rng_colors) * 255
inst_rng_colors = inst_rng_colors.astype(np.uint8)
for idx, [inst_id, inst_info] in enumerate(inst_dict.items()):
inst_contour = inst_info["contour"]
if "type" in inst_info and type_colour is not None:
inst_colour = type_colour[inst_info["type"]][1]
else:
inst_colour = (inst_rng_colors[idx]).tolist()
cv2.drawContours(overlay, [inst_contour], -1, inst_colour, line_thickness)
if draw_dot:
inst_centroid = inst_info["centroid"]
inst_centroid = tuple([int(v) for v in inst_centroid])
overlay = cv2.circle(overlay, inst_centroid, 3, (255, 0, 0), -1)
return overlay
def create_mask(x, _pred):
instance_map, _centroids = x
mask = np.zeros(instance_map.shape + (6,))
for idx, info in _centroids.items():
try:
mask[..., info['type']][instance_map == idx] = idx
mask[..., 0][instance_map == idx] = 1
except Exception:
print(_pred[-1])
return mask
def _postprocess(_pred):
x = process(_pred[1], nr_types=6)
mask = create_mask(x, _pred)
return _pred[0], x, _pred[2], mask
def apply_postprocessing(path_weights, path_test, model):
model.load_state_dict(torch.load(path_weights)['model_state_dict'])
model.eval()
|
def random_colors(N, bright=True):
"""Generate random colors.
To get visually distinct colors, generate them in HSV space then
convert to RGB.
"""
brightness = 1.0 if bright else 0.7
hsv = [(i / N, 1, brightness) for i in range(N)]
colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))
random.shuffle(colors)
return colors
def visualize_instances_dict(
input_image, inst_dict, draw_dot=False, type_colour=None, line_thickness=2
):
"""Overlays segmentation results (dictionary) on image as contours.
Args:
input_image: input image
inst_dict: dict of output prediction, defined as in this library
draw_dot: to draw a dot for each centroid
type_colour: a dict of {type_id : (type_name, colour)} ,
`type_id` is from 0-N and `colour` is a tuple of (R, G, B)
line_thickness: line thickness of contours
"""
# overlay = np.copy((input_image))
overlay = np.zeros(input_image.shape)
inst_rng_colors = random_colors(len(inst_dict))
inst_rng_colors = np.array(inst_rng_colors) * 255
inst_rng_colors = inst_rng_colors.astype(np.uint8)
for idx, [inst_id, inst_info] in enumerate(inst_dict.items()):
inst_contour = inst_info["contour"]
if "type" in inst_info and type_colour is not None:
inst_colour = type_colour[inst_info["type"]][1]
else:
inst_colour = (inst_rng_colors[idx]).tolist()
cv2.drawContours(overlay, [inst_contour], -1, inst_colour, line_thickness)
if draw_dot:
inst_centroid = inst_info["centroid"]
inst_centroid = tuple([int(v) for v in inst_centroid])
overlay = cv2.circle(overlay, inst_centroid, 3, (255, 0, 0), -1)
return overlay
def create_mask(x, _pred):
instance_map, _centroids = x
mask = np.zeros(instance_map.shape + (6,))
for idx, info in _centroids.items():
try:
mask[..., info['type']][instance_map == idx] = idx
mask[..., 0][instance_map == idx] = 1
except Exception:
print(_pred[-1])
return mask
def _postprocess(_pred):
x = process(_pred[1], nr_types=6)
mask = create_mask(x, _pred)
return _pred[0], x, _pred[2], mask
def apply_postprocessing(path_weights, path_test, model):
model.load_state_dict(torch.load(path_weights)['model_state_dict'])
model.eval() | data_infer = DatasetPannuke(path_test, mode='infer') | 0 | 2023-11-10 09:37:29+00:00 | 4k |
StanislavPetrovV/3D-Number-Renderer-with-UMAP | renderer.py | [
{
"identifier": "ShaderProgram",
"path": "shader_program.py",
"snippet": "class ShaderProgram:\n def __init__(self, renderer):\n self.app = renderer.app\n self.ctx = renderer.ctx\n self.camera = renderer.camera\n\n # -------- shaders -------- #\n self.axis = self.get_program(shader_name='axis')\n self.point_cloud = self.get_program(shader_name='point_cloud')\n\n # UBO structure:\n #\n # mat4 m_proj == 64 bytes\n # vec3 center == 12\n # float rot_speed == 4\n # mat4 m_view == 64\n # vec3 cam_pos == 12\n # float u_time == 4\n\n self.ubo_bytes = {\n 'm_proj': 64,\n 'center': 12,\n 'rot_speed': 4,\n 'm_view': 64,\n 'cam_pos': 12,\n 'u_time': 4\n }\n\n # offsets\n self.offsets = dict(zip(\n self.ubo_bytes, accumulate(self.ubo_bytes.values(), initial=0)))\n\n # get buffer size\n min_size = sum(self.ubo_bytes.values())\n buffer_size = min_size if not min_size % 16 else ((min_size // 16) + 1) * 16\n # padding = buffer_size - min_size\n\n # ubo\n self.uniform_buffer = self.ctx.buffer(reserve=buffer_size)\n self.uniform_buffer.bind_to_uniform_block(binding=UBO_BIND_VALUE)\n\n # binding\n self.axis['UBO'].binding = UBO_BIND_VALUE\n\n self.set_uniforms_on_init()\n\n def set_uniforms_on_init(self):\n #\n self.uniform_buffer.write(\n data=self.camera.m_proj, offset=self.offsets['m_proj'])\n #\n self.uniform_buffer.write(\n data=self.app.data_loader.center, offset=self.offsets['center'])\n #\n self.uniform_buffer.write(\n data=struct.pack('=1f', WORLD_ROT_SPEED), offset=self.offsets['rot_speed'])\n\n def update(self):\n #\n self.uniform_buffer.write(\n data=self.camera.m_view, offset=self.offsets['m_view'])\n #\n self.uniform_buffer.write(\n data=self.camera.position, offset=self.offsets['cam_pos'])\n #\n self.uniform_buffer.write(\n data=struct.pack('=1f', self.app.time), offset=self.offsets['u_time'])\n\n def get_program(self, shader_name):\n with open(f'shaders/{shader_name}.vert') as file:\n vertex_shader = file.read()\n\n with open(f'shaders/{shader_name}.frag') as file:\n fragment_shader = file.read()\n\n program = self.ctx.program(vertex_shader=vertex_shader, fragment_shader=fragment_shader)\n return program"
},
{
"identifier": "Camera",
"path": "camera.py",
"snippet": "class Camera:\n def __init__(self, app, position=INIT_CAM_POS, yaw=INIT_CAM_YAW, pitch=INIT_CAM_PITCH):\n self.app = app\n\n self.position = glm.vec3(position)\n self.yaw = glm.radians(yaw)\n self.pitch = glm.radians(pitch)\n\n self.up = glm.vec3(0, 1, 0)\n self.right = glm.vec3(1, 0, 0)\n self.forward = glm.vec3(0, 0, -1)\n\n self.m_proj = glm.perspective(V_FOV, ASPECT_RATIO, NEAR, FAR)\n self.m_view = glm.mat4()\n\n def mouse_control(self):\n mouse_dx, mouse_dy = pg.mouse.get_rel()\n if mouse_dx:\n self.rotate_yaw(delta_x=mouse_dx * MOUSE_SENSITIVITY)\n if mouse_dy:\n self.rotate_pitch(delta_y=mouse_dy * MOUSE_SENSITIVITY)\n\n def keyboard_control(self):\n key_state = pg.key.get_pressed()\n vel = CAM_SPEED * self.app.delta_time\n next_step = glm.vec3()\n #\n if key_state[KEYS['FORWARD']]:\n next_step += self.move_forward(vel)\n if key_state[KEYS['BACK']]:\n next_step += self.move_back(vel)\n if key_state[KEYS['STRAFE_R']]:\n next_step += self.move_right(vel)\n if key_state[KEYS['STRAFE_L']]:\n next_step += self.move_left(vel)\n #\n if key_state[KEYS['UP']]:\n self.move_up(vel)\n if key_state[KEYS['DOWN']]:\n self.move_down(vel)\n #\n self.position += next_step\n\n def update(self):\n self.keyboard_control()\n self.mouse_control()\n #\n self.update_vectors()\n self.update_view_matrix()\n\n def update_view_matrix(self):\n self.m_view = glm.lookAt(self.position, self.position + self.forward, self.up)\n\n def update_vectors(self):\n self.forward.x = glm.cos(self.yaw) * glm.cos(self.pitch)\n self.forward.y = glm.sin(self.pitch)\n self.forward.z = glm.sin(self.yaw) * glm.cos(self.pitch)\n\n self.forward = glm.normalize(self.forward)\n self.right = glm.normalize(glm.cross(self.forward, glm.vec3(0, 1, 0)))\n self.up = glm.normalize(glm.cross(self.right, self.forward))\n\n def rotate_pitch(self, delta_y):\n self.pitch -= delta_y\n self.pitch = glm.clamp(self.pitch, -PITCH_MAX, PITCH_MAX)\n\n def rotate_yaw(self, delta_x):\n self.yaw += delta_x\n\n def move_left(self, velocity):\n return -self.right * velocity\n\n def move_right(self, velocity):\n return self.right * velocity\n\n def move_up(self, velocity):\n self.position += self.up * velocity\n\n def move_down(self, velocity):\n self.position -= self.up * velocity\n\n def move_forward(self, velocity):\n return self.forward * velocity\n\n def move_back(self, velocity):\n return -self.forward * velocity"
},
{
"identifier": "PointCloudMesh",
"path": "meshes/point_cloud_mesh.py",
"snippet": "class PointCloudMesh:\n def __init__(self, renderer):\n self.ctx = renderer.ctx\n self.program = renderer.shader_program.point_cloud\n self.data_loader = renderer.app.data_loader\n #\n self.vao = self.get_vao()\n\n def get_vao(self):\n point_position_buffer = self.ctx.buffer(self.data_loader.point_positions)\n point_prime_flag_buffer = self.ctx.buffer(self.data_loader.prime_flags)\n #\n vao = self.ctx.vertex_array(\n self.program,\n [\n (point_position_buffer, '3f', 'in_position'),\n (point_prime_flag_buffer, '1i1', 'is_prime'),\n ],\n skip_errors=True\n )\n return vao\n\n def render(self):\n self.vao.render(mode=mgl.POINTS)"
},
{
"identifier": "AxisMesh",
"path": "meshes/axis_mesh.py",
"snippet": "class AxisMesh:\n def __init__(self, renderer, scale=1.0, center=glm.vec3(0.0)):\n self.ctx = renderer.ctx\n self.program = renderer.shader_program.axis\n\n self.scale = scale * 0.85\n self.center = center\n\n self.vbo_format = '3f'\n self.vbo_attrs = ('in_position',)\n self.vao = self.get_vao()\n\n def get_vao(self):\n vertex_data = self.get_vertex_data()\n vbo = self.ctx.buffer(vertex_data)\n vao = self.ctx.vertex_array(\n self.program, [\n (vbo, self.vbo_format, *self.vbo_attrs)\n ],\n skip_errors=True\n )\n return vao\n\n def render(self):\n self.vao.render(mgl.LINES)\n\n def get_vertex_data(self):\n vert_data = np.array(\n [[-1, 0, 0], [1, 0, 0],\n [0, 0, 1], [0, 0, -1],\n [0, -1, 0], [0, 1, 0],], dtype='float32')\n\n vert_data = vert_data * self.scale + self.center\n return vert_data"
}
] | from shader_program import ShaderProgram
from camera import Camera
from meshes.point_cloud_mesh import PointCloudMesh
from meshes.axis_mesh import AxisMesh | 2,066 |
class Renderer:
def __init__(self, app):
self.app = app
self.ctx = app.ctx
#
self.camera = Camera(app)
|
class Renderer:
def __init__(self, app):
self.app = app
self.ctx = app.ctx
#
self.camera = Camera(app) | self.shader_program = ShaderProgram(renderer=self) | 0 | 2023-11-11 10:35:37+00:00 | 4k |
fofr/cog-sdxl-lcm-multi-controlnet-lora | predict.py | [
{
"identifier": "WeightsManager",
"path": "weights_manager.py",
"snippet": "class WeightsManager:\n def __init__(self, predictor):\n self.predictor = predictor\n self.weights_cache = WeightsDownloadCache()\n\n def load_trained_weights(self, weights, pipe):\n from no_init import no_init_or_tensor\n\n # weights can be a URLPath, which behaves in unexpected ways\n weights = str(weights)\n if self.predictor.tuned_weights == weights:\n print(\"skipping loading .. weights already loaded\")\n return\n\n self.predictor.tuned_weights = weights\n\n local_weights_cache = self.weights_cache.ensure(weights)\n\n # load UNET\n print(\"Loading fine-tuned model\")\n self.predictor.is_lora = False\n\n maybe_unet_path = os.path.join(local_weights_cache, \"unet.safetensors\")\n if not os.path.exists(maybe_unet_path):\n print(\"Does not have Unet. assume we are using LoRA\")\n self.predictor.is_lora = True\n\n if not self.predictor.is_lora:\n print(\"Loading Unet\")\n\n new_unet_params = load_file(\n os.path.join(local_weights_cache, \"unet.safetensors\")\n )\n # this should return _IncompatibleKeys(missing_keys=[...], unexpected_keys=[])\n pipe.unet.load_state_dict(new_unet_params, strict=False)\n\n else:\n print(\"Loading Unet LoRA\")\n\n unet = pipe.unet\n\n tensors = load_file(os.path.join(local_weights_cache, \"lora.safetensors\"))\n\n unet_lora_attn_procs = {}\n name_rank_map = {}\n for tk, tv in tensors.items():\n # up is N, d\n if tk.endswith(\"up.weight\"):\n proc_name = \".\".join(tk.split(\".\")[:-3])\n r = tv.shape[1]\n name_rank_map[proc_name] = r\n\n for name, attn_processor in unet.attn_processors.items():\n cross_attention_dim = (\n None\n if name.endswith(\"attn1.processor\")\n else unet.config.cross_attention_dim\n )\n if name.startswith(\"mid_block\"):\n hidden_size = unet.config.block_out_channels[-1]\n elif name.startswith(\"up_blocks\"):\n block_id = int(name[len(\"up_blocks.\")])\n hidden_size = list(reversed(unet.config.block_out_channels))[\n block_id\n ]\n elif name.startswith(\"down_blocks\"):\n block_id = int(name[len(\"down_blocks.\")])\n hidden_size = unet.config.block_out_channels[block_id]\n with no_init_or_tensor():\n module = LoRAAttnProcessor2_0(\n hidden_size=hidden_size,\n cross_attention_dim=cross_attention_dim,\n rank=name_rank_map[name],\n )\n unet_lora_attn_procs[name] = module.to(\"cuda\", non_blocking=True)\n\n unet.set_attn_processor(unet_lora_attn_procs)\n unet.load_state_dict(tensors, strict=False)\n\n # load text\n handler = TokenEmbeddingsHandler(\n [pipe.text_encoder, pipe.text_encoder_2], [pipe.tokenizer, pipe.tokenizer_2]\n )\n handler.load_embeddings(os.path.join(local_weights_cache, \"embeddings.pti\"))\n\n # load params\n with open(os.path.join(local_weights_cache, \"special_params.json\"), \"r\") as f:\n params = json.load(f)\n self.predictor.token_map = params\n\n self.predictor.tuned_model = True"
},
{
"identifier": "WeightsDownloader",
"path": "weights_downloader.py",
"snippet": "class WeightsDownloader:\n @staticmethod\n def download_if_not_exists(url, dest):\n if not os.path.exists(dest):\n WeightsDownloader.download(url, dest)\n\n @staticmethod\n def download(url, dest):\n start = time.time()\n print(\"downloading url: \", url)\n print(\"downloading to: \", dest)\n subprocess.check_call([\"pget\", \"-x\", url, dest], close_fds=False)\n print(\"downloading took: \", time.time() - start)"
},
{
"identifier": "ControlNet",
"path": "controlnet.py",
"snippet": "class ControlNet:\n CONTROLNET_MODELS = [\n \"none\",\n \"edge_canny\",\n \"illusion\",\n \"depth_leres\",\n \"depth_midas\",\n \"soft_edge_pidi\",\n \"soft_edge_hed\",\n \"lineart\",\n \"lineart_anime\",\n \"openpose\",\n # Preprocessors without an XL model yet\n # \"straight_edge_mlsd\",\n # \"face_detector\",\n # \"content_shuffle\",\n # \"normal_bae\",\n # \"segementation_sam\",\n ]\n\n def __init__(self, predictor):\n WeightsDownloader.download_if_not_exists(CONTROLNET_URL, CONTROLNET_MODEL_CACHE)\n\n self.controlnet_preprocessor = ControlNetPreprocessor(predictor)\n self.models = {\n \"canny\": self.initialize_controlnet(\n \"diffusers/controlnet-canny-sdxl-1.0\",\n ),\n \"depth\": self.initialize_controlnet(\n \"diffusers/controlnet-depth-sdxl-1.0-small\",\n ),\n \"soft_edge\": self.initialize_controlnet(\n \"SargeZT/controlnet-sd-xl-1.0-softedge-dexined\",\n ),\n \"openpose\": self.initialize_controlnet(\n \"thibaud/controlnet-openpose-sdxl-1.0\",\n ),\n \"illusion\": self.initialize_controlnet(\n \"monster-labs/control_v1p_sdxl_qrcode_monster\",\n ),\n }\n\n def initialize_controlnet(self, model_name):\n print(\"Initializing\", model_name)\n return ControlNetModel.from_pretrained(\n model_name, cache_dir=CONTROLNET_MODEL_CACHE, torch_dtype=torch.float16\n )\n\n def get_model(self, controlnet_name):\n if controlnet_name in self.models:\n return self.models[controlnet_name]\n elif controlnet_name.startswith(\"edge_\"):\n return self.models[\"canny\"]\n elif controlnet_name.startswith(\"depth_\"):\n return self.models[\"depth\"]\n elif controlnet_name.startswith(\"soft_edge\") or controlnet_name.startswith(\n \"lineart\"\n ):\n return self.models[\"soft_edge\"]\n else:\n return None\n\n def get_models(self, controlnet_names):\n models = [\n self.get_model(controlnet_name) for controlnet_name in controlnet_names\n ]\n return list(filter(None, models))\n\n def preprocess(self, image, controlnet_name):\n # Illusion model needs no preprocessing\n if controlnet_name == \"illusion\" or controlnet_name == \"none\":\n return image\n\n return self.controlnet_preprocessor.process_image(image, controlnet_name)\n\n @staticmethod\n def get_controlnet_names():\n return ControlNet.CONTROLNET_MODELS"
},
{
"identifier": "SizingStrategy",
"path": "sizing_strategy.py",
"snippet": "class SizingStrategy:\n def __init__(self):\n pass\n\n def get_dimensions(self, image):\n original_width, original_height = image.size\n print(\n f\"Original dimensions: Width: {original_width}, Height: {original_height}\"\n )\n resized_width, resized_height = self.get_resized_dimensions(\n original_width, original_height\n )\n print(\n f\"Dimensions to resize to: Width: {resized_width}, Height: {resized_height}\"\n )\n return resized_width, resized_height\n\n def get_allowed_dimensions(self, base=LOWEST_DIMENSION, max_dim=MAX_DIMENSION):\n \"\"\"\n Function to generate allowed dimensions optimized around a base up to a max\n \"\"\"\n allowed_dimensions = []\n for i in range(base, max_dim + 1, 64):\n for j in range(base, max_dim + 1, 64):\n allowed_dimensions.append((i, j))\n return allowed_dimensions\n\n def get_resized_dimensions(self, width, height):\n allowed_dimensions = self.get_allowed_dimensions()\n aspect_ratio = width / height\n print(f\"Aspect Ratio: {aspect_ratio:.2f}\")\n # Find the closest allowed dimensions that maintain the aspect ratio\n # and are closest to the optimum dimension\n closest_dimensions = min(\n allowed_dimensions,\n key=lambda dim: abs(dim[0] / dim[1] - aspect_ratio)\n + abs(dim[0] - OPTIMUM_DIMENSION),\n )\n return closest_dimensions\n\n def resize_images(self, images, width, height):\n return [\n img.resize((width, height)) if img is not None else None for img in images\n ]\n\n def open_image(self, image_path):\n return Image.open(str(image_path)) if image_path is not None else None\n\n def apply(\n self,\n sizing_strategy,\n width,\n height,\n image=None,\n mask=None,\n control_1_image=None,\n control_2_image=None,\n control_3_image=None,\n ):\n image_dict = {\n \"input_image\": self.open_image(image),\n \"mask_image\": self.open_image(mask),\n \"controlnet_1_image\": self.open_image(control_1_image),\n \"controlnet_2_image\": self.open_image(control_2_image),\n \"controlnet_3_image\": self.open_image(control_3_image),\n }\n\n if sizing_strategy in image_dict:\n print(f\"Resizing based on {sizing_strategy}\")\n width, height = self.get_dimensions(image_dict[sizing_strategy])\n else:\n print(\"Using given dimensions\")\n\n resized_images = self.resize_images(\n list(image_dict.values()),\n width,\n height,\n )\n\n return width, height, resized_images"
}
] | import os
import time
import numpy as np
import torch
from typing import List, Optional
from cog import BasePredictor, Input, Path
from diffusers import (
DiffusionPipeline,
LCMScheduler,
StableDiffusionXLImg2ImgPipeline,
StableDiffusionXLInpaintPipeline,
StableDiffusionXLControlNetPipeline,
StableDiffusionXLControlNetInpaintPipeline,
StableDiffusionXLControlNetImg2ImgPipeline,
)
from diffusers.pipelines.stable_diffusion.safety_checker import (
StableDiffusionSafetyChecker,
)
from transformers import CLIPImageProcessor
from weights_manager import WeightsManager
from weights_downloader import WeightsDownloader
from controlnet import ControlNet
from sizing_strategy import SizingStrategy | 2,900 |
SDXL_MODEL_CACHE = "./sdxl-cache"
REFINER_MODEL_CACHE = "./refiner-cache"
SAFETY_CACHE = "./safety-cache"
LCM_CACHE = "./lcm-cache"
FEATURE_EXTRACTOR = "./feature-extractor"
SDXL_URL = "https://weights.replicate.delivery/default/sdxl/sdxl-vae-upcast-fix.tar"
REFINER_URL = (
"https://weights.replicate.delivery/default/sdxl/refiner-no-vae-no-encoder-1.0.tar"
)
SAFETY_URL = "https://weights.replicate.delivery/default/sdxl/safety-1.0.tar"
class Predictor(BasePredictor):
def load_trained_weights(self, weights, pipe):
self.weights_manager.load_trained_weights(weights, pipe)
def build_controlnet_pipeline(self, pipeline_class, controlnet_models):
pipe = pipeline_class.from_pretrained(
SDXL_MODEL_CACHE,
torch_dtype=torch.float16,
use_safetensors=True,
variant="fp16",
vae=self.txt2img_pipe.vae,
text_encoder=self.txt2img_pipe.text_encoder,
text_encoder_2=self.txt2img_pipe.text_encoder_2,
tokenizer=self.txt2img_pipe.tokenizer,
tokenizer_2=self.txt2img_pipe.tokenizer_2,
unet=self.txt2img_pipe.unet,
scheduler=self.txt2img_pipe.scheduler,
controlnet=self.controlnet.get_models(controlnet_models),
)
pipe.to("cuda")
return pipe
def setup(self, weights: Optional[Path] = None):
"""Load the model into memory to make running multiple predictions efficient"""
start = time.time()
self.sizing_strategy = SizingStrategy()
self.weights_manager = WeightsManager(self)
self.tuned_model = False
self.tuned_weights = None
if str(weights) == "weights":
weights = None
print("Loading safety checker...")
|
SDXL_MODEL_CACHE = "./sdxl-cache"
REFINER_MODEL_CACHE = "./refiner-cache"
SAFETY_CACHE = "./safety-cache"
LCM_CACHE = "./lcm-cache"
FEATURE_EXTRACTOR = "./feature-extractor"
SDXL_URL = "https://weights.replicate.delivery/default/sdxl/sdxl-vae-upcast-fix.tar"
REFINER_URL = (
"https://weights.replicate.delivery/default/sdxl/refiner-no-vae-no-encoder-1.0.tar"
)
SAFETY_URL = "https://weights.replicate.delivery/default/sdxl/safety-1.0.tar"
class Predictor(BasePredictor):
def load_trained_weights(self, weights, pipe):
self.weights_manager.load_trained_weights(weights, pipe)
def build_controlnet_pipeline(self, pipeline_class, controlnet_models):
pipe = pipeline_class.from_pretrained(
SDXL_MODEL_CACHE,
torch_dtype=torch.float16,
use_safetensors=True,
variant="fp16",
vae=self.txt2img_pipe.vae,
text_encoder=self.txt2img_pipe.text_encoder,
text_encoder_2=self.txt2img_pipe.text_encoder_2,
tokenizer=self.txt2img_pipe.tokenizer,
tokenizer_2=self.txt2img_pipe.tokenizer_2,
unet=self.txt2img_pipe.unet,
scheduler=self.txt2img_pipe.scheduler,
controlnet=self.controlnet.get_models(controlnet_models),
)
pipe.to("cuda")
return pipe
def setup(self, weights: Optional[Path] = None):
"""Load the model into memory to make running multiple predictions efficient"""
start = time.time()
self.sizing_strategy = SizingStrategy()
self.weights_manager = WeightsManager(self)
self.tuned_model = False
self.tuned_weights = None
if str(weights) == "weights":
weights = None
print("Loading safety checker...") | WeightsDownloader.download_if_not_exists(SAFETY_URL, SAFETY_CACHE) | 1 | 2023-11-16 11:11:27+00:00 | 4k |
joyn-gg/discord.http | discord_http/http.py | [
{
"identifier": "NotFound",
"path": "discord_http/errors.py",
"snippet": "class NotFound(HTTPException):\n \"\"\" Raised whenever a HTTP request returns 404 \"\"\"\n pass"
},
{
"identifier": "DiscordServerError",
"path": "discord_http/errors.py",
"snippet": "class DiscordServerError(HTTPException):\n \"\"\" Raised whenever an unexpected HTTP error occurs \"\"\"\n pass"
},
{
"identifier": "Forbidden",
"path": "discord_http/errors.py",
"snippet": "class Forbidden(HTTPException):\n \"\"\" Raised whenever a HTTP request returns 403 \"\"\"\n pass"
},
{
"identifier": "HTTPException",
"path": "discord_http/errors.py",
"snippet": "class HTTPException(DiscordException):\n \"\"\" Base exception for HTTP requests \"\"\"\n def __init__(self, r: \"HTTPResponse\"):\n self.request = r\n self.status: int = r.status\n\n self.code: int\n self.text: str\n\n if isinstance(r.response, dict):\n self.code = r.response.get(\"code\", 0)\n self.text = r.response.get(\"message\", \"Unknown\")\n if r.response.get(\"errors\", None):\n self.text += f\"\\n{r.response['errors']}\"\n else:\n self.text: str = str(r.response)\n self.code = 0\n\n error_text = f\"HTTP {self.request.status} > {self.request.reason} (code: {self.code})\"\n if len(self.text):\n error_text += f\": {self.text}\"\n\n super().__init__(error_text)"
}
] | import aiohttp
import asyncio
import json
import logging
import sys
from aiohttp.client_exceptions import ContentTypeError
from collections import deque
from typing import (
Optional, Any, Union, Self, overload,
Literal, TypeVar, Generic, TYPE_CHECKING
)
from .errors import (
NotFound, DiscordServerError,
Forbidden, HTTPException
)
from .user import User
from .user import User | 2,307 | @overload
async def query(
self,
method: MethodTypes,
path: str,
*,
res_method: Literal["read"] = "read",
**kwargs
) -> HTTPResponse[bytes]:
...
@overload
async def query(
self,
method: MethodTypes,
path: str,
*,
res_method: Literal["text"] = "text",
**kwargs
) -> HTTPResponse[str]:
...
async def query(
self,
method: MethodTypes,
path: str,
*,
res_method: ResMethodTypes = "json",
**kwargs
) -> HTTPResponse:
"""
Make a request to the Discord API
Parameters
----------
method: `str`
Which HTTP method to use
path: `str`
The path to make the request to
res_method: `str`
The method to use to get the response
Returns
-------
`HTTPResponse`
The response from the request
Raises
------
`ValueError`
Invalid HTTP method
`DiscordServerError`
Something went wrong on Discord's end
`Forbidden`
You are not allowed to do this
`NotFound`
The resource was not found
`HTTPException`
Something went wrong
`RuntimeError`
Unreachable code, reached max tries (5)
"""
if "headers" not in kwargs:
kwargs["headers"] = {}
if "Authorization" not in kwargs["headers"]:
kwargs["headers"]["Authorization"] = f"Bot {self.token}"
if res_method == "json" and "Content-Type" not in kwargs["headers"]:
kwargs["headers"]["Content-Type"] = "application/json"
kwargs["headers"]["User-Agent"] = "discord.http Python/{0} aiohttp/{1}".format(
".".join(str(i) for i in sys.version_info[:3]),
aiohttp.__version__
)
reason = kwargs.pop("reason", None)
if reason:
kwargs["headers"]["X-Audit-Log-Reason"] = reason
_api_url = self.api_url
if kwargs.pop("webhook", False):
_api_url = self.base_url
ratelimit = self.get_ratelimit(f"{method} {path}")
async with ratelimit:
for tries in range(5):
try:
r: HTTPResponse = await query(
method,
f"{_api_url}{path}",
res_method=res_method,
**kwargs
)
_log.debug(f"HTTP {method.upper()} ({r.status}): {path}")
match r.status:
case x if x >= 200 and x <= 299:
ratelimit.update(r)
return r
case 429:
retry_after: float = r.response["retry_after"]
_log.warning(f"Ratelimit hit ({path}), waiting {retry_after}s...")
await asyncio.sleep(retry_after)
continue
case x if x in (500, 502, 503, 504):
# Try again, maybe it will work next time, surely...
await asyncio.sleep(1 + tries * 2)
continue
# The lovely exception hell
case x if x >= 500:
raise DiscordServerError(r)
case 403:
raise Forbidden(r)
case 404:
|
if TYPE_CHECKING:
MethodTypes = Literal["GET", "POST", "DELETE", "PUT", "HEAD", "PATCH", "OPTIONS"]
ResMethodTypes = Literal["text", "read", "json"]
ResponseT = TypeVar("ResponseT")
_log = logging.getLogger(__name__)
__all__ = (
"DiscordAPI",
"HTTPResponse",
)
class HTTPResponse(Generic[ResponseT]):
def __init__(
self,
*,
status: int,
response: ResponseT,
reason: str,
res_method: ResMethodTypes,
headers: dict[str, str],
):
self.status = status
self.response = response
self.res_method = res_method
self.reason = reason
self.headers = headers
def __repr__(self) -> str:
return f"<HTTPResponse status={self.status} res_method='{self.res_method}'>"
@overload
async def query(
method: MethodTypes,
url: str,
*,
res_method: Literal["text"],
) -> HTTPResponse[str]:
...
@overload
async def query(
method: MethodTypes,
url: str,
*,
res_method: Literal["json"],
) -> HTTPResponse[dict[Any, Any]]:
...
@overload
async def query(
method: MethodTypes,
url: str,
*,
res_method: Literal["read"],
) -> HTTPResponse[bytes]:
...
async def query(
method: MethodTypes,
url: str,
*,
res_method: ResMethodTypes = "text",
**kwargs
) -> HTTPResponse:
"""
Make a request using the aiohttp library
Parameters
----------
method: `Optional[str]`
The HTTP method to use, defaults to GET
url: `str`
The URL to make the request to
res_method: `Optional[str]`
The method to use to get the response, defaults to text
Returns
-------
`HTTPResponse`
The response from the request
"""
session = aiohttp.ClientSession()
if not res_method:
res_method = "text"
session_method = getattr(session, str(method).lower(), None)
if not session_method:
raise ValueError(f"Invalid HTTP method: {method}")
if res_method not in ("text", "read", "json"):
raise ValueError(
f"Invalid res_method: {res_method}, "
"must be either text, read or json"
)
async with session_method(str(url), **kwargs) as res:
try:
r = await getattr(res, res_method.lower())()
except ContentTypeError:
if res_method == "json":
try:
r = json.loads(await res.text())
except json.JSONDecodeError:
# Give up trying, something is really wrong...
r = await res.text()
res_method = "text"
output = HTTPResponse(
status=res.status,
response=r, # type: ignore
res_method=res_method,
reason=res.reason,
headers=res.headers
)
await session.close()
return output
class Ratelimit:
def __init__(self, key: str):
self._key: str = key
self.limit: int = 1
self.outgoing: int = 0
self.remaining = self.limit
self.reset_after: float = 0.0
self.expires: Optional[float] = None
self._loop: asyncio.AbstractEventLoop = asyncio.get_running_loop()
self._lock = asyncio.Lock()
self._last_request: float = self._loop.time()
self._pending_requests: deque[asyncio.Future[Any]] = deque()
def reset(self) -> None:
""" Reset the ratelimit """
self.remaining = self.limit - self.outgoing
self.expires = None
self.reset_after = 0.0
def update(self, response: HTTPResponse) -> None:
""" Update the ratelimit with the response headers """
self.remaining = int(response.headers.get("x-ratelimit-remaining", 0))
self.reset_after = float(response.headers.get("x-ratelimit-reset-after", 0))
self.expires = self._loop.time() + self.reset_after
def _wake_next(self) -> None:
while self._pending_requests:
future = self._pending_requests.popleft()
if not future.done():
future.set_result(None)
break
def _wake(self, count: int = 1) -> None:
awaken = 0
while self._pending_requests:
future = self._pending_requests.popleft()
if not future.done():
future.set_result(None)
awaken += 1
if awaken >= count:
break
async def _refresh(self):
async with self._lock:
_log.debug(
f"Ratelimit bucket hit ({self._key}), "
f"waiting {self.reset_after}s..."
)
await asyncio.sleep(self.reset_after)
_log.debug(f"Ratelimit bucket released ({self._key})")
self.reset()
self._wake(self.remaining)
def is_expired(self) -> bool:
return (
self.expires is not None and
self._loop.time() > self.expires
)
def is_inactive(self) -> bool:
return (
(self._loop.time() - self._last_request) >= 300 and
len(self._pending_requests) == 0
)
async def _queue_up(self) -> None:
self._last_request = self._loop.time()
if self.is_expired():
self.reset()
while self.remaining <= 0:
future = self._loop.create_future()
self._pending_requests.append(future)
try:
await future
except Exception:
future.cancel()
if self.remaining > 0 and not future.cancelled():
self._wake_next()
raise
self.remaining -= 1
self.outgoing += 1
async def __aenter__(self) -> Self:
await self._queue_up()
return self
async def __aexit__(self, type, value, traceback) -> None:
self.outgoing -= 1
tokens = self.remaining - self.outgoing
if not self._lock.locked():
if tokens <= 0:
await self._refresh()
elif self._pending_requests:
self._wake(tokens)
class DiscordAPI:
def __init__(
self,
*,
token: str,
application_id: Optional[int],
api_version: Optional[int] = None
):
self.token: str = token
self.application_id: Optional[int] = application_id
self.api_version: int = api_version or 10
if not isinstance(self.api_version, int):
raise TypeError("api_version must be an integer")
self.base_url: str = "https://discord.com/api"
self.api_url: str = f"{self.base_url}/v{self.api_version}"
self._buckets: dict[str, Ratelimit] = {}
def _clear_old_ratelimits(self) -> None:
if len(self._buckets) <= 256:
return
for key in [k for k, v in self._buckets.items() if v.is_inactive()]:
try:
del self._buckets[key]
except KeyError:
pass
def get_ratelimit(self, key: str) -> Ratelimit:
try:
value = self._buckets[key]
except KeyError:
self._buckets[key] = value = Ratelimit(key)
self._clear_old_ratelimits()
return value
@overload
async def query(
self,
method: MethodTypes,
path: str,
*,
res_method: Literal["json"] = "json",
**kwargs
) -> HTTPResponse[dict[Any, Any]]:
...
@overload
async def query(
self,
method: MethodTypes,
path: str,
*,
res_method: Literal["read"] = "read",
**kwargs
) -> HTTPResponse[bytes]:
...
@overload
async def query(
self,
method: MethodTypes,
path: str,
*,
res_method: Literal["text"] = "text",
**kwargs
) -> HTTPResponse[str]:
...
async def query(
self,
method: MethodTypes,
path: str,
*,
res_method: ResMethodTypes = "json",
**kwargs
) -> HTTPResponse:
"""
Make a request to the Discord API
Parameters
----------
method: `str`
Which HTTP method to use
path: `str`
The path to make the request to
res_method: `str`
The method to use to get the response
Returns
-------
`HTTPResponse`
The response from the request
Raises
------
`ValueError`
Invalid HTTP method
`DiscordServerError`
Something went wrong on Discord's end
`Forbidden`
You are not allowed to do this
`NotFound`
The resource was not found
`HTTPException`
Something went wrong
`RuntimeError`
Unreachable code, reached max tries (5)
"""
if "headers" not in kwargs:
kwargs["headers"] = {}
if "Authorization" not in kwargs["headers"]:
kwargs["headers"]["Authorization"] = f"Bot {self.token}"
if res_method == "json" and "Content-Type" not in kwargs["headers"]:
kwargs["headers"]["Content-Type"] = "application/json"
kwargs["headers"]["User-Agent"] = "discord.http Python/{0} aiohttp/{1}".format(
".".join(str(i) for i in sys.version_info[:3]),
aiohttp.__version__
)
reason = kwargs.pop("reason", None)
if reason:
kwargs["headers"]["X-Audit-Log-Reason"] = reason
_api_url = self.api_url
if kwargs.pop("webhook", False):
_api_url = self.base_url
ratelimit = self.get_ratelimit(f"{method} {path}")
async with ratelimit:
for tries in range(5):
try:
r: HTTPResponse = await query(
method,
f"{_api_url}{path}",
res_method=res_method,
**kwargs
)
_log.debug(f"HTTP {method.upper()} ({r.status}): {path}")
match r.status:
case x if x >= 200 and x <= 299:
ratelimit.update(r)
return r
case 429:
retry_after: float = r.response["retry_after"]
_log.warning(f"Ratelimit hit ({path}), waiting {retry_after}s...")
await asyncio.sleep(retry_after)
continue
case x if x in (500, 502, 503, 504):
# Try again, maybe it will work next time, surely...
await asyncio.sleep(1 + tries * 2)
continue
# The lovely exception hell
case x if x >= 500:
raise DiscordServerError(r)
case 403:
raise Forbidden(r)
case 404: | raise NotFound(r) | 0 | 2023-11-14 12:50:42+00:00 | 4k |
Ganymede-Bio/bio-curve-fit | tests/test_four_pl_logistic.py | [
{
"identifier": "FourPLLogistic",
"path": "bio_curve_fit/logistic.py",
"snippet": "class FourPLLogistic(BaseEstimator, RegressorMixin, BaseStandardCurve):\n def __init__(\n self,\n A=None,\n B=None,\n C=None,\n D=None,\n LLOD=None,\n ULOD=None,\n ULOD_y=None,\n LLOD_y=None,\n slope_direction_positive: Optional[bool] = None,\n slope_guess_num_points_to_use: int = 3,\n ):\n # A is the minimum asymptote\n self.A_ = A\n # B is the Hill's slope\n self.B_ = B\n # C is the inflection point (EC50)\n self.C_ = C\n # D is the maximum asymptote\n self.D_ = D\n self.cov_ = None\n # Initial guesses used when fitting the curve\n self.guess_A_ = None\n self.guess_B_ = None\n self.guess_C_ = None\n self.guess_D_ = None\n # Estimated Limits of Detection for response signal\n self.LLOD_y_ = LLOD_y\n self.ULOD_y_ = ULOD_y\n # Estimated Limits of Detection for concentration\n self.LLOD_ = LLOD\n self.ULOD_ = ULOD\n self.slope_direction_positive = slope_direction_positive\n self.slope_guess_num_points_to_use = slope_guess_num_points_to_use\n\n def check_fit(self):\n if self.A_ is None or self.B_ is None or self.C_ is None or self.D_ is None:\n raise Exception(\n \"Model is not fit yet. Please call 'fit' with appropriate data\"\n \" or initialize the model object with non-null parameters.\"\n )\n\n def get_params(self, deep=False):\n if deep:\n return {\n \"A\": self.A_,\n \"B\": self.B_,\n \"C\": self.C_,\n \"D\": self.D_,\n \"LLOD\": self.LLOD_,\n \"ULOD\": self.ULOD_,\n \"ULOD_y\": self.ULOD_y_,\n \"LLOD_y\": self.LLOD_y_,\n }\n else:\n return {\n \"A\": self.A_,\n \"B\": self.B_,\n \"C\": self.C_,\n \"D\": self.D_,\n }\n\n @staticmethod\n def four_param_logistic(x, A, B, C, D):\n \"\"\"4 Parameter Logistic (4PL) model.\"\"\"\n\n # For addressing fractional powers of negative numbers\n # https://stackoverflow.com/questions/45384602/numpy-runtimewarning-invalid-value-encountered-in-power\n z = np.sign(x / C) * np.abs(x / C) ** B\n\n return ((A - D) / (1.0 + z)) + D\n\n @staticmethod\n def inverse_variance_weight_function(y_data):\n \"\"\"\n Function for weighting residuals by 1/y^2 in `scipy.optimize.curve_fit`.\n \"\"\"\n # To avoid division by zero, add a small constant to y_data.\n return y_data + np.finfo(float).eps\n\n def _calculate_lod_replicate_variance(\n self,\n x_data,\n y_data,\n lower_std_dev_multiplier: float = 2.5,\n upper_std_dev_multiplier: float = 0.0,\n ):\n \"\"\"\n Calculate the Lower and Upper Limits of Detection (LLOD and ULOD) using variance\n of replicate max and min concentration standards. It ignore zero concentration\n standards. If there are no replicates, the standard deviation zero\n Possible TODO: sometimes a minimum variance is used in other software.\n\n In the notation below we assume the response signal is the Y-axis and the\n concentration is the X-axis.\n\n Example: Two replicates of the lowest concentration standard (conc=1.0 pg/ml)\n have standard deviation of 100 across their responses. 
LLOD will be `model.predict\n (1.0) + 100 * 2.5` where 2.5 is the `lower_std_dev_multiplier` parameter.\n\n :param bottom_std_dev: Standard deviation at the bottom calibration point.\n :param top_std_dev: Standard deviation at the top calibration point.\n :param std_dev_multiplier: Multiplier for the standard deviations (default 2.5).\n :return: Pair of tuples containing the LLOD and ULOD, and the corresponding x-values.\n \"\"\"\n\n x_indexed_y_data = pd.DataFrame({\"x\": x_data, \"y\": y_data}).set_index(\"x\")\n # remove zeros from x_data\n x_indexed_y_data = x_indexed_y_data[x_indexed_y_data.index > 0]\n x_min = np.min(x_indexed_y_data.index)\n x_max = np.max(x_indexed_y_data.index)\n bottom_std_dev = x_indexed_y_data.loc[x_min, \"y\"].std()\n top_std_dev = x_indexed_y_data.loc[x_max, \"y\"].std()\n\n # Calculate LLOD and ULOD of RESPONSE SIGNAL\n llod = self.predict(x_min) + (lower_std_dev_multiplier * bottom_std_dev)\n ulod = self.predict(x_max) - (upper_std_dev_multiplier * top_std_dev)\n\n # Calculate the limits of detection for CONCENTRATION\n llod_x = self.predict_inverse(llod)\n ulod_x = self.predict_inverse(ulod)\n return llod_x, ulod_x, llod, ulod\n\n def fit(self, x_data, y_data, weight_func=None, LOD_func=None, **kwargs):\n \"\"\"\n Fit the 4 Parameter Logistic (4PL) model.\n\n x_data: x data points\n y_data: y data points\n weight_func: optional Function that calculates weights from y_data. This is\n passed into the `curve_fit` function where the function minimized is `sum\n ((r / weight_func(y_data)) ** 2)` where r is the residuals.\n Thus for a typical 1/y^2 weighting, `weight_func` should be `lambda\n y_data: y_data`\n \"\"\"\n x_data = np.float64(x_data)\n y_data = np.float64(y_data)\n df_data = pd.DataFrame({\"x\": x_data, \"y\": y_data})\n df_data.sort_values(by=\"x\", inplace=True)\n\n if LOD_func is None:\n # default LOD_func is to use replicate variance\n LOD_func = self._calculate_lod_replicate_variance\n\n absolute_sigma = False\n weights = None\n if weight_func is not None:\n weights = weight_func(y_data)\n absolute_sigma = True\n\n # Initial guess for the parameters\n self.guess_A_ = np.min(y_data) # type: ignore\n if self.slope_direction_positive is not None:\n self.guess_B_ = 1.0 if self.slope_direction_positive else -1.0\n else:\n # type: ignore\n self.guess_B_ = (\n 1.0\n if np.mean(\n df_data.iloc[: np.minimum(self.slope_guess_num_points_to_use, len(df_data))][ # type: ignore\n \"y\"\n ]\n )\n < np.mean(\n df_data.iloc[-np.minimum(self.slope_guess_num_points_to_use, len(df_data)) :][ # type: ignore\n \"y\"\n ]\n )\n else -1.0\n )\n self.guess_C_ = np.mean(x_data) # type: ignore\n self.guess_D_ = np.max(y_data) # type: ignore\n initial_guess = [self.guess_A_, self.guess_B_, self.guess_C_, self.guess_D_]\n\n curve_fit_kwargs = {\n \"f\": self.four_param_logistic,\n \"xdata\": x_data,\n \"ydata\": y_data,\n \"p0\": initial_guess,\n \"maxfev\": 10000,\n \"sigma\": weights,\n \"absolute_sigma\": absolute_sigma,\n }\n\n # overwrite parameters with any kwargs passed in\n for k, v in kwargs.items():\n curve_fit_kwargs[k] = v\n\n # Perform the curve fit\n params, cov = curve_fit(**curve_fit_kwargs)\n self.A_, self.B_, self.C_, self.D_ = params\n self.cov_ = cov\n self.LLOD_, self.ULOD_, self.LLOD_y_, self.ULOD_y_ = LOD_func(x_data, y_data)\n return self\n\n @staticmethod\n def jacobian(x_data, A, B, C, D):\n \"\"\"\n Jacobian matrix of the 4PL function with respect to A, B, C, D.\n \"\"\"\n z = (x_data / C) ** B\n\n partial_A = 1.0 / (1.0 + z)\n partial_B = -(z * 
(A - D) * np.log(np.maximum(x_data / C, np.finfo(float).eps))) / ( # type: ignore\n (1.0 + z) ** 2\n )\n partial_C = (B * z * (A - D)) / (C * (1.0 + z) ** 2)\n partial_D = 1.0 - 1.0 / (1.0 + z)\n\n # Jacobian matrix\n J = np.array([partial_A, partial_B, partial_C, partial_D]).T\n return J\n\n def predict_confidence_band(self, x_data):\n \"\"\"\n Predict confidence bands of data points.\n\n See:\n https://www.graphpad.com/guides/prism/latest/curve-fitting/reg_graphing_confidence_and_predic.htm\n https://www.graphpad.com/guides/prism/latest/curve-fitting/reg_how_confidence_and_prediction_.htm\n https://stats.stackexchange.com/questions/15423/how-to-compute-prediction-bands-for-non-linear-regression\n\n \"\"\"\n if self.cov_ is None:\n raise Exception(\n \"Covariance matrix is not available. Please call 'fit' with appropriate data.\"\n )\n J = self.jacobian(x_data, self.A_, self.B_, self.C_, self.D_)\n pred_var = np.sum((J @ self.cov_) * J, axis=1)\n\n return np.sqrt(pred_var)\n\n def predict_prediction_band(self, x_data, y_data):\n \"\"\"\n Predict prediction bands of data points.\n TODO: still need to double-check the math here.\n \"\"\"\n ss = (y_data - self.predict(x_data)) ** 2\n df = len(x_data) - 4 # 4 parameters\n\n return np.sqrt(self.predict_confidence_band(x_data) ** 2 * ss / df)\n\n def predict_inverse(self, y):\n \"\"\"Inverse 4 Parameter Logistic (4PL) model.\n\n Used for calculating the x-value for a given y-value.\n Usually, standard curves are fitted using concentration as x-values and response as\n y-values, so that variance in response is modeled for a given known concentration.\n But for samples of unknown concentration, we want to get the concentration as given\n response, which is what this function does.\n\n \"\"\"\n self.check_fit()\n z = ((self.A_ - self.D_) / (y - self.D_)) - 1 # type: ignore\n\n # For addressing fractional powers of negative numbers, np.sign(z) * np.abs(z) used rather than z\n # https://stackoverflow.com/questions/45384602/numpy-runtimewarning-invalid-value-encountered-in-power\n return self.C_ * (np.sign(z) * np.abs(z) ** (1 / self.B_)) # type: ignore\n\n def predict(self, x_data):\n self.check_fit()\n return self.four_param_logistic(x_data, self.A_, self.B_, self.C_, self.D_)"
},
{
"identifier": "plot_standard_curve",
"path": "bio_curve_fit/plotting.py",
"snippet": "def plot_standard_curve(\n x_data,\n y_data,\n fitted_model: BaseStandardCurve,\n title=\"Standard Curve Fit\",\n x_label=\"Concentration\",\n y_label=\"Response\",\n show_plot: bool = False,\n) -> bytes:\n \"\"\"\n Generate a plot of the data and the fitted curve.\n \"\"\"\n # Plot the data and the fitted curve\n # set x-axis to log scale\n # set scales to log\n plt.xscale(\"log\")\n plt.yscale(\"log\")\n data = pd.DataFrame({\"x\": x_data, \"y\": y_data})\n # remove zeros from x_data\n filtered_data = data[data[\"x\"] > 0]\n\n # Plot the fitted curve\n epsilon = 0.01\n x_min = np.log10(max(min(x_data), epsilon))\n x_max = max(x_data) * 2\n x = np.logspace(x_min, np.log10(x_max), 100) # type: ignore\n # Generate y-data based on the fitted parameters\n y_pred = fitted_model.predict(x)\n\n plt.plot(x, y_pred, label=\"Fitted curve\", color=\"red\")\n plt.scatter(filtered_data[\"x\"], filtered_data[\"y\"], label=\"Data\", s=12)\n formatter = ScalarFormatter()\n formatter.set_scientific(False)\n plt.gca().xaxis.set_major_formatter(formatter)\n plt.xlabel(x_label)\n plt.ylabel(y_label)\n plt.title(title)\n\n # set horizontal and vertical lines for ULOD and LLOD\n llod_response, ulod_response = fitted_model.LLOD_y_, fitted_model.ULOD_y_\n plt.axhline(llod_response, color=\"red\", linestyle=\"--\", label=\"LLOD\") # type: ignore\n plt.axhline(ulod_response, color=\"blue\", linestyle=\"--\", label=\"ULOD\") # type: ignore\n plt.legend()\n if show_plot:\n plt.show()\n # Save the plot to a BytesIO object\n buf = io.BytesIO()\n plt.savefig(buf, format=\"png\")\n plt.clf()\n buf.seek(0)\n return buf.read()"
}
] | import numpy as np
import pandas as pd
import pytest
from bio_curve_fit.logistic import FourPLLogistic
from bio_curve_fit.plotting import plot_standard_curve | 3,536 |
# set a seed for reproducibility
np.random.seed(42)
def test_fit_and_plot():
TEST_PARAMS = [1.0, 1.0, 2.0, 3.0]
x_data = np.logspace(0.00001, 7, 100, base=np.e) # type: ignore
# generate y-data based on the test parameters
|
# set a seed for reproducibility
np.random.seed(42)
def test_fit_and_plot():
TEST_PARAMS = [1.0, 1.0, 2.0, 3.0]
x_data = np.logspace(0.00001, 7, 100, base=np.e) # type: ignore
# generate y-data based on the test parameters | y_data = FourPLLogistic.four_param_logistic( | 0 | 2023-11-13 15:06:15+00:00 | 4k |
chziakas/backbone-learn | experiments/benchmark_decision_tree.py | [
{
"identifier": "BackboneDecisionTree",
"path": "backbone_learn/backbone/backbone_decision_tree.py",
"snippet": "class BackboneDecisionTree(BackboneSupervised):\n \"\"\"\n Specific implementation of the Backbone method for sparse regression.\n\n This class combines Pearson correlation for feature screening, L0BnB for exact solving, and Lasso for heuristic solving to construct a sparse regression model.\n\n Inherits from:\n BackboneBase (ABC): The abstract base class for backbone algorithms.\n \"\"\"\n\n def set_solvers(\n self,\n alpha=0.5,\n depth=3,\n time_limit=1000,\n _lambda=0.5,\n num_threads=None,\n obj_mode=\"acc\",\n n_bins=2,\n is_data_fit=False,\n ):\n \"\"\"\n Initializes the sparse regression method with specified components.\n\n Args:\n alpha (float): Proportion of features to retain after screening. Defaults to 0.5.\n depth (int, optional): Depth of BendersOCT tree. Defaults to 3.\n time_limit (int): Time limit for the optimization process.\n _lambda (float): Regularization parameter.\n num_threads (int or None): Number of threads for parallel processing.\n obj_mode (str): Objective mode, e.g., 'acc' for accuracy.\n n_bins (int): Number of bins for KBinsDiscretizer. Defaults to 2.\n is_data_fit (bool): Whether data are in the format required for OCT\n \"\"\"\n self.screen_selector = PearsonCorrelationSelector(alpha)\n self.exact_solver = BendersOCTDecisionTree(\n depth=depth,\n time_limit=time_limit,\n _lambda=_lambda,\n num_threads=num_threads,\n obj_mode=obj_mode,\n n_bins=n_bins,\n is_data_fit=is_data_fit,\n )\n self.heuristic_solver = CARTDecisionTree()"
},
{
"identifier": "CARTDecisionTree",
"path": "backbone_learn/heuristic_solvers/cart_decision_tree.py",
"snippet": "class CARTDecisionTree(HeuristicSolverBase):\n \"\"\"\n Implements a Classification And Regression Tree (CART) Decision Tree with cross-validation using AUC.\n This solver is a heuristic approach for fitting a decision tree model and identifying significant features.\n\n Attributes:\n _model (DecisionTreeClassifier): An instance of the sklearn DecisionTreeClassifier.\n _auc_score (float): The maximum AUC score obtained during cross-validation.\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Initializes the CARTDecisionTree with a DecisionTreeClassifier model.\n \"\"\"\n self._model = DecisionTreeClassifier()\n self._auc_score = None\n\n @property\n def auc_score(self) -> float:\n \"\"\"\n Returns the maximum AUC score obtained from cross-validation.\n\n Returns:\n float: The maximum AUC score.\n \"\"\"\n return self._auc_score\n\n def fit(self, X: np.ndarray, y: np.ndarray, cv_folds: int = 5, random_state: int = 0) -> None:\n \"\"\"\n Fits a CART Decision Tree model to the data using hyperparameter tuning with cross-validation and evaluates it using AUC.\n\n Args:\n X (np.ndarray): The input features as a NumPy array.\n y (np.ndarray): The target labels as a NumPy array.\n cv_folds (int): The number of folds to use for cross-validation.\n\n \"\"\"\n self._model.set_params(random_state=random_state)\n # Define the parameter grid for hyperparameter tuning\n param_grid = {\"max_depth\": [None, 5, 10, 20], \"min_samples_leaf\": [1, 2, 4]}\n\n # Initialize GridSearchCV with the model and parameter grid\n grid_search = GridSearchCV(\n self._model, param_grid, cv=cv_folds, scoring=\"roc_auc\", verbose=1\n )\n\n # Perform the grid search on the provided data\n grid_search.fit(X, y)\n\n # Update the model with the best found parameters\n self._model = grid_search.best_estimator_\n\n # Store the best AUC score\n self._auc_score = grid_search.best_score_\n\n def get_relevant_variables(self, threshold: float) -> np.ndarray:\n \"\"\"\n Identifies features with importance greater than a specified threshold.\n\n Args:\n threshold (float): The threshold for determining feature relevance.\n\n Returns:\n np.ndarray: An array of indices of relevant features.\n \"\"\"\n\n significant_indices = np.where(self._model.feature_importances_ > threshold)[0]\n return significant_indices\n\n def predict(self, X: np.ndarray) -> np.ndarray:\n \"\"\"\n Predicts the target labels for the given data.\n\n Args:\n X (np.ndarray): The input features as a NumPy array.\n\n Returns:\n np.ndarray: The predicted target labels.\n \"\"\"\n return self._model.predict(X)"
}
] | import time
from itertools import product
from sklearn.datasets import make_classification
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import KBinsDiscretizer, OneHotEncoder
from utils import save_results
from backbone_learn.backbone.backbone_decision_tree import BackboneDecisionTree
from backbone_learn.heuristic_solvers.cart_decision_tree import CARTDecisionTree | 1,816 |
# Define parameter ranges for Backbone parameters
alpha_range = [0.1, 0.5]
beta_range = [0.5, 0.9]
num_subproblems_range = [5, 10]
num_iterations_range = [1]
# Define parameter ranges for FlowOCT parameters
depth_range = [2]
_lambda_range = [0.5]
# Define dataset parameters
n_informative = 4
n_bins = 5
n_features_range = [20]
n_samples = 500
n_classes = 2
random_state = 17
time_limit = 3600
log_filename = "decision_tree_results.json"
results = []
# Experiment loop
for n_features in n_features_range:
# Generate synthetic classification data
X, y = make_classification(
n_samples=n_samples,
n_informative=n_informative,
n_features=n_features,
n_classes=n_classes,
random_state=random_state,
)
# Convert features to binary
est_X = KBinsDiscretizer(
n_bins=n_bins, encode="ordinal", strategy="quantile", random_state=random_state
)
est_X.fit(X)
X_bin = est_X.transform(X)
enc = OneHotEncoder(handle_unknown="error", drop="if_binary")
X_cat_enc = enc.fit_transform(X_bin).toarray()
# Split data into train and test sets
X_train, X_test, y_train, y_test = train_test_split(
X_cat_enc, y, test_size=0.2, random_state=random_state
)
for depth in depth_range:
# CARTDecisionTree model iteration for heuristic_model
heuristic_model = CARTDecisionTree(max_depth=depth)
start_time = time.time()
heuristic_model.fit(X_train, y_train, random_state=random_state)
runtime = time.time() - start_time
y_pred_heuristic = heuristic_model.predict(X_test)
auc_score_heuristic = roc_auc_score(y_test, y_pred_heuristic)
# Record heuristic model results
result_heuristic = {
"model_name": "heuristic",
"n_features": int(n_features * n_bins),
"n_samples": n_samples,
"n_informative": n_informative,
"depth": depth,
"AUC Score": auc_score_heuristic,
"Runtime (seconds)": runtime,
}
results.append(result_heuristic)
save_results(results, log_filename)
for _lambda in _lambda_range:
# BackboneDecisionTree model iterations for 'backbone' solution
for alpha, beta, num_subproblems, num_iterations in product(
alpha_range, beta_range, num_subproblems_range, num_iterations_range
):
|
# Define parameter ranges for Backbone parameters
alpha_range = [0.1, 0.5]
beta_range = [0.5, 0.9]
num_subproblems_range = [5, 10]
num_iterations_range = [1]
# Define parameter ranges for FlowOCT parameters
depth_range = [2]
_lambda_range = [0.5]
# Define dataset parameters
n_informative = 4
n_bins = 5
n_features_range = [20]
n_samples = 500
n_classes = 2
random_state = 17
time_limit = 3600
log_filename = "decision_tree_results.json"
results = []
# Experiment loop
for n_features in n_features_range:
# Generate synthetic classification data
X, y = make_classification(
n_samples=n_samples,
n_informative=n_informative,
n_features=n_features,
n_classes=n_classes,
random_state=random_state,
)
# Convert features to binary
est_X = KBinsDiscretizer(
n_bins=n_bins, encode="ordinal", strategy="quantile", random_state=random_state
)
est_X.fit(X)
X_bin = est_X.transform(X)
enc = OneHotEncoder(handle_unknown="error", drop="if_binary")
X_cat_enc = enc.fit_transform(X_bin).toarray()
# Split data into train and test sets
X_train, X_test, y_train, y_test = train_test_split(
X_cat_enc, y, test_size=0.2, random_state=random_state
)
for depth in depth_range:
# CARTDecisionTree model iteration for heuristic_model
heuristic_model = CARTDecisionTree(max_depth=depth)
start_time = time.time()
heuristic_model.fit(X_train, y_train, random_state=random_state)
runtime = time.time() - start_time
y_pred_heuristic = heuristic_model.predict(X_test)
auc_score_heuristic = roc_auc_score(y_test, y_pred_heuristic)
# Record heuristic model results
result_heuristic = {
"model_name": "heuristic",
"n_features": int(n_features * n_bins),
"n_samples": n_samples,
"n_informative": n_informative,
"depth": depth,
"AUC Score": auc_score_heuristic,
"Runtime (seconds)": runtime,
}
results.append(result_heuristic)
save_results(results, log_filename)
for _lambda in _lambda_range:
# BackboneDecisionTree model iterations for 'backbone' solution
for alpha, beta, num_subproblems, num_iterations in product(
alpha_range, beta_range, num_subproblems_range, num_iterations_range
): | backbone_model = BackboneDecisionTree( | 0 | 2023-11-18 14:28:12+00:00 | 4k |
openclimatefix/Open-Source-Quartz-Solar-Forecast | quartz_solar_forecast/eval/forecast.py | [
{
"identifier": "get_nwp",
"path": "quartz_solar_forecast/data.py",
"snippet": "def get_nwp(site: PVSite, ts: datetime, nwp_source: str = \"icon\") -> xr.Dataset:\n \"\"\"\n Get GFS NWP data for a point time space and time\n\n :param site: the PV site\n :param ts: the timestamp for when you want the forecast for\n :param nwp_source: the nwp data source. Either \"gfs\" or \"icon\". Defaults to \"icon\"\n :return: nwp forecast in xarray\n \"\"\"\n\n variables = [\n \"visibility\",\n \"windspeed_10m\",\n \"temperature_2m\",\n \"precipitation\",\n \"shortwave_radiation\",\n \"direct_radiation\",\n \"cloudcover_low\",\n \"cloudcover_mid\",\n \"cloudcover_high\",\n ]\n\n start = ts.date()\n end = start + pd.Timedelta(days=7)\n\n # Getting NWP, from OPEN METEO\n url_nwp_source = None\n if nwp_source == \"icon\":\n url_nwp_source = \"dwd-icon\"\n elif nwp_source == \"gfs\":\n url_nwp_source = \"gfs\"\n else:\n raise Exception(f'Source ({nwp_source}) must be either \"icon\" or \"gfs\"')\n\n # Pull data from the nwp_source provided \n url = (\n f\"https://api.open-meteo.com/v1/{url_nwp_source}?\"\n f\"latitude={site.latitude}&longitude={site.longitude}\"\n f\"&hourly={','.join(variables)}\"\n f\"&start_date={start}&end_date={end}\"\n )\n r = requests.get(url)\n d = json.loads(r.text)\n\n # If the nwp_source is ICON, get visibility data from GFS as its not available for icon on Open Meteo\n if nwp_source == \"icon\":\n url = (\n f\"https://api.open-meteo.com/v1/gfs?\"\n f\"latitude={site.latitude}&longitude={site.longitude}\"\n f\"&hourly=visibility\"\n f\"&start_date={start}&end_date={end}\"\n )\n r_gfs = requests.get(url)\n d_gfs = json.loads(r_gfs.text)\n\n # extract visibility data from gfs reponse\n gfs_visibility_data = d_gfs[\"hourly\"][\"visibility\"]\n\n # add visibility to the icon reponse to make a complete json file \n d[\"hourly\"][\"visibility\"] = gfs_visibility_data\n\n # convert data into xarray\n df = pd.DataFrame(d[\"hourly\"])\n df[\"time\"] = pd.to_datetime(df[\"time\"])\n df = df.rename(\n columns={\n \"visibility\": \"vis\",\n \"windspeed_10m\": \"si10\",\n \"temperature_2m\": \"t\",\n \"precipitation\": \"prate\",\n \"shortwave_radiation\": \"dswrf\",\n \"direct_radiation\": \"dlwrf\",\n \"cloudcover_low\": \"lcc\",\n \"cloudcover_mid\": \"mcc\",\n \"cloudcover_high\": \"hcc\",\n }\n )\n df = df.set_index(\"time\")\n data_xr = format_nwp_data(df, nwp_source, site)\n\n return data_xr"
},
{
"identifier": "make_pv_data",
"path": "quartz_solar_forecast/data.py",
"snippet": "def make_pv_data(site: PVSite, ts: pd.Timestamp) -> xr.Dataset:\n \"\"\"\n Make fake PV data for the site\n\n Later we could add PV history here\n\n :param site: the PV site\n :param ts: the timestamp of the site\n :return: The fake PV dataset in xarray form\n \"\"\"\n\n # make fake pv data, this is where we could add history of a pv system\n generation_wh = [[np.nan]]\n lon = [site.longitude]\n lat = [site.latitude]\n timestamp = [ts]\n pv_id = [1]\n\n da = xr.DataArray(\n data=generation_wh,\n dims=[\"pv_id\", \"timestamp\"],\n coords=dict(\n longitude=([\"pv_id\"], lon),\n latitude=([\"pv_id\"], lat),\n timestamp=timestamp,\n pv_id=pv_id,\n kwp=([\"pv_id\"], [site.capacity_kwp]),\n tilt=([\"pv_id\"], [site.tilt]),\n orientation=([\"pv_id\"], [site.orientation]),\n ),\n )\n da = da.to_dataset(name=\"generation_wh\")\n\n return da"
},
{
"identifier": "PVSite",
"path": "quartz_solar_forecast/pydantic_models.py",
"snippet": "class PVSite(BaseModel):\n latitude: float = Field(..., description=\"the latitude of the site\", ge=-90, le=90)\n longitude: float = Field(..., description=\"the longitude of the site\", ge=-180, le=180)\n capacity_kwp: float = Field(..., description=\"the capacity [kwp] of the site\", ge=0)\n tilt: float = Field(\n default=35,\n description=\"the tilt of the site [degrees], the panels' angle relative to horizontal ground\",\n ge=0,\n le=90,\n )\n orientation: float = Field(\n default=180,\n description=\"the orientation of the site [degrees], the angle between north and the direction the panels face, measured on the horizontal plane.\",\n ge=0,\n le=360,\n )"
},
{
"identifier": "forecast_v1",
"path": "quartz_solar_forecast/forecasts/v1.py",
"snippet": "def forecast_v1(nwp_source:str, nwp_xr:xr.Dataset, pv_xr:xr.Dataset, ts:pd.Timestamp, model=None):\n \"\"\"\n Run the forecast\n\n This runs the pv-site-prediction model from the psp library.\n \"\"\"\n\n if model is None:\n model = load_model(f\"{dir_path}/../models/model-0.3.0.pkl\")\n\n # format pv and nwp data\n pv_data_source = NetcdfPvDataSource(\n pv_xr,\n id_dim_name=\"pv_id\",\n timestamp_dim_name=\"timestamp\",\n rename={\"generation_wh\": \"power\", \"kwp\": \"capacity\"},\n ignore_pv_ids=[],\n )\n # make NwpDataSource\n nwp = NwpDataSource(nwp_xr, value_name=nwp_source)\n model.set_data_sources(pv_data_source=pv_data_source, nwp_data_sources={nwp_source: nwp})\n\n # make prediction.\n # Note pv_id=1 is arbitrary, but the pv_xr must have this in it.\n x = X(pv_id=\"1\", ts=ts)\n pred = model.predict(x)\n\n # format into timerange and put into pd dataframe\n times = pd.date_range(start=x.ts, periods=len(pred.powers), freq=\"15min\")\n pred_df = pd.DataFrame({\"power_wh\": pred.powers}, index=times)\n\n return pred_df"
},
{
"identifier": "format_nwp_data",
"path": "quartz_solar_forecast/data.py",
"snippet": "def format_nwp_data(df: pd.DataFrame, nwp_source:str, site: PVSite):\n data_xr = xr.DataArray(\n data=df.values,\n dims=[\"step\", \"variable\"],\n coords=dict(\n step=(\"step\", df.index - df.index[0]),\n variable=df.columns,\n ),\n )\n data_xr = data_xr.to_dataset(name=nwp_source)\n data_xr = data_xr.assign_coords(\n {\"x\": [site.longitude], \"y\": [site.latitude], \"time\": [df.index[0]]}\n )\n return data_xr"
}
] | import os
import pandas as pd
import xarray as xr
from psp.data_sources.nwp import NwpDataSource
from psp.data_sources.pv import NetcdfPvDataSource
from psp.serialization import load_model
from psp.typings import X
from quartz_solar_forecast.data import get_nwp, make_pv_data
from quartz_solar_forecast.pydantic_models import PVSite
from quartz_solar_forecast.forecasts.v1 import forecast_v1
from quartz_solar_forecast.data import format_nwp_data
from datetime import datetime | 2,428 |
dir_path = os.path.dirname(os.path.realpath(__file__))
def run_forecast(pv_df: pd.DataFrame, nwp_df: pd.DataFrame, nwp_source="ICON") -> pd.DataFrame:
"""
Run the forecast from NWP data
:param pv_df: the PV site data. This should have columns timestamp, id, latitude, longitude, and capacity
:param nwp_df: all the nwp data for the site and location. This should have the following columns
- timestamp: the timestamp of the site
- temperature_2m
- precipitation
- shortwave_radiation
- direct_radiation
- cloudcover_low
- cloudcover_mid
- cloudcover_high
maybe more
"""
# load model only once
model = load_model(f"{dir_path}/../models/model-0.3.0.pkl")
all_predictions = []
for i in range(len(pv_df)):
print(f"Running forecast for {i} of {len(pv_df)}")
pv_row = pv_df.iloc[i]
site = PVSite(
latitude=pv_row["latitude"],
longitude=pv_row["longitude"],
capacity_kwp=pv_row["capacity"],
)
nwp_site_df = nwp_df[
(nwp_df["pv_id"] == pv_row.pv_id) & (nwp_df["timestamp"] == pv_row.timestamp)
]
pv_id = pv_df["pv_id"][i]
ts = pv_df["timestamp"][i]
# format
for c in ["timestamp", "latitude", "longitude", "pv_id"]:
if c in nwp_site_df.columns:
nwp_site_df = nwp_site_df.drop(columns=c)
nwp_site_df.set_index("time", inplace=True, drop=True)
if isinstance(ts, str):
ts = datetime.fromisoformat(ts)
# make pv and nwp data from GFS
# TODO move this to model
print("Making pv and nwp data")
nwp_xr = format_nwp_data(df=nwp_site_df, nwp_source=nwp_source, site=site)
pv_xr = make_pv_data(site=site, ts=ts)
# run model
print('Running model')
|
dir_path = os.path.dirname(os.path.realpath(__file__))
def run_forecast(pv_df: pd.DataFrame, nwp_df: pd.DataFrame, nwp_source="ICON") -> pd.DataFrame:
"""
Run the forecast from NWP data
:param pv_df: the PV site data. This should have columns timestamp, id, latitude, longitude, and capacity
:param nwp_df: all the nwp data for the site and location. This should have the following columns
- timestamp: the timestamp of the site
- temperature_2m
- precipitation
- shortwave_radiation
- direct_radiation
- cloudcover_low
- cloudcover_mid
- cloudcover_high
maybe more
"""
# load model only once
model = load_model(f"{dir_path}/../models/model-0.3.0.pkl")
all_predictions = []
for i in range(len(pv_df)):
print(f"Running forecast for {i} of {len(pv_df)}")
pv_row = pv_df.iloc[i]
site = PVSite(
latitude=pv_row["latitude"],
longitude=pv_row["longitude"],
capacity_kwp=pv_row["capacity"],
)
nwp_site_df = nwp_df[
(nwp_df["pv_id"] == pv_row.pv_id) & (nwp_df["timestamp"] == pv_row.timestamp)
]
pv_id = pv_df["pv_id"][i]
ts = pv_df["timestamp"][i]
# format
for c in ["timestamp", "latitude", "longitude", "pv_id"]:
if c in nwp_site_df.columns:
nwp_site_df = nwp_site_df.drop(columns=c)
nwp_site_df.set_index("time", inplace=True, drop=True)
if isinstance(ts, str):
ts = datetime.fromisoformat(ts)
# make pv and nwp data from GFS
# TODO move this to model
print("Making pv and nwp data")
nwp_xr = format_nwp_data(df=nwp_site_df, nwp_source=nwp_source, site=site)
pv_xr = make_pv_data(site=site, ts=ts)
# run model
print('Running model') | pred_df = forecast_v1(nwp_source, nwp_xr, pv_xr, ts, model=model) | 3 | 2023-11-16 07:37:42+00:00 | 4k |
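A hedged usage sketch for the run_forecast() defined in this record: it builds the two input frames with the column names the loop above expects (pv_id, timestamp, latitude, longitude, capacity for pv_df; a matching pv_id/timestamp plus a "time" index and the weather variables for nwp_df). All values are illustrative, and the psp model file referenced in the function must be available for this to actually run.
import pandas as pd

pv_df = pd.DataFrame({
    "pv_id": [1],
    "timestamp": ["2021-06-01T12:00:00"],
    "latitude": [51.5],
    "longitude": [-0.1],
    "capacity": [4.0],
})

hours = pd.date_range("2021-06-01T12:00:00", periods=48, freq="1h")
nwp_df = pd.DataFrame({
    "pv_id": 1,
    "timestamp": "2021-06-01T12:00:00",
    "time": hours,
    "temperature_2m": 20.0,
    "precipitation": 0.0,
    "shortwave_radiation": 500.0,
    "direct_radiation": 300.0,
    "cloudcover_low": 10.0,
    "cloudcover_mid": 10.0,
    "cloudcover_high": 10.0,
})

# Each pv_df row is forecast against the nwp_df rows with the same pv_id/timestamp.
predictions = run_forecast(pv_df, nwp_df)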
newcastleuniversity/DISPEL | tests/providers/generic/activity/test_turning.py | [
{
"identifier": "Turn",
"path": "dispel/providers/generic/activity/turning.py",
"snippet": "class Turn:\n \"\"\"Class to encapsulate turns and turn related gyroscope data.\n\n Parameters\n ----------\n start\n The start date time of the turn.\n end\n The end date time of the turn.\n data\n The angular velocity time series of the turn. The data set should\n ensure to be at least of the duration of time provided with\n ``start`` and ``end``. It should be in rad/s and sampling has to be\n at a constant frequency.\n\n Attributes\n ----------\n start\n The start date time of the turn.\n end\n The end date time of the turn.\n \"\"\"\n\n def __init__(self, start: datetime, end: datetime, data: pd.Series):\n self.start = start\n self.end = end\n self._data = data\n\n def expand(self, threshold: float) -> \"Turn\":\n \"\"\"Expand the ``start`` and ``end`` of the turn to the given threshold.\n\n This expands the turn until start and end are below the provided\n threshold of turn speed. The expansion relies on the data provided\n during the construction of the turn and should not be confused with\n what is available via :data:`data`, which is always limited to the\n boundaries specified with :data:`start` and :data:`end`.\n\n Parameters\n ----------\n threshold\n The threshold until which to expand the turn.\n\n Returns\n -------\n Turn\n A new turn with expanded start and end time stamps based on\n associated data.\n \"\"\"\n below = self._data * self.direction < threshold\n\n before = below[: self.start] # type: ignore\n after = below[self.end :] # type: ignore\n\n start, end = self.start, self.end\n freq = self._data.index.freq.delta\n\n # adjust start\n if not before[before].empty:\n start = before[before].index[-1] + freq\n elif not before.any():\n start = before.index[0]\n\n # adjust end\n if not after[after].empty:\n end = after[after].index[0] - freq\n elif not after.any():\n end = after.index[-1]\n\n return Turn(start, end, self._data)\n\n @property\n def data(self) -> pd.Series:\n \"\"\"Get the angular velocity data associated with the turn.\n\n Returns\n -------\n pandas.Series\n Angular velocity series between :data:`start` and :data:`end` of\n the turn.\n \"\"\"\n return self._data[self.start : self.end] # type: ignore\n\n @property\n def duration(self) -> float:\n \"\"\"Get the duration of the turn.\n\n Returns\n -------\n float\n The duration of the turn in seconds.\n \"\"\"\n return (self.end - self.start).total_seconds()\n\n @property\n def direction(self) -> int:\n \"\"\"Get the direction of the turn.\n\n Returns\n -------\n int\n The direction of the turn. If the turning direction is positive\n ``1`` is returned. Otherwise ``-1``.\n \"\"\"\n return 1 if self.angle > 0 else -1\n\n @property\n def angle(self) -> float:\n \"\"\"Get the angle of the turn.\n\n Returns\n -------\n float\n The angle of the turn in rad/s.\n \"\"\"\n delta = self._data.index.freq.delta.total_seconds()\n return self.data.sum() * delta\n\n def merge(self, other) -> \"Turn\":\n \"\"\"Merge this turn with another one.\n\n Parameters\n ----------\n other\n The other turn to merge this one with.\n\n Returns\n -------\n Turn\n The new merged turn. The new turn uses the earlier start and later\n end of both, respectively. The data will be based on this turn.\n \"\"\"\n return Turn(min(self.start, other.start), max(self.end, other.end), self._data)\n\n def __repr__(self):\n return (\n f\"<Turn: {self.start} - {self.end} ({self.duration} s, \"\n f\"{self.angle.round(3)} rad, {self.direction})>\"\n )"
},
{
"identifier": "el_gohary_detect_turns",
"path": "dispel/providers/generic/activity/turning.py",
"snippet": "def el_gohary_detect_turns(data: pd.Series) -> List[Turn]:\n \"\"\"Detect turns based on the El Gohary et al. algorithm [1]_ .\n\n This method performs the detection after aligning and filtering the\n gyroscope time series (see El Gohary et al. algorithm 1, row 4).\n\n Parameters\n ----------\n data\n A pandas series of angular velocity used to search for turns.\n\n Returns\n -------\n List[Turn]\n A list of detected turns.\n\n References\n ----------\n .. [1] El-Gohary, Mahmoud, et al. \"Continuous monitoring of turning in\n patients with movement disability.\" Sensors 14.1 (2014): 356-369.\n https://doi.org/10.3390/s140100356\n \"\"\"\n # detect peaks\n peak_index, _ = signal.find_peaks(data.abs(), EL_GOHARY_TH_MAX)\n peaks = pd.Series(data[peak_index], index=data.index[peak_index])\n\n # initialize turns with peaks found\n turns = [Turn(x, x, data) for x in peaks.index.tolist()]\n\n # expand turns\n expanded_turns = [t.expand(EL_GOHARY_TH_MIN) for t in turns]\n\n # merge turns\n merged_turns = merge_turns(expanded_turns, EL_GOHARY_TH_MERGE)\n\n # filter turns\n filtered_turns = []\n for turn in merged_turns:\n if TH_MIN_TURN_DURATION < turn.duration < TH_MAX_TURN_DURATION and abs(\n turn.angle\n ) >= np.radians(TH_MIN_TURN_ANGLE_DEGREE):\n filtered_turns.append(turn)\n\n return filtered_turns"
}
] | import pandas as pd
import pytest
from dispel.providers.generic.activity.turning import Turn, el_gohary_detect_turns | 1,662 | """Tests for :mod:`dispel.providers.generic.activity.turning`."""
@pytest.fixture
def example_turn_data():
"""Get example turn data."""
index = pd.date_range("now", periods=61, freq="20ms")
values = [0] * 10 + list(range(20)) + [20] + list(reversed(range(20))) + [0] * 10
return pd.Series(values, index=index)
def test_turn_expand(example_turn_data):
"""Test :meth:`dispel.providers.generic.activity.turning.Turn.expand`."""
index = example_turn_data.index
| """Tests for :mod:`dispel.providers.generic.activity.turning`."""
@pytest.fixture
def example_turn_data():
"""Get example turn data."""
index = pd.date_range("now", periods=61, freq="20ms")
values = [0] * 10 + list(range(20)) + [20] + list(reversed(range(20))) + [0] * 10
return pd.Series(values, index=index)
def test_turn_expand(example_turn_data):
"""Test :meth:`dispel.providers.generic.activity.turning.Turn.expand`."""
index = example_turn_data.index | turn = Turn(index[30], index[30], example_turn_data) | 0 | 2023-11-14 10:06:46+00:00 | 4k |
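An illustrative sketch (reusing the same fixture data as above, an assumption rather than part of the test file) of the Turn.expand() behaviour this test exercises: a single-sample turn at the peak is widened until the angular velocity drops below the threshold, which here leaves exactly the non-zero samples around the peak inside the turn.
import pandas as pd
from dispel.providers.generic.activity.turning import Turn

index = pd.date_range("now", periods=61, freq="20ms")
values = [0] * 10 + list(range(20)) + [20] + list(reversed(range(20))) + [0] * 10
data = pd.Series(values, index=index)

# Single-sample turn at the peak, then widen it with a threshold of 1.
expanded = Turn(index[30], index[30], data).expand(threshold=1)
print(expanded.start, expanded.end, expanded.duration, expanded.angle)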
runDMCA/home-assistant-mazda | custom_components/mazda/pymazda/connection.py | [
{
"identifier": "decrypt_aes128cbc_buffer_to_str",
"path": "custom_components/mazda/pymazda/crypto_utils.py",
"snippet": "def decrypt_aes128cbc_buffer_to_str(data, key, iv): # noqa: D103\n cipher = Cipher(algorithms.AES(key.encode(\"ascii\")), modes.CBC(iv.encode(\"ascii\")))\n decryptor = cipher.decryptor()\n decrypted = decryptor.update(data) + decryptor.finalize()\n unpadder = padding.PKCS7(128).unpadder()\n return unpadder.update(decrypted) + unpadder.finalize()"
},
{
"identifier": "encrypt_aes128cbc_buffer_to_base64_str",
"path": "custom_components/mazda/pymazda/crypto_utils.py",
"snippet": "def encrypt_aes128cbc_buffer_to_base64_str(data, key, iv): # noqa: D103\n padder = padding.PKCS7(128).padder()\n padded_data = padder.update(data) + padder.finalize()\n cipher = Cipher(algorithms.AES(key.encode(\"ascii\")), modes.CBC(iv.encode(\"ascii\")))\n encryptor = cipher.encryptor()\n encrypted = encryptor.update(padded_data) + encryptor.finalize()\n return base64.b64encode(encrypted).decode(\"utf-8\")"
},
{
"identifier": "encrypt_rsaecbpkcs1_padding",
"path": "custom_components/mazda/pymazda/crypto_utils.py",
"snippet": "def encrypt_rsaecbpkcs1_padding(data, public_key): # noqa: D103\n public_key = serialization.load_der_public_key(base64.b64decode(public_key))\n return public_key.encrypt(data.encode(\"utf-8\"), asymmetric_padding.PKCS1v15())"
},
{
"identifier": "generate_usher_device_id_from_seed",
"path": "custom_components/mazda/pymazda/crypto_utils.py",
"snippet": "def generate_usher_device_id_from_seed(seed): # noqa: D103\n hash = hashlib.sha256(seed.encode()).hexdigest().upper()\n id = int(hash[0:8], 16)\n return \"ACCT\" + str(id)"
},
{
"identifier": "generate_uuid_from_seed",
"path": "custom_components/mazda/pymazda/crypto_utils.py",
"snippet": "def generate_uuid_from_seed(seed): # noqa: D103\n hash = hashlib.sha256(seed.encode()).hexdigest().upper()\n return (\n hash[0:8]\n + \"-\"\n + hash[8:12]\n + \"-\"\n + hash[12:16]\n + \"-\"\n + hash[16:20]\n + \"-\"\n + hash[20:32]\n )"
},
{
"identifier": "MazdaAccountLockedException",
"path": "custom_components/mazda/pymazda/exceptions.py",
"snippet": "class MazdaAccountLockedException(Exception):\n \"\"\"Raised when account is locked from too many login attempts.\"\"\"\n\n def __init__(self, status):\n \"\"\"Initialize exception.\"\"\"\n super().__init__(status)\n self.status = status"
},
{
"identifier": "MazdaAPIEncryptionException",
"path": "custom_components/mazda/pymazda/exceptions.py",
"snippet": "class MazdaAPIEncryptionException(Exception):\n \"\"\"Raised when server reports that the request is not encrypted properly.\"\"\"\n\n def __init__(self, status):\n \"\"\"Initialize exception.\"\"\"\n super().__init__(status)\n self.status = status"
},
{
"identifier": "MazdaAuthenticationException",
"path": "custom_components/mazda/pymazda/exceptions.py",
"snippet": "class MazdaAuthenticationException(Exception):\n \"\"\"Raised when email address or password are invalid during authentication.\"\"\"\n\n def __init__(self, status):\n \"\"\"Initialize exception.\"\"\"\n super().__init__(status)\n self.status = status"
},
{
"identifier": "MazdaConfigException",
"path": "custom_components/mazda/pymazda/exceptions.py",
"snippet": "class MazdaConfigException(Exception): # noqa: D100\n \"\"\"Raised when Mazda API client is configured incorrectly.\"\"\"\n\n def __init__(self, status):\n \"\"\"Initialize exception.\"\"\"\n super().__init__(status)\n self.status = status"
},
{
"identifier": "MazdaException",
"path": "custom_components/mazda/pymazda/exceptions.py",
"snippet": "class MazdaException(Exception):\n \"\"\"Raised when an unknown error occurs during API interaction.\"\"\"\n\n def __init__(self, status):\n \"\"\"Initialize exception.\"\"\"\n super().__init__(status)\n self.status = status"
},
{
"identifier": "MazdaLoginFailedException",
"path": "custom_components/mazda/pymazda/exceptions.py",
"snippet": "class MazdaLoginFailedException(Exception):\n \"\"\"Raised when login fails for an unknown reason.\"\"\"\n\n def __init__(self, status):\n \"\"\"Initialize exception.\"\"\"\n super().__init__(status)\n self.status = status"
},
{
"identifier": "MazdaRequestInProgressException",
"path": "custom_components/mazda/pymazda/exceptions.py",
"snippet": "class MazdaRequestInProgressException(Exception):\n \"\"\"Raised when a request fails because another request is already in progress.\"\"\"\n\n def __init__(self, status):\n \"\"\"Initialize exception.\"\"\"\n super().__init__(status)\n self.status = status"
},
{
"identifier": "MazdaTokenExpiredException",
"path": "custom_components/mazda/pymazda/exceptions.py",
"snippet": "class MazdaTokenExpiredException(Exception):\n \"\"\"Raised when server reports that the access token has expired.\"\"\"\n\n def __init__(self, status):\n \"\"\"Initialize exception.\"\"\"\n super().__init__(status)\n self.status = status"
},
{
"identifier": "SensorDataBuilder",
"path": "custom_components/mazda/pymazda/sensordata/sensor_data_builder.py",
"snippet": "class SensorDataBuilder: # noqa: D101\n def __init__(self): # noqa: D107\n self.sensor_collection_start_timestamp = datetime.datetime.now(datetime.UTC)\n self.device_info_time = random.randrange(3, 8) * 1000\n\n self.system_info = SystemInfo()\n self.system_info.randomize()\n\n self.touch_event_list = TouchEventList()\n self.key_event_list = KeyEventList()\n self.background_event_list = BackgroundEventList()\n\n self.performance_test_results = PerformanceTestResults()\n self.performance_test_results.randomize()\n\n self.sensor_data_encryptor = SensorDataEncryptor()\n\n def generate_sensor_data(self): # noqa: D102\n self.touch_event_list.randomize(self.sensor_collection_start_timestamp)\n self.key_event_list.randomize(self.sensor_collection_start_timestamp)\n self.background_event_list.randomize(self.sensor_collection_start_timestamp)\n\n random_number = random.randrange(-(2**31), 2**31)\n\n orientation_event = self.generate_orientation_data_aa()\n orientation_event_count = orientation_event.count(\";\")\n motion_event = self.generate_motion_data_aa()\n motion_event_count = motion_event.count(\";\")\n\n sensor_data = \"\"\n sensor_data += SDK_VERSION\n sensor_data += \"-1,2,-94,-100,\"\n sensor_data += self.system_info.to_string()\n sensor_data += \",\"\n sensor_data += str(self.system_info.get_char_code_sum())\n sensor_data += \",\"\n sensor_data += str(random_number)\n sensor_data += \",\"\n sensor_data += str(\n int(timestamp_to_millis(self.sensor_collection_start_timestamp) / 2)\n )\n sensor_data += \"-1,2,-94,-101,\"\n sensor_data += \"do_en\"\n sensor_data += \",\"\n sensor_data += \"dm_en\"\n sensor_data += \",\"\n sensor_data += \"t_en\"\n sensor_data += \"-1,2,-94,-102,\"\n sensor_data += self.generate_edited_text()\n sensor_data += \"-1,2,-94,-108,\"\n sensor_data += self.key_event_list.to_string()\n sensor_data += \"-1,2,-94,-117,\"\n sensor_data += self.touch_event_list.to_string()\n sensor_data += \"-1,2,-94,-111,\"\n sensor_data += orientation_event\n sensor_data += \"-1,2,-94,-109,\"\n sensor_data += motion_event\n sensor_data += \"-1,2,-94,-144,\"\n sensor_data += self.generate_orientation_data_ac()\n sensor_data += \"-1,2,-94,-142,\"\n sensor_data += self.generate_orientation_data_ab()\n sensor_data += \"-1,2,-94,-145,\"\n sensor_data += self.generate_motion_data_ac()\n sensor_data += \"-1,2,-94,-143,\"\n sensor_data += self.generate_motion_event()\n sensor_data += \"-1,2,-94,-115,\"\n sensor_data += self.generate_misc_stat(\n orientation_event_count, motion_event_count\n )\n sensor_data += \"-1,2,-94,-106,\"\n sensor_data += self.generate_stored_values_f()\n sensor_data += \",\"\n sensor_data += self.generate_stored_values_g()\n sensor_data += \"-1,2,-94,-120,\"\n sensor_data += self.generate_stored_stack_traces()\n sensor_data += \"-1,2,-94,-112,\"\n sensor_data += self.performance_test_results.to_string()\n sensor_data += \"-1,2,-94,-103,\"\n sensor_data += self.background_event_list.to_string()\n\n encrypted_sensor_data = self.sensor_data_encryptor.encrypt_sensor_data(\n sensor_data\n )\n return encrypted_sensor_data\n\n def generate_edited_text(self): # noqa: D102\n return \"\"\n\n def generate_orientation_data_aa(self): # noqa: D102\n return \"\"\n\n def generate_motion_data_aa(self): # noqa: D102\n return \"\"\n\n def generate_orientation_data_ac(self): # noqa: D102\n return \"\"\n\n def generate_orientation_data_ab(self): # noqa: D102\n return \"\"\n\n def generate_motion_data_ac(self): # noqa: D102\n return \"\"\n\n def generate_motion_event(self): # 
noqa: D102\n return \"\"\n\n def generate_misc_stat( # noqa: D102\n self, orientation_data_count, motion_data_count\n ): # noqa: D102\n sum_of_text_event_values = self.key_event_list.get_sum()\n sum_of_touch_event_timestamps_and_types = self.touch_event_list.get_sum()\n orientation_data_b = 0\n motion_data_b = 0\n overall_sum = (\n sum_of_text_event_values\n + sum_of_touch_event_timestamps_and_types\n + orientation_data_b\n + motion_data_b\n )\n\n now_timestamp = datetime.datetime.now(datetime.UTC)\n time_since_sensor_collection_start = int(\n (now_timestamp - self.sensor_collection_start_timestamp)\n / datetime.timedelta(milliseconds=1)\n )\n\n return \",\".join(\n [\n str(sum_of_text_event_values),\n str(sum_of_touch_event_timestamps_and_types),\n str(orientation_data_b),\n str(motion_data_b),\n str(overall_sum),\n str(time_since_sensor_collection_start),\n str(len(self.key_event_list.key_events)),\n str(len(self.touch_event_list.touch_events)),\n str(orientation_data_count),\n str(motion_data_count),\n str(self.device_info_time),\n str(random.randrange(5, 15) * 1000),\n \"0\",\n str(\n feistel_cipher(\n overall_sum,\n len(self.key_event_list.key_events)\n + len(self.touch_event_list.touch_events)\n + orientation_data_count\n + motion_data_count,\n time_since_sensor_collection_start,\n )\n ),\n str(timestamp_to_millis(self.sensor_collection_start_timestamp)),\n \"0\",\n ]\n )\n\n def generate_stored_values_f(self): # noqa: D102\n return \"-1\"\n\n def generate_stored_values_g(self): # noqa: D102\n return \"0\"\n\n def generate_stored_stack_traces(self): # noqa: D102\n return \"\""
}
] | import asyncio # noqa: D100
import base64
import hashlib
import json
import logging
import ssl
import time
import aiohttp
from urllib.parse import urlencode
from .crypto_utils import (
decrypt_aes128cbc_buffer_to_str,
encrypt_aes128cbc_buffer_to_base64_str,
encrypt_rsaecbpkcs1_padding,
generate_usher_device_id_from_seed,
generate_uuid_from_seed,
)
from .exceptions import (
MazdaAccountLockedException,
MazdaAPIEncryptionException,
MazdaAuthenticationException,
MazdaConfigException,
MazdaException,
MazdaLoginFailedException,
MazdaRequestInProgressException,
MazdaTokenExpiredException,
)
from .sensordata.sensor_data_builder import SensorDataBuilder | 3,266 |
ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ssl_context.load_default_certs()
ssl_context.set_ciphers(
"DEFAULT:!aNULL:!eNULL:!MD5:!3DES:!DES:!RC4:!IDEA:!SEED:!aDSS:!SRP:!PSK"
)
REGION_CONFIG = {
"MNAO": {
"app_code": "202007270941270111799",
"base_url": "https://0cxo7m58.mazda.com/prod/",
"usher_url": "https://ptznwbh8.mazda.com/appapi/v1/",
},
"MME": {
"app_code": "202008100250281064816",
"base_url": "https://e9stj7g7.mazda.com/prod/",
"usher_url": "https://rz97suam.mazda.com/appapi/v1/",
},
"MJO": {
"app_code": "202009170613074283422",
"base_url": "https://wcs9p6wj.mazda.com/prod/",
"usher_url": "https://c5ulfwxr.mazda.com/appapi/v1/",
},
}
IV = "0102030405060708"
SIGNATURE_MD5 = "C383D8C4D279B78130AD52DC71D95CAA"
APP_PACKAGE_ID = "com.interrait.mymazda"
USER_AGENT_BASE_API = "MyMazda-Android/8.5.2"
USER_AGENT_USHER_API = "MyMazda/8.5.2 (Google Pixel 3a; Android 11)"
APP_OS = "Android"
APP_VERSION = "8.5.2"
USHER_SDK_VERSION = "11.3.0700.001"
MAX_RETRIES = 4
class Connection:
"""Main class for handling MyMazda API connection."""
def __init__(self, email, password, region, websession=None): # noqa: D107
self.email = email
self.password = password
if region in REGION_CONFIG:
region_config = REGION_CONFIG[region]
self.app_code = region_config["app_code"]
self.base_url = region_config["base_url"]
self.usher_url = region_config["usher_url"]
else:
raise MazdaConfigException("Invalid region")
|
ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ssl_context.load_default_certs()
ssl_context.set_ciphers(
"DEFAULT:!aNULL:!eNULL:!MD5:!3DES:!DES:!RC4:!IDEA:!SEED:!aDSS:!SRP:!PSK"
)
REGION_CONFIG = {
"MNAO": {
"app_code": "202007270941270111799",
"base_url": "https://0cxo7m58.mazda.com/prod/",
"usher_url": "https://ptznwbh8.mazda.com/appapi/v1/",
},
"MME": {
"app_code": "202008100250281064816",
"base_url": "https://e9stj7g7.mazda.com/prod/",
"usher_url": "https://rz97suam.mazda.com/appapi/v1/",
},
"MJO": {
"app_code": "202009170613074283422",
"base_url": "https://wcs9p6wj.mazda.com/prod/",
"usher_url": "https://c5ulfwxr.mazda.com/appapi/v1/",
},
}
IV = "0102030405060708"
SIGNATURE_MD5 = "C383D8C4D279B78130AD52DC71D95CAA"
APP_PACKAGE_ID = "com.interrait.mymazda"
USER_AGENT_BASE_API = "MyMazda-Android/8.5.2"
USER_AGENT_USHER_API = "MyMazda/8.5.2 (Google Pixel 3a; Android 11)"
APP_OS = "Android"
APP_VERSION = "8.5.2"
USHER_SDK_VERSION = "11.3.0700.001"
MAX_RETRIES = 4
class Connection:
"""Main class for handling MyMazda API connection."""
def __init__(self, email, password, region, websession=None): # noqa: D107
self.email = email
self.password = password
if region in REGION_CONFIG:
region_config = REGION_CONFIG[region]
self.app_code = region_config["app_code"]
self.base_url = region_config["base_url"]
self.usher_url = region_config["usher_url"]
else:
raise MazdaConfigException("Invalid region")
| self.base_api_device_id = generate_uuid_from_seed(email) | 4 | 2023-11-14 01:42:43+00:00 | 4k |
NevermindNilas/TheAnimeScripter | src/cugan/cugan.py | [
{
"identifier": "UpCunet2x",
"path": "src/cugan/cugan_arch.py",
"snippet": "class UpCunet2x(nn.Module): # 完美tile,全程无损\n def __init__(self, in_channels=3, out_channels=3):\n super(UpCunet2x, self).__init__()\n self.unet1 = UNet1(in_channels, out_channels, deconv=True)\n self.unet2 = UNet2(in_channels, out_channels, deconv=False)\n\n def forward(self, x): # 1.7G\n n, c, h0, w0 = x.shape\n # if(tile_mode==0):#不tile\n\n ph = ((h0 - 1) // 2 + 1) * 2\n pw = ((w0 - 1) // 2 + 1) * 2\n x = F.pad(x, (18, 18 + pw - w0, 18, 18 + ph - h0), \"reflect\") # 需要保证被2整除\n x = self.unet1.forward(x)\n x0 = self.unet2.forward(x)\n x1 = F.pad(x, (-20, -20, -20, -20))\n x = torch.add(x0, x1)\n if w0 != pw or h0 != ph:\n x = x[:, :, : h0 * 2, : w0 * 2]\n return x"
},
{
"identifier": "UpCunet3x",
"path": "src/cugan/cugan_arch.py",
"snippet": "class UpCunet3x(nn.Module): # 完美tile,全程无损\n def __init__(self, in_channels=3, out_channels=3):\n super(UpCunet3x, self).__init__()\n self.unet1 = UNet1x3(in_channels, out_channels, deconv=True)\n self.unet2 = UNet2(in_channels, out_channels, deconv=False)\n\n def forward(self, x): # 1.7G\n n, c, h0, w0 = x.shape\n # if(tile_mode==0):#不tile\n\n ph = ((h0 - 1) // 4 + 1) * 4\n pw = ((w0 - 1) // 4 + 1) * 4\n x = F.pad(x, (14, 14 + pw - w0, 14, 14 + ph - h0), \"reflect\") # 需要保证被2整除\n x = self.unet1.forward(x)\n x0 = self.unet2.forward(x)\n x1 = F.pad(x, (-20, -20, -20, -20))\n x = torch.add(x0, x1)\n if w0 != pw or h0 != ph:\n x = x[:, :, : h0 * 3, : w0 * 3]\n return x"
},
{
"identifier": "UpCunet4x",
"path": "src/cugan/cugan_arch.py",
"snippet": "class UpCunet4x(nn.Module): # 完美tile,全程无损\n def __init__(self, in_channels=3, out_channels=3):\n super(UpCunet4x, self).__init__()\n self.unet1 = UNet1(in_channels, 64, deconv=True)\n self.unet2 = UNet2(64, 64, deconv=False)\n self.ps = nn.PixelShuffle(2)\n self.conv_final = nn.Conv2d(64, 12, 3, 1, padding=0, bias=True)\n\n def forward(self, x):\n n, c, h0, w0 = x.shape\n x00 = x\n # if(tile_mode==0):#不tile\n\n ph = ((h0 - 1) // 2 + 1) * 2\n pw = ((w0 - 1) // 2 + 1) * 2\n x = F.pad(x, (19, 19 + pw - w0, 19, 19 + ph - h0), \"reflect\") # 需要保证被2整除\n x = self.unet1.forward(x)\n x0 = self.unet2.forward(x)\n x1 = F.pad(x, (-20, -20, -20, -20))\n x = torch.add(x0, x1)\n x = self.conv_final(x)\n x = F.pad(x, (-1, -1, -1, -1))\n x = self.ps(x)\n if w0 != pw or h0 != ph:\n x = x[:, :, : h0 * 4, : w0 * 4]\n x += F.interpolate(x00, scale_factor=4, mode=\"nearest\")\n return x"
},
{
"identifier": "UpCunet2x_fast",
"path": "src/cugan/cugan_arch.py",
"snippet": "class UpCunet2x_fast(nn.Module): # 完美tile,全程无损\n def __init__(self, in_channels=3, out_channels=3):\n super(UpCunet2x_fast, self).__init__()\n self.unet1 = UNet1(12, 64, deconv=True)\n self.unet2 = UNet2(64, 64, deconv=False)\n self.ps = nn.PixelShuffle(2)\n self.conv_final = nn.Conv2d(64, 12, 3, 1, padding=0, bias=True)\n self.inv = pixel_unshuffle(2)\n\n def forward(self, x):\n n, c, h0, w0 = x.shape\n x00 = x\n # if(tile_mode==0):#不tile\n\n ph = ((h0 - 1) // 2 + 1) * 2\n pw = ((w0 - 1) // 2 + 1) * 2\n x = F.pad(x, (38, 38 + pw - w0, 38, 38 + ph - h0), \"reflect\") # 需要保证被2整除\n x = self.inv(x) # +18\n x = self.unet1.forward(x)\n x0 = self.unet2.forward(x)\n x1 = F.pad(x, (-20, -20, -20, -20))\n x = torch.add(x0, x1)\n x = self.conv_final(x)\n # with open(r\"C:\\Users\\liujing\\Desktop\\log.txt\",\"a+\")as f:\n # f.write(\"%s\"%(str(x.shape)))\n # f.flush()\n x = F.pad(x, (-1, -1, -1, -1))\n x = self.ps(x)\n if w0 != pw or h0 != ph:\n x = x[:, :, : h0 * 2, : w0 * 2]\n x += F.interpolate(x00, scale_factor=2, mode=\"nearest\")\n return x"
}
] | from .cugan_arch import UpCunet2x, UpCunet3x, UpCunet4x, UpCunet2x_fast
from realcugan_ncnn_py import Realcugan
import os
import requests
import torch
import torch.nn.functional as F | 1,897 |
class Cugan:
def __init__(self, upscale_method, upscale_factor, cugan_kind, half, width, height):
self.upscale_method = upscale_method
self.upscale_factor = upscale_factor
self.cugan_kind = cugan_kind
self.half = half
self.width = width
self.height = height
self.handle_models()
def handle_models(self):
if self.upscale_method == "shufflecugan":
self.model = UpCunet2x_fast(in_channels=3, out_channels=3)
self.filename = "sudo_shuffle_cugan_9.584.969.pth"
else:
model_path_prefix = "cugan"
model_path_suffix = "-latest"
model_path_middle = f"up{self.upscale_factor}x"
|
class Cugan:
def __init__(self, upscale_method, upscale_factor, cugan_kind, half, width, height):
self.upscale_method = upscale_method
self.upscale_factor = upscale_factor
self.cugan_kind = cugan_kind
self.half = half
self.width = width
self.height = height
self.handle_models()
def handle_models(self):
if self.upscale_method == "shufflecugan":
self.model = UpCunet2x_fast(in_channels=3, out_channels=3)
self.filename = "sudo_shuffle_cugan_9.584.969.pth"
else:
model_path_prefix = "cugan"
model_path_suffix = "-latest"
model_path_middle = f"up{self.upscale_factor}x" | model_map = {2: UpCunet2x, 3: UpCunet3x, 4: UpCunet4x} | 1 | 2023-11-14 22:10:11+00:00 | 4k |
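A hedged sketch of how a scale-factor-to-architecture map like the next_line above is typically used: select the class for the requested factor, instantiate it, and load the matching checkpoint. The checkpoint filename below is illustrative only, not a confirmed path from this repository.
import torch

model_map = {2: UpCunet2x, 3: UpCunet3x, 4: UpCunet4x}
upscale_factor = 2  # e.g. a 2x model
model = model_map[upscale_factor](in_channels=3, out_channels=3)

# Hypothetical checkpoint name; the real file is resolved by handle_models() above.
state_dict = torch.load("cugan-up2x-latest.pth", map_location="cpu")
model.load_state_dict(state_dict)
model.eval()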
ubertidavide/fastbots | tests/test_firefox_bot.py | [
{
"identifier": "FirefoxBot",
"path": "fastbots/firefox_bot.py",
"snippet": "class FirefoxBot(Bot):\n \"\"\"\n Firefox Bot\n\n Class representing the Firefox Bot implementation.\n\n Attributes:\n _driver (WebDriver): The WebDriver instance for Firefox.\n _wait (WebDriverWait): The WebDriverWait instance for Firefox.\n\n Methods:\n __init__(): Initializes all attributes of the Firefox Bot instance.\n save_screenshot(): Saves the browser's screenshot to a PNG file.\n __load_preferences__(): Loads Firefox preferences from a JSON file.\n __load_options__(): Loads Firefox options, including user agent and download directory.\n __load_driver__(): Loads and configures options for the Firefox driver.\n\n Example:\n ```python\n with FirefoxBot() as bot:\n bot.save_screenshot()\n ```\n \"\"\"\n\n def __init__(self) -> None:\n \"\"\"\n Initializes all attributes of the Firefox Bot instance.\n \"\"\"\n super().__init__()\n\n # Load the configured driver\n self._driver: WebDriver = self.__load_driver__()\n\n # Default wait\n self._wait: WebDriverWait = WebDriverWait(driver=self._driver, timeout=config.SELENIUM_DEFAULT_WAIT, poll_frequency=1)\n\n def save_screenshot(self) -> str:\n \"\"\"\n Saves the browser's screenshot to a PNG file.\n\n The file path can be specified in the settings.\n \"\"\"\n if not Path(config.BOT_SCREENSHOT_DOWNLOAD_FOLDER_PATH).exists():\n Path(config.BOT_SCREENSHOT_DOWNLOAD_FOLDER_PATH).mkdir(exist_ok=True, parents=True)\n\n file_path: Path = Path(config.BOT_SCREENSHOT_DOWNLOAD_FOLDER_PATH) / f'{datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")}.png'\n self._driver.get_full_page_screenshot_as_file(str(file_path.absolute()))\n return str(file_path.absolute())\n\n def __load_preferences__(self) -> FirefoxProfile:\n \"\"\"\n Load Firefox Preferences\n\n Load all the preferences for Firefox stored in a JSON file, specified in the config.\n\n Returns:\n FirefoxProfile: The Firefox profile with loaded preferences.\n \"\"\"\n # Initialize an empty profile for the settings\n firefox_profile: FirefoxProfile = FirefoxProfile()\n\n if Path(config.BOT_PREFERENCES_FILE_PATH).exists():\n # Load all the preferences from the file\n with open(config.BOT_PREFERENCES_FILE_PATH, 'r') as file:\n data = json.load(file)\n\n # Iterate through all the data in the file\n for key, value in data.items():\n firefox_profile.set_preference(key, value)\n\n return firefox_profile\n\n def __load_options__(self) -> FirefoxOptions:\n \"\"\"\n Load Firefox Options\n\n Load all the default Firefox options.\n\n Returns:\n FirefoxOptions: The configured Firefox options.\n \"\"\"\n # Firefox configurations\n firefox_options: FirefoxOptions = FirefoxOptions()\n \n # Add all the arguments specified in the config\n if config.BOT_ARGUMENTS != 'None':\n arguments = config.BOT_ARGUMENTS.replace(' ', '').strip().split(',')\n for argument in arguments:\n firefox_options.add_argument(argument)\n\n firefox_profile: FirefoxProfile = self.__load_preferences__()\n\n # Basic static settings: download directory as temp and user agent from config\n firefox_profile.set_preference('general.useragent.override', config.BOT_USER_AGENT)\n firefox_profile.set_preference('browser.download.folderList', 2)\n firefox_profile.set_preference('browser.download.dir', self._temp_dir)\n\n # Add the profile to the Firefox options\n firefox_options.profile = firefox_profile\n\n return firefox_options\n \n def __load_driver__(self) -> WebDriver:\n \"\"\"\n Load Firefox Driver\n\n Load and configure all the options for the Firefox driver.\n\n Returns:\n WebDriver: The configured WebDriver instance for Firefox.\n \"\"\"\n 
seleniumwire_options = {\n 'disable_capture': config.SELENIUM_DISABLE_CAPTURE,\n 'enable_har': config.SELENIUM_ENABLE_HAR_CAPTURE\n }\n\n if config.BOT_PROXY_ENABLED:\n # Proxy settings\n seleniumwire_options['proxy'] = {\n 'http': config.BOT_HTTP_PROXY,\n 'https': config.BOT_HTTPS_PROXY,\n }\n \n # Initialize Firefox with options\n return Firefox(\n options=self.__load_options__(),\n seleniumwire_options=seleniumwire_options\n )"
},
{
"identifier": "config",
"path": "fastbots/config.py",
"snippet": "class DriverType(Enum):\n FIREFOX = 1\n CHROME = 2\nENV_DEVELOPMENT: str = 'development'\nENV_RELEASE: str = 'release'\nLOG_LEVEL: int = config('LOGLEVEL', default=logging.DEBUG, cast=int)\nENV: str = config('ENV', default=ENV_DEVELOPMENT, cast=str)\nPROJECT_NAME: str = config('PROJECT_NAME', default='fastbot', cast=str)\nAPP_VERSION: str = config('APP_VERSION', default='0.2.6', cast=str)\nBOT_DRIVER_TYPE: DriverType = config('BOT_DRIVER_TYPE', default='firefox', cast=DriverType.from_str)\nBOT_DOWNLOAD_FOLDER_PATH: str = config('BOT_DOWNLOAD_FOLDER_PATH', default=None, cast=str)\nBOT_STRICT_DOWNLOAD_WAIT: bool = config('BOT_STRICT_DOWNLOAD_WAIT', default=True, cast=bool)\nBOT_ARGUMENTS: str = config('BOT_ARGUMENTS', default=None, cast=str)\nBOT_USER_AGENT: str = config('BOT_USER_AGENT', default=f'{PROJECT_NAME} {APP_VERSION}', cast=str)\nBOT_PROXY_ENABLED: bool = config('BOT_PROXY_ENABLED', default=False, cast=bool)\nBOT_HTTP_PROXY: str = config('BOT_HTTP_PROXY', default=None, cast=str)\nBOT_HTTPS_PROXY: str = config('BOT_HTTPS_PROXY', default=BOT_HTTP_PROXY, cast=str)\nBOT_SCREENSHOT_DOWNLOAD_FOLDER_PATH: str = config('BOT_SCREENSHOT_DOWNLOAD_FOLDER_PATH', default='debug/', cast=str)\nBOT_HTML_DOWNLOAD_FOLDER_PATH: str = config('BOT_HTML_DOWNLOAD_FOLDER_PATH', default='debug/', cast=str)\nBOT_COOKIES_FILE_PATH: str = config('BOT_COOKIES_FILE_PATH', default='cookies.pkl', cast=str)\nBOT_PREFERENCES_FILE_PATH: str = config('BOT_PREFERENCES_FILE_PATH', default='preferences.json', cast=str)\nBOT_MAX_RETRIES: int = config('BOT_MAX_RETRIES', default=2, cast=int)\nBOT_RETRY_DELAY: int = config('BOT_RETRY_DELAY', default=10, cast=int)\nSELENIUM_GLOBAL_IMPLICIT_WAIT: int = config('SELENIUM_GLOBAL_IMPLICIT_WAIT', default=5, cast=int)\nSELENIUM_EXPECTED_URL_CHECK: bool = config('SELENIUM_EXPECTED_URL_CHECK', default=True, cast=bool)\nSELENIUM_EXPECTED_URL_TIMEOUT: int = config('SELENIUM_EXPECTED_URL_TIMEOUT', default=5, cast=int)\nSELENIUM_DEFAULT_WAIT: int = config('SELENIUM_DEFAULT_WAIT', default=5, cast=int)\nSELENIUM_FILE_DOWNLOAD_TIMEOUT: int = config('SELENIUM_FILE_DOWNLOAD_TIMEOUT', default=20, cast=int)\nSELENIUM_LOCATORS_FILE: str = config('SELENIUM_LOCATORS_FILE', default='locators.ini', cast=str)\nSELENIUM_DISABLE_CAPTURE: bool = config('SELENIUM_DISABLE_CAPTURE', default=True, cast=bool)\nSELENIUM_IN_SCOPE_CAPTURE: str = config('SELENIUM_IN_SCOPE_CAPTURE', default=None, cast=str)\nSELENIUM_ENABLE_HAR_CAPTURE: bool = config('SELENIUM_ENABLE_HAR_CAPTURE', default=False, cast=bool)\nCAPSOLVER_API_KEY: str = config('CAPSOLVER_API_KEY', default=None, cast=str)\n def from_str(label):"
},
{
"identifier": "Payload",
"path": "fastbots/payload.py",
"snippet": "class Payload:\n \"\"\"\n Payload class for managing input data, downloads, and output data.\n \"\"\"\n\n input_data: Dict[str, str] = field(default_factory=dict)\n downloads: List[str] = field(default_factory=list)\n output_data: Dict[str, str] = field(default_factory=dict)"
}
] | import pytest
from configparser import ConfigParser
from pathlib import Path
from seleniumwire.webdriver import Firefox
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.firefox.options import Options as FirefoxOptions
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.remote.webdriver import WebDriver
from fastbots.firefox_bot import FirefoxBot
from fastbots import config, Payload | 2,009 |
@pytest.fixture
def bot():
with FirefoxBot() as bot:
yield bot
def test_driver(bot):
assert isinstance(bot.driver, WebDriver)
def test_wait(bot):
assert isinstance(bot.wait, WebDriverWait)
def test_payload(bot):
|
@pytest.fixture
def bot():
with FirefoxBot() as bot:
yield bot
def test_driver(bot):
assert isinstance(bot.driver, WebDriver)
def test_wait(bot):
assert isinstance(bot.wait, WebDriverWait)
def test_payload(bot): | assert isinstance(bot.payload, Payload) | 2 | 2023-11-16 00:12:09+00:00 | 4k |
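A small illustrative sketch of the preferences file that FirefoxBot.__load_preferences__ (shown in the context above) reads: a flat JSON object mapping Firefox about:config keys to values. The specific keys below are examples, not settings required by fastbots.
import json

example_prefs = {
    "pdfjs.disabled": True,
    "browser.download.manager.showWhenStarting": False,
    "intl.accept_languages": "en-US, en",
}

# Written to the path configured as BOT_PREFERENCES_FILE_PATH (default: preferences.json).
with open("preferences.json", "w") as f:
    json.dump(example_prefs, f, indent=2)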
intel/llm-on-ray | rlhf/rl_algo/ppo/ppo_rlhf.py | [
{
"identifier": "generate_response",
"path": "common/agentenv/rlhf_env.py",
"snippet": "def generate_response(\n model: torch.nn.Module, \n *, \n input_ids: torch.tensor, \n max_length:int, \n eos_token_id: int\n):\n \"\"\"Generate a response using the model.\"\"\"\n generated_sequence = []\n probs_list = []\n model_in = torch.clone(input_ids)\n with torch.no_grad():\n for i in range(max_length):\n # Get the logits using the forward() method\n logits = model(model_in).logits\n\n # Get the logits of the last token\n next_token_logits = logits[:, -1, :]\n\n # Apply a softmax function to the logits to get the probabilities\n probs = next_token_logits.softmax(dim=-1)\n\n # if not probs_list:\n # prev_probs = logits[:, :-1, :].softmax(dim=-1)\n # probs_list.extend(prev_probs.unbind(1))\n\n # Sample the next token from the probability distribution\n next_token = torch.multinomial(probs, num_samples=1)\n\n # Append the probabilities and the generated token\n generated_sequence.append(next_token)\n # probs_list.append(probs)\n\n # Update the input_ids with the generated token\n model_in = torch.cat([model_in, next_token], dim=-1)\n\n if next_token.item() == eos_token_id:\n break\n\n # Decode and print the generated sequence\n generated_tokens = torch.cat(generated_sequence, dim=-1)\n\n # Stack the probabilities tensor --> this resulted in N - 1 probs missing the last one, to get that we have to do another forward pass, so we may as well do one round in the end to compute all the probs.\n # probs_tensor = torch.concat(probs_list, dim=0)\n\n logit_tensor = model(model_in).logits\n\n return {\n \"sequence\": torch.cat([input_ids, generated_tokens], dim=-1),\n \"logits\": logit_tensor,\n \"n_input_tokens\": input_ids.shape[-1],\n \"n_generated_tokens\": generated_tokens.shape[-1],\n }"
},
{
"identifier": "Buffer",
"path": "rlhf/rl_algo/ppo/rlhf_buffer.py",
"snippet": "class Buffer:\n \"\"\"This buffer should work for both torch and numpy types in the buffer items.\n \n Its job is to collect simple BufferItems but then upon calling \n convert_to_sample_batch, figure out the padding required to create blocks for \n tensors inside a SampleBatch.\n \"\"\"\n \n def __init__(self):\n self._buffer = []\n self._framework = None\n\n def append(self, item: BufferItem):\n if self._framework is None:\n self._framework = torch if isinstance(item.obs[\"input_ids\"], torch.Tensor) else np\n else:\n if self._framework == torch:\n assert isinstance(item.obs[\"input_ids\"], torch.Tensor), \"The buffer items should be of the same framework.\"\n else:\n assert isinstance(item.obs[\"input_ids\"], np.ndarray), \"The buffer items should be of the same framework.\"\n\n\n # under the same key, the values should be of the same length\n for k in (SampleBatch.ACTIONS, SampleBatch.OBS):\n flattened = tree.flatten(getattr(item, k))\n for i in range(len(flattened) - 1):\n if not flattened[i].shape[0] == flattened[i+1].shape[0]:\n raise ValueError(\"The values under the same key should be of the same length.\")\n \n self._buffer.append(item)\n \n def convert_to_sample_batch(self, padding_type: str = \"right\") -> SampleBatch:\n assert padding_type in (\"left\", \"right\"), \"The padding should be either 'left' or 'right'.\"\n keys = BufferItem.__dataclass_fields__.keys()\n\n sample_batch_dict = {}\n for key in keys:\n values = []\n for item in self._buffer: \n val = getattr(item, key)\n\n if isinstance(val, float):\n val = torch.tensor(val) if self._framework == torch else np.array(val)\n elif isinstance(val, dict):\n val = NestedDict(val)\n \n values.append(val)\n\n # some values may not have the same sequence length, so we need to pad them\n if key in (SampleBatch.ACTIONS, SampleBatch.OBS):\n # we should first obtain the max length for each value. Remember that each value is possibly a nested dict where the values are tensors.\n\n # TODO (Kourosh): This is not optimal since we are flattening the whole \n # tree structure, while all we need is the DFS traversal of the tree \n # and obtaining the first leave.\n\n # Each v is a nested dict where the leave values can be iterated easily\n max_length = max(next(iter(v.values())).shape[0] for v in values)\n \n for item in values:\n for nested_key, val in item.items():\n if val.shape[0] < max_length:\n padding = self._framework.zeros(\n (max_length - val.shape[0], *val.shape[1:]), \n dtype=val.dtype\n )\n\n if padding_type == \"left\":\n if self._framework == torch:\n item[nested_key] = torch.cat((padding, val), 0)\n else:\n item[nested_key] = np.concatenate((padding, val), 0)\n else:\n if self._framework == torch:\n item[nested_key] = torch.cat((val, padding), 0)\n else:\n item[nested_key] = np.concatenate((val, padding), 0)\n \n values = tree.map_structure(lambda *x: self._framework.stack(x,0), *values)\n sample_batch_dict[key] = values.asdict() if isinstance(values, NestedDict) else values\n\n return SampleBatch(sample_batch_dict)"
},
{
"identifier": "BufferItem",
"path": "rlhf/rl_algo/ppo/rlhf_buffer.py",
"snippet": "class BufferItem:\n # TODO (Kourosh): These names have to match those in the SampleBatch and \n # PostProcessing.\n obs: dict # keys (shape): input_ids (T,), attention_mask (T,)\n actions: dict # keys: sequence (T,), response_mask (T,), logits (T, VS), attention_mask (T, )\n infos: dict \n rewards: float # scalar (python float)\n value_targets: float # scalar (python float)\n advantages: float # scalar (python float)"
}
] | import torch
import numpy as np
import sys, os
from typing import List, Optional, Type, Union, TYPE_CHECKING
from ray.rllib.algorithms import Algorithm, AlgorithmConfig
from ray.rllib.algorithms.ppo import PPO
from ray.rllib.policy.policy import Policy
from ray.rllib.policy.sample_batch import SampleBatch, concat_samples, DEFAULT_POLICY_ID
from ray.rllib.core.learner.learner_group import LearnerGroup
from ray.rllib.evaluation.postprocessing import Postprocessing
from ray.rllib.utils.metrics import (
NUM_AGENT_STEPS_SAMPLED, NUM_ENV_STEPS_SAMPLED, LEARNER_STATS_KEY
)
from ray.rllib.evaluation.metrics import RolloutMetrics
from common.agentenv.rlhf_env import generate_response
from .rlhf_buffer import Buffer, BufferItem
from ray.rllib.evaluation.metrics import (
collect_episodes,
collect_metrics,
summarize_episodes,
) | 1,886 |
sys.path.append(os.path.join(os.path.dirname(__file__), '../../../'))
class RLHFSampler:
"""This sampler is a local sampler for LLMEnv.
The underlying env is an LLMEnv which creates a batch of prompts and the agent has
to generate a response for each prompt. Then the env evaluate those responses and
returns a reward signal.
"""
def __init__(self, module, env):
self._env = env
self._module = module
self.max_generation_length = self._env.max_generation_length
def sample(self, batch_size: int, **kwargs) -> SampleBatch:
# TODO (Kourosh): Can we use batch inference here?
|
sys.path.append(os.path.join(os.path.dirname(__file__), '../../../'))
class RLHFSampler:
"""This sampler is a local sampler for LLMEnv.
The underlying env is an LLMEnv which creates a batch of prompts and the agent has
to generate a response for each prompt. Then the env evaluate those responses and
returns a reward signal.
"""
def __init__(self, module, env):
self._env = env
self._module = module
self.max_generation_length = self._env.max_generation_length
def sample(self, batch_size: int, **kwargs) -> SampleBatch:
# TODO (Kourosh): Can we use batch inference here? | batches = Buffer() | 1 | 2023-11-13 05:08:21+00:00 | 4k |
chuzhumin98/LLM_Eval | PRE/eval.py | [
{
"identifier": "DataLoader",
"path": "PRE/data.py",
"snippet": "class DataLoader:\n '''\n The loader to load for evaluated task, with given prompt template to generate a series of prompts feeding for each LLM\n '''\n def __init__(self, args):\n self.path_data = args['path_data'] # the load path for the data\n self.format = args['format'] # the data format, csv (need a title line) or json (each line is a single data item)\n self.path_prompt = args['path_prompt'] if 'path_prompt' in args else None # the path of prompt template. In the prompt template, using {{key}} for the replacement of the key. For example, in the prompt \"You need answer a question: {{question}}\", the \"question\" field need to be included in the data\n if not os.path.exists(self.path_data):\n raise FileExistsError(\"Load task data failed: file not exist!\")\n assert self.format in ['csv', 'json']\n \n \n def generate_reader(self):\n if self.format == 'csv':\n with open(self.path_data, encoding='utf-8') as f:\n gen = csv.DictReader(f, skipinitialspace=True)\n elif self.format == 'json':\n gen = open(self.path_data, encoding='utf-8')\n else:\n raise Exception(\"Invalid data format\")\n return gen\n \n def get_prompt(self):\n if self.path_prompt is None:\n raise Exception(\"Exception: missing argument path_prompt\")\n if not os.path.exists(self.path_prompt):\n raise FileExistsError(\"Load task prompt template failed: file not exist!\")\n self.template_prompt = open(self.path_prompt, encoding='utf-8').read().strip()\n \n gen = self.generate_reader()\n \n for row in gen:\n if self.format == 'json':\n item = json.loads(row.strip())\n else:\n item = row\n \n prompt = self.template_prompt\n for key in item:\n prompt = prompt.replace(\"{{\" + key + \"}}\", item[key])\n yield prompt # a generator to return each prompt\n \n def get_task_items(self):\n data_list = []\n gen = self.generate_reader()\n for row in gen:\n if self.format == 'json':\n item = json.loads(row.strip())\n elif self.format == 'csv':\n item = dict(row)\n data_list.append(item)\n return data_list"
},
{
"identifier": "Auto_API",
"path": "PRE/api.py",
"snippet": "class Auto_API:\n @staticmethod\n def instantiate_api(api_type, args) -> LLM_API:\n for at, _API in API_type2class_list:\n if api_type == at:\n return _API(args)\n raise Exception(f\"Invalid api_type: {api_type}\")"
},
{
"identifier": "parse_response",
"path": "PRE/utils.py",
"snippet": "def parse_response(response, parse_type, nominal_list=None, nominal_ticks=None):\n '''\n parse_type: int, float or str\n if parse_type = str, then required parameter nominal_list and nominal_ticks\n nominal_list: a series of nominal types, its name\n nomianl_ticks: the corresponding nominal number (int)\n '''\n assert parse_type in ['int', 'float', 'str']\n if parse_type == 'int':\n nums = re.findall(r\"-?\\d+\", response)\n if len(nums) == 0:\n return None\n return int(nums[0])\n elif parse_type == 'float':\n nums = re.findall(r\"-?\\d+\\.?\\d*\", response)\n if len(nums) == 0:\n return None\n return int(nums[0])\n elif parse_type == 'str':\n appear_pos, cur_idx = math.inf, -1\n response = response.lower()\n for idx, label in enumerate(nominal_list):\n pos = response.find(label.lower())\n if pos != -1: # really appear!\n if pos < appear_pos:\n appear_pos, cur_idx = pos, idx\n if cur_idx == -1:\n return None\n else:\n return nominal_ticks[cur_idx]"
}
] | import os
import yaml
import warnings
import json
import copy
import sys
import numpy as np
from PRE.data import DataLoader
from PRE.api import Auto_API
from PRE.utils import parse_response | 2,141 | '''
The implementation of the peer review and result aggregation module
'''
base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(base_dir)
class PEER_REVIEW:
'''
Conduct peer review, process for one prompt (pairwise or pointwise)
'''
def __init__(self, args) -> None:
self.parser_type = args['parser_type'] # int, float, str
self.task_name = args['task_name']
self.save_dir = args['save_dir']
if self.parser_type == 'str':
self.nominal_list = [nn.strip() for nn in args['nominal_list'].split(',')]
self.nominal_ticks = [int(nn.strip()) for nn in args['nominal_ticks'].split(',')]
else:
self.nominal_list, self.nominal_ticks = None, None
def peer_review_single_round(self, reviewers, prompts):
'''
used in gaming sampling strategy
reviewers: LLM config list
prompts: an array, each item is a dict with key "prompt"
return a dict denoting the results of each evaluated task under all the reviewers; key: reviewer model name, value: the original responses of this reviewer
'''
apis_reviewer = [Auto_API.instantiate_api(config_api['api_type'], config_api) for config_api in reviewers]
responses_dict = dict()
for _, api in enumerate(apis_reviewer):
records_thisapi = []
for prompt in prompts:
response = api.chat(prompt['prompt'])
result = parse_response(response, self.parser_type, self.nominal_list, self.nominal_ticks)
item = {"response": response, "result": result}
item.update(prompt)
records_thisapi.append(item)
responses_dict[api.model_name] = records_thisapi
return responses_dict
def peer_review_batch(self, reviewers, prompts) -> None:
'''
used in the full evaluation strategy
reviewers: LLM config list
save the evaluation responses of each reviewer to separate files
'''
apis_reviewer = [Auto_API.instantiate_api(config_api['api_type'], config_api) for config_api in reviewers]
os.makedirs(f"{self.save_dir}/evaluation_responses", exist_ok=True)
for _, api in enumerate(apis_reviewer):
path_out = f"{self.save_dir}/evaluation_responses/{self.task_name}_{api.model_name}.json"
if os.path.exists(path_out):
data = open(path_out).readlines()
else:
data = []
if len(data) < len(prompts):
fout = open(path_out, 'w')
for line in data:
fout.write(line)
for prompt in prompts[len(data):]:
response_orig = api.chat(prompt['prompt'])
result_parse = parse_response(response_orig, self.parser_type, self.nominal_list, self.nominal_ticks)
line = {"response": response_orig, 'result': result_parse}
line.update(prompt)
line = json.dumps(line)
data.append(line)
fout.write(line + '\n')
fout.close()
return None
class EvalDataLoader:
def __init__(self, args) -> None:
self.task_name = args['task_name']
self.mode = args['mode'] # pointwise, pairwise
'''
In pointwise mode, the prompt is required to include the key "#source" (the LLM that generates the response). The expected evaluation response is an integer or float number;
In pairwise mode, the prompt is required to include the key "#source1" (LLM 1 that generates the response) and the key "#source2" (LLM 2 that generates the response). The expected evaluation response is one of three possible tokens, meaning -1 (1 is better), 0 (tied), or 1 (2 is better) respectively
'''
# self.dirpath = args['dirpath_response'] # the load path for the response results
self.save_dir = args['save_dir'] # the evaluation result save dir, In full strategy, the evaluation save filename = [save_dir] / evaluation_responses / [task_name]_[model_name].json, each line is one result with json {modelA: modelA_name, modelB: modelB_name, task_id: task_id, response: str, result: int/float}; in gaming strategy, the evaluation save filename = [save_dir] / evaluation_responses / [task_name]__[game strategy].json, each line is one compete result with json {modelA: modelA_name, modelB: modelB_name, task_ids: list, response: {reviewer_name: {responses: list, results: list} for each reviewer}}
self.path_evaluate_prompt = args['path_evaluate_prompt'] if 'path_evaluate_prompt' in args else None # the path of evaluate prompt template. In the prompt template, using {{key}} for the replacement of the key. For example, in the prompt "You need answer a question: {{question}}", the "question" field need to be included in the data
### load task data and response data
path_config_task_data = args['config_task_data']
if not os.path.exists(path_config_task_data):
raise FileExistsError("Load task_data config failed: file not exist!")
config_task = yaml.load(open(path_config_task_data, 'r'), Loader=yaml.FullLoader) # single task config
| '''
The implementation of the peer review and result aggregation module
'''
base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(base_dir)
class PEER_REVIEW:
'''
Conduct peer review, process for one prompt (pairwise or pointwise)
'''
def __init__(self, args) -> None:
self.parser_type = args['parser_type'] # int, float, str
self.task_name = args['task_name']
self.save_dir = args['save_dir']
if self.parser_type == 'str':
self.nominal_list = [nn.strip() for nn in args['nominal_list'].split(',')]
self.nominal_ticks = [int(nn.strip()) for nn in args['nominal_ticks'].split(',')]
else:
self.nominal_list, self.nominal_ticks = None, None
def peer_review_single_round(self, reviewers, prompts):
'''
used in gaming sampling strategy
reviewers: LLM config list
prompts: an array, each item is a dict with key "prompt"
return a dict denoting the results of each evaluated task under all the reviewers; key: reviewer model name, value: the original responses of this reviewer
'''
apis_reviewer = [Auto_API.instantiate_api(config_api['api_type'], config_api) for config_api in reviewers]
responses_dict = dict()
for _, api in enumerate(apis_reviewer):
records_thisapi = []
for prompt in prompts:
response = api.chat(prompt['prompt'])
result = parse_response(response, self.parser_type, self.nominal_list, self.nominal_ticks)
item = {"response": response, "result": result}
item.update(prompt)
records_thisapi.append(item)
responses_dict[api.model_name] = records_thisapi
return responses_dict
def peer_review_batch(self, reviewers, prompts) -> None:
'''
used in the full evaluation strategy
reviewers: LLM config list
save the evaluation responses of each reviewer to separate files
'''
apis_reviewer = [Auto_API.instantiate_api(config_api['api_type'], config_api) for config_api in reviewers]
os.makedirs(f"{self.save_dir}/evaluation_responses", exist_ok=True)
for _, api in enumerate(apis_reviewer):
path_out = f"{self.save_dir}/evaluation_responses/{self.task_name}_{api.model_name}.json"
if os.path.exists(path_out):
data = open(path_out).readlines()
else:
data = []
if len(data) < len(prompts):
fout = open(path_out, 'w')
for line in data:
fout.write(line)
for prompt in prompts[len(data):]:
response_orig = api.chat(prompt['prompt'])
result_parse = parse_response(response_orig, self.parser_type, self.nominal_list, self.nominal_ticks)
line = {"response": response_orig, 'result': result_parse}
line.update(prompt)
line = json.dumps(line)
data.append(line)
fout.write(line + '\n')
fout.close()
return None
class EvalDataLoader:
def __init__(self, args) -> None:
self.task_name = args['task_name']
self.mode = args['mode'] # pointwise, pairwise
'''
In pointwise mode, the prompt is required to include the key "#source" (the LLM that generated the response). The expected evaluation response is an integer or float number;
In pairwise mode, the prompt is required to include the key "#source1" (LLM 1 that generated the response) and the key "#source2" (LLM 2 that generated the response). The expected evaluation response is one of three possible tokens, meaning -1 (1 is better), 0 (tied), or 1 (2 is better) respectively
'''
# self.dirpath = args['dirpath_response'] # the load path for the response results
self.save_dir = args['save_dir'] # the evaluation result save dir, In full strategy, the evaluation save filename = [save_dir] / evaluation_responses / [task_name]_[model_name].json, each line is one result with json {modelA: modelA_name, modelB: modelB_name, task_id: task_id, response: str, result: int/float}; in gaming strategy, the evaluation save filename = [save_dir] / evaluation_responses / [task_name]__[game strategy].json, each line is one compete result with json {modelA: modelA_name, modelB: modelB_name, task_ids: list, response: {reviewer_name: {responses: list, results: list} for each reviewer}}
self.path_evaluate_prompt = args['path_evaluate_prompt'] if 'path_evaluate_prompt' in args else None # the path of evaluate prompt template. In the prompt template, using {{key}} for the replacement of the key. For example, in the prompt "You need answer a question: {{question}}", the "question" field need to be included in the data
### load task data and response data
path_config_task_data = args['config_task_data']
if not os.path.exists(path_config_task_data):
raise FileExistsError("Load task_data config failed: file does not exist!")
config_task = yaml.load(open(path_config_task_data, 'r'), Loader=yaml.FullLoader) # single task config | data_loader = DataLoader(config_task) # a task data loader | 0 | 2023-11-16 18:40:23+00:00 | 4k |
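The PEER_REVIEW class in this entry relies on a parse_response helper configured by parser_type, nominal_list and nominal_ticks, but that helper is not shown here. The following is a minimal, assumed sketch of such a parser; the actual implementation in the repository may differ.

```python
# Assumed sketch of a parse_response-style helper; the real function in the
# repository is not shown here and may behave differently.
import re
from typing import List, Optional, Union

def parse_response_sketch(response: str, parser_type: str,
                          nominal_list: Optional[List[str]] = None,
                          nominal_ticks: Optional[List[int]] = None) -> Union[int, float, None]:
    if parser_type == "int":
        match = re.search(r"-?\d+", response)
        return int(match.group()) if match else None
    if parser_type == "float":
        match = re.search(r"-?\d+(?:\.\d+)?", response)
        return float(match.group()) if match else None
    if parser_type == "str" and nominal_list and nominal_ticks:
        # Map the first nominal label found in the response to its tick value.
        for label, tick in zip(nominal_list, nominal_ticks):
            if label.lower() in response.lower():
                return tick
    return None

print(parse_response_sketch("Verdict: 1 is better", "str",
                            ["1 is better", "tied", "2 is better"], [-1, 0, 1]))  # -1
```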
python-thread/thread | src/thread/decorators/_threaded.py | [
{
"identifier": "Thread",
"path": "src/thread/thread.py",
"snippet": "class Thread(threading.Thread, Generic[_Target_P, _Target_T]):\n \"\"\"\n Wraps python's `threading.Thread` class\n ---------------------------------------\n\n Type-Safe and provides more functionality on top\n \"\"\"\n\n status : ThreadStatus\n hooks : List[HookFunction]\n _returned_value: Data_Out\n\n errors : List[Exception]\n ignore_errors : Sequence[type[Exception]]\n suppress_errors: bool\n\n # threading.Thread stuff\n _initialized : bool\n _run : Callable\n\n\n def __init__(\n self,\n target: TargetFunction[_Target_P, _Target_T],\n args: Sequence[Data_In] = (),\n kwargs: Mapping[str, Data_In] = {},\n ignore_errors: Sequence[type[Exception]] = (),\n suppress_errors: bool = False,\n\n name: Optional[str] = None,\n daemon: bool = False,\n group = None,\n *overflow_args: Overflow_In,\n **overflow_kwargs: Overflow_In\n ) -> None:\n \"\"\"\n Initializes a thread\n\n Parameters\n ----------\n :param target: This should be a function that takes in anything and returns anything\n :param args: This should be an interable sequence of arguments parsed to the `target` function (e.g. tuple('foo', 'bar'))\n :param kwargs: This should be the kwargs pased to the `target` function (e.g. dict(foo = 'bar'))\n :param ignore_errors: This should be an interable sequence of all exceptions to ignore. To ignore all exceptions, parse tuple(Exception)\n :param suppress_errors: This should be a boolean indicating whether exceptions will be raised, else will only write to internal `errors` property\n :param name: This is an argument parsed to `threading.Thread`\n :param daemon: This is an argument parsed to `threading.Thread`\n :param group: This does nothing right now, but should be left as None\n :param *: These are arguments parsed to `threading.Thread`\n :param **: These are arguments parsed to `thread.Thread`\n \"\"\"\n _target = self._wrap_target(target)\n self._returned_value = None\n self.status = 'Idle'\n self.hooks = []\n\n self.errors = []\n self.ignore_errors = ignore_errors\n self.suppress_errors = suppress_errors\n\n super().__init__(\n target = _target,\n args = args,\n kwargs = kwargs,\n name = name,\n daemon = daemon,\n group = group,\n *overflow_args,\n **overflow_kwargs\n )\n\n\n def _wrap_target(self, target: TargetFunction[_Target_P, _Target_T]) -> TargetFunction[_Target_P, Union[_Target_T, None]]:\n \"\"\"Wraps the target function\"\"\"\n @wraps(target)\n def wrapper(*args: _Target_P.args, **kwargs: _Target_P.kwargs) -> Union[_Target_T, None]:\n self.status = 'Running'\n\n global Threads\n Threads.add(self)\n\n try:\n self._returned_value = target(*args, **kwargs)\n except Exception as e:\n if not any(isinstance(e, ignore) for ignore in self.ignore_errors):\n self.status = 'Errored'\n self.errors.append(e)\n return\n \n self.status = 'Invoking hooks'\n self._invoke_hooks()\n Threads.remove(self)\n self.status = 'Completed'\n return wrapper\n \n\n def _invoke_hooks(self) -> None:\n \"\"\"Invokes hooks in the thread\"\"\"\n errors: List[Tuple[Exception, str]] = []\n for hook in self.hooks:\n try:\n hook(self._returned_value)\n except Exception as e:\n if not any(isinstance(e, ignore) for ignore in self.ignore_errors):\n errors.append((\n e,\n hook.__name__\n ))\n\n if len(errors) > 0:\n self.errors.append(exceptions.HookRuntimeError(\n None, errors\n ))\n\n\n def _handle_exceptions(self) -> None:\n \"\"\"Raises exceptions if not suppressed in the main thread\"\"\"\n if self.suppress_errors:\n return\n \n for e in self.errors:\n raise e\n \n\n def global_trace(self, frame, event: 
str, arg) -> Optional[Callable]:\n if event == 'call':\n return self.local_trace\n \n def local_trace(self, frame, event: str, arg):\n if self.status == 'Kill Scheduled' and event == 'line':\n print('KILLED ident: %s' % self.ident)\n self.status = 'Killed'\n raise SystemExit()\n return self.local_trace\n \n def _run_with_trace(self) -> None:\n \"\"\"This will replace `threading.Thread`'s `run()` method\"\"\"\n if not self._run:\n raise exceptions.ThreadNotInitializedError('Running `_run_with_trace` may cause unintended behaviour, run `start` instead')\n \n sys.settrace(self.global_trace)\n self._run()\n \n\n @property\n def result(self) -> _Target_T:\n \"\"\"\n The return value of the thread\n \n Raises\n ------\n ThreadNotInitializedError: If the thread is not intialized\n ThreadNotRunningError: If the thread is not running\n ThreadStillRunningError: If the thread is still running\n \"\"\"\n if not self._initialized:\n raise exceptions.ThreadNotInitializedError()\n if self.status in ['Idle', 'Killed']:\n raise exceptions.ThreadNotRunningError()\n \n self._handle_exceptions()\n if self.status in ['Invoking hooks', 'Completed']:\n return self._returned_value\n else:\n raise exceptions.ThreadStillRunningError()\n \n \n def is_alive(self) -> bool:\n \"\"\"\n See if thread is still alive\n\n Raises\n ------\n ThreadNotInitializedError: If the thread is not intialized\n \"\"\"\n if not self._initialized:\n raise exceptions.ThreadNotInitializedError()\n return super().is_alive()\n \n\n def add_hook(self, hook: HookFunction[_Target_T]) -> None:\n \"\"\"\n Adds a hook to the thread\n -------------------------\n Hooks are executed automatically after a successful thread execution.\n The returned value is parsed directly into the hook\n\n Parameters\n ----------\n :param hook: This should be a function which takes the output value of `target` and should return None\n \"\"\"\n self.hooks.append(hook)\n\n\n def join(self, timeout: Optional[float] = None) -> bool:\n \"\"\"\n Halts the current thread execution until a thread completes or exceeds the timeout\n\n Parameters\n ----------\n :param timeout: The maximum time allowed to halt the thread\n\n Returns\n -------\n :returns bool: True if the thread is no-longer alive\n\n Raises\n ------\n ThreadNotInitializedError: If the thread is not initialized\n ThreadNotRunningError: If the thread is not running\n \"\"\"\n if not self._initialized:\n raise exceptions.ThreadNotInitializedError()\n \n if self.status == ['Idle', 'Killed']:\n raise exceptions.ThreadNotRunningError()\n\n super().join(timeout)\n self._handle_exceptions()\n return not self.is_alive()\n \n\n def get_return_value(self) -> _Target_T:\n \"\"\"\n Halts the current thread execution until the thread completes\n\n Returns\n -------\n :returns Any: The return value of the target function\n \"\"\"\n self.join()\n return self.result\n \n\n def kill(self, yielding: bool = False, timeout: float = 5) -> bool:\n \"\"\"\n Schedules a thread to be killed\n\n Parameters\n ----------\n :param yielding: If true, halts the current thread execution until the thread is killed\n :param timeout: The maximum number of seconds to wait before exiting\n\n Returns\n -------\n :returns bool: False if the it exceeded the timeout\n \n Raises\n ------\n ThreadNotInitializedError: If the thread is not initialized\n ThreadNotRunningError: If the thread is not running\n \"\"\"\n if not self.is_alive():\n raise exceptions.ThreadNotRunningError()\n \n self.status = 'Kill Scheduled'\n if not yielding:\n return True\n \n 
start = time.perf_counter()\n while self.status != 'Killed':\n time.sleep(0.01)\n if (time.perf_counter() - start) >= timeout:\n return False\n\n return True\n \n\n def start(self) -> None:\n \"\"\"\n Starts the thread\n \n Raises\n ------\n ThreadNotInitializedError: If the thread is not intialized\n ThreadStillRunningError: If there already is a running thread\n \"\"\"\n if self.is_alive():\n raise exceptions.ThreadStillRunningError()\n \n self._run = self.run\n self.run = self._run_with_trace\n super().start()"
},
{
"identifier": "Overflow_In",
"path": "src/thread/_types.py",
"snippet": ""
}
] | from functools import wraps
from ..thread import Thread
from .._types import Overflow_In, Data_In
from typing import Callable, Mapping, Sequence, Optional, Union, overload
from typing_extensions import ParamSpec, TypeVar | 2,383 | """
## Threaded
Documentation: https://thread.ngjx.org
"""
T = TypeVar('T')
P = ParamSpec('P')
TargetFunction = Callable[P, T]
NoParamReturn = Callable[P, Thread[P, T]]
WithParamReturn = Callable[[TargetFunction[P, T]], NoParamReturn[P, T]]
FullParamReturn = Callable[P, Thread[P, T]]
@overload
def threaded(__function: TargetFunction[P, T]) -> NoParamReturn[P, T]: ...
@overload
def threaded(
*,
| """
## Threaded
Documentation: https://thread.ngjx.org
"""
T = TypeVar('T')
P = ParamSpec('P')
TargetFunction = Callable[P, T]
NoParamReturn = Callable[P, Thread[P, T]]
WithParamReturn = Callable[[TargetFunction[P, T]], NoParamReturn[P, T]]
FullParamReturn = Callable[P, Thread[P, T]]
@overload
def threaded(__function: TargetFunction[P, T]) -> NoParamReturn[P, T]: ...
@overload
def threaded(
*, | args: Sequence[Data_In] = (), | 1 | 2023-11-12 21:01:21+00:00 | 4k |
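The overloads in this entry describe a threaded decorator that turns a plain function into one returning a Thread[P, T] handle. As an illustration of that pattern only, not the package's real implementation, a standard-library sketch could look like this:

```python
# Illustrative pattern only, built on the standard library; not the actual
# implementation of the threaded decorator from the package above.
import threading
from functools import wraps

def threaded_sketch(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        result = {}

        def target():
            result["value"] = func(*args, **kwargs)

        t = threading.Thread(target=target)
        t.start()
        # Small helper so the caller can block and read the return value.
        t.get_return_value = lambda: (t.join(), result.get("value"))[1]
        return t
    return wrapper

@threaded_sketch
def add(a, b):
    return a + b

handle = add(2, 3)
print(handle.get_return_value())  # 5
```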
victor0089/AirBnB_clone_v2 | models/engine/db_storage.py | [
{
"identifier": "Base",
"path": "models/base_model.py",
"snippet": "class BaseModel:\n def __init__(self, *args, **kwargs):\n def __str__(self):\n def __repr__(self):\n def save(self):\n def to_dict(self):\n def delete(self):"
},
{
"identifier": "State",
"path": "models/state.py",
"snippet": "class State(BaseModel, Base):\n \"\"\"This is the class for State\n Attributes:\n name: input name\n \"\"\"\n __tablename__ = \"states\"\n name = Column(String(128), nullable=False)\n cities = relationship(\"City\", cascade='all, delete, delete-orphan',\n backref=\"state\")\n\n @property\n def cities(self):\n var = models.storage.all()\n lista = []\n result = []\n for key in var:\n city = key.replace('.', ' ')\n city = shlex.split(city)\n if (city[0] == 'City'):\n lista.append(var[key])\n for elem in lista:\n if (elem.state_id == self.id):\n result.append(elem)\n return (result)"
},
{
"identifier": "City",
"path": "models/city.py",
"snippet": "class City(BaseModel, Base):\n \"\"\"This is the class for City\n Attributes:\n state_id: The state id\n name: input name\n \"\"\"\n __tablename__ = \"cities\"\n name = Column(String(128), nullable=False)\n state_id = Column(String(60), ForeignKey('states.id'), nullable=False)\n places = relationship(\"Place\", cascade='all, delete, delete-orphan',\n backref=\"cities\")"
},
{
"identifier": "User",
"path": "models/user.py",
"snippet": "class User(BaseModel, Base):\n \"\"\"This is the class for user\n Attributes:\n email: email address\n password: password for you login\n first_name: first name\n last_name: last name\n \"\"\"\n __tablename__ = \"users\"\n email = Column(String(128), nullable=False)\n password = Column(String(128), nullable=False)\n first_name = Column(String(128))\n last_name = Column(String(128))\n places = relationship(\"Place\", cascade='all, delete, delete-orphan',\n backref=\"user\")\n reviews = relationship(\"Review\", cascade='all, delete, delete-orphan',\n backref=\"user\")"
},
{
"identifier": "Place",
"path": "models/place.py",
"snippet": "class Place(BaseModel, Base):\n \"\"\"This is the class for Place\n Attributes:\n city_id: city id\n user_id: user id\n name: name input\n description: string of description\n number_rooms: number of room in int\n number_bathrooms: number of bathrooms in int\n max_guest: maximum guest in int\n price_by_night:: pice for a staying in int\n latitude: latitude in flaot\n longitude: longitude in float\n amenity_ids: list of Amenity ids\n \"\"\"\n __tablename__ = \"places\"\n city_id = Column(String(60), ForeignKey(\"cities.id\"), nullable=False)\n user_id = Column(String(60), ForeignKey(\"users.id\"), nullable=False)\n name = Column(String(128), nullable=False)\n description = Column(String(1024))\n number_rooms = Column(Integer, nullable=False, default=0)\n number_bathrooms = Column(Integer, nullable=False, default=0)\n max_guest = Column(Integer, nullable=False, default=0)\n price_by_night = Column(Integer, nullable=False, default=0)\n latitude = Column(Float)\n longitude = Column(Float)\n amenity_ids = []\n\n if getenv(\"HBNB_TYPE_STORAGE\") == \"db\":\n reviews = relationship(\"Review\", cascade='all, delete, delete-orphan',\n backref=\"place\")\n\n amenities = relationship(\"Amenity\", secondary=place_amenity,\n viewonly=False,\n back_populates=\"place_amenities\")\n else:\n @property\n def reviews(self):\n \"\"\" Returns list of reviews.id \"\"\"\n var = models.storage.all()\n lista = []\n result = []\n for key in var:\n review = key.replace('.', ' ')\n review = shlex.split(review)\n if (review[0] == 'Review'):\n lista.append(var[key])\n for elem in lista:\n if (elem.place_id == self.id):\n result.append(elem)\n return (result)\n\n @property\n def amenities(self):\n \"\"\" Returns list of amenity ids \"\"\"\n return self.amenity_ids\n\n @amenities.setter\n def amenities(self, obj=None):\n \"\"\" Appends amenity ids to the attribute \"\"\"\n if type(obj) is Amenity and obj.id not in self.amenity_ids:\n self.amenity_ids.append(obj.id)"
},
{
"identifier": "Review",
"path": "models/review.py",
"snippet": "class Review(BaseModel, Base):\n \"\"\"This is the class for Review\n Attributes:\n place_id: place id\n user_id: user id\n text: review description\n \"\"\"\n __tablename__ = \"reviews\"\n text = Column(String(1024), nullable=False)\n place_id = Column(String(60), ForeignKey(\"places.id\"), nullable=False)\n user_id = Column(String(60), ForeignKey(\"users.id\"), nullable=False)"
},
{
"identifier": "Amenity",
"path": "models/amenity.py",
"snippet": "class Amenity(BaseModel, Base):\n \"\"\"This is the class for Amenity\n Attributes:\n name: input name\n \"\"\"\n __tablename__ = \"amenities\"\n name = Column(String(128), nullable=False)\n place_amenities = relationship(\"Place\", secondary=place_amenity)"
}
] | from os import getenv
from sqlalchemy.orm import sessionmaker, scoped_session
from sqlalchemy import (create_engine)
from sqlalchemy.ext.declarative import declarative_base
from models.base_model import Base
from models.state import State
from models.city import City
from models.user import User
from models.place import Place
from models.review import Review
from models.amenity import Amenity | 1,697 | #!/usr/bin/python3
""" new class for sqlAlchemy """
class DBStorage:
""" create tables in environmental"""
__engine = None
__session = None
def __init__(self):
'''instantiate new dbstorage instance'''
HBNB_MYSQL_USER = getenv('HBNB_MYSQL_USER')
HBNB_MYSQL_PWD = getenv('HBNB_MYSQL_PWD')
HBNB_MYSQL_HOST = getenv('HBNB_MYSQL_HOST')
HBNB_MYSQL_DB = getenv('HBNB_MYSQL_DB')
HBNB_ENV = getenv('HBNB_ENV')
self.__engine = create_engine(
'mysql+mysqldb://{}:{}@{}/{}'.format(
HBNB_MYSQL_USER,
HBNB_MYSQL_PWD,
HBNB_MYSQL_HOST,
HBNB_MYSQL_DB
), pool_pre_ping=True)
if HBNB_ENV == 'test':
Base.metadata.drop_all(self.__engine)
def all(self, cls=None):
"""returns a dictionary
Return:
returns a dictionary of __object
"""
dic = {}
if cls:
if type(cls) is str:
cls = eval(cls)
query = self.__session.query(cls)
for elem in query:
key = "{}.{}".format(type(elem).__name__, elem.id)
dic[key] = elem
else:
| #!/usr/bin/python3
""" new class for sqlAlchemy """
class DBStorage:
""" create tables in environmental"""
__engine = None
__session = None
def __init__(self):
'''instantiate new dbstorage instance'''
HBNB_MYSQL_USER = getenv('HBNB_MYSQL_USER')
HBNB_MYSQL_PWD = getenv('HBNB_MYSQL_PWD')
HBNB_MYSQL_HOST = getenv('HBNB_MYSQL_HOST')
HBNB_MYSQL_DB = getenv('HBNB_MYSQL_DB')
HBNB_ENV = getenv('HBNB_ENV')
self.__engine = create_engine(
'mysql+mysqldb://{}:{}@{}/{}'.format(
HBNB_MYSQL_USER,
HBNB_MYSQL_PWD,
HBNB_MYSQL_HOST,
HBNB_MYSQL_DB
), pool_pre_ping=True)
if HBNB_ENV == 'test':
Base.metadata.drop_all(self.__engine)
def all(self, cls=None):
"""returns a dictionary
Return:
returns a dictionary of __object
"""
dic = {}
if cls:
if type(cls) is str:
cls = eval(cls)
query = self.__session.query(cls)
for elem in query:
key = "{}.{}".format(type(elem).__name__, elem.id)
dic[key] = elem
else: | lista = [State, City, User, Place, Review, Amenity] | 6 | 2023-11-17 07:59:13+00:00 | 4k |
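DBStorage.all() in this entry keys every object as "<ClassName>.<id>". A small standalone sketch of that keying convention, using plain objects in place of SQLAlchemy query results:

```python
# Standalone sketch of the "<ClassName>.<id>" keying convention used by
# DBStorage.all(); plain objects stand in for SQLAlchemy query results.
class FakeState:
    def __init__(self, id):
        self.id = id

def to_storage_dict(objects):
    dic = {}
    for obj in objects:
        key = "{}.{}".format(type(obj).__name__, obj.id)
        dic[key] = obj
    return dic

print(list(to_storage_dict([FakeState("001"), FakeState("002")])))
# ['FakeState.001', 'FakeState.002']
```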
believethehype/nostrdvm | nostr_dvm/tasks/advanced_search.py | [
{
"identifier": "DVMTaskInterface",
"path": "nostr_dvm/interfaces/dvmtaskinterface.py",
"snippet": "class DVMTaskInterface:\n NAME: str\n KIND: int\n TASK: str = \"\"\n FIX_COST: float = 0\n PER_UNIT_COST: float = 0\n PRIVATE_KEY: str\n PUBLIC_KEY: str\n DVM = DVM\n SUPPORTS_ENCRYPTION = True # DVMs build with this framework support encryption, but others might not.\n ACCEPTS_CASHU = True # DVMs build with this framework support encryption, but others might not.\n dvm_config: DVMConfig\n admin_config: AdminConfig\n dependencies = []\n\n def __init__(self, name, dvm_config: DVMConfig, nip89config: NIP89Config, admin_config: AdminConfig = None,\n options=None, task=None):\n self.init(name, dvm_config, admin_config, nip89config, task)\n self.options = options\n self.install_dependencies(dvm_config)\n\n def init(self, name, dvm_config, admin_config=None, nip89config=None, task=None):\n self.NAME = name\n self.PRIVATE_KEY = dvm_config.PRIVATE_KEY\n if dvm_config.PUBLIC_KEY == \"\" or dvm_config.PUBLIC_KEY is None:\n dvm_config.PUBLIC_KEY = Keys.from_sk_str(dvm_config.PRIVATE_KEY).public_key().to_hex()\n self.PUBLIC_KEY = dvm_config.PUBLIC_KEY\n if dvm_config.FIX_COST is not None:\n self.FIX_COST = dvm_config.FIX_COST\n if dvm_config.PER_UNIT_COST is not None:\n self.PER_UNIT_COST = dvm_config.PER_UNIT_COST\n if task is not None:\n self.TASK = task\n\n dvm_config.SUPPORTED_DVMS = [self]\n dvm_config.DB = \"db/\" + self.NAME + \".db\"\n if nip89config.KIND is not None:\n self.KIND = nip89config.KIND\n\n dvm_config.NIP89 = self.NIP89_announcement(nip89config)\n self.dvm_config = dvm_config\n self.admin_config = admin_config\n\n def install_dependencies(self, dvm_config):\n if dvm_config.SCRIPT != \"\":\n if self.dvm_config.USE_OWN_VENV:\n dir = r'cache/venvs/' + os.path.basename(dvm_config.SCRIPT).split(\".py\")[0]\n pip_location = 'bin/pip'\n if platform == \"win32\":\n pip_location = dir + '/Scripts/pip'\n\n if not os.path.isdir(dir):\n print(\"Creating Venv: \" + dir)\n create(dir, with_pip=True, upgrade_deps=True)\n self.dependencies.append((\"nostr-dvm\", \"nostr-dvm\"))\n for (module, package) in self.dependencies:\n print(\"Installing Venv Module: \" + module)\n run([pip_location, \"install\", \"--upgrade\", package], cwd=dir)\n else:\n for module, package in self.dependencies:\n if module != \"nostr-dvm\":\n try:\n __import__(module)\n except ImportError:\n print(\"Installing global Module: \" + module)\n subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", package])\n\n def run(self):\n nostr_dvm_thread = Thread(target=self.DVM, args=[self.dvm_config, self.admin_config])\n nostr_dvm_thread.start()\n\n def NIP89_announcement(self, nip89config: NIP89Config):\n nip89 = NIP89Config()\n nip89.NAME = self.NAME\n nip89.KIND = self.KIND\n nip89.PK = self.PRIVATE_KEY\n nip89.DTAG = nip89config.DTAG\n nip89.CONTENT = nip89config.CONTENT\n return nip89\n\n def is_input_supported(self, tags, client=None, dvm_config=None) -> bool:\n \"\"\"Check if input is supported for current Task.\"\"\"\n pass\n\n def create_request_from_nostr_event(self, event, client=None, dvm_config=None) -> dict:\n \"\"\"Parse input into a request form that will be given to the process method\"\"\"\n pass\n\n def process(self, request_form):\n \"Process the data and return the result\"\n pass\n\n def post_process(self, result, event):\n \"\"\"Post-process the data and return the result Use default function, if not overwritten\"\"\"\n return post_process_result(result, event)\n\n @staticmethod\n def set_options(request_form):\n print(\"Setting options...\")\n opts = []\n if 
request_form.get(\"options\"):\n opts = json.loads(request_form[\"options\"])\n print(opts)\n return dict(opts)\n\n @staticmethod\n def process_args():\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('--request', dest='request')\n parser.add_argument('--identifier', dest='identifier')\n parser.add_argument('--output', dest='output')\n args = parser.parse_args()\n return args\n\n @staticmethod\n def write_output(result, output):\n with open(os.path.abspath(output), 'w') as f:\n f.write(result)\n # f.close()"
},
{
"identifier": "process_venv",
"path": "nostr_dvm/interfaces/dvmtaskinterface.py",
"snippet": "def process_venv(identifier):\n args = DVMTaskInterface.process_args()\n dvm_config = build_default_config(args.identifier)\n dvm = identifier(name=\"\", dvm_config=dvm_config, nip89config=NIP89Config(), admin_config=None)\n try:\n result = dvm.process(json.loads(args.request))\n DVMTaskInterface.write_output(result, args.output)\n except Exception as e:\n DVMTaskInterface.write_output(\"Error: \" + str(e), args.output)"
},
{
"identifier": "AdminConfig",
"path": "nostr_dvm/utils/admin_utils.py",
"snippet": "class AdminConfig:\n REBROADCAST_NIP89: bool = False\n UPDATE_PROFILE: bool = False\n DELETE_NIP89: bool = False\n WHITELISTUSER: bool = False\n UNWHITELISTUSER: bool = False\n BLACKLISTUSER: bool = False\n DELETEUSER: bool = False\n LISTDATABASE: bool = False\n ClEANDB: bool = False\n\n USERNPUB: str = \"\"\n LUD16: str = \"\"\n\n EVENTID: str = \"\"\n PRIVKEY: str = \"\""
},
{
"identifier": "EventDefinitions",
"path": "nostr_dvm/utils/definitions.py",
"snippet": "class EventDefinitions:\n KIND_DM = 4\n KIND_ZAP = 9735\n KIND_ANNOUNCEMENT = 31990\n KIND_NIP94_METADATA = 1063\n KIND_FEEDBACK = 7000\n KIND_NIP90_EXTRACT_TEXT = 5000\n KIND_NIP90_RESULT_EXTRACT_TEXT = KIND_NIP90_EXTRACT_TEXT + 1000\n KIND_NIP90_SUMMARIZE_TEXT = 5001\n KIND_NIP90_RESULT_SUMMARIZE_TEXT = KIND_NIP90_SUMMARIZE_TEXT + 1000\n KIND_NIP90_TRANSLATE_TEXT = 5002\n KIND_NIP90_RESULT_TRANSLATE_TEXT = KIND_NIP90_TRANSLATE_TEXT + 1000\n KIND_NIP90_GENERATE_TEXT = 5050\n KIND_NIP90_RESULT_GENERATE_TEXT = KIND_NIP90_GENERATE_TEXT + 1000\n KIND_NIP90_GENERATE_IMAGE = 5100\n KIND_NIP90_RESULT_GENERATE_IMAGE = KIND_NIP90_GENERATE_IMAGE + 1000\n KIND_NIP90_CONVERT_VIDEO = 5200\n KIND_NIP90_RESULT_CONVERT_VIDEO = KIND_NIP90_CONVERT_VIDEO + 1000\n KIND_NIP90_GENERATE_VIDEO = 5202\n KIND_NIP90_TEXT_TO_SPEECH = 5250\n KIND_NIP90_RESULT_TEXT_TO_SPEECH = KIND_NIP90_TEXT_TO_SPEECH + 1000\n KIND_NIP90_RESULT_GENERATE_VIDEO = KIND_NIP90_GENERATE_VIDEO + 1000\n KIND_NIP90_CONTENT_DISCOVERY = 5300\n KIND_NIP90_RESULT_CONTENT_DISCOVERY = KIND_NIP90_CONTENT_DISCOVERY + 1000\n KIND_NIP90_PEOPLE_DISCOVERY = 5301\n KIND_NIP90_RESULT_PEOPLE_DISCOVERY = KIND_NIP90_PEOPLE_DISCOVERY + 1000\n KIND_NIP90_CONTENT_SEARCH = 5302\n KIND_NIP90_RESULTS_CONTENT_SEARCH = KIND_NIP90_CONTENT_SEARCH + 1000\n KIND_NIP90_GENERIC = 5999\n KIND_NIP90_RESULT_GENERIC = KIND_NIP90_GENERIC + 1000\n ANY_RESULT = [KIND_NIP90_RESULT_EXTRACT_TEXT,\n KIND_NIP90_RESULT_SUMMARIZE_TEXT,\n KIND_NIP90_RESULT_TRANSLATE_TEXT,\n KIND_NIP90_RESULT_GENERATE_TEXT,\n KIND_NIP90_RESULT_GENERATE_IMAGE,\n KIND_NIP90_CONTENT_DISCOVERY,\n KIND_NIP90_PEOPLE_DISCOVERY,\n KIND_NIP90_RESULT_CONVERT_VIDEO,\n KIND_NIP90_RESULT_CONTENT_DISCOVERY,\n KIND_NIP90_RESULT_PEOPLE_DISCOVERY,\n KIND_NIP90_RESULT_GENERATE_VIDEO,\n KIND_NIP90_RESULT_GENERIC]"
},
{
"identifier": "DVMConfig",
"path": "nostr_dvm/utils/dvmconfig.py",
"snippet": "class DVMConfig:\n SUPPORTED_DVMS = []\n PRIVATE_KEY: str = \"\"\n PUBLIC_KEY: str = \"\"\n FIX_COST: float = None\n PER_UNIT_COST: float = None\n\n RELAY_LIST = [\"wss://relay.damus.io\", \"wss://nostr-pub.wellorder.net\", \"wss://nos.lol\", \"wss://nostr.wine\",\n \"wss://nostr.mom\", \"wss://nostr.oxtr.dev\", \"wss://relay.nostr.bg\",\n \"wss://relay.f7z.io\", \"wss://pablof7z.nostr1.com\", \"wss://relay.nostr.net\", \"wss://140.f7z.io\",\n \"wss://relay.snort.social\", \"wss://offchain.pub/\", \"wss://relay.nostr.band\"]\n\n RELAY_TIMEOUT = 5\n EXTERNAL_POST_PROCESS_TYPE = PostProcessFunctionType.NONE # Leave this on None, except the DVM is external\n LNBITS_INVOICE_KEY = '' # Will all automatically generated by default, or read from .env\n LNBITS_ADMIN_KEY = '' # In order to pay invoices, e.g. from the bot to DVMs, or reimburse users.\n LNBITS_URL = 'https://lnbits.com'\n LN_ADDRESS = ''\n SCRIPT = ''\n IDENTIFIER = ''\n USE_OWN_VENV = True # Make an own venv for each dvm's process function.Disable if you want to install packages into main venv. Only recommended if you dont want to run dvms with different dependency versions\n DB: str\n NEW_USER_BALANCE: int = 0 # Free credits for new users\n NIP89: NIP89Config\n SHOW_RESULT_BEFORE_PAYMENT: bool = False # if this is true show results even when not paid right after autoprocess"
},
{
"identifier": "build_default_config",
"path": "nostr_dvm/utils/dvmconfig.py",
"snippet": "def build_default_config(identifier):\n dvm_config = DVMConfig()\n dvm_config.PRIVATE_KEY = check_and_set_private_key(identifier)\n dvm_config.IDENTIFIER = identifier\n npub = Keys.from_sk_str(dvm_config.PRIVATE_KEY).public_key().to_bech32()\n invoice_key, admin_key, wallet_id, user_id, lnaddress = check_and_set_ln_bits_keys(identifier, npub)\n dvm_config.LNBITS_INVOICE_KEY = invoice_key\n dvm_config.LNBITS_ADMIN_KEY = admin_key # The dvm might pay failed jobs back\n dvm_config.LNBITS_URL = os.getenv(\"LNBITS_HOST\")\n dvm_config.LN_ADDRESS = lnaddress\n return dvm_config"
},
{
"identifier": "NIP89Config",
"path": "nostr_dvm/utils/nip89_utils.py",
"snippet": "class NIP89Config:\n DTAG: str = \"\"\n NAME: str = \"\"\n KIND: int = None\n PK: str = \"\"\n CONTENT: str = \"\""
},
{
"identifier": "check_and_set_d_tag",
"path": "nostr_dvm/utils/nip89_utils.py",
"snippet": "def check_and_set_d_tag(identifier, name, pk, imageurl):\n if not os.getenv(\"NIP89_DTAG_\" + identifier.upper()):\n new_dtag = nip89_create_d_tag(name, Keys.from_sk_str(pk).public_key().to_hex(),\n imageurl)\n nip89_add_dtag_to_env_file(\"NIP89_DTAG_\" + identifier.upper(), new_dtag)\n print(\"Some new dtag:\" + new_dtag)\n return new_dtag\n else:\n return os.getenv(\"NIP89_DTAG_\" + identifier.upper())"
},
{
"identifier": "post_process_list_to_events",
"path": "nostr_dvm/utils/output_utils.py",
"snippet": "def post_process_list_to_events(result):\n result_list = json.loads(result)\n result_str = \"\"\n if len(result_list) == 0:\n return \"No results found\"\n for tag in result_list:\n e_tag = Tag.parse(tag)\n result_str = result_str + \"nostr:\" + EventId.from_hex(\n e_tag.as_vec()[1]).to_bech32() + \"\\n\"\n return result_str"
}
] | import json
import os
from datetime import timedelta
from nostr_sdk import Client, Timestamp, PublicKey, Tag, Keys, Options, SecretKey, ClientSigner
from nostr_dvm.interfaces.dvmtaskinterface import DVMTaskInterface, process_venv
from nostr_dvm.utils.admin_utils import AdminConfig
from nostr_dvm.utils.definitions import EventDefinitions
from nostr_dvm.utils.dvmconfig import DVMConfig, build_default_config
from nostr_dvm.utils.nip89_utils import NIP89Config, check_and_set_d_tag
from nostr_dvm.utils.output_utils import post_process_list_to_events
from nostr_sdk import Filter | 3,385 |
"""
This File contains a Module to search for notes
Accepted Inputs: a search query
Outputs: A list of events
Params: None
"""
class AdvancedSearch(DVMTaskInterface):
KIND: int = EventDefinitions.KIND_NIP90_CONTENT_SEARCH
TASK: str = "search-content"
FIX_COST: float = 0
dvm_config: DVMConfig
dependencies = [("nostr-dvm", "nostr-dvm")]
def __init__(self, name, dvm_config: DVMConfig, nip89config: NIP89Config,
|
"""
This File contains a Module to search for notes
Accepted Inputs: a search query
Outputs: A list of events
Params: None
"""
class AdvancedSearch(DVMTaskInterface):
KIND: int = EventDefinitions.KIND_NIP90_CONTENT_SEARCH
TASK: str = "search-content"
FIX_COST: float = 0
dvm_config: DVMConfig
dependencies = [("nostr-dvm", "nostr-dvm")]
def __init__(self, name, dvm_config: DVMConfig, nip89config: NIP89Config, | admin_config: AdminConfig = None, options=None): | 2 | 2023-11-17 18:32:56+00:00 | 4k |
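DVMTaskInterface.set_options() in the context above decodes the request form's "options" field from a JSON-encoded list of key/value pairs into a dict before process() runs. A minimal standalone sketch of that convention follows; the field name mirrors the snippet, while the example values are hypothetical.

```python
# Sketch of the request_form "options" convention; the "options" field name
# follows the snippet above, the example values are made up.
import json

def set_options_sketch(request_form):
    opts = []
    if request_form.get("options"):
        opts = json.loads(request_form["options"])
    return dict(opts)

form = {"options": json.dumps([["search", "nostr"], ["max_results", 20]])}
print(set_options_sketch(form))  # {'search': 'nostr', 'max_results': 20}
```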
zouXH-god/meme_web | meme_generator/meme.py | [
{
"identifier": "ArgModelMismatch",
"path": "meme_generator/exception.py",
"snippet": "class ArgModelMismatch(ArgMismatch):\n status_code: int = 552\n\n def __init__(self, meme_key: str, error_message: str):\n self.error_message = error_message\n message = f\"Argument model validation failed: {self.error_message}\"\n super().__init__(meme_key, message)"
},
{
"identifier": "ArgParserExit",
"path": "meme_generator/exception.py",
"snippet": "class ArgParserExit(ArgMismatch):\n status_code: int = 551\n\n def __init__(self, meme_key: str, error_message: str):\n self.error_message = error_message\n message = f\"Argument parser failed to parse: {self.error_message}\"\n super().__init__(meme_key, message)"
},
{
"identifier": "ImageNumberMismatch",
"path": "meme_generator/exception.py",
"snippet": "class ImageNumberMismatch(ParamsMismatch):\n status_code: int = 541\n\n def __init__(self, meme_key: str, min_images: int = 0, max_images: int = 0):\n message = (\n \"The number of images is incorrect, \"\n f\"it should be no less than {min_images} and no more than {max_images}\"\n )\n super().__init__(meme_key, message)"
},
{
"identifier": "OpenImageFailed",
"path": "meme_generator/exception.py",
"snippet": "class OpenImageFailed(MemeGeneratorException):\n status_code: int = 533\n\n def __init__(self, error_message: str):\n self.error_message = error_message\n message = f'Error opening images: \"{self.error_message}\"'\n super().__init__(message)"
},
{
"identifier": "ParserExit",
"path": "meme_generator/exception.py",
"snippet": "class ParserExit(MemeGeneratorException):\n status_code: int = 534\n\n def __init__(self, status: int = 0, error_message: Optional[str] = None):\n self.status = status\n self.error_message = error_message or \"\"\n message = (\n f\"Argument parser failed to parse. (status={self.status}\"\n + (f\", message={self.error_message!r}\" if self.error_message else \"\")\n + \")\"\n )\n super().__init__(message)"
},
{
"identifier": "TextNumberMismatch",
"path": "meme_generator/exception.py",
"snippet": "class TextNumberMismatch(ParamsMismatch):\n status_code: int = 542\n\n def __init__(self, meme_key: str, min_texts: int = 0, max_texts: int = 0):\n message = (\n \"The number of texts is incorrect, \"\n f\"it should be no less than {min_texts} and no more than {max_texts}\"\n )\n super().__init__(meme_key, message)"
},
{
"identifier": "TextOrNameNotEnough",
"path": "meme_generator/exception.py",
"snippet": "class TextOrNameNotEnough(ParamsMismatch):\n status_code: int = 543\n\n def __init__(self, meme_key: str, message: Optional[str] = None):\n message = message or \"The number of texts or user names is not enough\"\n super().__init__(meme_key, message)"
},
{
"identifier": "is_coroutine_callable",
"path": "meme_generator/utils.py",
"snippet": "def is_coroutine_callable(call: Callable[..., Any]) -> bool:\n \"\"\"检查 call 是否是一个 callable 协程函数\"\"\"\n if inspect.isroutine(call):\n return inspect.iscoroutinefunction(call)\n if inspect.isclass(call):\n return False\n func_ = getattr(call, \"__call__\", None)\n return inspect.iscoroutinefunction(func_)"
},
{
"identifier": "random_image",
"path": "meme_generator/utils.py",
"snippet": "def random_image() -> BytesIO:\n text = random.choice([\"😂\", \"😅\", \"🤗\", \"🤤\", \"🥵\", \"🥰\", \"😍\", \"😭\", \"😋\", \"😏\"])\n return (\n BuildImage.new(\"RGBA\", (500, 500), \"white\")\n .draw_text((0, 0, 500, 500), text, max_fontsize=400)\n .save_png()\n )"
},
{
"identifier": "random_text",
"path": "meme_generator/utils.py",
"snippet": "def random_text() -> str:\n return random.choice([\"刘一\", \"陈二\", \"张三\", \"李四\", \"王五\", \"赵六\", \"孙七\", \"周八\", \"吴九\", \"郑十\"])"
},
{
"identifier": "run_sync",
"path": "meme_generator/utils.py",
"snippet": "def run_sync(call: Callable[P, R]) -> Callable[P, Coroutine[None, None, R]]:\n \"\"\"一个用于包装 sync function 为 async function 的装饰器\n 参数:\n call: 被装饰的同步函数\n \"\"\"\n\n @wraps(call)\n async def _wrapper(*args: P.args, **kwargs: P.kwargs) -> R:\n loop = asyncio.get_running_loop()\n pfunc = partial(call, *args, **kwargs)\n result = await loop.run_in_executor(None, pfunc)\n return result\n\n return _wrapper"
}
] | import copy
from argparse import ArgumentError, ArgumentParser
from contextvars import ContextVar
from dataclasses import dataclass, field
from io import BytesIO
from pathlib import Path
from typing import (
IO,
Any,
Awaitable,
Callable,
Dict,
List,
Literal,
Optional,
Type,
TypeVar,
Union,
cast,
)
from pil_utils import BuildImage
from pydantic import BaseModel, ValidationError
from .exception import (
ArgModelMismatch,
ArgParserExit,
ImageNumberMismatch,
OpenImageFailed,
ParserExit,
TextNumberMismatch,
TextOrNameNotEnough,
)
from .utils import is_coroutine_callable, random_image, random_text, run_sync | 2,013 |
class UserInfo(BaseModel):
name: str = ""
gender: Literal["male", "female", "unknown"] = "unknown"
class MemeArgsModel(BaseModel):
user_infos: List[UserInfo] = []
ArgsModel = TypeVar("ArgsModel", bound=MemeArgsModel)
MemeFunction = Union[
Callable[[List[BuildImage], List[str], ArgsModel], BytesIO],
Callable[[List[BuildImage], List[str], ArgsModel], Awaitable[BytesIO]],
]
parser_message: ContextVar[str] = ContextVar("parser_message")
class MemeArgsParser(ArgumentParser):
"""`shell_like` 命令参数解析器,解析出错时不会退出程序。
用法:
用法与 `argparse.ArgumentParser` 相同,
参考文档: [argparse](https://docs.python.org/3/library/argparse.html)
"""
def _print_message(self, message: str, file: Optional[IO[str]] = None):
if (msg := parser_message.get(None)) is not None:
parser_message.set(msg + message)
else:
super()._print_message(message, file)
def exit(self, status: int = 0, message: Optional[str] = None):
if message:
self._print_message(message)
raise ParserExit(status=status, error_message=parser_message.get(None))
@dataclass
class MemeArgsType:
parser: MemeArgsParser
model: Type[MemeArgsModel]
instances: List[MemeArgsModel] = field(default_factory=list)
@dataclass
class MemeParamsType:
min_images: int = 0
max_images: int = 0
min_texts: int = 0
max_texts: int = 0
default_texts: List[str] = field(default_factory=list)
args_type: Optional[MemeArgsType] = None
@dataclass
class Meme:
key: str
function: MemeFunction
params_type: MemeParamsType
keywords: List[str] = field(default_factory=list)
patterns: List[str] = field(default_factory=list)
async def __call__(
self,
*,
images: Union[List[str], List[Path], List[bytes], List[BytesIO]] = [],
texts: List[str] = [],
args: Dict[str, Any] = {},
) -> BytesIO:
if not (
self.params_type.min_images <= len(images) <= self.params_type.max_images
):
raise ImageNumberMismatch(
self.key, self.params_type.min_images, self.params_type.max_images
)
if not (self.params_type.min_texts <= len(texts) <= self.params_type.max_texts):
raise TextNumberMismatch(
self.key, self.params_type.min_texts, self.params_type.max_texts
)
if args_type := self.params_type.args_type:
args_model = args_type.model
else:
args_model = MemeArgsModel
try:
model = args_model.parse_obj(args)
except ValidationError as e:
raise ArgModelMismatch(self.key, str(e))
imgs: List[BuildImage] = []
try:
for image in images:
if isinstance(image, bytes):
image = BytesIO(image)
imgs.append(BuildImage.open(image))
except Exception as e:
|
class UserInfo(BaseModel):
name: str = ""
gender: Literal["male", "female", "unknown"] = "unknown"
class MemeArgsModel(BaseModel):
user_infos: List[UserInfo] = []
ArgsModel = TypeVar("ArgsModel", bound=MemeArgsModel)
MemeFunction = Union[
Callable[[List[BuildImage], List[str], ArgsModel], BytesIO],
Callable[[List[BuildImage], List[str], ArgsModel], Awaitable[BytesIO]],
]
parser_message: ContextVar[str] = ContextVar("parser_message")
class MemeArgsParser(ArgumentParser):
"""`shell_like` 命令参数解析器,解析出错时不会退出程序。
用法:
用法与 `argparse.ArgumentParser` 相同,
参考文档: [argparse](https://docs.python.org/3/library/argparse.html)
"""
def _print_message(self, message: str, file: Optional[IO[str]] = None):
if (msg := parser_message.get(None)) is not None:
parser_message.set(msg + message)
else:
super()._print_message(message, file)
def exit(self, status: int = 0, message: Optional[str] = None):
if message:
self._print_message(message)
raise ParserExit(status=status, error_message=parser_message.get(None))
@dataclass
class MemeArgsType:
parser: MemeArgsParser
model: Type[MemeArgsModel]
instances: List[MemeArgsModel] = field(default_factory=list)
@dataclass
class MemeParamsType:
min_images: int = 0
max_images: int = 0
min_texts: int = 0
max_texts: int = 0
default_texts: List[str] = field(default_factory=list)
args_type: Optional[MemeArgsType] = None
@dataclass
class Meme:
key: str
function: MemeFunction
params_type: MemeParamsType
keywords: List[str] = field(default_factory=list)
patterns: List[str] = field(default_factory=list)
async def __call__(
self,
*,
images: Union[List[str], List[Path], List[bytes], List[BytesIO]] = [],
texts: List[str] = [],
args: Dict[str, Any] = {},
) -> BytesIO:
if not (
self.params_type.min_images <= len(images) <= self.params_type.max_images
):
raise ImageNumberMismatch(
self.key, self.params_type.min_images, self.params_type.max_images
)
if not (self.params_type.min_texts <= len(texts) <= self.params_type.max_texts):
raise TextNumberMismatch(
self.key, self.params_type.min_texts, self.params_type.max_texts
)
if args_type := self.params_type.args_type:
args_model = args_type.model
else:
args_model = MemeArgsModel
try:
model = args_model.parse_obj(args)
except ValidationError as e:
raise ArgModelMismatch(self.key, str(e))
imgs: List[BuildImage] = []
try:
for image in images:
if isinstance(image, bytes):
image = BytesIO(image)
imgs.append(BuildImage.open(image))
except Exception as e: | raise OpenImageFailed(str(e)) | 3 | 2023-11-12 12:31:53+00:00 | 4k |
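Meme.__call__ above validates the raw args dict by parsing it into MemeArgsModel. The sketch below re-declares the two models standalone so it runs without the package, and shows the same pydantic parse_obj step:

```python
# Models re-declared here so the snippet runs on its own; the parse_obj call
# mirrors the validation step inside Meme.__call__ (pydantic v1-style API).
from typing import List, Literal
from pydantic import BaseModel

class UserInfo(BaseModel):
    name: str = ""
    gender: Literal["male", "female", "unknown"] = "unknown"

class MemeArgsModel(BaseModel):
    user_infos: List[UserInfo] = []

args = {"user_infos": [{"name": "alice", "gender": "female"}]}
model = MemeArgsModel.parse_obj(args)
print(model.user_infos[0].name)  # alice
```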
OKC13/General-Documents-Layout-parser | hubconf.py | [
{
"identifier": "Model",
"path": "models/yolo.py",
"snippet": "class Model(nn.Module):\n def __init__(self, model_cfg='yolov5s.yaml', ch=3, nc=None): # model, input channels, number of classes\n super(Model, self).__init__()\n if type(model_cfg) is dict:\n self.md = model_cfg # model dict\n else: # is *.yaml\n with open(model_cfg) as f:\n self.md = yaml.load(f, Loader=yaml.FullLoader) # model dict\n\n # Define model\n if nc:\n self.md['nc'] = nc # override yaml value\n self.model, self.save = parse_model(self.md, ch=[ch]) # model, savelist, ch_out\n # print([x.shape for x in self.forward(torch.zeros(1, ch, 64, 64))])\n\n # Build strides, anchors\n m = self.model[-1] # Detect()\n m.stride = torch.tensor([64 / x.shape[-2] for x in self.forward(torch.zeros(1, ch, 64, 64))]) # forward\n m.anchors /= m.stride.view(-1, 1, 1)\n self.stride = m.stride\n\n # Init weights, biases\n torch_utils.initialize_weights(self)\n self._initialize_biases() # only run once\n torch_utils.model_info(self)\n print('')\n\n def forward(self, x, augment=False, profile=False):\n if augment:\n img_size = x.shape[-2:] # height, width\n s = [0.83, 0.67] # scales\n y = []\n for i, xi in enumerate((x,\n torch_utils.scale_img(x.flip(3), s[0]), # flip-lr and scale\n torch_utils.scale_img(x, s[1]), # scale\n )):\n # cv2.imwrite('img%g.jpg' % i, 255 * xi[0].numpy().transpose((1, 2, 0))[:, :, ::-1])\n y.append(self.forward_once(xi)[0])\n\n y[1][..., :4] /= s[0] # scale\n y[1][..., 0] = img_size[1] - y[1][..., 0] # flip lr\n y[2][..., :4] /= s[1] # scale\n return torch.cat(y, 1), None # augmented inference, train\n else:\n return self.forward_once(x, profile) # single-scale inference, train\n\n def forward_once(self, x, profile=False):\n y, dt = [], [] # outputs\n for m in self.model:\n if m.f != -1: # if not from previous layer\n x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers\n\n if profile:\n import thop\n o = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 # FLOPS\n t = torch_utils.time_synchronized()\n for _ in range(10):\n _ = m(x)\n dt.append((torch_utils.time_synchronized() - t) * 100)\n print('%10.1f%10.0f%10.1fms %-40s' % (o, m.np, dt[-1], m.type))\n\n x = m(x) # run\n y.append(x if m.i in self.save else None) # save output\n\n if profile:\n print('%.1fms total' % sum(dt))\n return x\n\n def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency\n # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1.\n m = self.model[-1] # Detect() module\n for f, s in zip(m.f, m.stride): # from\n mi = self.model[f % m.i]\n b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85)\n with torch.no_grad():\n b[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image)\n b[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls\n mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)\n\n def _print_biases(self):\n m = self.model[-1] # Detect() module\n for f in sorted([x % m.i for x in m.f]): # from\n b = self.model[f].bias.detach().view(m.na, -1).T # conv.bias(255) to (3,85)\n print(('%g Conv2d.bias:' + '%10.3g' * 6) % (f, *b[:5].mean(1).tolist(), b[5:].mean()))\n\n # def _print_weights(self):\n # for m in self.model.modules():\n # if type(m) is Bottleneck:\n # print('%10.3g' % (m.w.detach().sigmoid() * 2)) # shortcut weights\n\n def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers\n print('Fusing layers...')\n for m in self.model.modules():\n if type(m) is Conv:\n m.conv = 
torch_utils.fuse_conv_and_bn(m.conv, m.bn) # update conv\n m.bn = None # remove batchnorm\n m.forward = m.fuseforward # update forward\n torch_utils.model_info(self)"
},
{
"identifier": "google_utils",
"path": "utils/google_utils.py",
"snippet": "def attempt_download(weights):\ndef gdrive_download(id='1HaXkef9z6y5l4vUnCYgdmEAj61c6bfWO', name='coco.zip'):"
}
] | import torch
from models.yolo import Model
from utils import google_utils | 1,705 | """File for accessing YOLOv5 via PyTorch Hub https://pytorch.org/hub/
Usage:
import torch
model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True, channels=3, classes=80)
"""
dependencies = ['torch', 'yaml']
def create(name, pretrained, channels, classes):
"""Creates a specified YOLOv5 model
Arguments:
name (str): name of model, i.e. 'yolov5s'
pretrained (bool): load pretrained weights into the model
channels (int): number of input channels
classes (int): number of model classes
Returns:
pytorch model
"""
model = Model('/home/wangjiawei/yolov3/YOLOv5/models/%s.yaml' % name, channels, classes)
if pretrained:
ckpt = '/home/wangjiawei/yolov3/YOLOv5/weights/%s.pt' % name # checkpoint filename
| """File for accessing YOLOv5 via PyTorch Hub https://pytorch.org/hub/
Usage:
import torch
model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True, channels=3, classes=80)
"""
dependencies = ['torch', 'yaml']
def create(name, pretrained, channels, classes):
"""Creates a specified YOLOv5 model
Arguments:
name (str): name of model, i.e. 'yolov5s'
pretrained (bool): load pretrained weights into the model
channels (int): number of input channels
classes (int): number of model classes
Returns:
pytorch model
"""
model = Model('/home/wangjiawei/yolov3/YOLOv5/models/%s.yaml' % name, channels, classes)
if pretrained:
ckpt = '/home/wangjiawei/yolov3/YOLOv5/weights/%s.pt' % name # checkpoint filename | google_utils.attempt_download(ckpt) # download if not found locally | 1 | 2023-11-16 08:37:10+00:00 | 4k |
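The module docstring in this entry documents loading the model through torch.hub; the call below repeats that documented usage. It assumes network access, since torch.hub fetches the repository and pretrained weights on first use.

```python
# Documented usage from the docstring above; downloads the repo and weights
# via torch.hub on first use.
import torch

model = torch.hub.load('ultralytics/yolov5', 'yolov5s',
                       pretrained=True, channels=3, classes=80)
model.eval()
```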
tensorpix/benchmarking-cv-models | src/train.py | [
{
"identifier": "log",
"path": "src/log.py",
"snippet": "def setup_custom_logger(name: str = \"benchmark\"):"
},
{
"identifier": "BenchmarkCallback",
"path": "src/callbacks.py",
"snippet": "class BenchmarkCallback(Callback):\n def __init__(\n self,\n model_name: str,\n precision: str,\n workers: int,\n warmup_steps: int = 50,\n ):\n self.warmup_steps = warmup_steps\n self.start_time = 0\n self.end_time = 0\n self.precision = precision\n self.model = model_name\n self.workers = workers\n\n def on_fit_start(self, trainer, pl_module):\n logger.info(\n f\"Benchmark started. Number of warmup iterations: {self.warmup_steps}\"\n )\n\n def on_train_batch_start(self, trainer, pl_module, batch, batch_idx: int):\n if batch_idx == self.warmup_steps:\n logger.info(\n f\"Completed {self.warmup_steps} warmup steps. Benchmark timer started.\"\n )\n self.start_time = time.time()\n\n def on_fit_end(self, trainer, pl_module):\n self.end_time = time.time()\n logger.info(\"Fit function finished\")\n\n dataset = trainer.train_dataloader.dataset\n batch_size = trainer.train_dataloader.batch_size\n in_w, in_h = dataset.width, dataset.height\n\n benchmark_steps = trainer.global_step - self.warmup_steps\n processed_megapixels = (\n trainer.world_size * in_w * in_h * batch_size * benchmark_steps / 1e6\n )\n\n elapsed_time = (\n self.end_time - self.start_time\n ) + 1e-7 # for numerical stability\n mpx_s = processed_megapixels / (elapsed_time)\n\n processed_imgs = batch_size * benchmark_steps * trainer.world_size\n images_s = processed_imgs / (elapsed_time)\n\n batches_s = benchmark_steps * trainer.world_size / elapsed_time\n\n logger.info(f\"Benchmark finished in {elapsed_time:.1f} seconds\")\n logger.info(\n f\"Average training throughput: {mpx_s:.2f} MPx/s (megapixels per second) | \"\n + f\"{images_s:.2f} images/s | {batches_s:.2f} batches/s\"\n )\n\n os.makedirs(\"./benchmarks\", exist_ok=True)\n csv_path = os.path.join(\"./benchmarks\", \"benchmark.csv\")\n file_exists = os.path.isfile(csv_path) and os.stat(csv_path).st_size >= 0\n with open(csv_path, \"a\") as file:\n writer = csv.writer(file)\n if not file_exists:\n writer.writerow(\n [\n \"Datetime\",\n \"GPU\",\n \"cuDNN version\",\n \"N GPUs\",\n \"Data Loader workers\",\n \"Model\",\n \"Precision\",\n \"Minibatch\",\n \"Input width [px]\",\n \"Input height [px]\",\n \"Warmup steps\",\n \"Benchmark steps\",\n \"MPx/s\",\n \"images/s\",\n \"batches/s\",\n ]\n )\n\n data = [\n datetime.now().strftime(\"%d/%m/%Y %H:%M:%S\"),\n torch.cuda.get_device_name(0),\n torch.backends.cudnn.version(),\n trainer.world_size,\n self.workers,\n self.model,\n self.precision,\n batch_size,\n in_w,\n in_h,\n self.warmup_steps,\n benchmark_steps,\n mpx_s,\n images_s,\n batches_s,\n ]\n writer.writerow(data)\n logger.info(\n \"Written benchmark data to a CSV file. \"\n + \"See 'Logging Results to a Persisent CSV File' section to \"\n + \"save the file on your disk: \"\n + \"https://github.com/tensorpix/benchmarking-cv-models#logging-results-to-a-persistent-csv-file\"\n )\n\n try:\n os.chmod(\n csv_path,\n stat.S_IRUSR\n | stat.S_IRGRP\n | stat.S_IWUSR\n | stat.S_IROTH\n | stat.S_IWOTH,\n )\n except Exception as e:\n logger.error(f\"Failed to change csv permissions: {e}\")"
},
{
"identifier": "InMemoryDataset",
"path": "src/data/in_memory_dataset.py",
"snippet": "class InMemoryDataset(Dataset):\n def __init__(\n self,\n width: int = 224,\n height: int = 224,\n n_channels: int = 3,\n dataset_size: int = int(1e7),\n ):\n super().__init__()\n self.width = width\n self.height = height\n self.n_channels = n_channels\n self.dataset_size = dataset_size\n\n def __len__(self):\n return self.dataset_size\n\n def __getitem__(self, idx: int) -> torch.Tensor:\n \"\"\"\n Must return a tensor of shape C x H x W with values in [0, 1] range.\n \"\"\"\n return torch.rand(self.n_channels, self.height, self.width, dtype=torch.float32)"
},
{
"identifier": "LitClassification",
"path": "src/models/lightning_modules.py",
"snippet": "class LitClassification(L.LightningModule):\n def __init__(self, model: nn.Module, optimizer=torch.optim.Adam):\n super().__init__()\n self.model = model\n self.loss = nn.CrossEntropyLoss()\n self.optimizer = optimizer\n\n def training_step(self, batch, batch_idx) -> torch.Tensor:\n y_hat = self.model(batch)\n y = torch.rand_like(y_hat)\n\n loss = self.loss(y_hat, y)\n return loss\n\n def configure_optimizers(self):\n return self.optimizer(self.parameters(), lr=2e-5)"
}
] | import argparse
import segmentation_models_pytorch as smp
import torch
from lightning import Trainer
from pip._internal.operations import freeze
from torch.utils.data import DataLoader
from torchvision.models import (
convnext_base,
efficientnet_v2_m,
mobilenet_v3_large,
resnet50,
resnext50_32x4d,
swin_b,
vgg16,
vit_b_16,
)
from src import log
from src.callbacks import BenchmarkCallback
from src.data.in_memory_dataset import InMemoryDataset
from src.models.lightning_modules import LitClassification | 1,931 |
logger = log.setup_custom_logger()
ARCHITECTURES = {
"resnet50": resnet50,
"convnext": convnext_base,
"vgg16": vgg16,
"efficient_net_v2": efficientnet_v2_m,
"mobilenet_v3": mobilenet_v3_large,
"resnext50": resnext50_32x4d,
"swin": swin_b,
"vit": vit_b_16,
"unet_resnet50": smp.Unet
# TODO "ssd_vgg16": ssd300_vgg16,
# TODO "fasterrcnn_resnet50_v2": fasterrcnn_resnet50_fpn_v2,
}
def print_requirements():
pkgs = freeze.freeze()
for pkg in pkgs:
logger.info(pkg)
def main(args):
if args.list_requirements:
print_requirements()
args_dict = vars(args)
logger.info(f"User Arguments {args_dict}")
dataset = InMemoryDataset(width=args.width, height=args.width)
data_loader = DataLoader(
dataset,
num_workers=args.n_workers,
batch_size=args.batch_size,
shuffle=True,
pin_memory=True,
drop_last=True,
)
trainer = Trainer(
accelerator=args.accelerator,
strategy="ddp",
precision=args.precision,
limit_train_batches=args.n_iters + args.warmup_steps,
max_epochs=1,
logger=False,
enable_checkpointing=False,
callbacks=[
BenchmarkCallback(
warmup_steps=args.warmup_steps,
model_name=args.model,
precision=args.precision,
workers=args.n_workers,
)
],
devices=torch.cuda.device_count(),
)
if args.model in ARCHITECTURES:
if args.model == "unet_resnet50":
model = ARCHITECTURES[args.model](
encoder_name="resnet50", encoder_weights=None
)
else:
model = ARCHITECTURES[args.model]()
else:
raise ValueError("Architecture not supported.")
|
logger = log.setup_custom_logger()
ARCHITECTURES = {
"resnet50": resnet50,
"convnext": convnext_base,
"vgg16": vgg16,
"efficient_net_v2": efficientnet_v2_m,
"mobilenet_v3": mobilenet_v3_large,
"resnext50": resnext50_32x4d,
"swin": swin_b,
"vit": vit_b_16,
"unet_resnet50": smp.Unet
# TODO "ssd_vgg16": ssd300_vgg16,
# TODO "fasterrcnn_resnet50_v2": fasterrcnn_resnet50_fpn_v2,
}
def print_requirements():
pkgs = freeze.freeze()
for pkg in pkgs:
logger.info(pkg)
def main(args):
if args.list_requirements:
print_requirements()
args_dict = vars(args)
logger.info(f"User Arguments {args_dict}")
dataset = InMemoryDataset(width=args.width, height=args.width)
data_loader = DataLoader(
dataset,
num_workers=args.n_workers,
batch_size=args.batch_size,
shuffle=True,
pin_memory=True,
drop_last=True,
)
trainer = Trainer(
accelerator=args.accelerator,
strategy="ddp",
precision=args.precision,
limit_train_batches=args.n_iters + args.warmup_steps,
max_epochs=1,
logger=False,
enable_checkpointing=False,
callbacks=[
BenchmarkCallback(
warmup_steps=args.warmup_steps,
model_name=args.model,
precision=args.precision,
workers=args.n_workers,
)
],
devices=torch.cuda.device_count(),
)
if args.model in ARCHITECTURES:
if args.model == "unet_resnet50":
model = ARCHITECTURES[args.model](
encoder_name="resnet50", encoder_weights=None
)
else:
model = ARCHITECTURES[args.model]()
else:
raise ValueError("Architecture not supported.")
| model = LitClassification(model=model) | 3 | 2023-11-10 11:45:09+00:00 | 4k |
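BenchmarkCallback.on_fit_end in this entry computes MPx/s, images/s and batches/s from the input size, batch size, world size and elapsed time. A worked example of that arithmetic with illustrative numbers (not a real benchmark run):

```python
# Worked example of the throughput formulas in BenchmarkCallback.on_fit_end;
# the numbers below are illustrative, not measured.
world_size = 2            # number of GPUs
width, height = 224, 224  # input size in pixels
batch_size = 32
benchmark_steps = 400     # steps counted after warmup
elapsed_time = 60.0       # seconds

processed_megapixels = world_size * width * height * batch_size * benchmark_steps / 1e6
mpx_s = processed_megapixels / elapsed_time
images_s = batch_size * benchmark_steps * world_size / elapsed_time
batches_s = benchmark_steps * world_size / elapsed_time
print(f"{mpx_s:.2f} MPx/s, {images_s:.2f} images/s, {batches_s:.2f} batches/s")
```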
embrake/Aquilify | aquilify/core/__status.py | [
{
"identifier": "NotFound",
"path": "aquilify/exception/base.py",
"snippet": "class NotFound(Response, HTTPException):\n def __init__(self, status_code=404):\n super().__init__(error404(), status_code=status_code, content_type=\"text/html\")"
},
{
"identifier": "Unauthorized",
"path": "aquilify/exception/base.py",
"snippet": "class Unauthorized(Response, HTTPException):\n def __init__(self, detail=\"Unauthorized\"):\n self.detail = f\"\"\"<h1>{detail}</h1>\"\"\"\n super().__init__(content=self.detail, status_code=401, content_type='text/html')"
},
{
"identifier": "Forbidden",
"path": "aquilify/exception/base.py",
"snippet": "class Forbidden(Response, HTTPException):\n def __init__(self, detail=\"\"):\n self.detail = error403(detail)\n super().__init__(content=self.detail, status_code=403, content_type='text/html')"
},
{
"identifier": "BadGateway",
"path": "aquilify/exception/base.py",
"snippet": "class BadGateway(Response, HTTPException):\n def __init__(self, detail=\"Bad Gateway\"):\n self.detail = error502()\n super().__init__(content=self.detail, status_code=502, content_type='text/html')"
},
{
"identifier": "InternalServerError",
"path": "aquilify/exception/base.py",
"snippet": "class InternalServerError(Response, HTTPException):\n def __init__(self, detail=\"Internal Server Error\"):\n self.detail = error500(detail)\n super().__init__(content=self.detail, status_code=500, content_type='text/html')"
},
{
"identifier": "MethodNotAllowed",
"path": "aquilify/exception/base.py",
"snippet": "class MethodNotAllowed(Response, HTTPException):\n def __init__(self, allowed_methods=None):\n super().__init__(content=error405(), status_code=405, content_type='text/html')"
},
{
"identifier": "BadRequest",
"path": "aquilify/exception/base.py",
"snippet": "class BadRequest(Response, HTTPException):\n def __init__(self, detail=\"Bad Request\", exception=None):\n self.detail = error400()\n self.exception = exception\n super().__init__(content=detail, status_code=400, content_type='text/html')"
},
{
"identifier": "NotAcceptable",
"path": "aquilify/exception/base.py",
"snippet": "class NotAcceptable(Response, HTTPException):\n def __init__(self, detail=\"Not Acceptable\"):\n self.detail = f\"\"\"<h1>{detail}</h1>\"\"\"\n super().__init__(content=self.detail, status_code=406, content_type='text/html')"
},
{
"identifier": "ProxyAuthenticationRequired",
"path": "aquilify/exception/base.py",
"snippet": "class ProxyAuthenticationRequired(Response, HTTPException):\n def __init__(self, detail=\"Proxy Authentication Required\"):\n self.detail = f\"\"\"<h1>{detail}</h1>\"\"\"\n super().__init__(content=self.detail, status_code=407, content_type='text/html')"
},
{
"identifier": "RequestTimeout",
"path": "aquilify/exception/base.py",
"snippet": "class RequestTimeout(Response, HTTPException):\n def __init__(self, detail=\"Request Timeout\"):\n self.detail = f\"\"\"<h1>{detail}</h1>\"\"\"\n super().__init__(content=self.detail, status_code=408, content_type='text/html')"
},
{
"identifier": "Conflict",
"path": "aquilify/exception/base.py",
"snippet": "class Conflict(Response, HTTPException):\n def __init__(self, detail=\"Conflict\"):\n self.detail = f\"\"\"<h1>{detail}</h1>\"\"\"\n super().__init__(content=self.detail, status_code=409, content_type='text/html')"
},
{
"identifier": "Gone",
"path": "aquilify/exception/base.py",
"snippet": "class Gone(Response, HTTPException):\n def __init__(self, detail=\"Gone\"):\n self.detail = f\"\"\"<h1>{detail}</h1>\"\"\"\n super().__init__(content=self.detail, status_code=410, content_type='text/html')"
},
{
"identifier": "LengthRequired",
"path": "aquilify/exception/base.py",
"snippet": "class LengthRequired(Response, HTTPException):\n def __init__(self, detail=\"Length Required\"):\n self.detail = f\"\"\"<h1>{detail}</h1>\"\"\"\n super().__init__(content=self.detail, status_code=411, content_type='text/html')"
},
{
"identifier": "PreconditionFailed",
"path": "aquilify/exception/base.py",
"snippet": "class PreconditionFailed(Response, HTTPException):\n def __init__(self, detail=\"Precondition Failed\"):\n self.detail = f\"\"\"<h1>{detail}</h1>\"\"\"\n super().__init__(content=self.detail, status_code=412, content_type='text/html')"
},
{
"identifier": "RequestURITooLong",
"path": "aquilify/exception/base.py",
"snippet": "class RequestURITooLong(Response, HTTPException):\n def __init__(self, detail=\"Request-URI Too Long\"):\n self.detail = f\"\"\"<h1>{detail}</h1>\"\"\"\n super().__init__(content=self.detail, status_code=414, content_type='text/html')"
},
{
"identifier": "UnsupportedMediaType",
"path": "aquilify/exception/base.py",
"snippet": "class UnsupportedMediaType(Response, HTTPException):\n def __init__(self, detail=\"Unsupported Media Type\"):\n self.detail = f\"\"\"<h1>{detail}</h1>\"\"\"\n super().__init__(content=self.detail, status_code=415, content_type='text/html')"
},
{
"identifier": "RequestedRangeNotSatisfiable",
"path": "aquilify/exception/base.py",
"snippet": "class RequestedRangeNotSatisfiable(Response, HTTPException):\n def __init__(self, detail=\"Requested Range Not Satisfiable\"):\n self.detail = f\"\"\"<h1>{detail}</h1>\"\"\"\n super().__init__(content=self.detail, status_code=416, content_type='text/html')"
},
{
"identifier": "ExpectationFailed",
"path": "aquilify/exception/base.py",
"snippet": "class ExpectationFailed(Response, HTTPException):\n def __init__(self, detail=\"Expectation Failed\"):\n self.detail = f\"\"\"<h1>{detail}</h1>\"\"\"\n super().__init__(content=self.detail, status_code=417, content_type='text/html')"
},
{
"identifier": "UnprocessableEntity",
"path": "aquilify/exception/base.py",
"snippet": "class UnprocessableEntity(Response, HTTPException):\n def __init__(self, detail=\"Unprocessable Entity\"):\n self.detail = f\"\"\"<h1>{detail}</h1>\"\"\"\n super().__init__(content=self.detail, status_code=422, content_type='text/html')"
},
{
"identifier": "MisdirectedRequest",
"path": "aquilify/exception/base.py",
"snippet": "class MisdirectedRequest(Response, HTTPException):\n def __init__(self, detail=\"Misdirected Request\"):\n self.detail = f\"\"\"<h1>{detail}</h1>\"\"\"\n super().__init__(content=self.detail, status_code=421, content_type='text/html')"
},
{
"identifier": "Locked",
"path": "aquilify/exception/base.py",
"snippet": "class Locked(Response, HTTPException):\n def __init__(self, detail=\"Locked\"):\n self.detail = f\"\"\"<h1>{detail}</h1>\"\"\"\n super().__init__(content=self.detail, status_code=423, content_type='text/html')"
},
{
"identifier": "FailedDependency",
"path": "aquilify/exception/base.py",
"snippet": "class FailedDependency(Response, HTTPException):\n def __init__(self, detail=\"Failed Dependency\"):\n self.detail = f\"\"\"<h1>{detail}</h1>\"\"\"\n super().__init__(content=self.detail, status_code=424, content_type='text/html')"
},
{
"identifier": "UpgradeRequired",
"path": "aquilify/exception/base.py",
"snippet": "class UpgradeRequired(Response, HTTPException):\n def __init__(self, detail=\"Upgrade Required\"):\n self.detail = f\"\"\"<h1>{detail}</h1>\"\"\"\n super().__init__(content=self.detail, status_code=426, content_type='text/html')"
},
{
"identifier": "TooManyRequests",
"path": "aquilify/exception/base.py",
"snippet": "class TooManyRequests(Response, HTTPException):\n def __init__(self, detail=\"\"):\n self.detail = error429(detail)\n super().__init__(content=self.detail, status_code=429, content_type='text/html')"
},
{
"identifier": "RequestHeaderFieldsTooLarge",
"path": "aquilify/exception/base.py",
"snippet": "class RequestHeaderFieldsTooLarge(Response, HTTPException):\n def __init__(self, detail=\"Request Header Fields Too Large\"):\n self.detail = f\"\"\"<h1>{detail}</h1>\"\"\"\n super().__init__(content=self.detail, status_code=431, content_type='text/html')"
},
{
"identifier": "UnavailableForLegalReasons",
"path": "aquilify/exception/base.py",
"snippet": "class UnavailableForLegalReasons(Response, HTTPException):\n def __init__(self, detail=\"Unavailable For Legal Reasons\"):\n self.detail = f\"\"\"<h1>{detail}</h1>\"\"\"\n super().__init__(content=self.detail, status_code=451, content_type='text/html')"
},
{
"identifier": "ServiceUnavailable",
"path": "aquilify/exception/base.py",
"snippet": "class ServiceUnavailable(Response, HTTPException):\n def __init__(self, detail=\"Service Unavailable\"):\n self.detail = f\"\"\"<h1>{detail}</h1>\"\"\"\n super().__init__(content=self.detail, status_code=503, content_type='text/html')"
},
{
"identifier": "NotImplemented",
"path": "aquilify/exception/base.py",
"snippet": "class NotImplemented(Response, HTTPException):\n def __init__(self, detail=\"Not Implemented\"):\n self.detail = f\"\"\"<h1>{detail}</h1>\"\"\"\n super().__init__(content=self.detail, status_code=501, content_type='text/html')"
},
{
"identifier": "GatewayTimeout",
"path": "aquilify/exception/base.py",
"snippet": "class GatewayTimeout(Response, HTTPException):\n def __init__(self, detail=\"Gateway Timeout\"):\n self.detail = f\"\"\"<h1>{detail}</h1>\"\"\"\n super().__init__(content=self.detail, status_code=504, content_type='text/html')"
},
{
"identifier": "HTTPVersionNotSupported",
"path": "aquilify/exception/base.py",
"snippet": "class HTTPVersionNotSupported(Response, HTTPException):\n def __init__(self, detail=\"HTTP Version Not Supported\"):\n self.detail = f\"\"\"<h1>{detail}</h1>\"\"\"\n super().__init__(content=self.detail, status_code=505, content_type='text/html')"
},
{
"identifier": "VariantAlsoNegotiates",
"path": "aquilify/exception/base.py",
"snippet": "class VariantAlsoNegotiates(Response, HTTPException):\n def __init__(self, detail=\"Variant Also Negotiates\"):\n self.detail = f\"\"\"<h1>{detail}</h1>\"\"\"\n super().__init__(content=self.detail, status_code=506, content_type='text/html')"
},
{
"identifier": "InsufficientStorage",
"path": "aquilify/exception/base.py",
"snippet": "class InsufficientStorage(Response, HTTPException):\n def __init__(self, detail=\"Insufficient Storage\"):\n self.detail = f\"\"\"<h1>{detail}</h1>\"\"\"\n super().__init__(content=self.detail, status_code=507, content_type='text/html')"
},
{
"identifier": "LoopDetected",
"path": "aquilify/exception/base.py",
"snippet": "class LoopDetected(Response, HTTPException):\n def __init__(self, detail=\"Loop Detected\"):\n self.detail = f\"\"\"<h1>{detail}</h1>\"\"\"\n super().__init__(content=self.detail, status_code=508, content_type='text/html')"
},
{
"identifier": "NotExtended",
"path": "aquilify/exception/base.py",
"snippet": "class NotExtended(Response, HTTPException):\n def __init__(self, detail=\"Not Extended\"):\n self.detail = f\"\"\"<h1>{detail}</h1>\"\"\"\n super().__init__(content=self.detail, status_code=510, content_type='text/html')"
},
{
"identifier": "NetworkAuthenticationRequired",
"path": "aquilify/exception/base.py",
"snippet": "class NetworkAuthenticationRequired(Response, HTTPException):\n def __init__(self, detail=\"Network Authentication Required\"):\n self.detail = f\"\"\"<h1>{detail}</h1>\"\"\"\n super().__init__(content=self.detail, status_code=511, content_type='text/html')"
},
{
"identifier": "PaymentRequired",
"path": "aquilify/exception/base.py",
"snippet": "class PaymentRequired(Response, HTTPException):\n def __init__(self, detail=\"Payement Required\"):\n self.detail = f\"\"\"<h1>{detail}</h1>\"\"\"\n super().__init__(content=self.detail, status_code=402, content_type='text/html')"
},
{
"identifier": "PayloadTooLarge",
"path": "aquilify/exception/base.py",
"snippet": "class PayloadTooLarge(Response, HTTPException):\n def __init__(self, detail=\"Payload Too Large\"):\n self.detail = f\"\"\"<h1>{detail}</h1>\"\"\"\n super().__init__(content=self.detail, status_code=413, content_type='text/html')"
},
{
"identifier": "ImATeapot",
"path": "aquilify/exception/base.py",
"snippet": "class ImATeapot(Response, HTTPException):\n def __init__(self, detail=\"ImATeapot\"):\n self.detail = f\"\"\"<h1>{detail}</h1>\"\"\"\n super().__init__(content=self.detail, status_code=418, content_type='text/html')"
},
{
"identifier": "PreconditionRequired",
"path": "aquilify/exception/base.py",
"snippet": "class PreconditionRequired(Response, HTTPException):\n def __init__(self, detail=\"Preconditional Required\"):\n self.detail = f\"\"\"<h1>{detail}</h1>\"\"\"\n super().__init__(content=self.detail, status_code=428, content_type='text/html')"
}
] | from ..exception.base import (
NotFound, Unauthorized, Forbidden, BadGateway, InternalServerError, MethodNotAllowed, BadRequest,
NotAcceptable, ProxyAuthenticationRequired, RequestTimeout, Conflict, Gone, LengthRequired, PreconditionFailed,
RequestURITooLong, UnsupportedMediaType, RequestedRangeNotSatisfiable, ExpectationFailed, UnprocessableEntity,
MisdirectedRequest, Locked, FailedDependency, UpgradeRequired, TooManyRequests, RequestHeaderFieldsTooLarge,
UnavailableForLegalReasons, ServiceUnavailable, NotImplemented,GatewayTimeout, HTTPVersionNotSupported,
VariantAlsoNegotiates,InsufficientStorage, LoopDetected, NotExtended, NetworkAuthenticationRequired, PaymentRequired,
PayloadTooLarge, ImATeapot, PreconditionRequired
) | 3,045 |
exception_dict = {
400: BadRequest,
401: Unauthorized,
402: PaymentRequired,
403: Forbidden,
404: NotFound,
405: MethodNotAllowed,
406: NotAcceptable,
407: ProxyAuthenticationRequired,
408: RequestTimeout,
409: Conflict,
410: Gone,
411: LengthRequired,
412: PreconditionFailed,
413: PayloadTooLarge,
414: RequestURITooLong,
415: UnsupportedMediaType,
416: RequestedRangeNotSatisfiable,
|
exception_dict = {
400: BadRequest,
401: Unauthorized,
402: PaymentRequired,
403: Forbidden,
404: NotFound,
405: MethodNotAllowed,
406: NotAcceptable,
407: ProxyAuthenticationRequired,
408: RequestTimeout,
409: Conflict,
410: Gone,
411: LengthRequired,
412: PreconditionFailed,
413: PayloadTooLarge,
414: RequestURITooLong,
415: UnsupportedMediaType,
416: RequestedRangeNotSatisfiable, | 417: ExpectationFailed, | 17 | 2023-11-16 08:26:02+00:00 | 4k |
Viicos/django-autotyping | src/django_autotyping/stubbing/codemods/create_overload_codemod.py | [
{
"identifier": "InsertAfterImportsVisitor",
"path": "src/django_autotyping/stubbing/codemods/base.py",
"snippet": "class InsertAfterImportsVisitor(ContextAwareTransformer):\n \"\"\"Insert a list of statements after imports.\"\"\"\n\n CONTEXT_KEY = \"InsertAfterImportsVisitor\"\n\n @classmethod\n def insert_after_imports(\n cls,\n context: CodemodContext,\n statements: Sequence[cst.SimpleStatementLine | cst.BaseCompoundStatement],\n ) -> None:\n \"\"\"Insert a list of statements following the module imports.\n\n If no imports are to be found, statements will be added at the beginning of the module.\"\"\"\n ctx_statements = context.scratch.get(cls.CONTEXT_KEY, [])\n ctx_statements.extend(statements)\n context.scratch[cls.CONTEXT_KEY] = ctx_statements\n\n def leave_Module(self, original_node: cst.Module, updated_node: cst.Module) -> cst.Module:\n statements = self.context.scratch.get(self.CONTEXT_KEY, [])\n if not statements:\n return updated_node\n\n body = list(updated_node.body)\n\n last_import = next((node for node in reversed(body) if m.matches(node, IMPORT_MATCHER)), None)\n index = body.index(last_import) + 1 if last_import is not None else 0\n body[index:index] = statements\n\n return updated_node.with_changes(\n body=body,\n )"
},
{
"identifier": "StubVisitorBasedCodemod",
"path": "src/django_autotyping/stubbing/codemods/base.py",
"snippet": "class StubVisitorBasedCodemod(VisitorBasedCodemodCommand):\n \"\"\"The base class for all codemods used for custom stub files.\"\"\"\n\n STUB_FILES: ClassVar[set[str]]\n\n def __init__(self, context: CodemodContext) -> None:\n super().__init__(context)\n self.django_context = cast(\"DjangoStubbingContext\", context.scratch[\"django_context\"])\n self.stubs_settings = cast(\"StubsGenerationSettings\", context.scratch[\"stubs_settings\"])\n\n def add_model_imports(self) -> None:\n \"\"\"Add the defined models in the Django context as imports to the current file.\"\"\"\n\n # TODO LibCST should support adding imports from `ImportItem` objects\n imports = AddImportsVisitor._get_imports_from_context(self.context)\n imports.extend(self.django_context.model_imports)\n self.context.scratch[AddImportsVisitor.CONTEXT_KEY] = imports\n\n def add_typing_imports(self, names: list[str]) -> None:\n \"\"\"Add imports to the `typing` module (either from `typing` or `typing_extensions`).\"\"\"\n for name in names:\n AddImportsVisitor.add_needed_import(\n self.context,\n module=\"typing_extensions\" if name in TYPING_EXTENSIONS_NAMES else \"typing\",\n obj=name,\n )\n\n def transform_module(self, tree: cst.Module) -> cst.Module:\n # LibCST automatically runs `AddImportsVisitor` and `RemoveImportsVisitor`,\n # but this is hardcoded. So we manually add our visitor.\n tree = super().transform_module(tree)\n transform = InsertAfterImportsVisitor\n\n return self._instantiate_and_run(transform, tree)"
},
{
"identifier": "OVERLOAD_DECORATOR",
"path": "src/django_autotyping/stubbing/codemods/constants.py",
"snippet": "OVERLOAD_DECORATOR = cst.Decorator(decorator=cst.Name(\"overload\"))"
},
{
"identifier": "TypedDictAttribute",
"path": "src/django_autotyping/stubbing/codemods/utils.py",
"snippet": "class TypedDictAttribute:\n name: str\n \"\"\"The attribute name.\"\"\"\n\n annotation: str\n \"\"\"The annotation of the field.\"\"\"\n\n docstring: str | None = None\n \"\"\"The docstring of the field.\"\"\"\n\n required: bool = False\n \"\"\"Whether the field should be marked as `Required`.\"\"\"\n\n not_required: bool = False\n \"\"\"Whether the field should be marked as `NotRequired`.\"\"\"\n\n @property\n def marked_annotation(self) -> str:\n \"\"\"The annotation additionally marked as required or not required.\"\"\"\n if self.required:\n return f\"Required[{self.annotation}]\"\n if self.not_required:\n return f\"NotRequired[{self.annotation}]\"\n return self.annotation\n\n def __post_init__(self):\n if self.required and self.not_required:\n raise ValueError(\"`required` and `not_required` can't be set together.\")"
},
{
"identifier": "build_typed_dict",
"path": "src/django_autotyping/stubbing/codemods/utils.py",
"snippet": "def build_typed_dict(\n name: str, attributes: list[TypedDictAttribute], total: bool = True, leading_line: bool = False\n) -> cst.SimpleStatementLine | cst.ClassDef:\n \"\"\"Build a `TypedDict` class definition.\n\n If one of the attribute's name is not a valid Python identifier, the alternative functional syntax\n will be used (a `SimpleStatementLine` will be created instead of a `ClassDef`).\n\n Args:\n name: The name of the resulting class.\n attributes: A list of `TypedDictAttribute` instances, representing attributes of the dict.\n total: Whether `total=True` should be used.\n leadind_line: Whether an empty leading line should be added before the class definition.\n\n \"\"\"\n functional = any(not attr.name.isidentifier() for attr in attributes)\n leading_lines = [cst.EmptyLine(indent=False)] if leading_line else []\n if not functional:\n body: list[cst.SimpleStatementLine] = []\n\n for attr in attributes:\n ann_statement = helpers.parse_template_statement(f\"{attr.name}: {attr.marked_annotation}\")\n if attributes.index(attr) != 0:\n ann_statement = ann_statement.with_changes(leading_lines=[cst.EmptyLine(indent=False)])\n body.append(ann_statement)\n\n if attr.docstring:\n docstring = f'\"\"\"{_indent(attr.docstring)}\"\"\"'\n body.append(cst.SimpleStatementLine(body=[cst.Expr(cst.SimpleString(docstring))]))\n\n return cst.ClassDef(\n name=cst.Name(name),\n bases=[cst.Arg(cst.Name(\"TypedDict\"))],\n keywords=[\n cst.Arg(\n keyword=cst.Name(\"total\"),\n equal=cst.AssignEqual(cst.SimpleWhitespace(\"\"), cst.SimpleWhitespace(\"\")),\n value=cst.Name(\"False\"),\n )\n ]\n if not total\n else [],\n body=cst.IndentedBlock(body),\n leading_lines=leading_lines,\n )\n\n # If some attributes aren't Python identifiers, we use the functional form:\n # name = TypedDict(\"name\", {\"x\": int, \"y\": int})\n return cst.SimpleStatementLine(\n body=[\n cst.Assign(\n targets=[cst.AssignTarget(cst.Name(name))],\n value=cst.Call(\n func=cst.Name(\"TypedDict\"),\n args=[\n cst.Arg(cst.SimpleString(f'\"{name}\"')),\n cst.Arg(\n cst.Dict(\n elements=[\n cst.DictElement(\n key=cst.SimpleString(f'\"{attr.name}\"'), value=cst.Name(attr.marked_annotation)\n )\n for attr in attributes\n ]\n )\n ),\n ],\n ),\n )\n ],\n leading_lines=leading_lines,\n )"
},
{
"identifier": "get_param",
"path": "src/django_autotyping/stubbing/codemods/utils.py",
"snippet": "def get_param(node: cst.FunctionDef, param_name: str) -> cst.Param:\n \"\"\"Get the `Param` node matching `param_name`.\"\"\"\n try:\n return next(param for param in node.params.params if param.name.value == param_name)\n except StopIteration:\n raise RuntimeError(\n f\"The `FunctionDef` node with name {node.name.value!r} does not have any parameter named {param_name!r}\"\n )"
}
] | from typing import TYPE_CHECKING, cast
from django.db.models import Field
from libcst import helpers
from libcst.codemod import CodemodContext
from libcst.metadata import ScopeProvider
from django_autotyping.typing import FlattenFunctionDef
from .base import InsertAfterImportsVisitor, StubVisitorBasedCodemod
from .constants import OVERLOAD_DECORATOR
from .utils import TypedDictAttribute, build_typed_dict, get_param
from ..django_context import DjangoStubbingContext
import libcst as cst
import libcst.matchers as m | 2,346 | from __future__ import annotations
if TYPE_CHECKING:
# Matchers:
MANAGER_QS_CLASS_DEF_MATCHER = m.ClassDef(
name=m.SaveMatchedNode(m.Name("BaseManager") | m.Name("_QuerySet"), "cls_name")
)
"""Matches the `BaseManager` and `_QuerySet` class definitions."""
MODEL_CLASS_DEF_MATCHER = m.ClassDef(name=m.SaveMatchedNode(m.Name("Model"), "cls_name"))
"""Matches the `Model` class definition."""
CREATE_DEF_MATCHER = m.FunctionDef(name=m.Name("create") | m.Name("acreate"))
"""Matches the `create` and `acreate` method definitions."""
INIT_DEF_MATCHER = m.FunctionDef(name=m.Name("__init__"))
"""Matches the `__init__` method definition."""
class CreateOverloadCodemod(StubVisitorBasedCodemod):
"""A codemod that will add overloads to methods creating an instance of a model.
**Rule identifier**: `DJAS002`.
**Related settings**:
-[`MODEL_FIELDS_OPTIONAL`][django_autotyping.app_settings.StubsGenerationSettings.MODEL_FIELDS_OPTIONAL].
```python
MyModel(...) # Signature is provided.
MyModel.objects.create(...) # Signature is provided.
```
??? abstract "Implementation"
This codemod makes use of the [PEP 692][pep-0692]. If your type checker/LSP supports it,
documentation is provided for each field if [`help_text`][django.db.models.Field.help_text] was set.
"""
METADATA_DEPENDENCIES = {ScopeProvider}
STUB_FILES = {"db/models/manager.pyi", "db/models/query.pyi", "db/models/base.pyi"}
def __init__(self, context: CodemodContext) -> None:
super().__init__(context)
self.add_model_imports()
model_typed_dicts = _build_model_kwargs(self.django_context, self.stubs_settings.MODEL_FIELDS_OPTIONAL)
| from __future__ import annotations
if TYPE_CHECKING:
# Matchers:
MANAGER_QS_CLASS_DEF_MATCHER = m.ClassDef(
name=m.SaveMatchedNode(m.Name("BaseManager") | m.Name("_QuerySet"), "cls_name")
)
"""Matches the `BaseManager` and `_QuerySet` class definitions."""
MODEL_CLASS_DEF_MATCHER = m.ClassDef(name=m.SaveMatchedNode(m.Name("Model"), "cls_name"))
"""Matches the `Model` class definition."""
CREATE_DEF_MATCHER = m.FunctionDef(name=m.Name("create") | m.Name("acreate"))
"""Matches the `create` and `acreate` method definitions."""
INIT_DEF_MATCHER = m.FunctionDef(name=m.Name("__init__"))
"""Matches the `__init__` method definition."""
class CreateOverloadCodemod(StubVisitorBasedCodemod):
"""A codemod that will add overloads to methods creating an instance of a model.
**Rule identifier**: `DJAS002`.
**Related settings**:
-[`MODEL_FIELDS_OPTIONAL`][django_autotyping.app_settings.StubsGenerationSettings.MODEL_FIELDS_OPTIONAL].
```python
MyModel(...) # Signature is provided.
MyModel.objects.create(...) # Signature is provided.
```
??? abstract "Implementation"
This codemod makes use of the [PEP 692][pep-0692]. If your type checker/LSP supports it,
documentation is provided for each field if [`help_text`][django.db.models.Field.help_text] was set.
"""
METADATA_DEPENDENCIES = {ScopeProvider}
STUB_FILES = {"db/models/manager.pyi", "db/models/query.pyi", "db/models/base.pyi"}
def __init__(self, context: CodemodContext) -> None:
super().__init__(context)
self.add_model_imports()
model_typed_dicts = _build_model_kwargs(self.django_context, self.stubs_settings.MODEL_FIELDS_OPTIONAL) | InsertAfterImportsVisitor.insert_after_imports(context, model_typed_dicts) | 0 | 2023-11-11 20:42:05+00:00 | 4k |
IBM/oper8 | tests/watch_manager/python_watch_manager/filters/test_filters.py | [
{
"identifier": "KubeEventType",
"path": "oper8/deploy_manager/kube_event.py",
"snippet": "class KubeEventType(Enum):\n \"\"\"Enum for all possible kubernetes event types\"\"\"\n\n DELETED = \"DELETED\"\n MODIFIED = \"MODIFIED\"\n ADDED = \"ADDED\""
},
{
"identifier": "ReadyReason",
"path": "oper8/status.py",
"snippet": "class ReadyReason(Enum):\n \"\"\"Nested class to hold reason constants for the Ready condition\"\"\"\n\n # The application is stable and ready for traffic\n STABLE = \"Stable\"\n\n # The application is rolling out for the first time\n INITIALIZING = \"Initializing\"\n\n # The application rollout is in progress and will continue\n # the next reconcile\n IN_PROGRESS = \"InProgress\"\n\n # The application has hit an unrecoverable config error during rollout\n CONFIG_ERROR = \"ConfigError\"\n\n # The application has hit an unrecoverable error during rollout\n ERRORED = \"Errored\""
},
{
"identifier": "make_application_status",
"path": "oper8/status.py",
"snippet": "def make_application_status( # pylint: disable=too-many-arguments,too-many-locals\n ready_reason: Optional[Union[ReadyReason, str]] = None,\n ready_message: str = \"\",\n updating_reason: Optional[Union[UpdatingReason, str]] = None,\n updating_message: str = \"\",\n component_state: Optional[CompletionState] = None,\n external_conditions: Optional[List[dict]] = None,\n external_status: Optional[dict] = None,\n version: Optional[str] = None,\n supported_versions: Optional[List[str]] = None,\n operator_version: Optional[str] = None,\n kind: Optional[str] = None,\n) -> dict:\n \"\"\"Create a full status object for an application\n\n Args:\n ready_reason: Optional[ReadyReason or str]\n The reason enum for the Ready condition\n ready_message: str\n Plain-text message explaining the Ready condition value\n updating_reason: Optional[UpdatingReason or str]\n The reason enum for the Updating condition\n updating_message: str\n Plain-text message explaining the Updating condition value\n component_state: Optional[CompletionState]\n The terminal state of components in the latest rollout\n external_conditions: Optional[List[dict]]\n Additional conditions to include in the update\n external_status: Optional[dict]\n Additional key/value status elements besides \"conditions\" that\n should be preserved through the update\n version: Optional[str]\n The verified version of the application\n supported_versions: Optional[List[str]]\n The list of supported versions for this application\n operator_version: Optional[str]\n The operator version for this application\n kind: Optional[str]\n The kind of reconciliing CR. If specified, this function adds\n service status field which is compliant with IBM Cloud Pak\n requirements.\n\n Returns:\n current_status: dict\n Dict representation of the status for the application\n \"\"\"\n now = datetime.now()\n conditions = []\n if ready_reason is not None:\n conditions.append(_make_ready_condition(ready_reason, ready_message, now))\n if updating_reason is not None:\n conditions.append(\n _make_updating_condition(updating_reason, updating_message, now)\n )\n conditions.extend(external_conditions or [])\n status = external_status or {}\n status[\"conditions\"] = conditions\n\n # If a component_state is given, create the top-level status elements to\n # track which components have deployed and verified\n if component_state is not None:\n log.debug2(\"Adding component state to status\")\n status[COMPONENT_STATUS] = _make_component_state(component_state)\n log.debug3(status[COMPONENT_STATUS])\n\n # Create the versions section\n if version is not None:\n nested_set(status, VERSIONS_FIELD_CURRENT_VERSION, version)\n if supported_versions is not None:\n nested_set(\n status,\n VERSIONS_FIELD_AVAILABLE_VERSIONS,\n [_make_available_version(version) for version in supported_versions],\n )\n if operator_version is not None:\n nested_set(status, OPERATOR_VERSION, operator_version)\n\n # Create service status section\n if kind:\n # make field name follow k8s naming convention\n service_status_field = kind[0].lower()\n if len(kind) > 1:\n service_status_field += kind[1:]\n service_status_field += \"Status\"\n\n # Only update service status if the current value is set by oper8. 
This\n # allows services to override the service status section\n current_service_status = status.get(service_status_field)\n managed_service_values = [status.value for status in ServiceStatus]\n if (\n not current_service_status\n or current_service_status in managed_service_values\n ):\n current_service_status = _make_service_status(\n ready_reason, updating_reason\n ).value\n\n status[service_status_field] = current_service_status\n\n return status"
},
{
"identifier": "make_managed_object",
"path": "oper8/test_helpers/pwm_helpers.py",
"snippet": "def make_managed_object(*args, **kwargs):\n return ManagedObject(make_resource(*args, **kwargs))"
},
{
"identifier": "AnnotationFilter",
"path": "oper8/watch_manager/python_watch_manager/filters/filters.py",
"snippet": "class AnnotationFilter(Filter):\n \"\"\"Filter resources to reconcile on annotation changes\"\"\"\n\n def __init__(self, resource: ManagedObject):\n \"\"\"Initialize the annotation hash variable\"\"\"\n self.annotations = None\n super().__init__(resource)\n\n def test( # pylint: disable=inconsistent-return-statements\n self,\n resource: ManagedObject,\n event: KubeEventType,\n ) -> Optional[bool]:\n \"\"\"Test if a resource's annotation has changed\"\"\"\n # Ignore Added and deleted events\n if event in [KubeEventType.ADDED, KubeEventType.DELETED]:\n return\n\n return self.annotations != self.get_annotation_hash(resource)\n\n def update(self, resource: ManagedObject):\n \"\"\"Update the currently stored annotation\"\"\"\n self.annotations = self.get_annotation_hash(resource)\n\n def get_annotation_hash(self, resource: ManagedObject) -> str:\n \"\"\"Helper function to get the annotation hash\"\"\"\n return obj_to_hash(resource.metadata.get(\"annotations\", {}))"
},
{
"identifier": "CreationDeletionFilter",
"path": "oper8/watch_manager/python_watch_manager/filters/filters.py",
"snippet": "class CreationDeletionFilter(Filter):\n \"\"\"Filter to ensure reconciliation on creation and deletion events\"\"\"\n\n def test( # pylint: disable=inconsistent-return-statements\n self,\n resource: ManagedObject,\n event: KubeEventType,\n ) -> Optional[bool]:\n \"\"\"Return true if event is ADDED or DELETED\"\"\"\n\n # Ignore non Added/Deleted Events\n if event not in [KubeEventType.ADDED, KubeEventType.DELETED]:\n return\n\n return True"
},
{
"identifier": "DependentWatchFilter",
"path": "oper8/watch_manager/python_watch_manager/filters/filters.py",
"snippet": "class DependentWatchFilter(Filter):\n \"\"\"Don't reconcile creation events as we can assume the owner created\n them\"\"\"\n\n def test(self, resource: ManagedObject, event: KubeEventType) -> Optional[bool]:\n \"\"\"Return False if event is ADDED\"\"\"\n return event != KubeEventType.ADDED"
},
{
"identifier": "GenerationFilter",
"path": "oper8/watch_manager/python_watch_manager/filters/filters.py",
"snippet": "class GenerationFilter(Filter):\n \"\"\"Filter for reconciling on generation changes for resources that support it\"\"\"\n\n def __init__(self, resource: ManagedObject):\n \"\"\"Set generation instance variable\"\"\"\n super().__init__(resource)\n self.generation = None\n\n def test( # pylint: disable=inconsistent-return-statements\n self,\n resource: ManagedObject,\n event: KubeEventType,\n ) -> Optional[bool]:\n \"\"\"Return true if resource generation is different than before\"\"\"\n # Only update&test resources with a generation\n if not self.generation:\n return\n\n # Only test on resource updates\n if event in [KubeEventType.ADDED, KubeEventType.DELETED]:\n return\n\n # Test if new generation is different\n return self.generation != resource.metadata.get(\"generation\")\n\n def update(self, resource: ManagedObject):\n \"\"\"Update the currently observed generation\"\"\"\n self.generation = resource.metadata.get(\"generation\")"
},
{
"identifier": "LabelFilter",
"path": "oper8/watch_manager/python_watch_manager/filters/filters.py",
"snippet": "class LabelFilter(Filter):\n \"\"\"Filter for resources that match a set of labels\"\"\"\n\n @abstractclassproperty\n def labels(self) -> dict:\n \"\"\"Subclasses must implement a labels class attribute\"\"\"\n\n def test(self, resource: ManagedObject, event: KubeEventType) -> Optional[bool]:\n \"\"\"Return true is a resource matches the requested labels\"\"\"\n resource_labels = resource.get(\"metadata\", {}).get(\"labels\")\n # Check to make sure every requested label matches\n return all(\n resource_labels.get(label) == value for label, value in self.labels.items()\n )"
},
{
"identifier": "NoGenerationFilter",
"path": "oper8/watch_manager/python_watch_manager/filters/filters.py",
"snippet": "class NoGenerationFilter(Filter):\n \"\"\"Filter for reconciling changes to spec on resources that don't support\n the generation field like pods. It does this by hashing the objects excluding\n status and metadata\"\"\"\n\n def __init__(self, resource: ManagedObject):\n \"\"\"Check if resource supports generation and initialize the hash dict\"\"\"\n self.supports_generation = resource.metadata.get(\"generation\") is not None\n self.resource_hashes = {}\n super().__init__(resource)\n\n def test( # pylint: disable=inconsistent-return-statements\n self,\n resource: ManagedObject,\n event: KubeEventType,\n ) -> Optional[bool]:\n \"\"\"Return True if a resources current hash differs from the current\"\"\"\n # Don't test resources that support generation or if we don't have hashes yet\n if self.supports_generation or not self.resource_hashes:\n return\n\n # Only test on resource updates\n if event in [KubeEventType.ADDED, KubeEventType.DELETED]:\n return\n\n # Check each stored resource hash to see if its\n # changed\n for key, obj_has in self.resource_hashes.items():\n if obj_has != obj_to_hash(resource.get(key)):\n log.debug2(\"Detected change in %s\", key)\n return True\n\n return False\n\n def update(self, resource: ManagedObject):\n \"\"\"Update the observed spec hashes\"\"\"\n if self.supports_generation:\n return\n\n # Get the default hashes for all object keys except metadata\n # and status\n for key, obj in resource.definition.items():\n if key in [\"metadata\", \"status\", \"kind\", \"apiVersion\"]:\n continue\n\n self.resource_hashes[key] = obj_to_hash(obj)"
},
{
"identifier": "PauseFilter",
"path": "oper8/watch_manager/python_watch_manager/filters/filters.py",
"snippet": "class PauseFilter(Filter):\n \"\"\"This filter skips resources that have the oper8 pause annotation\"\"\"\n\n def test(self, resource: ManagedObject, event: KubeEventType) -> Optional[bool]:\n \"\"\"Test if a resource has the pause annotation\"\"\"\n return not ReconcileManager._is_paused( # pylint: disable=protected-access\n resource\n )"
},
{
"identifier": "ResourceVersionFilter",
"path": "oper8/watch_manager/python_watch_manager/filters/filters.py",
"snippet": "class ResourceVersionFilter(Filter):\n \"\"\"Filter for duplicate resource versions which happens when restarting a\n watch connection\"\"\"\n\n def __init__(self, resource: ManagedObject):\n \"\"\"Initialize the resource version list\"\"\"\n # Use a dequeue instead of a list/set to set a bound on the number\n # of tracked versions\n self.resource_versions = deque([], maxlen=RESOURCE_VERSION_KEEP_COUNT)\n super().__init__(resource)\n\n def test( # pylint: disable=inconsistent-return-statements\n self,\n resource: ManagedObject,\n event: KubeEventType,\n ) -> Optional[bool]:\n \"\"\"Test if the resource's resourceVersion has been seen before\"\"\"\n\n # Don't skip add events as the kubernetes watch can duplicate events\n if event == KubeEventType.DELETED:\n return\n\n return resource.resource_version not in self.resource_versions\n\n def update(self, resource: ManagedObject):\n \"\"\"Add the resources ResourceVersion to the list\"\"\"\n self.resource_versions.append(resource.resource_version)"
},
{
"identifier": "SubsystemStatusFilter",
"path": "oper8/watch_manager/python_watch_manager/filters/filters.py",
"snippet": "class SubsystemStatusFilter(Filter):\n \"\"\"Reconcile oper8 controllers when their oper8 status changes\n\n EXPERIMENTAL: This has passed basic validation but has not been rigorously tested\n in the field\n \"\"\"\n\n def __init__(self, resource: ManagedObject):\n \"\"\"Initialize the currently observed ready condition\"\"\"\n self.ready_condition = None\n super().__init__(resource)\n\n def test( # pylint: disable=inconsistent-return-statements\n self,\n resource: ManagedObject,\n event: KubeEventType,\n ) -> Optional[bool]:\n \"\"\"Test if a resources subsystem condition has changed\"\"\"\n if event in [KubeEventType.ADDED, KubeEventType.DELETED]:\n return\n\n return self.ready_condition != get_condition(\n READY_CONDITION, resource.get(\"status\", {})\n ).get(\"reason\")\n\n def update(self, resource: ManagedObject):\n \"\"\"Update the currently observed ready condition\"\"\"\n self.ready_condition = get_condition(\n READY_CONDITION, resource.get(\"status\", {})\n ).get(\"reason\")"
},
{
"identifier": "UserAnnotationFilter",
"path": "oper8/watch_manager/python_watch_manager/filters/filters.py",
"snippet": "class UserAnnotationFilter(AnnotationFilter):\n \"\"\"Filter resources to reconcile on user annotation changes. This excludes\n kubernetes and openshift annotations\n \"\"\"\n\n def get_annotation_hash(self, resource: ManagedObject) -> str:\n \"\"\"Overriden function to exclude common platform annotations from\n the annotation hash\"\"\"\n output_annotations = {}\n for key, value in resource.metadata.get(\"annotations\", {}).items():\n if self.contains_platform_key(key):\n continue\n\n output_annotations[key] = value\n\n return obj_to_hash(output_annotations)\n\n def contains_platform_key(self, key: str) -> bool:\n \"\"\"Helper to check if the key contains one of the\n platform annotations\"\"\"\n return any(\n reserved_key in key for reserved_key in RESERVED_PLATFORM_ANNOTATIONS\n )"
}
] | from oper8.deploy_manager.kube_event import KubeEventType
from oper8.status import ReadyReason, make_application_status
from oper8.test_helpers.pwm_helpers import make_managed_object
from oper8.watch_manager.python_watch_manager.filters.filters import (
AnnotationFilter,
CreationDeletionFilter,
DependentWatchFilter,
GenerationFilter,
LabelFilter,
NoGenerationFilter,
PauseFilter,
ResourceVersionFilter,
SubsystemStatusFilter,
UserAnnotationFilter,
) | 3,504 | """
Tests for the Filter classes
"""
# Local
## Helpers #####################################################################
def test_filter_creation_deletion():
resource = make_managed_object()
filter = CreationDeletionFilter(resource)
| """
Tests for the Filter classes
"""
# Local
## Helpers #####################################################################
def test_filter_creation_deletion():
resource = make_managed_object()
filter = CreationDeletionFilter(resource)
| assert filter.update_and_test(resource, KubeEventType.ADDED) | 0 | 2023-11-15 16:43:29+00:00 | 4k |
ariebovenberg/whenever | tests/test_utc_datetime.py | [
{
"identifier": "AlwaysEqual",
"path": "tests/common.py",
"snippet": "class AlwaysEqual:\n def __eq__(self, other):\n return True"
},
{
"identifier": "AlwaysLarger",
"path": "tests/common.py",
"snippet": "class AlwaysLarger:\n def __lt__(self, other):\n return False\n\n def __le__(self, other):\n return False\n\n def __gt__(self, other):\n return True\n\n def __ge__(self, other):\n return True"
},
{
"identifier": "AlwaysSmaller",
"path": "tests/common.py",
"snippet": "class AlwaysSmaller:\n def __lt__(self, other):\n return True\n\n def __le__(self, other):\n return True\n\n def __gt__(self, other):\n return False\n\n def __ge__(self, other):\n return False"
},
{
"identifier": "NeverEqual",
"path": "tests/common.py",
"snippet": "class NeverEqual:\n def __eq__(self, other):\n return False"
},
{
"identifier": "local_ams_tz",
"path": "tests/common.py",
"snippet": "@contextmanager\ndef local_ams_tz():\n with patch.dict(os.environ, {\"TZ\": \"Europe/Amsterdam\"}):\n tzset()\n yield"
},
{
"identifier": "local_nyc_tz",
"path": "tests/common.py",
"snippet": "@contextmanager\ndef local_nyc_tz():\n with patch.dict(os.environ, {\"TZ\": \"America/New_York\"}):\n tzset()\n yield"
}
] | import pickle
import weakref
import pytest
from copy import copy, deepcopy
from datetime import datetime as py_datetime
from datetime import timedelta, timezone
from freezegun import freeze_time
from hypothesis import given
from hypothesis.strategies import text
from pytest import approx
from whenever import (
AwareDateTime,
InvalidFormat,
LocalDateTime,
NaiveDateTime,
OffsetDateTime,
UTCDateTime,
ZonedDateTime,
hours,
)
from .common import (
AlwaysEqual,
AlwaysLarger,
AlwaysSmaller,
NeverEqual,
local_ams_tz,
local_nyc_tz,
) | 1,615 | assert d.hour == 5
assert d.minute == 12
assert d.second == 30
assert d.microsecond == 450
assert d.offset == timedelta()
assert d.tzinfo == timezone.utc
def test_optionality(self):
assert (
UTCDateTime(2020, 8, 15, 12)
== UTCDateTime(2020, 8, 15, 12, 0)
== UTCDateTime(2020, 8, 15, 12, 0, 0)
== UTCDateTime(2020, 8, 15, 12, 0, 0, 0)
)
def test_invalid(self):
with pytest.raises(ValueError, match="microsecond must"):
UTCDateTime(2020, 8, 15, 12, 8, 30, 1_000_000)
def test_kwargs(self):
d = UTCDateTime(
year=2020, month=8, day=15, hour=5, minute=12, second=30
)
assert d == UTCDateTime(2020, 8, 15, 5, 12, 30)
def test_immutable():
d = UTCDateTime(2020, 8, 15)
with pytest.raises(AttributeError):
d.year = 2021 # type: ignore[misc]
@pytest.mark.parametrize(
"d, expected",
[
(
UTCDateTime(2020, 8, 15, 23, 12, 9, 987_654),
"2020-08-15T23:12:09.987654Z",
),
(UTCDateTime(2020, 8, 15, 23, 12, 9), "2020-08-15T23:12:09Z"),
],
)
def test_canonical_str(d: UTCDateTime, expected: str):
assert str(d) == expected
assert d.canonical_str() == expected
class TestFromCanonicalStr:
def test_valid(self):
assert UTCDateTime.from_canonical_str(
"2020-08-15T12:08:30Z"
) == UTCDateTime(2020, 8, 15, 12, 8, 30)
def test_valid_three_fractions(self):
assert UTCDateTime.from_canonical_str(
"2020-08-15T12:08:30.349Z"
) == UTCDateTime(2020, 8, 15, 12, 8, 30, 349_000)
def test_valid_six_fractions(self):
assert UTCDateTime.from_canonical_str(
"2020-08-15T12:08:30.349123Z"
) == UTCDateTime(2020, 8, 15, 12, 8, 30, 349_123)
def test_single_space_instead_of_T(self):
assert UTCDateTime.from_canonical_str(
"2020-08-15 12:08:30Z"
) == UTCDateTime(2020, 8, 15, 12, 8, 30)
def test_unpadded(self):
with pytest.raises(InvalidFormat):
UTCDateTime.from_canonical_str("2020-8-15T12:8:30Z")
def test_overly_precise_fraction(self):
with pytest.raises(InvalidFormat):
UTCDateTime.from_canonical_str("2020-08-15T12:08:30.123456789123Z")
def test_invalid_lowercase_z(self):
with pytest.raises(InvalidFormat):
UTCDateTime.from_canonical_str("2020-08-15T12:08:30z")
def test_no_trailing_z(self):
with pytest.raises(InvalidFormat):
UTCDateTime.from_canonical_str("2020-08-15T12:08:30")
def test_no_seconds(self):
with pytest.raises(InvalidFormat):
UTCDateTime.from_canonical_str("2020-08-15T12:08Z")
def test_empty(self):
with pytest.raises(InvalidFormat):
UTCDateTime.from_canonical_str("")
def test_garbage(self):
with pytest.raises(InvalidFormat):
UTCDateTime.from_canonical_str("garbage")
@given(text())
def test_fuzzing(self, s: str):
with pytest.raises(InvalidFormat):
UTCDateTime.from_canonical_str(s)
class TestEquality:
def test_same(self):
d = UTCDateTime(2020, 8, 15)
same = d.replace()
assert d == same
assert not d != same
assert hash(d) == hash(same)
def test_different(self):
d = UTCDateTime(2020, 8, 15)
different = d.replace(year=2021)
assert d != different
assert not d == different
assert hash(d) != hash(different)
def test_notimplemented(self):
d = UTCDateTime(2020, 8, 15)
assert d == AlwaysEqual()
|
class TestInit:
def test_basic(self):
d = UTCDateTime(2020, 8, 15, 5, 12, 30, 450)
assert d.year == 2020
assert d.month == 8
assert d.day == 15
assert d.hour == 5
assert d.minute == 12
assert d.second == 30
assert d.microsecond == 450
assert d.offset == timedelta()
assert d.tzinfo == timezone.utc
def test_optionality(self):
assert (
UTCDateTime(2020, 8, 15, 12)
== UTCDateTime(2020, 8, 15, 12, 0)
== UTCDateTime(2020, 8, 15, 12, 0, 0)
== UTCDateTime(2020, 8, 15, 12, 0, 0, 0)
)
def test_invalid(self):
with pytest.raises(ValueError, match="microsecond must"):
UTCDateTime(2020, 8, 15, 12, 8, 30, 1_000_000)
def test_kwargs(self):
d = UTCDateTime(
year=2020, month=8, day=15, hour=5, minute=12, second=30
)
assert d == UTCDateTime(2020, 8, 15, 5, 12, 30)
def test_immutable():
d = UTCDateTime(2020, 8, 15)
with pytest.raises(AttributeError):
d.year = 2021 # type: ignore[misc]
@pytest.mark.parametrize(
"d, expected",
[
(
UTCDateTime(2020, 8, 15, 23, 12, 9, 987_654),
"2020-08-15T23:12:09.987654Z",
),
(UTCDateTime(2020, 8, 15, 23, 12, 9), "2020-08-15T23:12:09Z"),
],
)
def test_canonical_str(d: UTCDateTime, expected: str):
assert str(d) == expected
assert d.canonical_str() == expected
class TestFromCanonicalStr:
def test_valid(self):
assert UTCDateTime.from_canonical_str(
"2020-08-15T12:08:30Z"
) == UTCDateTime(2020, 8, 15, 12, 8, 30)
def test_valid_three_fractions(self):
assert UTCDateTime.from_canonical_str(
"2020-08-15T12:08:30.349Z"
) == UTCDateTime(2020, 8, 15, 12, 8, 30, 349_000)
def test_valid_six_fractions(self):
assert UTCDateTime.from_canonical_str(
"2020-08-15T12:08:30.349123Z"
) == UTCDateTime(2020, 8, 15, 12, 8, 30, 349_123)
def test_single_space_instead_of_T(self):
assert UTCDateTime.from_canonical_str(
"2020-08-15 12:08:30Z"
) == UTCDateTime(2020, 8, 15, 12, 8, 30)
def test_unpadded(self):
with pytest.raises(InvalidFormat):
UTCDateTime.from_canonical_str("2020-8-15T12:8:30Z")
def test_overly_precise_fraction(self):
with pytest.raises(InvalidFormat):
UTCDateTime.from_canonical_str("2020-08-15T12:08:30.123456789123Z")
def test_invalid_lowercase_z(self):
with pytest.raises(InvalidFormat):
UTCDateTime.from_canonical_str("2020-08-15T12:08:30z")
def test_no_trailing_z(self):
with pytest.raises(InvalidFormat):
UTCDateTime.from_canonical_str("2020-08-15T12:08:30")
def test_no_seconds(self):
with pytest.raises(InvalidFormat):
UTCDateTime.from_canonical_str("2020-08-15T12:08Z")
def test_empty(self):
with pytest.raises(InvalidFormat):
UTCDateTime.from_canonical_str("")
def test_garbage(self):
with pytest.raises(InvalidFormat):
UTCDateTime.from_canonical_str("garbage")
@given(text())
def test_fuzzing(self, s: str):
with pytest.raises(InvalidFormat):
UTCDateTime.from_canonical_str(s)
class TestEquality:
def test_same(self):
d = UTCDateTime(2020, 8, 15)
same = d.replace()
assert d == same
assert not d != same
assert hash(d) == hash(same)
def test_different(self):
d = UTCDateTime(2020, 8, 15)
different = d.replace(year=2021)
assert d != different
assert not d == different
assert hash(d) != hash(different)
def test_notimplemented(self):
d = UTCDateTime(2020, 8, 15)
assert d == AlwaysEqual() | assert d != NeverEqual() | 3 | 2023-11-10 21:08:49+00:00 | 4k |
tonylampada/jarvisportal | jarvisportal/gptexec.py | [
{
"identifier": "GPT",
"path": "jarvisportal/gpt.py",
"snippet": "class GPT:\n def __init__(self, assistant_id):\n self.client = OpenAI()\n # self.thread_id = self._get_or_create_thread_id() # expensive :(\n self.thread_id = self._create_thread_id()\n self.assistant_id = assistant_id\n self.last_messsage = None\n self.last_step_id = None\n self.run = None\n self._cancel_pending_runs()\n \n def _get_or_create_thread_id(self):\n threadfilename = \"./.gptexecthread\"\n if os.getenv(\"GPTEXEC_THREAD_ID\"):\n thread_id = os.getenv(\"GPTEXEC_THREAD_ID\")\n print(f\"continuing GPT thread {thread_id}\")\n return thread_id\n elif os.path.exists(threadfilename):\n with open(threadfilename, \"r\") as file:\n thread_id = file.read().strip()\n print(f\"continuing GPT thread {thread_id}\")\n return thread_id\n else:\n thread = self.client.beta.threads.create()\n with open(threadfilename, \"w\") as file:\n file.write(thread.id)\n print(f\"created GPT thread {thread.id}\")\n return thread.id\n\n def _create_thread_id(self):\n thread = self.client.beta.threads.create()\n print(f\"created GPT thread {thread.id}\")\n return thread.id\n \n def _cancel_pending_runs(self):\n pending_runs = [r for r in self.client.beta.threads.runs.list(thread_id=self.thread_id) if r.status in {\"in_progress\", \"requires_action\"}]\n for run in pending_runs:\n self.client.beta.threads.runs.cancel(thread_id=self.thread_id, run_id=run.id)\n print(f\"canceled {run.status} run {run.id}\")\n\n def send_chat(self, message):\n omessage = self.client.beta.threads.messages.create(\n thread_id=self.thread_id, role=\"user\", content=message\n )\n self.run = self.client.beta.threads.runs.create(\n thread_id=self.thread_id, assistant_id=self.assistant_id\n )\n self.last_messsage = omessage\n\n def next_answer(self):\n while self.run.status in [\"queued\", \"in_progress\"]:\n sleep(1)\n self.run = self.client.beta.threads.runs.retrieve(\n thread_id=self.thread_id, run_id=self.run.id\n )\n answer = None\n if self.run.status == \"completed\":\n messages = list(\n self.client.beta.threads.messages.list(\n thread_id=self.thread_id, before=self.last_messsage.id\n )\n )\n message = messages[0]\n answer = {\n \"type\": \"message\",\n \"messages\": [c.text.value for c in message.content],\n \"is_final\": True,\n }\n self.run = None\n elif self.run.status == \"requires_action\":\n steps = list(\n self.client.beta.threads.runs.steps.list(\n thread_id=self.thread_id,\n run_id=self.run.id,\n order=\"asc\",\n after=self.last_step_id,\n )\n )\n step = steps[0]\n if step.step_details.type != 'message_creation': # still dont quite understand, but I know it doesn have too_calls\n try:\n answer = {\"type\": \"action\", \"actions\": step.step_details.tool_calls}\n except:\n print(step)\n raise\n self.last_step_id = step.id\n elif self.run.status == \"failed\":\n message = \"RUN FAILED.\"\n if self.run.last_error:\n message = f\"{message} {self.run.last_error}\"\n print(message)\n return answer\n\n def send_action_results(self, actions, results):\n self.run = self.client.beta.threads.runs.submit_tool_outputs(\n thread_id=self.thread_id,\n run_id=self.run.id,\n tool_outputs=[\n {\"tool_call_id\": a[\"id\"], \"output\": json.dumps(r)}\n for a, r in zip(actions, results)\n ],\n )\n\n # def is_done(self, run):\n # return run.status in [\"completed\", \"failed\", \"expired\"]"
},
{
"identifier": "Chat",
"path": "jarvisportal/llamaapichat.py",
"snippet": "class Chat:\n def __init__(self, functions):\n self.messages = [\n {\"role\": \"system\", \"content\": systemprompt},\n ]\n self.functions = functions\n\n def send_chat(self, msg):\n self.messages.append({\"role\": \"user\", \"content\": msg})\n self.last_response = self._send()\n \n def next_answer(self):\n if self.last_response.get(\"function_call\"):\n answer = {\n \"type\": \"action\",\n \"is_final\": False,\n \"actions\": [{\"function\": self.last_response[\"function_call\"]}]\n }\n else:\n answer = {\n \"type\": \"message\",\n \"is_final\": True,\n \"messages\": [self.last_response[\"content\"]]\n }\n return answer\n\n def send_action_results(self, actions, results):\n self.messages.append({\"role\": \"user\", \"content\": json.dumps(results[0])})\n self.last_response = self._send()\n\n def _send(self):\n api_request_json = {\n \"model\": model,\n \"functions\": self.functions,\n \"stream\": False,\n \"messages\": self.messages,\n }\n response = llama.run(api_request_json)\n result = response.json()[\"choices\"][0][\"message\"]\n if result.get(\"function_call\"):\n fncall = result[\"function_call\"]\n self.messages.append(\n {\"role\": \"assistant\", \"content\": json.dumps(fncall)}\n )\n else:\n self.messages.append({\"role\": \"assistant\", \"content\": result[\"content\"]})\n # if self.executor and result.get(\"function_call\"):\n # fncall = result[\"function_call\"]\n # fnresult = self.executor.execute(fncall)\n # result = self.sendFnresult(fncall[\"name\"], fnresult)\n # else:\n # self.messages.append({\"role\": \"assistant\", \"content\": result[\"content\"]})\n return result"
},
{
"identifier": "exec_actions",
"path": "jarvisportal/actions.py",
"snippet": "def set_cli_input(input_stream):\n def prompt(self, path: str, content: str):\n def run(self, path: str, content: str):\ndef ActionUpdateFileLines():\n def prompt(self, path: str, line_start: int, line_end: int, content: str):\n def run(self, path: str, line_start: int, line_end: int, content: str):\n def prompt(self, path: str, start_anchor: str, end_anchor: str, content: str):\n def run(self, path: str, start_anchor: str, end_anchor: str, content: str):\n def prompt(self, path: str, old_content: str, new_content: str):\n def run(self, path: str, old_content: str, new_content: str):\n def prompt(self, command: str):\n def run(self, command):\ndef _cmdexec(cmd):\ndef exec_actions(actions, ask=False):\ndef exec_action(action, ask=False):\nclass ActionCreateFile():\nclass ActionUpdateFileAnchors():\nclass ActionUpdateFileReplace():\nclass ActionExec():\nACTIONS = {\n \"createFile\": ActionCreateFile(),\n \"updateFile_lines\": ActionUpdateFileLines(),\n \"updateFile_anchors\": ActionUpdateFileAnchors(),\n \"updateFile_replace\": ActionUpdateFileReplace(),\n \"exec\": ActionExec(),\n}"
}
] | import sys
import os
import jarvisportal.listentomic as listentomic
import jarvisportal.listentomic as listentomic
from jarvisportal.gpt import GPT
from jarvisportal.llamaapichat import Chat as LlamaApiChat
from jarvisportal.actions import exec_actions, definitions | 2,235 |
usr = '\U0001F600'
bot = '\U0001F916'
mic = '\U0001F3A4'
def main():
args = sys.argv[1:]
engine = os.getenv("CHAT_ENGINE", "gpt")
if engine == "gpt":
if len(args) != 1:
print("Usage: gptexec.py <assistant_id>")
exit(1)
assistant_id = args[0]
bot = GPT(assistant_id)
elif engine == "llamaapi":
bot = LlamaApiChat(definitions)
try:
while True:
chatLoop(bot)
except KeyboardInterrupt:
print("\n====================================")
print("Thank you for using GPTExec. Come back soon. ;)")
print("====================================")
def _user_input():
if os.getenv("GPTEXEC_VOICE") == "1":
userInput = listentomic.listen_and_transcribe(detectsilence=True)
print(f"{usr} User: {userInput}")
else:
userInput = input(f"{usr} Type your message (or send an empty one to switch to voice input): \n")
if userInput.strip() == "":
print(f"{mic} Switching to voice input")
userInput = listentomic.listen_and_transcribe(detectsilence=False)
print(f"{usr} User: {userInput}")
return userInput
def chatLoop(bot):
userInput = _user_input()
print("waiting...")
bot.send_chat(userInput)
answer = None
while answer is None or not answer.get('is_final'):
print("waiting...")
answer = bot.next_answer()
print("=========================================================")
if answer["type"] == "action":
|
usr = '\U0001F600'
bot = '\U0001F916'
mic = '\U0001F3A4'
def main():
args = sys.argv[1:]
engine = os.getenv("CHAT_ENGINE", "gpt")
if engine == "gpt":
if len(args) != 1:
print("Usage: gptexec.py <assistant_id>")
exit(1)
assistant_id = args[0]
bot = GPT(assistant_id)
elif engine == "llamaapi":
bot = LlamaApiChat(definitions)
try:
while True:
chatLoop(bot)
except KeyboardInterrupt:
print("\n====================================")
print("Thank you for using GPTExec. Come back soon. ;)")
print("====================================")
def _user_input():
if os.getenv("GPTEXEC_VOICE") == "1":
userInput = listentomic.listen_and_transcribe(detectsilence=True)
print(f"{usr} User: {userInput}")
else:
userInput = input(f"{usr} Type your message (or send an empty one to switch to voice input): \n")
if userInput.strip() == "":
print(f"{mic} Switching to voice input")
userInput = listentomic.listen_and_transcribe(detectsilence=False)
print(f"{usr} User: {userInput}")
return userInput
def chatLoop(bot):
userInput = _user_input()
print("waiting...")
bot.send_chat(userInput)
answer = None
while answer is None or not answer.get('is_final'):
print("waiting...")
answer = bot.next_answer()
print("=========================================================")
if answer["type"] == "action": | action_results = exec_actions(answer["actions"], ask=True) | 2 | 2023-11-14 17:27:01+00:00 | 4k |