repo_name (stringlengths 7-71) | file_path (stringlengths 5-118) | context (list) | import_statement (stringlengths 45-12.5k) | token_num (int64 641-99.4k) | cropped_code (stringlengths 44-17k) | all_code (stringlengths 43-754k) | next_line (stringlengths 2-330) | gold_snippet_index (int64 0-68) | created_at (stringlengths 25-25) | level (stringclasses 9 values)
---|---|---|---|---|---|---|---|---|---|---
BouncyKoishi/ChuCaoQi-Bot | plugins/draw_item.py | [
{
"identifier": "config",
"path": "kusa_base.py",
"snippet": "async def isUserExist(qqNum) -> bool:\nasync def isSuperAdmin(qqNum) -> bool:\nasync def buying(qqNum, itemNameBuying, itemAmountBuying, totalPrice, isUsingAdvKusa=False) -> bool:\nasync def selling(qqNum, itemNameSelling, itemAmountSelling, totalPrice, isUsingAdvKusa=False) -> bool:\nasync def sendLog(message):"
},
{
"identifier": "nameDetailSplit",
"path": "utils.py",
"snippet": "def nameDetailSplit(strippedText):\n if not strippedText:\n return \"\", \"\"\n colonEnIndex = strippedText.find(\":\")\n colonCnIndex = strippedText.find(\":\")\n colonEnIndex = len(strippedText) if colonEnIndex == -1 else colonEnIndex\n colonCnIndex = len(strippedText) if colonCnIndex == -1 else colonCnIndex\n # 英文冒号\n if colonEnIndex < colonCnIndex:\n return strippedText.split(':', 1)\n # 中文冒号\n if colonCnIndex < colonEnIndex:\n return strippedText.split(':', 1)\n # 没有冒号\n return strippedText, \"\""
},
{
"identifier": "CQ_injection_check_command",
"path": "decorator.py",
"snippet": "def CQ_injection_check_command(func):\n @wraps(func)\n def check(session: CommandSession):\n if 'CQ:' in session.current_arg:\n return holder(session)\n return func(session)\n\n return check"
}
] | import random
import nonebot
import dbConnection.db as baseDB
import dbConnection.draw_item as drawItemDB
import dbConnection.kusa_item as usefulItemDB
from nonebot import on_command, CommandSession
from kusa_base import config
from utils import nameDetailSplit
from itertools import groupby
from decorator import CQ_injection_check_command | 1,735 | userId = session.ctx['user_id']
if not groupId:
await session.send('暂不支持私聊抽奖^ ^')
return
if groupId not in drawConfig['groupAllowDraw']:
await session.send('本群暂不支持抽奖^ ^')
return
if groupId not in drawConfig['groupAllowItem']:
await ban(groupId, userId)
return
banRisk = drawConfig['banRisk']
banShieldInfo = await usefulItemDB.getItemStorageInfo(userId, '量子护盾')
if banShieldInfo and banShieldInfo.allowUse:
await usefulItemDB.changeItemAmount(userId, '量子护盾', -1)
banRisk = banRisk / 10
if random.random() < banRisk:
await ban(groupId, userId)
return
strippedArg = session.current_arg_text.strip()
await getItem(groupId, userId, strippedArg)
@on_command(name='十连抽', only_to_me=False)
async def itemDraw10(session: CommandSession):
groupId = session.ctx['group_id']
userId = session.ctx['user_id']
if not groupId:
await session.send('暂不支持私聊抽奖^ ^')
return
if groupId not in drawConfig['groupAllowItem']:
await session.send('本群暂不支持十连抽^ ^')
return
strippedArg = session.current_arg_text.strip()
baseLevel, poolName = await getLevelAndPoolName(strippedArg)
baseLevel = baseLevel if baseLevel is not None else 0
ticketName = ['十连券', '上级十连券', '特级十连券', '天琴十连券'][baseLevel]
drawTenTicketInfo = await usefulItemDB.getItemStorageInfo(userId, ticketName)
if not drawTenTicketInfo or not drawTenTicketInfo.allowUse:
await session.send(f'你缺少{ticketName},无法十连抽^ ^')
return
await usefulItemDB.changeItemAmount(userId, ticketName, -1)
itemList = [await getItemFromDB(baseLevel, poolName) for i in range(10)]
outputStr = '十连抽结果:\n'
for item in itemList:
existItemStorage = await drawItemDB.getSingleItemStorage(userId, item.id)
outputStr += f'[{itemRareDescribe[item.rareRank]}]{item.name}'
if not existItemStorage:
outputStr += '(New!)'
outputStr += '\n'
await drawItemDB.setItemStorage(userId, item.id)
outputStr = outputStr[:-1]
await session.send(outputStr)
async def ban(groupNum, userId):
bot = nonebot.get_bot()
dur_time = int(1.1 ** (5 + random.random() * 70))
print(f'抽奖口球-{dur_time}s, id:{userId}, group:{groupNum}')
msg = f'获得了:口球({dur_time}s)!'
await bot.set_group_ban(group_id=groupNum, user_id=userId, duration=dur_time)
await bot.send_group_msg(group_id=groupNum, message=msg)
async def getItem(groupNum, userId, strippedArg):
_, poolName = await getLevelAndPoolName(strippedArg)
redrawDice = await usefulItemDB.getItemStorageInfo(userId, '骰子碎片')
if not redrawDice or not redrawDice.allowUse:
drawLimit = 1
else:
drawLimit = min(51, redrawDice.amount + 1)
redrawCount, item = 0, None
for i in range(drawLimit):
redrawCount = i
item = await getItemFromDB(poolName=poolName)
existItemStorage = await drawItemDB.getSingleItemStorage(userId, item.id)
if not existItemStorage:
break
msg = ''
if redrawCount > 0:
await usefulItemDB.changeItemAmount(userId, '骰子碎片', -redrawCount)
msg += f'消耗了骰子碎片*{redrawCount},'
existItemStorage = await drawItemDB.getSingleItemStorage(userId, item.id)
msg += f'获得了:[{itemRareDescribe[item.rareRank]}]{item.name}'
if not existItemStorage:
msg += '(New!)'
if item.detail:
msg += f'\n物品说明:{item.detail}'
bot = nonebot.get_bot()
await bot.send_group_msg(group_id=groupNum, message=msg)
await drawItemDB.setItemStorage(userId, item.id)
async def getItemFromDB(startRareRank=0, poolName=None):
easyRand = 1 if startRareRank > 0 else random.random()
if easyRand < 0.7:
return await drawItemDB.getRandomItem(0, poolName)
normalRand = 1 if startRareRank > 1 else random.random()
if normalRand < 0.7:
return await drawItemDB.getRandomItem(1, poolName)
hardRand = 1 if startRareRank > 2 else random.random()
if hardRand < 0.7:
return await drawItemDB.getRandomItem(2, poolName)
lunaticRand = random.random()
if lunaticRand < 0.7:
return await drawItemDB.getRandomItem(3, poolName)
return await getItemFromDB(startRareRank, poolName)
@on_command(name='添加-Easy', aliases='物品添加-Easy', only_to_me=False)
|
itemRareDescribe = ['Easy', 'Normal', 'Hard', 'Lunatic']
drawConfig = config['drawItem']
@on_command(name='抽奖', only_to_me=False)
async def itemDraw(session: CommandSession):
groupId = session.ctx['group_id']
userId = session.ctx['user_id']
if not groupId:
await session.send('暂不支持私聊抽奖^ ^')
return
if groupId not in drawConfig['groupAllowDraw']:
await session.send('本群暂不支持抽奖^ ^')
return
if groupId not in drawConfig['groupAllowItem']:
await ban(groupId, userId)
return
banRisk = drawConfig['banRisk']
banShieldInfo = await usefulItemDB.getItemStorageInfo(userId, '量子护盾')
if banShieldInfo and banShieldInfo.allowUse:
await usefulItemDB.changeItemAmount(userId, '量子护盾', -1)
banRisk = banRisk / 10
if random.random() < banRisk:
await ban(groupId, userId)
return
strippedArg = session.current_arg_text.strip()
await getItem(groupId, userId, strippedArg)
@on_command(name='十连抽', only_to_me=False)
async def itemDraw10(session: CommandSession):
groupId = session.ctx['group_id']
userId = session.ctx['user_id']
if not groupId:
await session.send('暂不支持私聊抽奖^ ^')
return
if groupId not in drawConfig['groupAllowItem']:
await session.send('本群暂不支持十连抽^ ^')
return
strippedArg = session.current_arg_text.strip()
baseLevel, poolName = await getLevelAndPoolName(strippedArg)
baseLevel = baseLevel if baseLevel is not None else 0
ticketName = ['十连券', '上级十连券', '特级十连券', '天琴十连券'][baseLevel]
drawTenTicketInfo = await usefulItemDB.getItemStorageInfo(userId, ticketName)
if not drawTenTicketInfo or not drawTenTicketInfo.allowUse:
await session.send(f'你缺少{ticketName},无法十连抽^ ^')
return
await usefulItemDB.changeItemAmount(userId, ticketName, -1)
itemList = [await getItemFromDB(baseLevel, poolName) for i in range(10)]
outputStr = '十连抽结果:\n'
for item in itemList:
existItemStorage = await drawItemDB.getSingleItemStorage(userId, item.id)
outputStr += f'[{itemRareDescribe[item.rareRank]}]{item.name}'
if not existItemStorage:
outputStr += '(New!)'
outputStr += '\n'
await drawItemDB.setItemStorage(userId, item.id)
outputStr = outputStr[:-1]
await session.send(outputStr)
async def ban(groupNum, userId):
bot = nonebot.get_bot()
dur_time = int(1.1 ** (5 + random.random() * 70))
print(f'抽奖口球-{dur_time}s, id:{userId}, group:{groupNum}')
msg = f'获得了:口球({dur_time}s)!'
await bot.set_group_ban(group_id=groupNum, user_id=userId, duration=dur_time)
await bot.send_group_msg(group_id=groupNum, message=msg)
async def getItem(groupNum, userId, strippedArg):
_, poolName = await getLevelAndPoolName(strippedArg)
redrawDice = await usefulItemDB.getItemStorageInfo(userId, '骰子碎片')
if not redrawDice or not redrawDice.allowUse:
drawLimit = 1
else:
drawLimit = min(51, redrawDice.amount + 1)
redrawCount, item = 0, None
for i in range(drawLimit):
redrawCount = i
item = await getItemFromDB(poolName=poolName)
existItemStorage = await drawItemDB.getSingleItemStorage(userId, item.id)
if not existItemStorage:
break
msg = ''
if redrawCount > 0:
await usefulItemDB.changeItemAmount(userId, '骰子碎片', -redrawCount)
msg += f'消耗了骰子碎片*{redrawCount},'
existItemStorage = await drawItemDB.getSingleItemStorage(userId, item.id)
msg += f'获得了:[{itemRareDescribe[item.rareRank]}]{item.name}'
if not existItemStorage:
msg += '(New!)'
if item.detail:
msg += f'\n物品说明:{item.detail}'
bot = nonebot.get_bot()
await bot.send_group_msg(group_id=groupNum, message=msg)
await drawItemDB.setItemStorage(userId, item.id)
async def getItemFromDB(startRareRank=0, poolName=None):
easyRand = 1 if startRareRank > 0 else random.random()
if easyRand < 0.7:
return await drawItemDB.getRandomItem(0, poolName)
normalRand = 1 if startRareRank > 1 else random.random()
if normalRand < 0.7:
return await drawItemDB.getRandomItem(1, poolName)
hardRand = 1 if startRareRank > 2 else random.random()
if hardRand < 0.7:
return await drawItemDB.getRandomItem(2, poolName)
lunaticRand = random.random()
if lunaticRand < 0.7:
return await drawItemDB.getRandomItem(3, poolName)
return await getItemFromDB(startRareRank, poolName)
@on_command(name='添加-Easy', aliases='物品添加-Easy', only_to_me=False) | @CQ_injection_check_command | 2 | 2023-11-02 04:06:31+00:00 | 4k |
ilur98/DGQ | dgq/models/opt_a8w4.py | [
{
"identifier": "W4A8BF32OF32Linear",
"path": "dgq/models/linear.py",
"snippet": "class W4A8BF32OF32Linear(torch.nn.Module):\n # For qkv_proj\n def __init__(self, in_features, out_features, groupsize=128):\n super().__init__()\n self.in_features = in_features\n self.out_features = out_features\n self.groupsize = groupsize\n self.register_buffer('weight', torch.zeros((self.out_features,\n self.in_features // 2), dtype=torch.int8, requires_grad=False))\n self.register_buffer('bias', torch.zeros(\n (1, self.out_features), dtype=torch.float, requires_grad=False))\n self.register_buffer('a', torch.zeros(1, self.out_features))\n self.register_buffer('b', torch.zeros(1, self.out_features))\n self.register_buffer('scales8', torch.zeros((self.out_features, self.in_features//self.groupsize),dtype=torch.int8))\n self.register_buffer('zeros', torch.zeros((self.out_features, self.in_features//self.groupsize),dtype=torch.int8))\n\n def to(self, *args, **kwargs):\n super().to(*args, **kwargs)\n self.weight = self.weight.to(*args, **kwargs)\n self.bias = self.bias.to(*args, **kwargs)\n return self\n\n @torch.no_grad()\n def forward(self, x):\n x_shape = x.shape\n x = x.view(-1, x_shape[-1])\n y = linear_a8_w4_bfp32_ofp32(x, self.weight, self.bias,\n self.a, self.b,\n self.scales8, self.zeros, self.in_features, self.out_features,\n self.groupsize // 8)\n y = y.view(*x_shape[:-1], -1)\n return y\n\n @staticmethod\n def from_float(module: QuantLinear, input_scale):\n a8w4_module = W4A8BF32OF32Linear(\n module.in_features, module.out_features, module.groupsize)\n alpha = module.wscales8.float() * input_scale\n a8w4_module.weight = module.qweight\n if module.bias is not None:\n a8w4_module.bias = module.bias.float()\n a8w4_module.a = alpha\n a8w4_module.scales8 = module.wscales\n a8w4_module.zeros = module.wzeros\n return a8w4_module"
},
{
"identifier": "W4A8B8O8Linear",
"path": "dgq/models/linear.py",
"snippet": "class W4A8B8O8Linear(torch.nn.Module):\n # For qkv_proj\n def __init__(self, in_features, out_features, groupsize=128):\n super().__init__()\n self.in_features = in_features\n self.out_features = out_features \n self.groupsize = groupsize\n self.register_buffer('weight', torch.zeros((self.out_features,\n self.in_features // 2), dtype=torch.int8, requires_grad=False))\n self.register_buffer('bias', torch.zeros(\n (1, self.out_features), dtype=torch.int8, requires_grad=False))\n self.register_buffer('a', torch.zeros(1, self.out_features))\n self.register_buffer('b', torch.ones(1))\n self.register_buffer('scales8', torch.zeros((self.out_features, self.in_features//self.groupsize),dtype=torch.int8))\n self.register_buffer('zeros', torch.zeros((self.out_features, self.in_features//self.groupsize),dtype=torch.int8))\n def to(self, *args, **kwargs):\n super().to(*args, **kwargs)\n self.weight = self.weight.to(*args, **kwargs)\n self.bias = self.bias.to(*args, **kwargs)\n return self\n\n @torch.no_grad()\n def forward(self, x):\n x_shape = x.shape\n x = x.view(-1, x_shape[-1])\n y = linear_a8_w4_b8_o8(x, self.weight, self.bias,\n self.a, self.b,\n self.scales8, self.zeros, self.in_features, self.out_features,\n self.groupsize // 8)\n y = y.view(*x_shape[:-1], -1)\n return y\n\n @staticmethod\n def from_float(module: QuantLinear, input_scale, output_scale):\n a8w4_module = W4A8B8O8Linear(\n module.in_features, module.out_features)\n int8_bias, bias_scale = quantize_per_tensor_absmax(module.bias)\n alpha = input_scale * module.wscales8.float() / output_scale\n beta = bias_scale / output_scale\n a8w4_module.weight = module.qweight\n a8w4_module.bias = int8_bias\n a8w4_module.a = alpha.reshape(-1, 8, 2, 8).transpose(1, 2).flatten()\n a8w4_module.b = beta\n a8w4_module.scales8 = module.wscales\n a8w4_module.zeros = module.wzeros\n return a8w4_module"
},
{
"identifier": "LayerNormQ",
"path": "dgq/models/fused.py",
"snippet": "class LayerNormQ(torch.nn.Module):\n def __init__(self, dim, eps=1e-5):\n super().__init__()\n self.input_scale = 1.0\n self.eps = eps\n self.register_buffer('weight', torch.ones(dim, dtype=torch.float32))\n self.register_buffer('bias', torch.zeros(dim, dtype=torch.float32))\n\n def forward(self, x):\n x = x.to(self.weight.dtype)\n ln_output_fp = torch.nn.functional.layer_norm(\n x, x.shape[-1:], self.weight, self.bias, self.eps)\n ln_output_int8 = ln_output_fp.round().clamp(-128, 127).to(torch.int8)\n return ln_output_int8\n\n @staticmethod\n def from_float(module: torch.nn.LayerNorm, output_scale: float):\n assert module.normalized_shape[0] == module.weight.numel()\n assert module.normalized_shape[0] == module.bias.numel()\n q_module = LayerNormQ(module.normalized_shape[0], module.eps)\n q_module.weight = module.weight.float() / output_scale\n q_module.bias = module.bias.float() / output_scale\n return q_module"
},
{
"identifier": "BMM_S8T_S8N_F32T",
"path": "dgq/models/bmm.py",
"snippet": "class BMM_S8T_S8N_F32T(torch.nn.Module):\n def __init__(self, alpha):\n super().__init__()\n self.register_buffer('a', torch.tensor(alpha))\n\n @torch.no_grad()\n def forward(self, a, b):\n # a: [B, M, K] int8\n # b: [B, N, K] int8\n # return: [B, M, N] int32\n return bmm_s8t_s8n_f32t(a, b, self.a.item())\n\n @staticmethod\n def from_scale(a_scale, b_scale):\n bmm_module = BMM_S8T_S8N_F32T(1.0)\n alpha = a_scale * b_scale\n if not torch.is_tensor(alpha):\n alpha = torch.tensor(alpha)\n bmm_module.a = alpha\n return bmm_module"
}
] | import torch
from torch import nn
from transformers.models.opt.modeling_opt import (
OPTConfig,
OPTForCausalLM,
OPTModel,
OPTPreTrainedModel,
OPTLearnedPositionalEmbedding,
OPTAttention,
OPTDecoderLayer,
OPTDecoder,
BaseModelOutputWithPast
)
from typing import Optional, Tuple, List
from dgq.models.linear import W4A8BF32OF32Linear, W4A8B8O8Linear
from dgq.models.fused import LayerNormQ
from transformers.utils import logging
from dgq.models.bmm import BMM_S8T_S8N_F32T
from torch.nn.functional import pad | 2,061 | logger = logging.get_logger(__name__)
class W4A8OPTAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(
self,
embed_dim: int,
num_heads: int,
):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.head_dim = embed_dim // num_heads
if (self.head_dim * num_heads) != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
f" and `num_heads`: {num_heads})."
)
self.qk_bmm = BMM_S8T_S8N_F32T(1.0)
self.k_proj = W4A8B8O8Linear(embed_dim, embed_dim)
self.v_proj = W4A8B8O8Linear(embed_dim, embed_dim)
self.q_proj = W4A8B8O8Linear(embed_dim, embed_dim)
| logger = logging.get_logger(__name__)
class W4A8OPTAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(
self,
embed_dim: int,
num_heads: int,
):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.head_dim = embed_dim // num_heads
if (self.head_dim * num_heads) != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
f" and `num_heads`: {num_heads})."
)
self.qk_bmm = BMM_S8T_S8N_F32T(1.0)
self.k_proj = W4A8B8O8Linear(embed_dim, embed_dim)
self.v_proj = W4A8B8O8Linear(embed_dim, embed_dim)
self.q_proj = W4A8B8O8Linear(embed_dim, embed_dim) | self.out_proj = W4A8BF32OF32Linear(embed_dim, embed_dim) | 0 | 2023-11-01 13:45:16+00:00 | 4k |
anilaltuner/personalized-news-agent | pages/chatbot.py | [
{
"identifier": "CUSTOM_ALGO_ID",
"path": "news.py",
"snippet": "CUSTOM_ALGO_ID = st.secrets[\"custom_algo_id\"]"
},
{
"identifier": "initialize_session",
"path": "news.py",
"snippet": "def initialize_session(user_input=\"\"):\n \"\"\"Initialize or restart the session.\"\"\"\n if user_input:\n st.session_state.username = user_input\n username_suffix = st.session_state.username if \"username\" in st.session_state else None\n if username_suffix:\n st.session_state.session = st.session_state.personalized.session(\n AlgorithmLabel.CUSTOM,\n custom_id=CUSTOM_ALGO_ID,\n vdbid=FIRSTBATCH_DB_NAME,\n session_id=\"rss_feed\" + username_suffix\n )\n else:\n st.session_state.session = st.session_state.personalized.session(\n AlgorithmLabel.CUSTOM,\n custom_id=CUSTOM_ALGO_ID,\n vdbid=FIRSTBATCH_DB_NAME\n )\n st.session_state.batches = []\n st.session_state.ids = []\n st.session_state.likes = []\n st.session_state.html_content = \"\"\"\n <div class=\"chat-container\">\n <div class=\"chat-box\">\n <div class=\"chat-output\" id=\"chat-output\"></div>\n </div>\n </div>\n \"\"\"\n st.session_state.chat_placeholder = st.empty()\n st.session_state.chat_history = \"\"\n st.session_state.chat_loader = 0"
},
{
"identifier": "fetch_content",
"path": "news.py",
"snippet": "def fetch_content():\n \"\"\"Fetch content for the current session.\"\"\"\n ids, batch = st.session_state.personalized.batch(st.session_state.session)\n st.session_state.batches += batch\n st.session_state.ids += ids"
},
{
"identifier": "chat",
"path": "chat_tools/kernel.py",
"snippet": "def chat(model, prompt, message):\n if \"chat_history\" not in st.session_state:\n st.session_state[\"chat_history\"] = \"\"\n context, user_interaction = generate_session_context(st.session_state)\n runnable = prompt | model | StrOutputParser()\n soup = BeautifulSoup(st.session_state.html_content, 'html.parser')\n chat_output = soup.find(id='chat-output')\n if \"init\" in st.session_state:\n user_div = soup.new_tag(\"div\", **{'class': 'user-message'})\n user_div.string = f\"{st.session_state.username}: {message}\"\n chat_output.append(user_div)\n stream_handler = StreamHandler(soup=soup, chat_output=chat_output)\n strategy = no_history if st.session_state[\"chat_history\"] == \"\" else has_history\n answer = runnable.invoke(\n ({\"context\": context,\n \"user_interaction\": user_interaction,\n \"user_input\": message,\n \"chat_history\": st.session_state[\"chat_history\"],\n \"strategy\": strategy}),\n config={\"callbacks\": [stream_handler]})\n st.session_state[\"chat_history\"] += f\"\\nUser:> {message}\\nChatBot:> {answer}\\n\""
},
{
"identifier": "setup_chat_with_memory",
"path": "chat_tools/kernel.py",
"snippet": "def setup_chat_with_memory():\n sk_prompt = \"\"\" \nAvoid using \"Answer:\" or \"Chatbot>\" as a response header. Responses should be concise, not exceeding 250 tokens.\n\nUser preferences is user's interaction with the articles. Use the articles that the user has liked for tailored recommendations.\nRelevant Articles for Context and Suggestions is the articles which exctracted user has liked. Use the articles that the more exploration and alternative.\nPrior Conversation Record is the previous chat history. This one is least important for user interests. Use that for engagement and continuity.\nUpcoming Chatbot Response will focus on is the strategy for the upcoming response. Use that for learn about user state on product experience.\n\nUser Preferences:\n{user_interaction}\n\nRelevant Articles for Context and Suggestions:\n{context}\n\nPrior Conversation Record:\n{chat_history}\n\nUser Inquiry:\n{user_input}\n\nUpcoming Chatbot Response will focus on:\n{strategy}\n\"\"\".strip()\n\n prompt = PromptTemplate(\n template=sk_prompt, input_variables=[\"context\", \"user_input\", \"chat_history\", \"user_interaction\", \"strategy\"]\n )\n chain = ChatOpenAI(model_name=\"gpt-4-1106-preview\", temperature=0.8, streaming=True, max_tokens=512)\n\n return chain, prompt"
},
{
"identifier": "css_",
"path": "markdowns/markdowns_chat.py",
"snippet": ""
}
] | import streamlit as st
from firstbatch import AlgorithmLabel
from pydantic import BaseModel
from news import CUSTOM_ALGO_ID, initialize_session, fetch_content
from chat_tools.kernel import chat, setup_chat_with_memory
from markdowns.markdowns_chat import css_, sidebar | 1,793 |
# Pydantic models
class SessionData(BaseModel):
username: str
class PersonalizeData(BaseModel):
message: str
class SignalData(BaseModel):
sessionID: dict
id: str
def get_user_input():
return st.sidebar.text_input("Username/Session Name", st.session_state.get("username", ""))
def update_session_state(user_input):
st.session_state.session = st.session_state.personalized.session(
AlgorithmLabel.CUSTOM, vdbid="rss_db", custom_id=CUSTOM_ALGO_ID
)
st.session_state.batches = []
st.session_state.ids = []
st.session_state.likes = []
ids, batch = st.session_state.personalized.batch(st.session_state.session)
st.session_state.batches += batch
st.session_state.ids += ids
st.session_state.username = user_input
st.session_state.html_content = """
<div class="chat-container">
<div class="chat-box">
<div class="chat-output" id="chat-output"></div>
</div>
</div>
"""
st.session_state.chat_history = ""
st.session_state.chat_loader = 3
def display_sidebar():
user_input = get_user_input()
if user_input and st.session_state.get("username") != user_input:
update_session_state(user_input)
initialize_session(user_input)
fetch_content()
st.sidebar.title("Personalized AI Agent")
st.sidebar.markdown(sidebar)
def chat_init():
if "html_content" not in st.session_state:
st.session_state.html_content = """
<div class="chat-container">
<div class="chat-box">
<div class="chat-output" id="chat-output"></div>
</div>
</div>
"""
if "username" not in st.session_state:
st.session_state.username = ""
if "init" not in st.session_state:
if st.session_state.loading == 1:
st.session_state.ids, st.session_state.batches = st.session_state.personalized.batch(st.session_state.session)
chat(model=model, prompt=prompt, message="Hello!")
st.session_state.init = True
def submit():
st.session_state.test_st = st.session_state.user_input
st.session_state.user_input = ''
def display_box():
st.markdown(css_, unsafe_allow_html=True)
if "html_content" not in st.session_state:
st.session_state.html_content = """
<div class="chat-container">
<div class="chat-box">
<div class="chat-output" id="chat-output"></div>
</div>
</div>
"""
st.session_state.chat_placeholder.markdown(st.session_state.html_content, unsafe_allow_html=True)
st.text_input("User Input", key="user_input", on_change=submit())
if "username" not in st.session_state:
st.session_state.username = ""
if st.session_state.test_st != "":
print("User input changed")
if st.session_state.chat_loader > 2:
ids, batch = st.session_state.personalized.batch(st.session_state.session)
st.session_state.batches = batch
st.session_state.ids = ids
st.session_state.chat_loader = 0
st.session_state.chat_loader += 1
chat(model=model, prompt=prompt, message=st.session_state.test_st)
st.session_state.test_st = ""
if __name__ == '__main__':
if 'user_input' not in st.session_state:
st.session_state.user_input = ''
st.session_state.chat_loader = 0
st.session_state.chat_placeholder = st.empty()
|
# Pydantic models
class SessionData(BaseModel):
username: str
class PersonalizeData(BaseModel):
message: str
class SignalData(BaseModel):
sessionID: dict
id: str
def get_user_input():
return st.sidebar.text_input("Username/Session Name", st.session_state.get("username", ""))
def update_session_state(user_input):
st.session_state.session = st.session_state.personalized.session(
AlgorithmLabel.CUSTOM, vdbid="rss_db", custom_id=CUSTOM_ALGO_ID
)
st.session_state.batches = []
st.session_state.ids = []
st.session_state.likes = []
ids, batch = st.session_state.personalized.batch(st.session_state.session)
st.session_state.batches += batch
st.session_state.ids += ids
st.session_state.username = user_input
st.session_state.html_content = """
<div class="chat-container">
<div class="chat-box">
<div class="chat-output" id="chat-output"></div>
</div>
</div>
"""
st.session_state.chat_history = ""
st.session_state.chat_loader = 3
def display_sidebar():
user_input = get_user_input()
if user_input and st.session_state.get("username") != user_input:
update_session_state(user_input)
initialize_session(user_input)
fetch_content()
st.sidebar.title("Personalized AI Agent")
st.sidebar.markdown(sidebar)
def chat_init():
if "html_content" not in st.session_state:
st.session_state.html_content = """
<div class="chat-container">
<div class="chat-box">
<div class="chat-output" id="chat-output"></div>
</div>
</div>
"""
if "username" not in st.session_state:
st.session_state.username = ""
if "init" not in st.session_state:
if st.session_state.loading == 1:
st.session_state.ids, st.session_state.batches = st.session_state.personalized.batch(st.session_state.session)
chat(model=model, prompt=prompt, message="Hello!")
st.session_state.init = True
def submit():
st.session_state.test_st = st.session_state.user_input
st.session_state.user_input = ''
def display_box():
st.markdown(css_, unsafe_allow_html=True)
if "html_content" not in st.session_state:
st.session_state.html_content = """
<div class="chat-container">
<div class="chat-box">
<div class="chat-output" id="chat-output"></div>
</div>
</div>
"""
st.session_state.chat_placeholder.markdown(st.session_state.html_content, unsafe_allow_html=True)
st.text_input("User Input", key="user_input", on_change=submit())
if "username" not in st.session_state:
st.session_state.username = ""
if st.session_state.test_st != "":
print("User input changed")
if st.session_state.chat_loader > 2:
ids, batch = st.session_state.personalized.batch(st.session_state.session)
st.session_state.batches = batch
st.session_state.ids = ids
st.session_state.chat_loader = 0
st.session_state.chat_loader += 1
chat(model=model, prompt=prompt, message=st.session_state.test_st)
st.session_state.test_st = ""
if __name__ == '__main__':
if 'user_input' not in st.session_state:
st.session_state.user_input = ''
st.session_state.chat_loader = 0
st.session_state.chat_placeholder = st.empty() | model, prompt = setup_chat_with_memory() | 4 | 2023-11-07 12:51:01+00:00 | 4k |
m4rkw/monzo-utils | monzo_utils/model/payment.py | [
{
"identifier": "Config",
"path": "monzo_utils/lib/config.py",
"snippet": "class Config(metaclass=Singleton):\n\n def __init__(self, config=None, config_path=None):\n if config_path is None:\n homedir = pwd.getpwuid(os.getuid()).pw_dir\n config_path = f\"{homedir}/.monzo\"\n\n if not os.path.exists(config_path):\n os.mkdir(config_path, 0o755)\n\n self.config_file = f\"{config_path}/config.yaml\"\n\n if config:\n self.config = config\n else:\n if not os.path.exists(self.config_file):\n sys.stderr.write(f\"config file not found: {self.config_file}, run setup first.\\n\")\n sys.exit(1)\n\n self.config = yaml.safe_load(open(self.config_file).read())\n\n\n def __getattr__(self, name):\n if name in self.config:\n return self.config[name]\n\n return object.__getattribute__(self, name)\n\n\n def set(self, key, value):\n self.config[key] = value\n\n\n @property\n def keys(self):\n return self.config.keys()\n\n\n def save(self):\n with open(self.config_file, 'w') as f:\n f.write(yaml.dump(self.config))"
},
{
"identifier": "Transaction",
"path": "monzo_utils/model/transaction.py",
"snippet": "class Transaction(BaseModel):\n\n DISPLAY_KEYS = ['date','type','money_in','money_out','pending','description']\n RELATIONSHIPS = {\n 'account': ['`transaction`.account_id', 'account.id'],\n 'transaction_metadata': ['`transaction`.id', 'transaction_metadata.transaction_id'],\n 'pot': ['`transaction`.pot_id', 'pot.id']\n }"
},
{
"identifier": "Transactions",
"path": "monzo_utils/lib/transactions.py",
"snippet": "class Transactions(metaclass=Singleton):\n seen = {}"
}
] | import re
import datetime
from monzo_utils.lib.config import Config
from monzo_utils.model.transaction import Transaction
from monzo_utils.lib.transactions import Transactions | 1,972 | if 'yearly_month' in self.payment_config:
if self.yearly_payment_due_this_month(self.payment_config, self.last_salary_date) is False:
return 'SKIPPED'
if 'renew_date' in self.payment_config and self.payment_config['renew_date'] >= self.next_salary_date:
return 'SKIPPED'
if 'exclude_months' in self.payment_config and self.today.month in self.payment_config['exclude_months']:
return 'SKIPPED'
if self.last_date and self.last_date >= self.last_salary_date:
return 'PAID'
if self.due_date and self.due_date >= self.next_salary_date:
return 'SKIPPED'
return 'DUE'
@property
def payment_type(self):
return re.sub(r'(?<!^)(?=[A-Z])', '_', type(self).__name__).replace('_',' ')
@property
def num_paid(self):
return None
@property
def num_total(self):
if 'months' in self.payment_config:
return self.payment_config['months']
return None
@property
def remaining(self):
pass
@property
def display_amount(self):
today = datetime.datetime.now()
today = datetime.date(today.year, today.month, today.day)
if 'last_amount_overrides' in Config().keys and \
self.payment_config['name'] in Config().last_amount_overrides and \
self.last_salary_amount in Config().last_amount_overrides[self.payment_config['name']]:
amount = Config().last_amount_overrides[self.payment_config['name']][self.last_salary_amount]
elif 'renewal' in self.payment_config and (today >= self.payment_config['renewal']['date'] or self.status == 'PAID'):
if 'first_payment' in self.payment_config['renewal'] and today <= self.payment_config['renewal']['date']:
amount = self.payment_config['renewal']['first_payment']
else:
if self.last_date >= self.payment_config['renewal']['date']:
amount = float(getattr(self.last_payment, self.transaction_type))
else:
amount = self.payment_config['renewal']['amount']
elif self.last_payment:
amount = float(getattr(self.last_payment, self.transaction_type))
else:
amount = self.payment_config['amount']
if self.transaction_type == 'money_in':
return 0 - amount
return amount
@property
def last_date(self):
if 'last_date' in self.cache:
return self.cache['last_date']
if 'last_date_overrides' in self.config and \
self.payment_config['name'] in self.config['last_date_overrides'] and \
self.last_salary_date in self.config['last_date_overrides'][self.payment_config['name']]:
self.cache['last_date'] = self.config['last_date_overrides'][self.payment_config['name']][self.last_salary_date]
return self.cache['last_date']
if 'desc' not in self.payment_config:
self.cache['last_date'] = None
return self.cache['last_date']
if self.last_payment:
self.cache['last_date'] = self.last_payment.date
else:
if self.older_last_payment is not None:
self.cache['last_date'] = self.older_last_payment.date
else:
self.cache['last_date'] = None
return self.cache['last_date']
@property
def last_payment(self):
if 'last_payment' in self.cache:
return self.cache['last_payment']
if 'desc' not in self.payment_config:
self.payment_config['desc'] = type(self).__name__
where=[{'clause': self.transaction_type + ' > %s', 'params': [0]}]
if 'start_date' in self.payment_config:
where.append({
'clause': '`date` >= %s',
'params': [self.payment_config['start_date']]
})
if self.always_fixed or 'fixed' in self.payment_config and self.payment_config['fixed']:
method_name = f"find_all_by_declined_and_{self.transaction_type}_and_description"
|
class Payment:
transaction_type = 'money_out'
always_fixed = False
def __init__(self, config, payment_list_config, payment_config, last_salary_date, next_salary_date, following_salary_date):
self.config = config
self.payment_list_config = payment_list_config
self.payment_config = payment_config
self.last_salary_date = last_salary_date
self.next_salary_date = next_salary_date
self.following_salary_date = following_salary_date
self.today = datetime.datetime.now()
self.cache = {}
def data(self, abbreviate=False):
if self.num_paid is not None:
suffix = '%d/%d' % (
self.num_paid,
self.num_total
)
else:
suffix = ''
if self.remaining is not None:
remaining = self.remaining
else:
remaining = None
return {
'status': self.status,
'payment_type': self.payment_type if abbreviate is False else self.abbreviate(self.payment_type),
'name': self.name,
'suffix': suffix,
'amount': self.display_amount,
'remaining': remaining,
'last_date': self.short_date(self.last_date) if abbreviate else self.last_date,
'due_date': self.short_date(self.due_date) if abbreviate else self.due_date
}
def abbreviate(self, string):
abbreviated = ''
for i in range(0, len(string)):
if string[i].isupper():
abbreviated += string[i]
return abbreviated
def short_date(self, date):
if not date:
return None
return date.strftime('%d/%m/%y')
def display(self):
data = self.data()
print("%s: %s %s %s %s %s %s %s" % (
data['status'].rjust(7),
data['payment_type'].ljust(15),
data['name'].ljust(25),
data['suffix'].ljust(5),
('£%.2f' % (data['amount'])).ljust(8),
('£%.2f' % (data['remaining'])).ljust(8) if data['remaining'] else ''.ljust(8),
data['last_date'].strftime('%Y-%m-%d').ljust(12) if data['last_date'] else ''.ljust(12),
data['due_date'].strftime('%Y-%m-%d').ljust(10) if data['due_date'] else ''
))
@property
def name(self):
return self.payment_config['name']
@property
def status(self):
if 'start_date' in self.payment_config and self.payment_config['start_date'] >= self.next_salary_date:
return 'SKIPPED'
if 'yearly_month' in self.payment_config:
if self.yearly_payment_due_this_month(self.payment_config, self.last_salary_date) is False:
return 'SKIPPED'
if 'renew_date' in self.payment_config and self.payment_config['renew_date'] >= self.next_salary_date:
return 'SKIPPED'
if 'exclude_months' in self.payment_config and self.today.month in self.payment_config['exclude_months']:
return 'SKIPPED'
if self.last_date and self.last_date >= self.last_salary_date:
return 'PAID'
if self.due_date and self.due_date >= self.next_salary_date:
return 'SKIPPED'
return 'DUE'
@property
def payment_type(self):
return re.sub(r'(?<!^)(?=[A-Z])', '_', type(self).__name__).replace('_',' ')
@property
def num_paid(self):
return None
@property
def num_total(self):
if 'months' in self.payment_config:
return self.payment_config['months']
return None
@property
def remaining(self):
pass
@property
def display_amount(self):
today = datetime.datetime.now()
today = datetime.date(today.year, today.month, today.day)
if 'last_amount_overrides' in Config().keys and \
self.payment_config['name'] in Config().last_amount_overrides and \
self.last_salary_amount in Config().last_amount_overrides[self.payment_config['name']]:
amount = Config().last_amount_overrides[self.payment_config['name']][self.last_salary_amount]
elif 'renewal' in self.payment_config and (today >= self.payment_config['renewal']['date'] or self.status == 'PAID'):
if 'first_payment' in self.payment_config['renewal'] and today <= self.payment_config['renewal']['date']:
amount = self.payment_config['renewal']['first_payment']
else:
if self.last_date >= self.payment_config['renewal']['date']:
amount = float(getattr(self.last_payment, self.transaction_type))
else:
amount = self.payment_config['renewal']['amount']
elif self.last_payment:
amount = float(getattr(self.last_payment, self.transaction_type))
else:
amount = self.payment_config['amount']
if self.transaction_type == 'money_in':
return 0 - amount
return amount
@property
def last_date(self):
if 'last_date' in self.cache:
return self.cache['last_date']
if 'last_date_overrides' in self.config and \
self.payment_config['name'] in self.config['last_date_overrides'] and \
self.last_salary_date in self.config['last_date_overrides'][self.payment_config['name']]:
self.cache['last_date'] = self.config['last_date_overrides'][self.payment_config['name']][self.last_salary_date]
return self.cache['last_date']
if 'desc' not in self.payment_config:
self.cache['last_date'] = None
return self.cache['last_date']
if self.last_payment:
self.cache['last_date'] = self.last_payment.date
else:
if self.older_last_payment is not None:
self.cache['last_date'] = self.older_last_payment.date
else:
self.cache['last_date'] = None
return self.cache['last_date']
@property
def last_payment(self):
if 'last_payment' in self.cache:
return self.cache['last_payment']
if 'desc' not in self.payment_config:
self.payment_config['desc'] = type(self).__name__
where=[{'clause': self.transaction_type + ' > %s', 'params': [0]}]
if 'start_date' in self.payment_config:
where.append({
'clause': '`date` >= %s',
'params': [self.payment_config['start_date']]
})
if self.always_fixed or 'fixed' in self.payment_config and self.payment_config['fixed']:
method_name = f"find_all_by_declined_and_{self.transaction_type}_and_description"
| transactions = getattr(Transaction(), method_name)( | 1 | 2023-11-05 12:48:18+00:00 | 4k |
rossiyareich/inknhue | test.py | [
{
"identifier": "ConditionalAutoencoder",
"path": "src/conditional/conditional_autoencoder.py",
"snippet": "class ConditionalAutoencoder(nn.Module):\n def __init__(\n self,\n emb_channels: int,\n z_channels: int,\n channels: int,\n channel_multipliers: List[int],\n n_resnet_blocks: int,\n in_channels: int,\n out_channels: int,\n ) -> None:\n super().__init__()\n self.encoder = Encoder(\n channels=channels,\n channel_multipliers=channel_multipliers,\n n_resnet_blocks=n_resnet_blocks,\n in_channels=in_channels,\n z_channels=z_channels,\n )\n self.cond_decoder = ConditionalDecoder(\n channels=channels,\n channel_multipliers=channel_multipliers,\n n_resnet_blocks=n_resnet_blocks,\n out_channels=out_channels,\n z_channels=z_channels,\n )\n self.cond_encoder = ConditionalEncoder(\n channels=channels,\n channel_multipliers=channel_multipliers,\n n_resnet_blocks=n_resnet_blocks,\n in_channels=in_channels,\n )\n self.quant_conv = nn.Conv2d(2 * z_channels, 2 * emb_channels, 1)\n self.post_quant_conv = nn.Conv2d(emb_channels, z_channels, 1)\n\n def encode(self, img: torch.Tensor) -> GaussianDistribution:\n z = self.encoder(img)\n moments = self.quant_conv(z)\n return GaussianDistribution(moments)\n\n def encode_cond(self, cond: torch.Tensor) -> List[torch.Tensor]:\n conds_z = self.cond_encoder(cond)\n return conds_z\n\n def decode(self, z: torch.Tensor, conds_z: List[torch.Tensor]) -> torch.Tensor:\n z = self.post_quant_conv(z)\n return self.cond_decoder(z, conds_z)\n\n @classmethod\n def load_from_saved(cls, pretrained_ckpt, pretrained_yaml, checkpoint_ckpt):\n pretrained_ckpt = torch.load(pretrained_ckpt)\n pretrained_yaml = OmegaConf.load(pretrained_yaml)\n checkpoint_ckpt = torch.load(checkpoint_ckpt)\n\n cond_autoencoder = cls(\n emb_channels=pretrained_yaml.params.embed_dim,\n z_channels=pretrained_yaml.params.ddconfig.z_channels,\n channels=pretrained_yaml.params.ddconfig.ch,\n channel_multipliers=pretrained_yaml.params.ddconfig.ch_mult,\n n_resnet_blocks=pretrained_yaml.params.ddconfig.num_res_blocks,\n in_channels=pretrained_yaml.params.ddconfig.in_channels,\n out_channels=pretrained_yaml.params.ddconfig.out_ch,\n )\n\n quant_conv_state_dict = {}\n post_quant_conv_state_dict = {}\n encoder_state_dict = {}\n cond_encoder_state_dict = {}\n cond_decoder_state_dict = {}\n\n for k, v in pretrained_ckpt[\"state_dict\"].items():\n if k.startswith(\"quant_conv\"):\n quant_conv_state_dict[k.replace(\"quant_conv.\", \"\", 1)] = v\n elif k.startswith(\"post_quant_conv\"):\n post_quant_conv_state_dict[k.replace(\"post_quant_conv.\", \"\", 1)] = v\n elif k.startswith(\"encoder\"):\n encoder_state_dict[k.replace(\"encoder.\", \"\", 1)] = v\n elif k.startswith(\"decoder\") or k.startswith(\"loss\"):\n continue\n else:\n raise KeyError(f\"Unexpected state_dict key: {k}\")\n\n cond_encoder_state_dict = checkpoint_ckpt[\"cond_encoder_state_dict\"]\n cond_decoder_state_dict = checkpoint_ckpt[\"cond_decoder_state_dict\"]\n\n cond_autoencoder.quant_conv.load_state_dict(quant_conv_state_dict, strict=True)\n cond_autoencoder.post_quant_conv.load_state_dict(\n post_quant_conv_state_dict, strict=True\n )\n cond_autoencoder.encoder.load_state_dict(encoder_state_dict, strict=True)\n cond_autoencoder.cond_encoder.load_state_dict(\n cond_encoder_state_dict, strict=True\n )\n cond_autoencoder.cond_decoder.load_state_dict(\n cond_decoder_state_dict, strict=True\n )\n\n return cond_autoencoder"
},
{
"identifier": "ConditionalTestDataset",
"path": "src/conditional/conditional_test_dataset.py",
"snippet": "class ConditionalTestDataset(Dataset):\n def __init__(self, dataset_path, transform=None):\n self.dataset_path = dataset_path\n self.transform = transform\n self.cond_dataset = []\n\n grayscale = get_entries(f\"{dataset_path}/grayscale/*.png\")\n style2paints = get_entries(f\"{dataset_path}/style2paints/*.png\")\n\n assert len(grayscale) == len(style2paints)\n\n for g, s in zip(grayscale, style2paints):\n self.cond_dataset.append({\"grayscale\": g, \"style2paints\": s})\n\n def __len__(self):\n return len(self.cond_dataset)\n\n def __getitem__(self, idx):\n g = Image.open(self.cond_dataset[idx][\"grayscale\"]).convert(\"RGB\")\n s = Image.open(self.cond_dataset[idx][\"style2paints\"]).convert(\"RGB\")\n\n if self.transform is not None:\n return self.transform(g, s)\n\n return g, s"
},
{
"identifier": "resize_max",
"path": "src/utils.py",
"snippet": "def resize_max(img, max_size):\n w, h = img.size\n if w > max_size or h > max_size:\n if w <= h:\n h = int(float(h) * float(max_size) / float(w))\n w = max_size\n else:\n w = int(float(w) * float(max_size) / float(h))\n h = max_size\n img = img.resize((w, h), Image.Resampling.LANCZOS)\n\n return img"
}
] | import argparse
import gc
import logging
import os
import numpy as np
import torch
from einops import rearrange
from omegaconf import OmegaConf
from PIL import Image
from rich.traceback import install
from torch.utils.data import DataLoader, Subset
from torchvision import transforms
from tqdm.auto import tqdm
from src.conditional.conditional_autoencoder import ConditionalAutoencoder
from src.conditional.conditional_test_dataset import ConditionalTestDataset
from src.utils import resize_max | 1,823 |
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--conf_path",
type=str,
required=True,
help="Path to the configuration file",
)
args = parser.parse_args()
return args
@torch.no_grad()
def main(args):
# Load configuration
logging.info("Loading configuration")
conf = OmegaConf.load(args.conf_path)
# Create output directory
logging.info("Creating output directory")
os.makedirs(conf.paths.results_path, exist_ok=True)
# Setup models
logging.info("Setting up models")
cond_autoencoder = ConditionalAutoencoder.load_from_saved(
conf.paths.pretrained_ckpt,
conf.paths.pretrained_yaml,
conf.paths.conditional_ckpt,
).to(device="cuda", dtype=torch.bfloat16)
cond_autoencoder.eval()
# Load dataset & dataloader
logging.info("Setting up Dataset and DataLoader")
def transform(g, s):
g, s = resize_max(g, conf.params.max_size), resize_max(s, conf.params.max_size)
g = g.resize(
(((g.size[0] + 7) // 8) * 8, ((g.size[1] + 7) // 8) * 8),
Image.Resampling.LANCZOS,
)
s = s.resize(g.size, Image.Resampling.LANCZOS)
pil_to_tensor = transforms.PILToTensor()
g, s = pil_to_tensor(g), pil_to_tensor(s)
g, s = (
((g / 255.0) * 2.0 - 1.0).clamp(-1, 1),
((s / 255.0) * 2.0 - 1.0).clamp(-1, 1),
)
g, s = g.to(device="cuda", dtype=torch.bfloat16), s.to(
device="cuda", dtype=torch.bfloat16
)
return g, s
|
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--conf_path",
type=str,
required=True,
help="Path to the configuration file",
)
args = parser.parse_args()
return args
@torch.no_grad()
def main(args):
# Load configuration
logging.info("Loading configuration")
conf = OmegaConf.load(args.conf_path)
# Create output directory
logging.info("Creating output directory")
os.makedirs(conf.paths.results_path, exist_ok=True)
# Setup models
logging.info("Setting up models")
cond_autoencoder = ConditionalAutoencoder.load_from_saved(
conf.paths.pretrained_ckpt,
conf.paths.pretrained_yaml,
conf.paths.conditional_ckpt,
).to(device="cuda", dtype=torch.bfloat16)
cond_autoencoder.eval()
# Load dataset & dataloader
logging.info("Setting up Dataset and DataLoader")
def transform(g, s):
g, s = resize_max(g, conf.params.max_size), resize_max(s, conf.params.max_size)
g = g.resize(
(((g.size[0] + 7) // 8) * 8, ((g.size[1] + 7) // 8) * 8),
Image.Resampling.LANCZOS,
)
s = s.resize(g.size, Image.Resampling.LANCZOS)
pil_to_tensor = transforms.PILToTensor()
g, s = pil_to_tensor(g), pil_to_tensor(s)
g, s = (
((g / 255.0) * 2.0 - 1.0).clamp(-1, 1),
((s / 255.0) * 2.0 - 1.0).clamp(-1, 1),
)
g, s = g.to(device="cuda", dtype=torch.bfloat16), s.to(
device="cuda", dtype=torch.bfloat16
)
return g, s
| cond_dataset = cond_dataset_full = ConditionalTestDataset( | 1 | 2023-11-03 09:35:30+00:00 | 4k |
TencentBlueKing/bkflow-feel | bkflow_feel/parsers.py | [
{
"identifier": "RangeGroupData",
"path": "bkflow_feel/data_models.py",
"snippet": "class RangeGroupData(BaseModel):\n left_val: Any\n right_val: Any\n left_operator: RangeGroupOperator\n right_operator: RangeGroupOperator"
},
{
"identifier": "RangeGroupOperator",
"path": "bkflow_feel/data_models.py",
"snippet": "class RangeGroupOperator(enum.Enum):\n GT = \"greater than\"\n GTE = \"greater than or equal\"\n LT = \"less than\"\n LTE = \"less than or equal\""
},
{
"identifier": "FEELFunctionsManager",
"path": "bkflow_feel/utils.py",
"snippet": "class FEELFunctionsManager:\n __hub = {}\n\n @classmethod\n def register_invocation_cls(cls, invocation_cls):\n func_name = invocation_cls.Meta.func_name\n existed_invocation_cls = cls.__hub.get(func_name)\n if existed_invocation_cls:\n raise RuntimeError(\n \"func register error, {}'s func_name {} conflict with {}\".format(\n existed_invocation_cls, func_name, invocation_cls\n )\n )\n\n cls.__hub[func_name] = invocation_cls\n\n @classmethod\n def register_funcs(cls, func_dict):\n for func_name, func_path in func_dict.items():\n if not isinstance(func_name, str):\n raise ValueError(f\"func_name {func_name} should be string\")\n if func_name in cls.__hub:\n raise ValueError(\n \"func register error, {}'s func_name {} conflict with {}\".format(\n func_path, func_name, cls.__hub[func_name]\n )\n )\n cls.__hub[func_name] = func_path\n\n @classmethod\n def clear(cls):\n cls.__hub = {}\n\n @classmethod\n def all_funcs(cls):\n funcs = {}\n for version, invocation_cls in cls.__hub.items():\n funcs[version] = invocation_cls\n return funcs\n\n @classmethod\n def get_func(cls, func_name) -> Callable:\n func_obj = cls.__hub.get(func_name)\n if not func_obj:\n raise ValueError(\"func object {} not found\".format(func_name))\n\n if isinstance(func_obj, FEELInvocationMeta):\n return func_obj()\n else:\n module_path, func_name = str(func_obj).rsplit(\".\", 1)\n module = importlib.import_module(module_path)\n func = getattr(module, func_name)\n return func\n\n @classmethod\n def func_call(cls, func_name, *args, **kwargs):\n func = cls.get_func(func_name)\n return func(*args, **kwargs)"
},
{
"identifier": "BinaryOperationValidator",
"path": "bkflow_feel/validators.py",
"snippet": "class BinaryOperationValidator(Validator):\n def validate(self, left_item, right_item, instance_type=None, *args, **kwargs) -> ValidationResult:\n if not isinstance(left_item, type(right_item)):\n return ValidationResult(\n False, f\"Type of both operators must be same, get {type(left_item)} and {type(right_item)}\",\n )\n if instance_type is not None and not isinstance(left_item, instance_type):\n return ValidationResult(\n False, f\"Type of both operators must be {instance_type}, get {type(left_item)} and {type(right_item)}\",\n )\n return ValidationResult(True)"
},
{
"identifier": "DummyValidator",
"path": "bkflow_feel/validators.py",
"snippet": "class DummyValidator(Validator):\n def validate(self, *args, **kwargs):\n pass"
},
{
"identifier": "ListsLengthValidator",
"path": "bkflow_feel/validators.py",
"snippet": "class ListsLengthValidator(Validator):\n def validate(self, lists, *args, **kwargs):\n if not lists or all(len(alist) == len(lists[0]) for alist in lists):\n return ValidationResult(True)\n return ValidationResult(False, \"lists length not equal\")"
}
] | import abc
import datetime
import logging
import re
import pytz
from dateutil.parser import parse as date_parse
from .data_models import RangeGroupData, RangeGroupOperator
from .utils import FEELFunctionsManager
from .validators import BinaryOperationValidator, DummyValidator, ListsLengthValidator | 2,333 | self.value = value
def evaluate(self, context):
return self.key.evaluate(context), self.value.evaluate(context)
class Context(Expression):
def __init__(self, pairs):
self.pairs = pairs
def evaluate(self, context):
return dict(pair.evaluate(context) for pair in self.pairs)
class ContextItem(Expression):
def __init__(self, expr, keys):
self.expr = expr
self.keys = keys
def evaluate(self, context):
result = self.expr.evaluate(context)
for key in self.keys:
if not isinstance(result, dict):
return None
result = result.get(key)
return result
class Variable(Expression):
def __init__(self, name):
self.name = name
def evaluate(self, context):
return context.get(self.name)
class FunctionCall(Expression):
def __init__(self, name, args):
self.name = name
self.args = args
def evaluate(self, context):
function = context.get(self.name)
if function is None:
raise ValueError(f"Unknown function: {self.name}")
return function(*[arg.evaluate(context) for arg in self.args])
class BinaryOperator(Expression):
def __init__(self, left, right):
self.left = left
self.right = right
class SameTypeBinaryOperator(BinaryOperator):
validator_cls = BinaryOperationValidator
def __init__(self, operation, left, right):
super().__init__(left, right)
self.operation = operation
def evaluate(self, context):
left_val = self.left.evaluate(context)
right_val = self.right.evaluate(context)
self.validator_cls()(left_val, right_val)
return getattr(self, self.operation)(left_val, right_val)
def add(self, left_val, right_val):
return left_val + right_val
def subtract(self, left_val, right_val):
return left_val - right_val
def multiply(self, left_val, right_val):
return left_val * right_val
def divide(self, left_val, right_val):
return left_val / right_val
def power(self, left_val, right_val):
return left_val**right_val
def equal(self, left_val, right_val):
return left_val == right_val
def less_than(self, left_val, right_val):
return left_val < right_val
def greater_than(self, left_val, right_val):
return left_val > right_val
def less_than_or_equal(self, left_val, right_val):
return left_val <= right_val
def greater_than_or_equal(self, left_val, right_val):
return left_val >= right_val
class NotEqual(BinaryOperator):
def evaluate(self, context):
return self.left.evaluate(context) != self.right.evaluate(context)
class And(BinaryOperator):
def evaluate(self, context):
return self.left.evaluate(context) and self.right.evaluate(context)
class Or(BinaryOperator):
def evaluate(self, context):
return self.left.evaluate(context) or self.right.evaluate(context)
class In(BinaryOperator):
def evaluate(self, context):
left_val = self.left.evaluate(context)
right_val = self.right.evaluate(context)
if isinstance(self.right, RangeGroup):
left_operation = (
left_val > right_val.left_val
| # -*- coding: utf-8 -*-
logger = logging.getLogger(__name__)
class Expression(metaclass=abc.ABCMeta):
validator_cls = DummyValidator
@abc.abstractmethod
def evaluate(self, context):
pass
class CommonExpression(Expression):
def __init__(self, value):
self.value = value
def evaluate(self, context):
return self.value
class Expr(CommonExpression):
def evaluate(self, context):
return self.value.evaluate(context)
class Number(CommonExpression):
pass
class String(CommonExpression):
pass
class Boolean(CommonExpression):
pass
class Null(Expression):
def evaluate(self, context):
return None
class List(Expression):
def __init__(self, *items):
self.items = items
def evaluate(self, context):
return [item.evaluate(context) for item in self.items]
class ListItem(Expression):
def __init__(self, list_expr, index):
self.list_expr = list_expr
self.index = index
def evaluate(self, context):
items = self.list_expr.evaluate(context)
if not isinstance(items, list) or self.index == 0 or len(items) < abs(self.index):
return None
items = items[self.index - 1] if self.index > 0 else items[self.index]
return items
class ListMatch(Expression):
validator_cls = ListsLengthValidator
def __init__(self, iter_pairs, expr):
self.iter_pairs = iter_pairs
self.expr = expr
def evaluate_and_validate_iter_pairs(self, context):
iter_pairs = [(pair[0].value, pair[1].evaluate(context)) for pair in self.iter_pairs]
self.validator_cls()(lists=[pair[1] for pair in iter_pairs])
return iter_pairs
class ListEvery(ListMatch):
def evaluate(self, context):
iter_pairs = self.evaluate_and_validate_iter_pairs(context)
for i in range(0, len(iter_pairs[0][1])):
tmp_context = {**context, **{pair[0]: pair[1][i] for pair in iter_pairs}}
if self.expr.evaluate(tmp_context) is False:
return False
return True
class ListSome(ListMatch):
def evaluate(self, context):
iter_pairs = self.evaluate_and_validate_iter_pairs(context)
for i in range(0, len(iter_pairs[0][1])):
tmp_context = {**context, **{pair[0]: pair[1][i] for pair in iter_pairs}}
if self.expr.evaluate(tmp_context) is True:
return True
return False
class ListFilter(Expression):
def __init__(self, list_expr, filter_expr):
self.list_expr = list_expr
self.filter_expr = filter_expr
def evaluate(self, context):
items = self.list_expr.evaluate(context)
if not isinstance(items, list):
return None
result = []
for item in items:
try:
# 当 item 为 dict 且 filter 中对比的 key 缺失时,可能报错
if self.filter_expr.evaluate(item if isinstance(item, dict) else {"item": item}):
result.append(item)
except Exception as e:
logger.exception(e)
pass
return result
class Pair(Expression):
def __init__(self, key, value):
self.key = key
self.value = value
def evaluate(self, context):
return self.key.evaluate(context), self.value.evaluate(context)
class Context(Expression):
def __init__(self, pairs):
self.pairs = pairs
def evaluate(self, context):
return dict(pair.evaluate(context) for pair in self.pairs)
class ContextItem(Expression):
def __init__(self, expr, keys):
self.expr = expr
self.keys = keys
def evaluate(self, context):
result = self.expr.evaluate(context)
for key in self.keys:
if not isinstance(result, dict):
return None
result = result.get(key)
return result
class Variable(Expression):
def __init__(self, name):
self.name = name
def evaluate(self, context):
return context.get(self.name)
class FunctionCall(Expression):
def __init__(self, name, args):
self.name = name
self.args = args
def evaluate(self, context):
function = context.get(self.name)
if function is None:
raise ValueError(f"Unknown function: {self.name}")
return function(*[arg.evaluate(context) for arg in self.args])
class BinaryOperator(Expression):
def __init__(self, left, right):
self.left = left
self.right = right
class SameTypeBinaryOperator(BinaryOperator):
validator_cls = BinaryOperationValidator
def __init__(self, operation, left, right):
super().__init__(left, right)
self.operation = operation
def evaluate(self, context):
left_val = self.left.evaluate(context)
right_val = self.right.evaluate(context)
self.validator_cls()(left_val, right_val)
return getattr(self, self.operation)(left_val, right_val)
def add(self, left_val, right_val):
return left_val + right_val
def subtract(self, left_val, right_val):
return left_val - right_val
def multiply(self, left_val, right_val):
return left_val * right_val
def divide(self, left_val, right_val):
return left_val / right_val
def power(self, left_val, right_val):
return left_val**right_val
def equal(self, left_val, right_val):
return left_val == right_val
def less_than(self, left_val, right_val):
return left_val < right_val
def greater_than(self, left_val, right_val):
return left_val > right_val
def less_than_or_equal(self, left_val, right_val):
return left_val <= right_val
def greater_than_or_equal(self, left_val, right_val):
return left_val >= right_val
class NotEqual(BinaryOperator):
def evaluate(self, context):
return self.left.evaluate(context) != self.right.evaluate(context)
class And(BinaryOperator):
def evaluate(self, context):
return self.left.evaluate(context) and self.right.evaluate(context)
class Or(BinaryOperator):
def evaluate(self, context):
return self.left.evaluate(context) or self.right.evaluate(context)
class In(BinaryOperator):
def evaluate(self, context):
left_val = self.left.evaluate(context)
right_val = self.right.evaluate(context)
if isinstance(self.right, RangeGroup):
left_operation = (
left_val > right_val.left_val | if right_val.left_operator == RangeGroupOperator.GT | 1 | 2023-11-09 13:47:26+00:00 | 4k |
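
The record above is cut off mid-expression by the token limit; the classes it shows follow a small interpreter pattern in which every node evaluates itself against a context dict. A minimal, self-contained sketch of that pattern (simplified stand-in names, not the classes from the record):

# Stand-in sketch of the interpreter pattern used above (assumed names, not the source classes).
class Var:
    def __init__(self, name):
        self.name = name
    def evaluate(self, context):
        return context.get(self.name)

class GreaterThan:
    def __init__(self, left, right):
        self.left, self.right = left, right
    def evaluate(self, context):
        return self.left.evaluate(context) > self.right.evaluate(context)

class Every:
    def __init__(self, var_name, list_name, expr):
        self.var_name, self.list_name, self.expr = var_name, list_name, expr
    def evaluate(self, context):
        # bind each list element to var_name, the way ListEvery binds its iter_pairs
        return all(self.expr.evaluate({**context, self.var_name: item})
                   for item in context.get(self.list_name, []))

expr = Every("score", "scores", GreaterThan(Var("score"), Var("passing")))
print(expr.evaluate({"scores": [70, 85, 92], "passing": 60}))  # True
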
namedgraph/oxijen | oxijen/model_impl/impl.py | [
{
"identifier": "Resource",
"path": "oxijen/rdf_model.py",
"snippet": "class Resource(ABC):\n\n @property\n def node(self):\n return self._node\n\n @property\n def graph(self):\n return self._graph\n\n @property\n def is_anon(self):\n if isinstance(self.node, NamedNode):\n return False\n else:\n return True\n \n @property\n def uri(self):\n if isinstance(self.node, NamedNode):\n return self.node.value\n else:\n return None\n\n @property\n def id(self):\n if isinstance(self.node, BlankNode):\n return self.node.value\n else:\n return None\n \n @abstractmethod\n def add_property(self, property: 'Property', value: Union['Resource', Literal]) -> 'Resource':\n pass\n\n @abstractmethod\n def remove_all(self, property: 'Property') -> 'Resource':\n pass\n\n @abstractmethod\n def list_properties(self, property: 'Property') -> Iterator[Triple]:\n pass"
},
{
"identifier": "Property",
"path": "oxijen/rdf_model.py",
"snippet": "class Property(Resource):\n\n pass"
},
{
"identifier": "Graph",
"path": "oxijen/rdf_model.py",
"snippet": "class Graph(ABC):\n\n @abstractmethod\n def __len__(self) -> int:\n pass\n\n @abstractmethod\n def create_resource(self, uri: Optional[str] = None) -> Resource:\n pass\n\n @abstractmethod\n def create_property(self, uri: str) -> Property:\n pass\n\n @abstractmethod\n def create_literal(self, value: str, language: Optional[str] = None) -> Literal:\n pass\n\n @abstractmethod\n def create_typed_literal(self, value: Any, datatype: Optional[Union[str, NamedNode]] = None) -> Literal:\n pass\n\n @abstractmethod\n def list_subjects(self) -> Iterator[Resource]:\n pass\n\n @abstractmethod\n def list_triples(self) -> Iterator[Triple]:\n pass\n\n @abstractmethod\n def add(self, triples: Union[Iterator[Triple], 'Graph']) -> 'Graph':\n pass\n\n @abstractmethod\n def remove_all(self ) -> 'Graph':\n pass"
},
{
"identifier": "Dataset",
"path": "oxijen/rdf_model.py",
"snippet": "class Dataset(ABC):\n\n @property\n def default_graph(self):\n pass\n \n @abstractmethod\n def graph_names(self) -> Iterator[Resource]:\n pass\n\n @abstractmethod\n def contains_named_graph(self, name: Union[str, Resource]) -> bool:\n pass\n\n @abstractmethod\n def get_named_graph(self, name: Union[str, Resource]) -> Graph:\n pass\n\n @abstractmethod\n def add_named_graph(self, name: Union[str, Resource], graph: Graph) -> 'Dataset':\n pass\n\n @abstractmethod\n def remove_named_graph(self, name: Union[str, Resource], graph: Graph) -> 'Dataset':\n pass\n\n @abstractmethod\n def list_quads(self) -> Iterator[Quad]:\n pass"
},
{
"identifier": "XSD",
"path": "oxijen/model_impl/xsd.py",
"snippet": "class XSD(Enum):\n\n NS : str = \"http://www.w3.org/2001/XMLSchema#\"\n\n INTEGER = NS + \"integer\"\n STRING = NS + \"string\"\n FLOAT = NS + \"float\""
}
] | from oxijen.rdf_model import Resource, Property, Graph, Dataset
from oxijen.model_impl.xsd import XSD
from pyoxigraph import Store, Triple, BlankNode, NamedNode, Literal, Quad, DefaultGraph
from typing import Iterator, Union, Optional, Any | 1,777 | def __hash__(self):
return hash(self.node.value)
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.node.value == other.node.value
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
def __str__(self) -> str:
return self.node.__str__()
def __repr__(self) -> str:
return self.__str__()
def add_property(self, property: 'Property', value: Union[Resource, Literal]) -> 'Resource':
if isinstance(value, Resource):
value = value.node
self.graph.store.add(Quad(self.node, property.node, value, self.graph.name)) # assumes GraphStoreImpl!
return self
def list_properties(self, property: Optional[Property] = None) -> Iterator[Triple]:
if isinstance(property, Property):
property_node = property.node
else:
property_node = None
quads = self.graph.store.quads_for_pattern(self.node, property_node, None, None)
return map(lambda quad: quad.triple, quads)
def remove_all(self, property: Optional[Property] = None) -> Resource:
if isinstance(property, Property):
property_node = property.node
else:
property_node = None
quad_iter = self.graph.store.quads_for_pattern(self.node, property_node, None, None)
for quad in quad_iter:
self.graph.store.remove(quad)
return self
class PropertyImpl(ResourceImpl, Property):
pass
class GraphImpl(Graph):
def create_resource(self, uri: Optional[str] = None) -> Resource:
if uri is not None:
return ResourceImpl(NamedNode(uri), self)
else:
return ResourceImpl(BlankNode(), self)
def create_property(self, uri: str) -> Property:
return ResourceImpl(NamedNode(uri), self)
def create_literal(self, value: str, language: Optional[str] = None) -> Literal:
return Literal(value, language=language) # should it be xsd:string-typed by default as per RDF 1.1?
def create_typed_literal(self, value: Any, datatype: Optional[Union[str, NamedNode]] = None) -> Literal:
if datatype is None:
match value:
case int():
datatype = NamedNode(XSD.INTEGER.value)
case str():
datatype = NamedNode(XSD.STRING.value)
case float():
datatype = NamedNode(XSD.FLOAT.value)
# TO-DO: support more types
case _:
raise TypeError('Unsupported type conversion')
else:
if type(datatype) is str:
datatype = NamedNode(datatype)
return Literal(str(value), datatype=datatype)
class GraphStoreImpl(GraphImpl):
def __init__(self, store: Store, name: Union[BlankNode, NamedNode]):
self.store = store
self.name = name
def __len__(self) -> int:
return len(list(self.list_triples()))
def list_subjects(self) -> Iterator[Resource]:
return iter(set(map(lambda triple: ResourceImpl(triple.subject, self), self.list_triples())))
def list_triples(self) -> Iterator[Triple]:
quads = self.store.quads_for_pattern(None, None, None, self.name)
return map(lambda quad: quad.triple, quads)
def add(self, triples: Union[Iterator[Triple], 'Graph']) -> 'Graph':
if isinstance(triples, Graph):
triples = triples.list_triples()
quads = map(lambda triple: Quad(triple.subject, triple.predicate, triple.object, self.name), triples)
self.store.extend(quads)
return self
def remove_all(self) -> 'Graph':
self.store.remove_graph(self.name)
return self
|
class ResourceImpl(Resource):
def __init__(self, node: Union[BlankNode, NamedNode], graph: Graph):
self._node = node
self._graph = graph
def __hash__(self):
return hash(self.node.value)
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.node.value == other.node.value
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
def __str__(self) -> str:
return self.node.__str__()
def __repr__(self) -> str:
return self.__str__()
def add_property(self, property: 'Property', value: Union[Resource, Literal]) -> 'Resource':
if isinstance(value, Resource):
value = value.node
self.graph.store.add(Quad(self.node, property.node, value, self.graph.name)) # assumes GraphStoreImpl!
return self
def list_properties(self, property: Optional[Property] = None) -> Iterator[Triple]:
if isinstance(property, Property):
property_node = property.node
else:
property_node = None
quads = self.graph.store.quads_for_pattern(self.node, property_node, None, None)
return map(lambda quad: quad.triple, quads)
def remove_all(self, property: Optional[Property] = None) -> Resource:
if isinstance(property, Property):
property_node = property.node
else:
property_node = None
quad_iter = self.graph.store.quads_for_pattern(self.node, property_node, None, None)
for quad in quad_iter:
self.graph.store.remove(quad)
return self
class PropertyImpl(ResourceImpl, Property):
pass
class GraphImpl(Graph):
def create_resource(self, uri: Optional[str] = None) -> Resource:
if uri is not None:
return ResourceImpl(NamedNode(uri), self)
else:
return ResourceImpl(BlankNode(), self)
def create_property(self, uri: str) -> Property:
return ResourceImpl(NamedNode(uri), self)
def create_literal(self, value: str, language: Optional[str] = None) -> Literal:
return Literal(value, language=language) # should it be xsd:string-typed by default as per RDF 1.1?
def create_typed_literal(self, value: Any, datatype: Optional[Union[str, NamedNode]] = None) -> Literal:
if datatype is None:
match value:
case int():
datatype = NamedNode(XSD.INTEGER.value)
case str():
datatype = NamedNode(XSD.STRING.value)
case float():
datatype = NamedNode(XSD.FLOAT.value)
# TO-DO: support more types
case _:
raise TypeError('Unsupported type conversion')
else:
if type(datatype) is str:
datatype = NamedNode(datatype)
return Literal(str(value), datatype=datatype)
class GraphStoreImpl(GraphImpl):
def __init__(self, store: Store, name: Union[BlankNode, NamedNode]):
self.store = store
self.name = name
def __len__(self) -> int:
return len(list(self.list_triples()))
def list_subjects(self) -> Iterator[Resource]:
return iter(set(map(lambda triple: ResourceImpl(triple.subject, self), self.list_triples())))
def list_triples(self) -> Iterator[Triple]:
quads = self.store.quads_for_pattern(None, None, None, self.name)
return map(lambda quad: quad.triple, quads)
def add(self, triples: Union[Iterator[Triple], 'Graph']) -> 'Graph':
if isinstance(triples, Graph):
triples = triples.list_triples()
quads = map(lambda triple: Quad(triple.subject, triple.predicate, triple.object, self.name), triples)
self.store.extend(quads)
return self
def remove_all(self) -> 'Graph':
self.store.remove_graph(self.name)
return self
| class DatasetStoreImpl(Dataset): | 3 | 2023-11-03 19:50:51+00:00 | 4k |
sivasurend/lyzr | build/lib/lyzr/utils/chat_utils.py | [
{
"identifier": "LyzrLLMFactory",
"path": "lyzr/base/llm.py",
"snippet": "class LyzrLLMFactory:\n\n def __init__(self) -> None:\n None\n @staticmethod\n def from_defaults(model: str = \"gpt-3.5-turbo\", **kwargs) -> LLM:\n return LiteLLM(model=model, **kwargs)"
},
{
"identifier": "LyzrService",
"path": "lyzr/base/service.py",
"snippet": "class LyzrService:\n @staticmethod\n def from_defaults(\n llm: Optional[LLMType] = \"default\",\n embed_model: Optional[EmbedType] = \"default\",\n system_prompt: str = None,\n query_wrapper_prompt: Union[str, BasePromptTemplate] = None,\n **kwargs,\n ) -> ServiceContext: \n if isinstance(query_wrapper_prompt, str):\n query_wrapper_prompt = PromptTemplate(template=query_wrapper_prompt)\n\n callback_manager: CallbackManager = kwargs.get(\n \"callback_manager\", CallbackManager()\n )\n\n node_parser = SimpleNodeParser.from_defaults(\n chunk_size=750,\n chunk_overlap=100,\n callback_manager=callback_manager,\n )\n\n service_context = ServiceContext.from_defaults(\n llm=llm,\n embed_model=embed_model,\n system_prompt=system_prompt,\n query_wrapper_prompt=query_wrapper_prompt,\n callback_manager=callback_manager,\n node_parser=node_parser,\n **kwargs,\n )\n\n return service_context"
},
{
"identifier": "LyzrVectorStoreIndex",
"path": "lyzr/base/vector_store.py",
"snippet": "class LyzrVectorStoreIndex:\n @staticmethod\n def from_defaults(\n vector_store_type: str = \"LanceDBVectorStore\", \n documents: Optional[Sequence[Document]] = None,\n service_context: Optional[ServiceContext] = None,\n **kwargs\n ) -> VectorStoreIndex:\n\n if documents is None and vector_store_type == \"SimpleVectorStore\":\n raise ValueError(\"documents must be provided for SimpleVectorStore\")\n\n vector_store_class = import_vector_store_class(vector_store_type)\n\n if documents is None:\n vector_store = vector_store_class(**kwargs)\n index = VectorStoreIndex.from_vector_store(\n vector_store=vector_store, service_context=service_context\n )\n else:\n if vector_store_type == \"LanceDBVectorStore\":\n kwargs[\"uri\"] = \"./.lancedb\" if \"uri\" not in kwargs else kwargs[\"uri\"]\n kwargs[\"table_name\"] = (\n \"vectors\" if \"table_name\" not in kwargs else kwargs[\"table_name\"]\n )\n vector_store = vector_store_class(**kwargs)\n storage_context = StorageContext.from_defaults(vector_store=vector_store)\n \n index = VectorStoreIndex.from_documents(\n documents=documents,\n storage_context=storage_context,\n service_context=service_context,\n show_progress=True,\n )\n\n return index"
},
{
"identifier": "read_pdf_as_documents",
"path": "lyzr/utils/document_reading.py",
"snippet": "def read_pdf_as_documents(\n input_dir: Optional[str] = None,\n input_files: Optional[List] = None,\n exclude_hidden: bool = True,\n filename_as_id: bool = True,\n recursive: bool = True,\n required_exts: Optional[List[str]] = None,\n **kwargs,\n) -> Sequence[Document]:\n file_extractor = {\".pdf\": LyzrPDFReader()}\n\n reader = SimpleDirectoryReader(\n input_dir=input_dir,\n exclude_hidden=exclude_hidden,\n file_extractor=file_extractor,\n input_files=input_files,\n filename_as_id=filename_as_id,\n recursive=recursive,\n required_exts=required_exts,\n **kwargs,\n )\n\n documents = reader.load_data()\n\n logger.info(f\"Found {len(documents)} 'documents'.\")\n return documents"
},
{
"identifier": "read_docx_as_documents",
"path": "lyzr/utils/document_reading.py",
"snippet": "def read_docx_as_documents(\n input_dir: Optional[str] = None,\n input_files: Optional[List] = None,\n exclude_hidden: bool = True,\n filename_as_id: bool = True,\n recursive: bool = True,\n required_exts: Optional[List[str]] = None,\n **kwargs,\n) -> Sequence[Document]:\n file_extractor = {\".docx\": LyzrDocxReader()}\n\n reader = SimpleDirectoryReader(\n input_dir=input_dir,\n exclude_hidden=exclude_hidden,\n file_extractor=file_extractor,\n input_files=input_files,\n filename_as_id=filename_as_id,\n recursive=recursive,\n required_exts=required_exts,\n **kwargs,\n )\n\n documents = reader.load_data()\n\n logger.info(f\"Found {len(documents)} 'documents'.\")\n return documents"
},
{
"identifier": "read_txt_as_documents",
"path": "lyzr/utils/document_reading.py",
"snippet": "def read_txt_as_documents(\n input_dir: Optional[str] = None,\n input_files: Optional[List] = None,\n exclude_hidden: bool = True,\n filename_as_id: bool = True,\n recursive: bool = True,\n required_exts: Optional[List[str]] = None,\n **kwargs,\n) -> Sequence[Document]:\n file_extractor = {\".txt\": LyzrTxtReader()}\n\n reader = SimpleDirectoryReader(\n input_dir=input_dir,\n exclude_hidden=exclude_hidden,\n file_extractor=file_extractor,\n input_files=input_files,\n filename_as_id=filename_as_id,\n recursive=recursive,\n required_exts=required_exts,\n **kwargs,\n )\n\n documents = reader.load_data()\n\n logger.info(f\"Found {len(documents)} 'documents'.\")\n return documents"
},
{
"identifier": "read_website_as_documents",
"path": "lyzr/utils/document_reading.py",
"snippet": "def read_website_as_documents(url: str) -> List[Document]:\n reader = LyzrWebsiteReader()\n documents = reader.load_data(url)\n return documents"
},
{
"identifier": "read_webpage_as_documents",
"path": "lyzr/utils/document_reading.py",
"snippet": "def read_webpage_as_documents(url: str) -> List[Document]:\n reader = LyzrWebPageReader()\n documents = reader.load_data(url)\n return documents"
},
{
"identifier": "read_youtube_as_documents",
"path": "lyzr/utils/document_reading.py",
"snippet": "def read_youtube_as_documents(\n urls: List[str] = None,\n) -> List[Document]:\n reader = LyzrYoutubeReader()\n documents = reader.load_data(urls)\n return documents"
}
] | from typing import Union, Optional, List
from llama_index.chat_engine.types import BaseChatEngine, ChatMode
from llama_index.embeddings.utils import EmbedType
from lyzr.base.llm import LyzrLLMFactory
from lyzr.base.service import LyzrService
from lyzr.base.vector_store import LyzrVectorStoreIndex
from lyzr.utils.document_reading import (
read_pdf_as_documents,
read_docx_as_documents,
read_txt_as_documents,
read_website_as_documents,
read_webpage_as_documents,
read_youtube_as_documents,
) | 1,907 |
def pdf_chat_(
input_dir: Optional[str] = None,
input_files: Optional[List] = None,
exclude_hidden: bool = True,
filename_as_id: bool = True,
recursive: bool = True,
required_exts: Optional[List[str]] = None,
system_prompt: str = None,
query_wrapper_prompt: str = None,
embed_model: Union[str, EmbedType] = "default",
llm_params: dict = None,
vector_store_params: dict = None,
service_context_params: dict = None,
chat_engine_params: dict = None,
) -> BaseChatEngine:
documents = read_pdf_as_documents(
input_dir=input_dir,
input_files=input_files,
exclude_hidden=exclude_hidden,
filename_as_id=filename_as_id,
recursive=recursive,
required_exts=required_exts,
)
llm_params = {} if llm_params is None else llm_params
vector_store_params = (
{"vector_store_type": "LanceDBVectorStore"}
if vector_store_params is None
else vector_store_params
)
service_context_params = (
{} if service_context_params is None else service_context_params
)
chat_engine_params = {} if chat_engine_params is None else chat_engine_params
llm = LyzrLLMFactory.from_defaults(**llm_params)
|
def pdf_chat_(
input_dir: Optional[str] = None,
input_files: Optional[List] = None,
exclude_hidden: bool = True,
filename_as_id: bool = True,
recursive: bool = True,
required_exts: Optional[List[str]] = None,
system_prompt: str = None,
query_wrapper_prompt: str = None,
embed_model: Union[str, EmbedType] = "default",
llm_params: dict = None,
vector_store_params: dict = None,
service_context_params: dict = None,
chat_engine_params: dict = None,
) -> BaseChatEngine:
documents = read_pdf_as_documents(
input_dir=input_dir,
input_files=input_files,
exclude_hidden=exclude_hidden,
filename_as_id=filename_as_id,
recursive=recursive,
required_exts=required_exts,
)
llm_params = {} if llm_params is None else llm_params
vector_store_params = (
{"vector_store_type": "LanceDBVectorStore"}
if vector_store_params is None
else vector_store_params
)
service_context_params = (
{} if service_context_params is None else service_context_params
)
chat_engine_params = {} if chat_engine_params is None else chat_engine_params
llm = LyzrLLMFactory.from_defaults(**llm_params) | service_context = LyzrService.from_defaults( | 1 | 2023-11-07 14:52:08+00:00 | 4k |
allmonday/pydantic2-resolve | tests/resolver/test_24_parse_to_obj_for_dataclass.py | [
{
"identifier": "Resolver",
"path": "pydantic2_resolve/resolver.py",
"snippet": "class Resolver:\n \"\"\"\n Entrypoint of a resolve action\n \"\"\"\n def __init__(\n self, \n loader_filters: Optional[Dict[Any, Dict[str, Any]]] = None, \n loader_instances: Optional[Dict[Any, Any]] = None,\n ensure_type=False,\n context: Optional[Dict[str, Any]] = None\n ):\n self.loader_instance_cache = {}\n\n self.ancestor_vars = {}\n self.ancestor_vars_checker = defaultdict(set) # expose_field_name: set(kls fullpath) if len > 1, raise error\n\n # for dataloader which has class attributes, you can assign the value at here\n self.loader_filters = loader_filters or {}\n\n # now you can pass your loader instance, Resolver will check `isinstance``\n if loader_instances and self._validate_loader_instance(loader_instances):\n self.loader_instances = loader_instances\n else:\n self.loader_instances = None\n\n self.ensure_type = ensure_type\n self.context = MappingProxyType(context) if context else None\n self.scan_data = {}\n\n\n def _add_expose_fields(self, target):\n \"\"\"\n 1. check whether expose to descendant existed\n 2. add fields into contextvars (ancestor_vars_checker)\n 2.1 check overwrite by another class(which is forbidden)\n 2.2 check field exists\n \"\"\"\n dct: Optional[dict] = getattr(target, const.EXPOSE_TO_DESCENDANT, None)\n # 1\n if dct:\n if type(dct) is not dict:\n raise AttributeError(f'{const.EXPOSE_TO_DESCENDANT} is not dict')\n\n # 2\n for field, alias in dct.items(): # eg: name, bar_name\n # 2.1\n self.ancestor_vars_checker[alias].add(util.get_kls_full_path(target.__class__))\n if len(self.ancestor_vars_checker[alias]) > 1:\n conflict_modules = ', '.join(list(self.ancestor_vars_checker[alias]))\n raise AttributeError(f'alias name conflicts, please check: {conflict_modules}')\n\n if not self.ancestor_vars.get(alias):\n self.ancestor_vars[alias] = contextvars.ContextVar(alias)\n \n try:\n val = getattr(target, field)\n except AttributeError:\n raise AttributeError(f'{field} does not existed')\n\n self.ancestor_vars[alias].set(val)\n \n\n def _build_ancestor_context(self):\n \"\"\"get values from contextvars and put into a dict\"\"\"\n return { k: v.get() for k, v in self.ancestor_vars.items()}\n \n\n def _validate_loader_instance(self, loader_instances: Dict[Any, Any]):\n for cls, loader in loader_instances.items():\n if not issubclass(cls, DataLoader):\n raise AttributeError(f'{cls.__name__} must be subclass of DataLoader')\n if not isinstance(loader, cls):\n raise AttributeError(f'{loader.__name__} is not instance of {cls.__name__}')\n return True\n \n\n def _execute_resolver_method(self, method):\n \"\"\"\n 1. inspect method, atttach context if declared in method\n 2. if params includes LoaderDepend, create instance and cache it.\n 2.1 create from DataLoader class\n 2.1.1 apply loader_filters into dataloader instance\n 2.2 ceate from batch_load_fn\n 3. 
execute method\n \"\"\"\n\n # >>> 1\n signature = inspect.signature(method)\n params = {}\n\n if signature.parameters.get('context'):\n if self.context is None:\n raise AttributeError('Resolver.context is missing')\n params['context'] = self.context\n\n if signature.parameters.get('ancestor_context'):\n if self.ancestor_vars is None:\n raise AttributeError(f'there is not class has {const.EXPOSE_TO_DESCENDANT} configed')\n params['ancestor_context'] = self._build_ancestor_context()\n\n # manage the creation of loader instances\n for k, v in signature.parameters.items():\n # >>> 2\n if isinstance(v.default, Depends):\n # Base: DataLoader or batch_load_fn\n Loader = v.default.dependency\n\n # check loader_instance first, if already defined in Resolver param, just take it.\n if self.loader_instances and self.loader_instances.get(Loader):\n loader = self.loader_instances.get(Loader)\n params[k] = loader\n continue\n\n # module.kls to avoid same kls name from different module\n cache_key = util.get_kls_full_path(v.default.dependency)\n hit = self.loader_instance_cache.get(cache_key)\n if hit:\n loader = hit\n else:\n # >>> 2.1\n # create loader instance \n if isclass(Loader):\n # if extra transform provides\n loader = Loader()\n\n filter_config = self.loader_filters.get(Loader, {})\n\n for field in util.get_class_field_annotations(Loader):\n # >>> 2.1.1\n # class ExampleLoader(DataLoader):\n # filtar_x: bool <--------------- set this field\n try:\n value = filter_config[field]\n setattr(loader, field, value)\n except KeyError:\n raise LoaderFieldNotProvidedError(f'{cache_key}.{field} not found in Resolver()')\n\n # >>> 2.2\n # build loader from batch_load_fn, filters config is impossible\n else:\n loader = DataLoader(batch_load_fn=Loader) # type:ignore\n\n self.loader_instance_cache[cache_key] = loader\n params[k] = loader\n\n # 3\n return method(**params)\n\n\n def _execute_post_method(self, method):\n signature = inspect.signature(method)\n params = {}\n\n if signature.parameters.get('context'):\n if self.context is None:\n raise AttributeError('Post.context is missing')\n params['context'] = self.context\n\n if signature.parameters.get('ancestor_context'):\n if self.ancestor_vars is None:\n raise AttributeError(f'there is not class has {const.EXPOSE_TO_DESCENDANT} configed')\n params['ancestor_context'] = self._build_ancestor_context()\n return method(**params)\n\n\n async def _resolve_obj_field(self, target, field, attr):\n \"\"\"\n resolve each single object field\n\n 1. validate the target field of resolver method existed.\n 2. exec methods\n 3. parse to target type and then continue resolve it\n 4. set back value to field\n \"\"\"\n\n # >>> 1\n target_attr_name = str(field).replace(const.PREFIX, '')\n\n if not hasattr(target, target_attr_name):\n raise ResolverTargetAttrNotFound(f\"attribute {target_attr_name} not found\")\n\n if self.ensure_type:\n if not attr.__annotations__:\n raise MissingAnnotationError(f'{field}: return annotation is required')\n\n # >>> 2\n val = self._execute_resolver_method(attr)\n while iscoroutine(val) or asyncio.isfuture(val):\n val = await val\n\n # >>> 3\n if not getattr(attr, const.HAS_MAPPER_FUNCTION, False): # defined in util.mapper\n val = util.try_parse_data_to_target_field_type(target, target_attr_name, val)\n\n val = await self._resolve(val)\n\n # >>> 4\n setattr(target, target_attr_name, val)\n\n\n async def _resolve(self, target: T) -> T:\n \"\"\" \n resolve object (pydantic, dataclass) or list.\n\n 1. iterate over elements if list\n 2. 
resolve object\n 2.1 resolve each single resolver fn and object fields\n 2.2 execute post fn\n \"\"\"\n\n # >>> 1\n if isinstance(target, (list, tuple)):\n await asyncio.gather(*[self._resolve(t) for t in target])\n\n # >>> 2\n if core.is_acceptable_instance(target):\n self._add_expose_fields(target)\n tasks = []\n # >>> 2.1\n resolve_list, attribute_list = core.iter_over_object_resolvers_and_acceptable_fields(target, self.scan_data)\n for field, attr in resolve_list:\n tasks.append(self._resolve_obj_field(target, field, attr))\n for field, attr in attribute_list:\n tasks.append(self._resolve(attr))\n\n await asyncio.gather(*tasks)\n\n # >>> 2.2\n # execute post methods, if context declared, self.context will be injected into it. \n for post_key in core.iter_over_object_post_methods(target, self.scan_data):\n post_attr_name = post_key.replace(const.POST_PREFIX, '')\n if not hasattr(target, post_attr_name):\n raise ResolverTargetAttrNotFound(f\"fail to run {post_key}(), attribute {post_attr_name} not found\")\n\n post_method = getattr(target, post_key)\n calc_result = self._execute_post_method(post_method)\n setattr(target, post_attr_name, calc_result)\n \n # finally, if post_default_handler is declared, run it.\n default_post_method = getattr(target, const.POST_DEFAULT_HANDLER, None)\n if default_post_method:\n self._execute_post_method(default_post_method)\n\n return target\n\n\n async def resolve(self, target: T) -> T:\n if isinstance(target, list) and target == []:\n return target\n\n self.scan_data = core.scan_and_store_required_fields(target)\n\n await self._resolve(target)\n return target "
},
{
"identifier": "LoaderDepend",
"path": "pydantic2_resolve/resolver.py",
"snippet": "def LoaderDepend( # noqa: N802\n dependency: Optional[Callable[..., Any]] = None,\n) -> Any:\n return Depends(dependency=dependency)"
}
] | from typing import List
from dataclasses import dataclass, field
from pydantic2_resolve import Resolver, LoaderDepend
import pytest | 2,573 |
@pytest.mark.asyncio
async def test_loader_depends_1():
BOOKS = {
1: [{'name': 'book1'}, {'name': 'book2'}],
2: [{'name': 'book3'}, {'name': 'book4'}],
3: [{'name': 'book1'}, {'name': 'book2'}],
}
@dataclass
class Book():
name: str
async def batch_load_fn(keys):
books = [[dict(name=bb['name']) for bb in BOOKS.get(k, [])] for k in keys]
return books
@dataclass
class Student():
id: int
name: str
books: List[Book] = field(default_factory=list)
|
@pytest.mark.asyncio
async def test_loader_depends_1():
BOOKS = {
1: [{'name': 'book1'}, {'name': 'book2'}],
2: [{'name': 'book3'}, {'name': 'book4'}],
3: [{'name': 'book1'}, {'name': 'book2'}],
}
@dataclass
class Book():
name: str
async def batch_load_fn(keys):
books = [[dict(name=bb['name']) for bb in BOOKS.get(k, [])] for k in keys]
return books
@dataclass
class Student():
id: int
name: str
books: List[Book] = field(default_factory=list) | def resolve_books(self, loader=LoaderDepend(batch_load_fn)): | 1 | 2023-11-01 02:37:26+00:00 | 4k |
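
The cropped test above stops right before the resolver method; the record's next_line column holds its actual first line. A rough, assumed completion showing how Resolver and LoaderDepend are driven (loader.load() assumes the usual DataLoader API; this is a sketch, not the record's gold continuation):

import asyncio
from dataclasses import dataclass, field
from typing import List
from pydantic2_resolve import Resolver, LoaderDepend

BOOKS = {1: [{'name': 'book1'}], 2: [{'name': 'book3'}]}

async def batch_load_fn(keys):
    # one batched call for all requested student ids
    return [[dict(name=b['name']) for b in BOOKS.get(k, [])] for k in keys]

@dataclass
class Book:
    name: str

@dataclass
class Student:
    id: int
    name: str
    books: List[Book] = field(default_factory=list)

    def resolve_books(self, loader=LoaderDepend(batch_load_fn)):
        return loader.load(self.id)  # assumed DataLoader.load usage

async def main():
    students = await Resolver().resolve([Student(id=1, name='jack'), Student(id=2, name='mike')])
    print(students[0].books)  # dict rows returned by the loader are parsed back into Book dataclasses

asyncio.run(main())
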
WolfgangFahl/dcm | tests/test_rwth_aachen_module.py | [
{
"identifier": "CompetenceArea",
"path": "dcm/dcm_core.py",
"snippet": "class CompetenceArea(CompetenceElement):\n \"\"\"\n Represents a specific area within a competence aspect, containing various facets.\n\n Attributes:\n facets (List[CompetenceFacet]): A list of CompetenceFacet objects representing individual facets of this area.\n \"\"\"\n facets: List[CompetenceFacet] = field(default_factory=list)"
},
{
"identifier": "CompetenceAspect",
"path": "dcm/dcm_core.py",
"snippet": "class CompetenceAspect(CompetenceElement):\n \"\"\"\n Represents a broader category of competence, which includes various areas.\n\n Attributes:\n areas (List[CompetenceArea]): A list of CompetenceArea objects representing individual areas of this aspect.\n \"\"\"\n\n areas: List[CompetenceArea] = field(default_factory=list)\n credits: Optional[int] = None"
},
{
"identifier": "CompetenceElement",
"path": "dcm/dcm_core.py",
"snippet": "class CompetenceElement:\n \"\"\"\n A base class representing a generic competence element with common properties.\n\n Attributes:\n name (str): The name of the competence element.\n id (Optional[str]): An optional identifier for the competence element will be set to the name if id is None.\n url (Optional[str]): An optional URL for more information about the competence element.\n description (Optional[str]): An optional description of the competence element.\n color_code (str): A string representing a color code associated with the competence element.\n \"\"\"\n\n name: str\n id: Optional[str] = None\n url: Optional[str] = None\n description: Optional[str] = None\n color_code: Optional[str] = None\n\n def __post_init__(self):\n # Set the id to the the slug of the name if id is None\n if self.id is None:\n self.id = slugify(self.name)\n\n def as_html(self) -> str:\n \"\"\"\n convert me to html\n\n Returns:\n str: html markup\n \"\"\"\n html = f\"<h2>{self.name}</h2>\"\n if self.description:\n desc_html = markdown2.markdown(\n self.description, extras=[\"fenced-code-blocks\", \"tables\", \"spoiler\"]\n )\n html = html + \"\\n\" + desc_html\n return html\n\n def to_svg_node_config(self, url: str = None, **kwargs) -> SVGNodeConfig:\n \"\"\"\n convert me to an SVGNode Configuration\n\n Args:\n url(str): the url to use for clicking this svg node - if None use\n my configured url\n \"\"\"\n if url is None:\n url = self.url\n element_type = f\"{self.__class__.__name__}\"\n comment = f\"{element_type}:{self.description}\"\n svg_node_config = SVGNodeConfig(\n element_type=f\"{element_type}\",\n id=f\"{self.id}\",\n url=url,\n fill=self.color_code,\n title=self.name,\n comment=comment,\n **kwargs,\n )\n return svg_node_config"
},
{
"identifier": "CompetenceFacet",
"path": "dcm/dcm_core.py",
"snippet": "class CompetenceFacet(CompetenceElement):\n \"\"\"\n Represents a specific facet of a competence aspect, inheriting from CompetenceElement.\n\n This class can include additional properties or methods specific to a competence facet.\n \"\"\""
},
{
"identifier": "CompetenceLevel",
"path": "dcm/dcm_core.py",
"snippet": "class CompetenceLevel(CompetenceElement):\n \"\"\"\n Defines a specific level of competence within the framework.\n\n Attributes:\n level (int): level number starting from 1 as the lowest and going up to as many level as defined for the CompetenceTree\n icon(str): the name of a google mdi icon to be shown for this level\n utf8_icon(str): utf-8 char string to be used as icon\n \"\"\"\n\n level: int = 1\n icon: Optional[str] = None\n utf8_icon: Optional[str] = None"
},
{
"identifier": "CompetenceTree",
"path": "dcm/dcm_core.py",
"snippet": "class CompetenceTree(CompetenceElement, YamlAble[\"CompetenceTree\"]):\n \"\"\"\n Represents the entire structure of competencies, including various aspects and levels.\n\n Attributes:\n competence_aspects (List[CompetenceAspect]): A list of CompetenceAspect objects.\n competence_levels (List[CompetenceLevel]): A list of CompetenceLevel objects representing the different levels in the competence hierarchy.\n element_names (Dict[str, str]): A dictionary holding the names for tree, aspects, facets, and levels. The key is the type (\"tree\", \"aspect\", \"facet\", \"level\").\n \"\"\"\n\n lookup_url: Optional[str] = None\n aspects: List[CompetenceAspect] = field(default_factory=list)\n levels: List[CompetenceLevel] = field(default_factory=list)\n element_names: Dict[str, str] = field(default_factory=dict)\n\n def __post_init__(self):\n \"\"\"\n initalize the path variables of my hierarchy\n \"\"\"\n super().__post_init__()\n self.path = self.id\n # Loop through each competence aspect and set their paths and parent references\n for aspect in self.aspects:\n aspect.competence_tree = self\n aspect.path = f\"{self.id}/{aspect.id}\"\n for area in aspect.areas:\n area.competence_tree = self\n area.aspect = aspect\n area.path = f\"{self.id}/{aspect.id}/{area.id}\"\n for facet in area.facets:\n facet.competence_tree = self\n facet.area = area\n facet.path = f\"{self.id}/{aspect.id}/{area.id}/{facet.id}\"\n\n @classmethod\n def required_keys(cls) -> Tuple:\n keys = {\"name\", \"id\", \"url\", \"description\", \"element_names\"}\n return keys\n\n def lookup_by_path(\n self, path: str, lenient: bool = True\n ) -> Optional[CompetenceElement]:\n \"\"\"\n Look up and return a competence element (tree,aspect of facet)\n based on the given path.\n\n The path is expected to be in the format \"tree_id/aspect_id/facet_id\".\n This method parses the path and retrieves the corresponding competence aspect or facet.\n\n Args:\n path (str): The path in the format \"tree_id/aspect_id/facet_id\".\n\n lenient(bool): if not lenient raise Exceptions for invalid paths and ids\n Returns:\n Optional[CompetenceElement]: The competence aspect or facet corresponding to the given path.\n \"\"\"\n\n def handle_error(msg):\n if not lenient:\n raise ValueError(msg)\n\n parts = path.split(\"/\")\n if len(parts) < 1:\n return None\n\n tree_id = parts[0]\n if tree_id != self.id:\n handle_error(f\"invalid tree_id for lookup {tree_id}\")\n return None\n if len(parts) == 1:\n return self\n if len(parts) > 1:\n aspect_id = parts[1]\n # Retrieve the aspect\n aspect = next((aspect for aspect in self.aspects if aspect.id==aspect_id), None)\n if aspect:\n if len(parts) == 2:\n return aspect\n if len(parts) > 2:\n area_id = parts[2]\n area = next((area for area in aspect.areas if area.id == area_id), None)\n if area:\n if len(parts) == 3:\n return area\n if len(parts) > 3:\n facet_id = parts[3]\n facet = next(\n (facet for facet in area.facets if facet.id == facet_id), None\n )\n if facet:\n return facet\n handle_error(f\"invalid path for lookup {path}\")\n return None\n\n def to_pretty_json(self):\n \"\"\"\n Converts the CompetenceTree object to a pretty JSON string, handling null values.\n \"\"\"\n json_str = self.to_json()\n json_dict = json.loads(json_str)\n\n def remove_none_values(data):\n \"\"\"\n Recursively removes keys with None values from a dictionary, list, or nested structure.\n \"\"\"\n if isinstance(data, dict):\n return {\n k: remove_none_values(v) for k, v in data.items() if v is not None\n }\n elif 
isinstance(data, list):\n return [remove_none_values(item) for item in data]\n return data\n\n none_free_dict = remove_none_values(json_dict)\n null_free_json_str = json.dumps(none_free_dict, indent=2)\n return null_free_json_str\n\n def add_legend(self, svg: SVG) -> None:\n \"\"\"\n Add a legend to the SVG explaining the color codes for levels and aspects.\n Args:\n svg (SVG): The SVG object to which the legend will be added.\n \"\"\"\n # Starting x position for the legends, starting 10 pixels from the left edge\n x_start = 10\n # y position for the legends, starting 20 pixels from the bottom edge\n y = svg.config.total_height - svg.config.legend_height + 20\n # Width and height of each legend color box\n box_width, box_height = 30, 20\n # Padding between legend items and between the color box and the text\n padding = 5\n\n # Add the competence level legend\n level_items = [(level.color_code, level.name) for level in self.levels]\n svg.add_legend_column(\n level_items,\n self.element_names.get(\"level\", \"Level\"),\n x_start,\n y,\n box_width,\n box_height,\n )\n\n # Calculate the x position for the aspect legend based on the width of the level legend\n x_aspect_start = (\n x_start\n + box_width\n + padding\n + max(svg.get_text_width(level.name) for level in self.levels)\n + padding\n )\n\n # Add the competence aspect legend\n aspect_items = [(aspect.color_code, aspect.name) for aspect in self.aspects]\n svg.add_legend_column(\n aspect_items,\n self.element_names.get(\"aspect\", \"Aspect\"),\n x_aspect_start,\n y,\n box_width,\n box_height,\n )"
}
] | import json
import os
from ngwidgets.basetest import Basetest
from dcm.dcm_core import (
CompetenceArea,
CompetenceAspect,
CompetenceElement,
CompetenceFacet,
CompetenceLevel,
CompetenceTree,
) | 2,946 | """
Created on 2023-11-11
@author: wf
"""
class TestModule(Basetest):
"""
test RWTH Aachen Modulhandbuch
"""
def get_name(self, json_node: dict, lang: str = "en") -> str:
"""
Retrieves the name of a specified JSON node in the specified language.
Args:
json_node (dict): The JSON node from which the name is to be extracted.
lang (str, optional): The language in which the name should be retrieved. Defaults to "en" (English).
Returns:
str: The name of the JSON node in the specified language. The result might be german (de) if
there is only a single name specified in the Modulhandbuch XML input which is germany by default
"""
names = json_node.get("NAME", [])
name = "?"
if isinstance(names, list):
for lang_name in names:
if isinstance(lang_name, dict):
node_lang = lang_name.get("@LANG", None)
if node_lang and node_lang == lang:
name = lang_name.get("#text", "?")
else:
# what's up here?
# might be german now ..
name = names["#text"]
pass
return name
def create_competence_element(
self, parent: CompetenceElement, json_node: dict, url: str
):
"""
convert the given json node to a competence element based on the level
1: CompetenceTree
2: CompetenceAspect
3: CompetenceFacet
Args:
parent(CompetenceElement): the parent element - None for the tree
json_node(dict): the current node to convert
url(str): the base_url for the node
"""
competence_element = None
lvl = json_node.get("@LVL", "?")
credits_str = json_node.get("@CREDITS", None)
credits = int(credits_str) if credits_str else None
level = int(lvl)
nr = json_node.get("@NR")
desc = None
name = self.get_name(json_node)
if lvl == "1":
| """
Created on 2023-11-11
@author: wf
"""
class TestModule(Basetest):
"""
test RWTH Aachen Modulhandbuch
"""
def get_name(self, json_node: dict, lang: str = "en") -> str:
"""
Retrieves the name of a specified JSON node in the specified language.
Args:
json_node (dict): The JSON node from which the name is to be extracted.
lang (str, optional): The language in which the name should be retrieved. Defaults to "en" (English).
Returns:
str: The name of the JSON node in the specified language. The result might be german (de) if
there is only a single name specified in the Modulhandbuch XML input which is germany by default
"""
names = json_node.get("NAME", [])
name = "?"
if isinstance(names, list):
for lang_name in names:
if isinstance(lang_name, dict):
node_lang = lang_name.get("@LANG", None)
if node_lang and node_lang == lang:
name = lang_name.get("#text", "?")
else:
# what's up here?
# might be german now ..
name = names["#text"]
pass
return name
def create_competence_element(
self, parent: CompetenceElement, json_node: dict, url: str
):
"""
convert the given json node to a competence element based on the level
1: CompetenceTree
2: CompetenceAspect
3: CompetenceFacet
Args:
parent(CompetenceElement): the parent element - None for the tree
json_node(dict): the current node to convert
url(str): the base_url for the node
"""
competence_element = None
lvl = json_node.get("@LVL", "?")
credits_str = json_node.get("@CREDITS", None)
credits = int(credits_str) if credits_str else None
level = int(lvl)
nr = json_node.get("@NR")
desc = None
name = self.get_name(json_node)
if lvl == "1": | tree = CompetenceTree( | 5 | 2023-11-06 09:24:24+00:00 | 4k |
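
The create_competence_element walk in the record above keys off the '@LVL' attribute of each Modulhandbuch JSON node (1: tree, 2: aspect, 3: facet, per its docstring). An assumed example of the node shape it expects, with field names taken from the code and values invented for illustration:

# Assumed shape of one Modulhandbuch JSON node consumed by get_name / create_competence_element:
module_node = {
    "@LVL": "2",             # 1 = CompetenceTree, 2 = CompetenceAspect, 3 = CompetenceFacet
    "@NR": "11.00123",       # invented module number
    "@CREDITS": "6",
    "NAME": [
        {"@LANG": "de", "#text": "Datenbanken"},
        {"@LANG": "en", "#text": "Databases"},
    ],
}
# get_name(module_node, "en") would return "Databases"; with a single non-list NAME entry
# it falls back to that entry's "#text", which is typically the German title.
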
StoneMoe/ASub | app/ui/views/project_view.py | [
{
"identifier": "Project",
"path": "app/core/models/project.py",
"snippet": "class Project:\r\n path: str # 工程目录(相对位置)\r\n name: str # 工程名称\r\n\r\n def __init__(self, name: str, existed_err=False):\r\n self.name = name\r\n self.path = os.path.join(Core.PROJ_DIR, name)\r\n try:\r\n os.makedirs(self.path)\r\n info(f'已创建目录 {self.path}')\r\n except OSError as e: # directory existed\r\n if existed_err:\r\n raise e\r\n\r\n def _prepare(self):\r\n info(f'正在预处理 \"{self.name}\" 的音频')\r\n tmp_path = os.path.join(self.path, 'source.wav')\r\n tmp_file = test_files(tmp_path)\r\n src_file = test_files(\r\n os.path.join(self.path, 'source.mp4'),\r\n os.path.join(self.path, f'{self.name}.mp4'),\r\n os.path.join(self.path, f'{self.name}.mp3')\r\n )\r\n if tmp_file:\r\n info(f'找到了临时文件 \"{tmp_file}\",跳过预处理')\r\n elif src_file:\r\n info(f'找到了 \"{src_file}\",开始预处理')\r\n if check_ffmpeg() != FFMpegStatus.READY:\r\n raise EnvironmentError('FFMpeg尚未安装')\r\n proc: Popen[bytes] = ffmpeg.input(src_file) \\\r\n .output(tmp_path, format='wav', acodec='pcm_s16le', ac=1, ar=16000) \\\r\n .overwrite_output() \\\r\n .run_async(pipe_stdout=True, pipe_stderr=True)\r\n out, err = proc.communicate()\r\n return_code = proc.wait()\r\n if return_code != 0:\r\n raise ChildProcessError('无法提取音频')\r\n info('预处理成功')\r\n else:\r\n raise FileNotFoundError(f'请将同名 mp4 文件放置在 {self.path}')\r\n\r\n def delete(self):\r\n \"\"\"Delete project folder\"\"\"\r\n shutil.rmtree(self.path)\r\n\r\n def transcribe(self, opt: TranscribeOpt):\r\n \"\"\"\r\n transcribe wav audio to SRT\r\n\r\n :return: transcribe result file path\r\n \"\"\"\r\n self._prepare()\r\n\r\n target_file = opt.make_srt_filepath(name=self.name, path=self.path)\r\n if os.path.isfile(target_file):\r\n info(f'文件 \"{target_file}\" 已存在,跳过听写')\r\n return target_file\r\n\r\n info(f'使用 {opt}')\r\n match opt.backend:\r\n # case Engine.CPP_CPU:\r\n # ext = ''\r\n # if opt.compress_ratio_threshold:\r\n # ext += f' -et {opt.compress_ratio_threshold} '\r\n # if opt.prompt_name:\r\n # ext += f' --prompt \"{DB.PROMPTS[opt.prompt_name]}\" '\r\n # if opt.speedup:\r\n # ext += f' -su '\r\n # if opt.ss and opt.t:\r\n # ss = opt.ss * 1000\r\n # t = opt.t * 1000\r\n # if opt.speedup:\r\n # ss /= 2\r\n # t /= 2\r\n # ext += f' -ot {ss} -d {t} '\r\n # cmd = f\".\\\\whisper\\\\main.exe -m data/whisper_model/ggml-large-v2.bin \" \\\r\n # f\"-pp -osrt -l {opt.lang} -t 8 {ext} -f {self.path}/source.wav -of {target_file.rstrip('.srt')}\"\r\n # print(f'运行: {cmd}')\r\n # proc = subprocess.Popen(cmd, shell=True, cwd=os.getcwd(), stdout=subprocess.PIPE)\r\n # for line in proc.stdout:\r\n # print(line.decode(Core.CODEC).rstrip())\r\n case 'py-gpu' | 'py-cpu':\r\n info('正在加载模型')\r\n import whisper\r\n import torch\r\n model = whisper.load_model(opt.model, download_root='whisper_model', device='cpu')\r\n if opt.quantize:\r\n info('正在量化模型')\r\n model = torch.quantization.quantize_dynamic(\r\n model, {torch.nn.Linear}, dtype=torch.qint8\r\n )\r\n if opt.backend == 'py-gpu':\r\n info('正在加载至显卡')\r\n model.to('cuda')\r\n result = model.transcribe(\r\n audio=f'{self.path}/source.wav',\r\n language=opt.lang,\r\n compression_ratio_threshold=opt.compress_ratio_threshold,\r\n initial_prompt=Consts.PROMPTS[opt.prompt_name],\r\n verbose=True,\r\n )\r\n\r\n del model\r\n torch.cuda.empty_cache()\r\n\r\n segments = result['segments']\r\n srt = SRTFile(source=segments)\r\n srt.dump(target_file)\r\n case _:\r\n raise NotImplementedError(f'{opt.backend} 引擎尚未支持')\r\n\r\n info('听写完成')\r\n\r\n def translate(self, opt: TranscribeOpt, vocab=None):\r\n srt = 
SRTFile(source=opt.make_srt_filepath(self.name, self.path))\r\n srt.translate(vocab=vocab)\r\n\r\n @classmethod\r\n def list(cls) -> List[str]:\r\n \"\"\"list all projects\"\"\"\r\n names = os.listdir(Core.PROJ_DIR)\r\n directories = [name for name in names if os.path.isdir(os.path.join(Core.PROJ_DIR, name))]\r\n directories = sort_titles(directories)\r\n return directories\r\n\r\n @classmethod\r\n def bulk_create(cls, targets: List[tuple]):\r\n info(f'正在创建 {len(targets)} 个工程')\r\n for proj_name, filepath in targets:\r\n try:\r\n proj = Project(proj_name, existed_err=True)\r\n except OSError:\r\n info(f'\"{proj_name}\" 已存在,不再创建')\r\n continue\r\n\r\n if filepath:\r\n dst_filepath = os.path.join(proj.path, os.path.basename(filepath))\r\n info(f'正在将 {filepath} 复制到 {dst_filepath}')\r\n shutil.copy(filepath, dst_filepath)\r\n info('复制完毕')\r"
},
{
"identifier": "TranscribeOpt",
"path": "app/core/models/project.py",
"snippet": "class TranscribeOpt:\r\n \"\"\"\r\n :param backend: whisper implementation\r\n :param model: whisper model name\r\n :param quantize: whisper model quantization switch\r\n :param ss: transcribe start second\r\n :param t: transcribe time duration(second)\r\n :param compress_ratio_threshold: 2.4 ~ 3 is recommended, segments higher than this will be re-inferenced\r\n :param speedup: double speed, decrease quality\r\n :param prompt_name: name\r\n \"\"\"\r\n backend: str\r\n model: str\r\n quantize: bool\r\n lang: Optional[str]\r\n ss: int # TODO: implement in whisper.py mode\r\n t: int # TODO: implement in whisper.py mode\r\n compress_ratio_threshold: float\r\n speedup: bool # TODO: implement in whisper.py mode\r\n prompt_name: str\r\n\r\n def make_srt_filepath(self, name: str, path: str) -> str:\r\n return f'{path}/' \\\r\n f'{name}' \\\r\n f'[{self.backend}]' \\\r\n f'[{self.model}]' \\\r\n f'[q{int(self.quantize)}]' \\\r\n f'[L{self.lang or \"auto\"}]' \\\r\n f'[t{\"FULL\" if not (self.ss and self.t) else f\"{self.ss}-{self.ss + self.t}\"}]' \\\r\n f'[e{self.compress_ratio_threshold}]' \\\r\n f'[s{int(self.speedup)}]' \\\r\n f'[p{self.prompt_name or \"-\"}]' \\\r\n f'.srt'\r"
},
{
"identifier": "info",
"path": "app/core/utils/generic.py",
"snippet": "def info(text):\r\n print(f\"ℹ️{text}\")\r"
},
{
"identifier": "AutoLabel",
"path": "app/ui/components/label.py",
"snippet": "class AutoLabel(QLabel):\r\n def __init__(self, text, parent=None, elide_mode=None):\r\n super().__init__(text, parent)\r\n self._raw_text = text\r\n self._elide_mode = elide_mode if elide_mode is not None else Qt.ElideMiddle\r\n self._eliding = False\r\n\r\n def _get_elided_text(self):\r\n return self.fontMetrics().elidedText(self._raw_text, self._elide_mode, self.width())\r\n\r\n def resizeEvent(self, event: QtGui.QResizeEvent):\r\n super().resizeEvent(event)\r\n if self._eliding:\r\n return\r\n\r\n self._eliding = True\r\n super().setText(self._get_elided_text())\r\n self._eliding = False\r\n\r\n def setText(self, text):\r\n self._raw_text = text\r\n super().setText(self._get_elided_text())\r"
},
{
"identifier": "cfg",
"path": "app/ui/config.py",
"snippet": "class Engine(Enum):\r\nclass TranscribeModel(Enum):\r\nclass UILang(Enum):\r\nclass TranscribeLang(Enum):\r\nclass Config(QConfig):\r\n PY_CPU = \"py-cpu\"\r\n PY_GPU = \"py-gpu\"\r\n CPP_CPU = \"cpp-cpu\"\r\n LARGE_V2 = \"large-v2\"\r\n MEDIUM = \"medium\"\r\n SMALL = \"small\"\r\n BASE = \"base\"\r\n TINY = \"tiny\"\r\n CHINESE_SIMPLIFIED = \"chs\"\r\n CHINESE_TRADITIONAL = \"cht\"\r\n ENGLISH = \"en\"\r\n AUTO = \"auto\"\r\n AUTO = None\r\n def options(cls):\r"
},
{
"identifier": "CONTAINER_MARGINS",
"path": "app/ui/const.py",
"snippet": "CONTAINER_MARGINS = (32, 64, 32, 32)\r"
},
{
"identifier": "run_in_thread",
"path": "app/ui/utils.py",
"snippet": "def run_in_thread(func):\r\n @functools.wraps(func)\r\n def wrapper(*args, **kwargs):\r\n if args and kwargs:\r\n t = threading.Thread(target=func, args=args, kwargs=kwargs)\r\n elif args:\r\n t = threading.Thread(target=func, args=args)\r\n else:\r\n t = threading.Thread(target=func)\r\n t.daemon = True\r\n t.start()\r\n return t\r\n\r\n return wrapper\r"
},
{
"identifier": "clear_layout",
"path": "app/ui/utils.py",
"snippet": "def clear_layout(layout):\r\n while layout.count():\r\n child = layout.takeAt(0)\r\n if child.widget():\r\n child.widget().deleteLater()\r\n elif child.layout():\r\n clear_layout(child.layout())\r"
},
{
"identifier": "open_folder",
"path": "app/ui/utils.py",
"snippet": "def open_folder(folder_path):\r\n \"\"\"Open specific folder in file explorer application\"\"\"\r\n if os.name == 'nt': # Windows\r\n os.startfile(folder_path)\r\n elif os.name == 'posix': # Linux, macOS, etc.\r\n subprocess.Popen(['xdg-open', folder_path])\r\n else:\r\n raise OSError(f'Unsupported platform: {os.name}')\r"
},
{
"identifier": "SubtitleWindow",
"path": "app/ui/windows/subtitle_window.py",
"snippet": "class SubtitleWindow(QDialog, FramelessWindow):\n def __init__(self, filepath: str, parent=None):\n super().__init__(parent)\n self.srt_file = SRTFile(filepath)\n self.hBoxLayout = QVBoxLayout(self)\n self.tableView = TableWidget(self)\n self.saveButton = QPushButton(\"Save\", self)\n self.saveButton.clicked.connect(self._save_subtitle_file)\n\n self.hBoxLayout.setContentsMargins(*CONTAINER_MARGINS)\n self.hBoxLayout.addWidget(self.tableView)\n self.hBoxLayout.addWidget(self.saveButton)\n\n self.init_window()\n self._load_subtitle_file()\n\n def _load_subtitle_file(self):\n self.tableView.setWordWrap(False)\n self.tableView.setRowCount(len(self.srt_file.entries))\n self.tableView.setColumnCount(3)\n for i, entry in enumerate(self.srt_file.entries):\n self.tableView.setItem(i, 0, QTableWidgetItem(entry.index))\n self.tableView.setItem(i, 1, QTableWidgetItem(entry.time))\n self.tableView.setItem(i, 2, QTableWidgetItem(entry.text))\n\n self.tableView.verticalHeader().hide()\n self.tableView.setHorizontalHeaderLabels(['Index', 'Time', 'Text'])\n self.tableView.resizeColumnsToContents()\n\n def _save_subtitle_file(self):\n for i in range(self.tableView.rowCount()):\n self.srt_file.entries[i].index = self.tableView.item(i, 0).text()\n self.srt_file.entries[i].time = self.tableView.item(i, 1).text()\n self.srt_file.entries[i].text = self.tableView.item(i, 2).text()\n\n self.srt_file.dump()\n\n def init_window(self):\n self.setWindowTitle(f'编辑 {self.srt_file.filepath}')\n self.resize(625, 700)\n self._set_qss()\n\n def _set_qss(self):\n color = 'dark' if isDarkTheme() else 'light'\n with open(res_dir(f'app/ui/resource/qss/{color}/style.qss'), encoding='utf-8') as f:\n self.setStyleSheet(f.read())"
}
] | import os
from typing import Optional
from PyQt5.QtCore import pyqtSignal, QPoint, Qt
from PyQt5.QtWidgets import QFrame, QVBoxLayout, QHBoxLayout, QAction
from qfluentwidgets import PushButton, FluentIcon, RoundMenu, ToolButton, MessageBox, StateToolTip
from app.core.models.project import Project, TranscribeOpt
from app.core.utils.generic import info
from app.ui.components.label import AutoLabel
from app.ui.config import cfg
from app.ui.const import CONTAINER_MARGINS
from app.ui.utils import run_in_thread, clear_layout, open_folder
from app.ui.windows.subtitle_window import SubtitleWindow
| 3,257 |
class ProjectView(QFrame):
sig_subtitle_list_loaded = pyqtSignal(list)
sig_transcribe_running = pyqtSignal(bool)
def __init__(self, parent=None):
super().__init__(parent=parent)
self.setObjectName('proj-view')
self.project: Optional[Project] = None
self.state_tooltip = None
self.layout = QVBoxLayout(self)
self.layout_title = QHBoxLayout(self)
self.layout_subtitles = QVBoxLayout(self)
|
class ProjectView(QFrame):
sig_subtitle_list_loaded = pyqtSignal(list)
sig_transcribe_running = pyqtSignal(bool)
def __init__(self, parent=None):
super().__init__(parent=parent)
self.setObjectName('proj-view')
self.project: Optional[Project] = None
self.state_tooltip = None
self.layout = QVBoxLayout(self)
self.layout_title = QHBoxLayout(self)
self.layout_subtitles = QVBoxLayout(self)
| self.label_title = AutoLabel('<Loading>', self, Qt.ElideMiddle)
| 3 | 2023-11-07 16:45:43+00:00 | 4k |
openshift/lightspeed-service | ols/src/llms/llm_loader.py | [
{
"identifier": "constants",
"path": "ols/src/constants.py",
"snippet": "SUMMARIZATION_TEMPLATE = \"\"\"\nThe following context contains several pieces of documentation. Please summarize the context for the user.\nDocumentation context:\n{context_str}\n\nSummary:\n\n\"\"\"\nSUMMARY_TASK_BREAKDOWN_TEMPLATE = (\n \"\"\"\nThe following documentation contains a task list. Your job is to extract the list of tasks. \"\"\"\n \"\"\"If the user-supplied query seems unrelated to the list of tasks, please reply that you do not know what to do with the query and the summary documentation. \"\"\"\n \"\"\"Use only the supplied content and extract the task list.\n\nSummary document:\n{context_str}\n\nUser query:\n{query_str}\n\nWhat are the tasks?\n\"\"\"\n)\nTASK_PERFORMER_PROMPT_TEMPLATE = \"\"\"\nInstructions:\n- You are a helpful assistant.\n- You are an expert in Kubernetes and OpenShift.\n- Respond to questions about topics other than Kubernetes and OpenShift with: \"I can only answer questions about Kubernetes and OpenShift\"\n- Refuse to participate in anything that could harm a human.\n- Your job is to look at the following description and provide a response.\n- Base your answer on the provided task and query and not on prior knowledge.\n\nTASK:\n{task}\nQUERY:\n{query}\n\nQuestion:\nDoes the above query contain enough background information to complete the task? Provide a yes or no answer with explanation.\n\nResponse:\n\"\"\"\nTASK_REPHRASER_PROMPT_TEMPLATE = \"\"\"\nInstructions:\n- You are a helpful assistant.\n- Your job is to combine the information from the task and query into a single, new task.\n- Base your answer on the provided task and query and not on prior knowledge.\n\nTASK:\n{task}\nQUERY:\n{query}\n\nPlease combine the information from the task and query into a single, new task.\n\nResponse:\n\"\"\"\nYES_OR_NO_CLASSIFIER_PROMPT_TEMPLATE = \"\"\"\nInstructions:\n- determine if a statement is a yes or a no\n- return a 1 if the statement is a yes statement\n- return a 0 if the statement is a no statement\n- return a 9 if you cannot determine if the statement is a yes or no\n\nExamples:\nStatement: Yes, that sounds good.\nResponse: 1\n\nStatement: No, I don't think that is wise.\nResponse: 0\n\nStatement: Apples are red.\nResponse: 9\n\nStatement: {statement}\nResponse:\n\"\"\"\nQUESTION_VALIDATOR_PROMPT_TEMPLATE = \"\"\"\nInstructions:\n- You are a question classifying tool\n- You are an expert in kubernetes and openshift\n- Your job is to determine if a question is about kubernetes or openshift and to provide a one-word response\n- If a question is not about kubernetes or openshift, answer with only the word INVALID\n- If a question is about kubernetes or openshift, answer with the word VALID\n- If a question is not about creating kubernetes or openshift yaml, answer with the word NOYAML\n- If a question is about creating kubernetes or openshift yaml, add the word YAML\n- Use a comma to separate the words\n- Do not provide explanation, only respond with the chosen words\n\nExample Question:\nCan you make me lunch with ham and cheese?\nExample Response:\nINVALID,NOYAML\n\nExample Question:\nWhy is the sky blue?\nExample Response:\nINVALID,NOYAML\n\nExample Question:\nCan you help configure my cluster to automatically scale?\nExample Response:\nVALID,NOYAML\n\nExample Question:\nplease give me a vertical pod autoscaler configuration to manage my frontend deployment automatically. 
Don't update the workload if there are less than 2 pods running.\nExample Response:\nVALID,YAML\n\nQuestion:\n{query}\nResponse:\n\"\"\"\nYAML_GENERATOR_PROMPT_TEMPLATE = \"\"\"\nInstructions:\n- Produce only a yaml response to the user request\n- Do not augment the response with markdown or other formatting beyond standard yaml formatting\n- Only provide a single yaml object containg a single resource type in your response, do not provide multiple yaml objects\n\nUser Request: {query}\n\"\"\"\nYAML_GENERATOR_WITH_HISTORY_PROMPT_TEMPLATE = \"\"\"\nInstructions:\n- Produce only a yaml response to the user request\n- Do not augment the response with markdown or other formatting beyond standard yaml formatting\n- Only provide a single yaml object containg a single resource type in your response, do not provide multiple yaml objects\n\nHere is the history of the conversation so far, you may find this relevant to the user request below:\n\n{history}\n\nUser Request: {query}\n\"\"\"\nPROVIDER_BAM = \"bam\"\nPROVIDER_OPENAI = \"openai\"\nPROVIDER_WATSONX = \"watsonx\"\nPROVIDER_TGI = \"tgi\"\nPROVIDER_OLLAMA = \"ollama\"\nTEI_EMBEDDING_MODEL = \"BAAI/bge-base-en-v1.5\"\nGRANITE_13B_CHAT_V1 = \"ibm/granite-13b-chat-v1\"\nGRANITE_13B_CHAT_V2 = \"ibm/granite-13b-chat-v2\"\nGRANITE_20B_CODE_INSTRUCT_V1 = \"ibm/granite-20b-code-instruct-v1\"\nGPT35_TURBO_1106 = \"gpt-3.5-turbo-1106\"\nGPT35_TURBO = \"gpt-3.5-turbo\"\nPRODUCT_INDEX = \"product\"\nPRODUCT_DOCS_PERSIST_DIR = \"./vector-db/ocp-product-docs\"\nSUMMARY_INDEX = \"summary\"\nSUMMARY_DOCS_PERSIST_DIR = \"./vector-db/summary-docs\"\nIN_MEMORY_CACHE = \"in-memory\"\nIN_MEMORY_CACHE_MAX_ENTRIES = 1000\nREDIS_CACHE = \"redis\"\nREDIS_CACHE_HOST = \"redis-stack.ols.svc\"\nREDIS_CACHE_PORT = 6379\nREDIS_CACHE_MAX_MEMORY = \"500mb\"\nREDIS_CACHE_MAX_MEMORY_POLICY = \"allkeys-lru\""
},
{
"identifier": "config",
"path": "ols/utils/config.py",
"snippet": "def load_empty_config() -> None:\ndef load_config_from_env() -> None:"
},
{
"identifier": "Logger",
"path": "ols/utils/logger.py",
"snippet": "class Logger:\n \"\"\"This class is a simple wrapper around the Python logging function.\n\n Usage:\n\n # Simple usage\n\n logger = Logger().logger\n\n logger.debug('Debug message')\n logger.info('Info message')\n logger.warning('Warning message')\n logger.error('Error message')\n logger.critical('Critical message')\n\n # To disable logging to file set logfile to None\n\n logger = Logger(logfile=None).logger\n\n # If want to pass the name of the function generating the message\n # you may use the \"inspect\" library and generate the message as follows\n\n self.logger.debug(f\"[{inspect.stack()[0][3]}] Message here.\")\n\n # When using on a class that may already have another instance\n\n self.logger = logger if logger else Logger(show_message=False).logger\n\n \"\"\"\n\n def __init__(\n self,\n logger_name: str = \"default\",\n log_level: str = logging.getLevelName(logging.INFO),\n logfile: str | None = None,\n show_message: bool = False,\n ):\n \"\"\"Initializes the Logger instance.\n\n Args:\n logger_name: The name of the logger instance.\n log_level: The logging level for general logging verbosity.\n logfile: The path to the log file. Set to `None` to disable file logging.\n show_message: Whether to display a message about setting logging levels.\n\n Note:\n - The default values can be overridden using environment variables `LOG_LEVEL`\n or `LOG_LEVEL_CONSOLE` and `LOG_LEVEL_FILE`.\n - To set logfile name set `LOG_FILE_NAME`\n - To override logfile maximum size set `LOG_FILE_SIZE`\n \"\"\"\n msg = \"\"\"\n ############################################################################\n Set LOG_LEVEL or LOG_LEVEL_CONSOLE and LOG_LEVEL_FILE environment variable (e.g., INFO, DEBUG)\n to control general logging verbosity or console/file specific logging level\n ############################################################################\n \"\"\"\n if show_message:\n print(msg)\n\n # Load the dotenv configuration in case config class has not been used\n dotenv.load_dotenv()\n\n self.logger_name = logger_name\n self.log_level = os.getenv(\"LOG_LEVEL\", log_level)\n self.log_level_console = os.getenv(\"LOG_LEVEL_CONSOLE\", self.log_level)\n self.log_level_file = os.getenv(\"LOG_LEVEL_FILE\", self.log_level)\n _logfile = os.getenv(\"LOG_FILE_NAME\")\n self.logfile = _logfile if _logfile else logfile\n self.logfile_maxSize = int(os.getenv(\"LOG_FILE_SIZE\", (1048576 * 100)))\n self.logfile_backupCount = 3\n\n self.set_handlers()\n\n def set_handlers(self) -> None:\n \"\"\"Sets formatting, handlers and logging levels.\"\"\"\n self.logger = logging.getLogger(self.logger_name)\n self.logger.setLevel(self.log_level)\n\n formatter = logging.Formatter(\n \"%(asctime)s [%(filename)s:%(lineno)d] %(levelname)s: %(message)s\"\n )\n\n # console logging handler\n console_handler = logging.StreamHandler()\n console_handler.setLevel(self.log_level_console)\n console_handler.setStream(sys.stdout)\n console_handler.setFormatter(formatter)\n\n self.logger.addHandler(console_handler)\n\n # file logging handler (if not disabled)\n if self.logfile is not None:\n file_handler = RotatingFileHandler(\n self.logfile,\n maxBytes=self.logfile_maxSize,\n backupCount=self.logfile_backupCount,\n )\n file_handler.setLevel(self.log_level_file)\n file_handler.setFormatter(formatter)\n\n self.logger.addHandler(file_handler)"
}
] | import inspect
import os
import warnings
import json
from typing import Optional
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from ols.src import constants
from ols.utils import config
from ols.utils.logger import Logger
from langchain.chat_models import ChatOpenAI
from genai.credentials import Credentials
from genai.extensions.langchain import LangChainInterface
from genai.schemas import GenerateParams
from langchain.llms import Ollama
from langchain.llms import HuggingFaceTextGenInference
from ibm_watson_machine_learning.foundation_models import Model
from ibm_watson_machine_learning.foundation_models.extensions.langchain import (
WatsonxLLM,
)
from ibm_watson_machine_learning.metanames import (
GenTextParamsMetaNames as GenParams,
) | 3,114 | """LLM backend libraries loader."""
# workaround to disable UserWarning
warnings.simplefilter("ignore", UserWarning)
class UnsupportedProvider(Exception):
"""Exception thrown when provided provider is not supported or is unknown."""
class LLMLoader:
"""Note: This class loads the LLM backend libraries if the specific LLM is loaded.
Known caveats: Currently supports a single instance/model per backend.
llm_backends: a string with a supported llm backend name ('openai','ollama','tgi','watson','bam').
params : (optional) array of parameters to override and pass to the llm backend
# using the class and overriding specific parameters
llm_backend = 'ollama'
params = {'temperature': 0.02, 'top_p': 0.95}
llm_config = LLMLoader(llm_backend=llm_backend, params=params)
llm_chain = LLMChain(llm=llm_config.llm, prompt=prompt)
"""
def __init__(
self,
provider: Optional[str] = None,
model: Optional[str] = None,
url: Optional[str] = None,
params: Optional[dict] = None,
logger=None,
) -> None:
"""Initialize loader using provided provider, model, and other parameters."""
self.logger = logger if logger is not None else Logger("llm_loader").logger
if provider is None:
raise Exception("ERROR: Missing provider")
self.provider = provider
self.url = url
if model is None:
raise Exception("ERROR: Missing model")
self.model = model
# return empty dictionary if not defined
self.llm_params = params if params else {}
self.llm = None
self._set_llm_instance()
def _set_llm_instance(self):
self.logger.debug(
f"[{inspect.stack()[0][3]}] Loading LLM {self.model} from {self.provider}"
)
# convert to string to handle None or False definitions
match str(self.provider).lower():
case constants.PROVIDER_OPENAI:
self._openai_llm_instance()
case constants.PROVIDER_OLLAMA:
self._ollama_llm_instance()
case constants.PROVIDER_TGI:
self._tgi_llm_instance()
case constants.PROVIDER_WATSONX:
self._watson_llm_instance()
case constants.PROVIDER_BAM:
self._bam_llm_instance()
case _:
msg = f"ERROR: Unsupported LLM {self.provider}"
self.logger.error(msg)
raise UnsupportedProvider(msg)
def _openai_llm_instance(self):
self.logger.debug(f"[{inspect.stack()[0][3]}] Creating OpenAI LLM instance")
try:
except Exception:
self.logger.error(
"ERROR: Missing openai libraries. Skipping loading backend LLM."
)
return
| """LLM backend libraries loader."""
# workaround to disable UserWarning
warnings.simplefilter("ignore", UserWarning)
class UnsupportedProvider(Exception):
"""Exception thrown when provided provider is not supported or is unknown."""
class LLMLoader:
"""Note: This class loads the LLM backend libraries if the specific LLM is loaded.
Known caveats: Currently supports a single instance/model per backend.
llm_backends: a string with a supported llm backend name ('openai','ollama','tgi','watson','bam').
params : (optional) array of parameters to override and pass to the llm backend
# using the class and overriding specific parameters
llm_backend = 'ollama'
params = {'temperature': 0.02, 'top_p': 0.95}
llm_config = LLMLoader(llm_backend=llm_backend, params=params)
llm_chain = LLMChain(llm=llm_config.llm, prompt=prompt)
"""
def __init__(
self,
provider: Optional[str] = None,
model: Optional[str] = None,
url: Optional[str] = None,
params: Optional[dict] = None,
logger=None,
) -> None:
"""Initialize loader using provided provider, model, and other parameters."""
self.logger = logger if logger is not None else Logger("llm_loader").logger
if provider is None:
raise Exception("ERROR: Missing provider")
self.provider = provider
self.url = url
if model is None:
raise Exception("ERROR: Missing model")
self.model = model
# return empty dictionary if not defined
self.llm_params = params if params else {}
self.llm = None
self._set_llm_instance()
def _set_llm_instance(self):
self.logger.debug(
f"[{inspect.stack()[0][3]}] Loading LLM {self.model} from {self.provider}"
)
# convert to string to handle None or False definitions
match str(self.provider).lower():
case constants.PROVIDER_OPENAI:
self._openai_llm_instance()
case constants.PROVIDER_OLLAMA:
self._ollama_llm_instance()
case constants.PROVIDER_TGI:
self._tgi_llm_instance()
case constants.PROVIDER_WATSONX:
self._watson_llm_instance()
case constants.PROVIDER_BAM:
self._bam_llm_instance()
case _:
msg = f"ERROR: Unsupported LLM {self.provider}"
self.logger.error(msg)
raise UnsupportedProvider(msg)
def _openai_llm_instance(self):
self.logger.debug(f"[{inspect.stack()[0][3]}] Creating OpenAI LLM instance")
try:
except Exception:
self.logger.error(
"ERROR: Missing openai libraries. Skipping loading backend LLM."
)
return | provider = config.llm_config.providers[constants.PROVIDER_OPENAI] | 0 | 2023-11-08 06:29:41+00:00 | 4k |
xlcaptain/LLM-Workbench | component/knowledge_chat.py | [
{
"identifier": "ElasticsearchServer",
"path": "component/pipelines/es.py",
"snippet": "class ElasticsearchServer:\n def __init__(self):\n self.client = Elasticsearch(\n ES_URL,\n verify_certs=False,\n )\n self.embedding = Embeddings()\n self.es = ElasticsearchStore(\n index_name='audit_index',\n embedding=self.embedding,\n es_connection=self.client,\n )\n\n def create_index(self, index_name: str):\n if not self.client.indices.exists(index=index_name):\n dims = len(self.embedding.embed_query(\"test\"))\n mapping = _default_knn_mapping(dims)\n self.client.indices.create(index=index_name, body={\"mappings\": mapping})\n logger.info(f\"Successfully Created Index: {index_name}!\")\n else:\n logger.info(f\"Index: {index_name} already exists!\")\n\n def doc_upload(self, index_name: str, data_url: str):\n self.create_index(index_name)\n\n docs = []\n for root, dirs, files in os.walk(data_url):\n for file in tqdm(files):\n file_path = os.path.join(root, file)\n res = load_document(file_path)\n if res:\n self.es.add_documents(res)\n logger.info(f\"Successfully inserted document {res[0].metadata}!\")\n logger.info(\"Successfully inserted documents!\")\n\n def doc_search(\n self, method: str, query: str, top_k: int, knn_boost: float, index_name: str\n ) -> List[Dict]:\n result = []\n query_vector = self.embedding.embed_query(query)\n if method == \"knn\":\n query_body = generate_knn_query(vec=query_vector, size=top_k)\n elif method == \"hybrid\":\n query_body = generate_hybrid_query(text=query, vec=query_vector, size=top_k, knn_boost=knn_boost)\n else:\n query_body = generate_search_query(vec=query_vector, size=top_k)\n\n response = self.client.search(index=index_name, body=query_body)\n hits = [hit for hit in response[\"hits\"][\"hits\"]]\n for i in hits:\n result.append(\n {\n \"content\": i[\"_source\"][\"text\"],\n 'source': i[\"_source\"][\"metadata\"][\"source\"],\n 'score': i[\"_score\"]\n }\n )\n return result\n\n def delete(self, index_name):\n if self.client.indices.exists(index=index_name):\n self.client.indices.delete(index=index_name)\n logger.info(f\"Successfully Deleted Index: {index_name}!\")"
},
{
"identifier": "handle_response",
"path": "component/pipelines/utils.py",
"snippet": "def handle_response(messages, temperature, history_len, message_placeholder):\n full_response = \"\"\n openai.api_key = 'xxxx'\n openai.api_base = BAICHUAN_URL\n for response in openai.ChatCompletion.create(\n model=\"baichuan\",\n messages=messages[-history_len * 2 - 1:],\n temperature=temperature,\n stream=True,\n ):\n full_response += response.choices[0].delta.get(\"content\", \"\")\n message_placeholder.markdown(full_response + \"▌\")\n message_placeholder.markdown(full_response)\n return full_response"
},
{
"identifier": "create_message",
"path": "component/pipelines/utils.py",
"snippet": "def create_message(role, content, reference=None):\n message = {\"role\": role, \"content\": content}\n if reference is not None:\n message[\"reference\"] = reference\n return message"
},
{
"identifier": "KNOWLEDGE_PROMPT",
"path": "component/pipelines/prompt.py",
"snippet": "KNOWLEDGE_PROMPT = \"\"\"\n你是由南京审计大学智能审计团队研发的‘审元’大模型,目前还在不断完善中。\n如果不是询问身份信息就正常根据下面指令回答。\n<指令>请仔细阅读以下已知信息,并根据已知内容以专业的方式回答提出的问题。并且满足以下要求:\n1.你的任务是从已知信息中找到问题的答案,而不是生成新的信息。\n2.回答应符合逻辑,且答案内不能出现大量重复内容。\n2.如果已知信息中明确包含问题对应的答案,请直接提供,并且参考第二条。如果已知信息中没有答案,或者答案不明确,请回答“无法根据已知信息回答该问题”。\n3.请避免在答案中添加任何编造的信息。所有回答请使用中文。\n</指令>\n<已知信息>{context}</已知信息>\n<问题>请回答:{query}</问题>\n\"\"\""
},
{
"identifier": "CHAT_EXAMPLES",
"path": "component/pipelines/prompt.py",
"snippet": "CHAT_EXAMPLES = [\"公共工程项目跟踪审计概念是什么?\",\n \"王天朝收受了哪些贿赂?\",\n \"如何认定本罪的标准?\"]"
}
] | import time
import os
import streamlit as st
import pandas as pd
from .pipelines.es import ElasticsearchServer
from .pipelines.utils import handle_response, create_message
from .pipelines.prompt import KNOWLEDGE_PROMPT, CHAT_EXAMPLES | 2,058 |
BAICHUAN_URL = os.getenv("BAICHUAN_URL")
def handle_kb_qa(prompt, top_k, threshold):
index_name = 'audit_index'
es_server = ElasticsearchServer()
# es_server.doc_upload(index_name=index_name)
result = es_server.doc_search(index_name=index_name, query=prompt, top_k=top_k, method='hybrid',
knn_boost=threshold)
context = "\n".join([doc['content'] for doc in result])
doc_prompt = KNOWLEDGE_PROMPT.format(query=prompt, context=context)
reference = [
{
"text": doc['content'],
"source": doc['source'],
"score": float(doc['score'])
}
for doc in result
]
return doc_prompt, reference, True
def knowledge_chat():
with st.sidebar:
# TODO: 对话模型与会话绑定
def on_mode_change():
st.session_state.messages = []
mode = st.session_state.vec_modify
text = f"已切换到 {mode} 模式。"
if mode == "知识库问答":
cur_kb = st.session_state.get("selected_kb")
if cur_kb:
text = f"{text} 当前知识库: `{cur_kb}`。"
st.toast(text)
# 模型参数选择
temperature = st.slider("Temperature:", 0.0, 1.0, 0.7, 0.05)
history_len = st.number_input("历史对话轮数:", 0, 10, 1)
# 知识库配置
with st.expander("知识库配置", True):
vec_modify = st.selectbox("请选择相似度搜索模式:",
["Elasticsearch",
],
index=0,
on_change=on_mode_change,
key="vec_modify",
)
kb_top_k = st.number_input("匹配知识条数:", 1, 6, 5)
score_threshold = st.slider(
f"{'知识匹配分数阈值:' if vec_modify == 'Faiss向量库' else '语义关键字权重:(0:代表仅使用关键字)'}:",
0.0, 1.0, float(0.5), 0.01)
# 清空对话
cols = st.columns(2)
if cols[1].button(
"清空对话",
use_container_width=True,
):
st.session_state.messages = []
st.experimental_rerun()
st.title("💬 审计知识库问答")
chat_input_placeholder = "请输入对话内容,换行请使用Shift+Enter "
df = pd.DataFrame({"示例": CHAT_EXAMPLES})
with st.expander("DataFrame", False):
st.table(df)
if "messages" not in st.session_state:
st.session_state.messages = []
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])
if prompt := st.chat_input(chat_input_placeholder, key="prompt"):
full_response = ''
with st.chat_message("user"):
st.markdown(prompt)
st.session_state.messages.append(create_message("user", prompt))
with st.chat_message("assistant"):
message_placeholder = st.empty()
with st.spinner("思考中..."):
doc_prompt, reference, is_true = handle_kb_qa(prompt,
st.session_state.get("top_k", kb_top_k),
score_threshold)
if is_true:
|
BAICHUAN_URL = os.getenv("BAICHUAN_URL")
def handle_kb_qa(prompt, top_k, threshold):
index_name = 'audit_index'
es_server = ElasticsearchServer()
# es_server.doc_upload(index_name=index_name)
result = es_server.doc_search(index_name=index_name, query=prompt, top_k=top_k, method='hybrid',
knn_boost=threshold)
context = "\n".join([doc['content'] for doc in result])
doc_prompt = KNOWLEDGE_PROMPT.format(query=prompt, context=context)
reference = [
{
"text": doc['content'],
"source": doc['source'],
"score": float(doc['score'])
}
for doc in result
]
return doc_prompt, reference, True
def knowledge_chat():
with st.sidebar:
# TODO: 对话模型与会话绑定
def on_mode_change():
st.session_state.messages = []
mode = st.session_state.vec_modify
text = f"已切换到 {mode} 模式。"
if mode == "知识库问答":
cur_kb = st.session_state.get("selected_kb")
if cur_kb:
text = f"{text} 当前知识库: `{cur_kb}`。"
st.toast(text)
# 模型参数选择
temperature = st.slider("Temperature:", 0.0, 1.0, 0.7, 0.05)
history_len = st.number_input("历史对话轮数:", 0, 10, 1)
# 知识库配置
with st.expander("知识库配置", True):
vec_modify = st.selectbox("请选择相似度搜索模式:",
["Elasticsearch",
],
index=0,
on_change=on_mode_change,
key="vec_modify",
)
kb_top_k = st.number_input("匹配知识条数:", 1, 6, 5)
score_threshold = st.slider(
f"{'知识匹配分数阈值:' if vec_modify == 'Faiss向量库' else '语义关键字权重:(0:代表仅使用关键字)'}:",
0.0, 1.0, float(0.5), 0.01)
# 清空对话
cols = st.columns(2)
if cols[1].button(
"清空对话",
use_container_width=True,
):
st.session_state.messages = []
st.experimental_rerun()
st.title("💬 审计知识库问答")
chat_input_placeholder = "请输入对话内容,换行请使用Shift+Enter "
df = pd.DataFrame({"示例": CHAT_EXAMPLES})
with st.expander("DataFrame", False):
st.table(df)
if "messages" not in st.session_state:
st.session_state.messages = []
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])
if prompt := st.chat_input(chat_input_placeholder, key="prompt"):
full_response = ''
with st.chat_message("user"):
st.markdown(prompt)
st.session_state.messages.append(create_message("user", prompt))
with st.chat_message("assistant"):
message_placeholder = st.empty()
with st.spinner("思考中..."):
doc_prompt, reference, is_true = handle_kb_qa(prompt,
st.session_state.get("top_k", kb_top_k),
score_threshold)
if is_true: | full_response = handle_response([ | 1 | 2023-11-01 07:54:03+00:00 | 4k |
NicolasZucchet/Online-learning-LR-dependencies | online_lru/rec.py | [
{
"identifier": "matrix_init",
"path": "online_lru/rec_init.py",
"snippet": "def matrix_init(key, shape, dtype=jnp.float32, normalization=1):\n return random.normal(key=key, shape=shape, dtype=dtype) / normalization"
},
{
"identifier": "truncated_normal_matrix_init",
"path": "online_lru/rec_init.py",
"snippet": "def truncated_normal_matrix_init(key, shape, dtype=jnp.float_, normalization=1):\n return random.truncated_normal(key, -2.0, 2.0, shape, dtype) / normalization"
},
{
"identifier": "theta_init",
"path": "online_lru/rec_init.py",
"snippet": "def theta_init(key, shape, max_phase, dtype=jnp.float32, log=True):\n u = random.uniform(key, shape=shape, dtype=dtype)\n theta = max_phase * u\n if log:\n theta = jnp.log(theta)\n return theta"
},
{
"identifier": "nu_init",
"path": "online_lru/rec_init.py",
"snippet": "def nu_init(key, shape, r_min, r_max, dtype=jnp.float32, log=True):\n u = random.uniform(key=key, shape=shape, dtype=dtype)\n nu = -0.5 * jnp.log(u * (r_max**2 - r_min**2) + r_min**2)\n if log:\n nu = jnp.log(nu)\n return nu"
},
{
"identifier": "gamma_log_init",
"path": "online_lru/rec_init.py",
"snippet": "def gamma_log_init(key, lamb, log=True):\n nu, theta = lamb\n if log:\n nu = jnp.exp(nu)\n theta = jnp.exp(theta)\n diag_lambda = jnp.exp(-nu + 1j * theta)\n return jnp.log(jnp.sqrt(1 - jnp.abs(diag_lambda) ** 2))"
}
] | from functools import partial
from flax import linen as nn
from .rec_init import matrix_init, truncated_normal_matrix_init, theta_init, nu_init, gamma_log_init
from flax.core.frozen_dict import unfreeze
import jax
import jax.numpy as jnp | 1,623 | d_hidden: int # hidden state dimension
d_model: int # input and output dimensions
seq_length: int # time sequence length
gamma_norm: bool = True # use gamma normalization
exp_param: bool = True # exponential parametrization for lambda
r_min: float = 0.0 # smallest eigenvalue norm
r_max: float = 1.0 # largest eigenvalue norm
max_phase: float = 6.28 # max phase eigenvalue
training_mode: str = "bptt" # which learning algorithm that will be used
training: bool = False # TODO remove, for debugging purposes
def get_diag_lambda(self, nu=None, theta=None):
"""
Transform parameters nu and theta into the diagonal of the recurrent
Lambda matrix.
Args:
nu, theta array[N]: when set to their default values, None, the
parameters will take the values of the Module.
NOTE: these arguments are added in order to backpropagate through this
transformation.
"""
if nu is None:
nu = self.nu
if theta is None:
theta = self.theta
if self.exp_param:
theta = jnp.exp(theta)
nu = jnp.exp(nu)
return jnp.exp(-nu + 1j * theta)
def get_diag_gamma(self):
"""
Transform parameters gamma_log into the diagonal terms of the modulation matrix gamma.
"""
if self.gamma_norm:
return jnp.exp(self.gamma_log)
else:
return jnp.ones((self.d_hidden,))
def get_B(self):
"""
Get input to hidden matrix B.
"""
return self.B_re + 1j * self.B_im
def get_B_norm(self):
"""
Get modulated input to hidden matrix gamma B.
"""
return self.get_B() * jnp.expand_dims(self.get_diag_gamma(), axis=-1)
def to_output(self, inputs, hidden_states):
"""
Compute output given inputs and hidden states.
Args:
inputs array[T, H].
hidden_states array[T, N].
"""
C = self.C_re + 1j * self.C_im
D = self.D
y = jax.vmap(lambda x, u: (C @ x).real + D * u)(hidden_states, inputs)
return y
def get_hidden_states(self, inputs):
"""
Compute the hidden states corresponding to inputs
Return:
hidden_states array[T, N]
"""
# Materializing the diagonal of Lambda and projections
diag_lambda = self.get_diag_lambda()
B_norm = self.get_B_norm()
# Running the LRU + output projection
# For details on parallel scan, check discussion in Smith et al (2022).
Lambda_elements = jnp.repeat(diag_lambda[None, ...], inputs.shape[0], axis=0)
Bu_elements = jax.vmap(lambda u: B_norm @ u)(inputs)
elements = (Lambda_elements, Bu_elements)
if self.training_mode == "bptt":
_, hidden_states = jax.lax.associative_scan(binary_operator_diag, elements)
else:
_, hidden_states = jax.lax.associative_scan(binary_operator_diag_spatial, elements)
return hidden_states
def setup(self):
# Check that desired approximation is handled
if self.training_mode == "online_snap1":
raise NotImplementedError("SnAp-1 not implemented for LRU")
assert self.training_mode in [
"bptt",
"online_full",
"online_full_rec",
"online_full_rec_simpleB",
"online_snap1", # same as online_full
"online_spatial",
"online_1truncated",
"online_reservoir",
]
self.online = "online" in self.training_mode # whether we compute the gradient online
if self.online:
self.approximation_type = self.training_mode[7:]
# NOTE if exp_param is true, self.theta and self.nu actually represent the log of nu and
# theta lambda is initialized uniformly in complex plane
self.theta = self.param(
"theta",
partial(theta_init, max_phase=self.max_phase, log=self.exp_param),
(self.d_hidden,),
) # phase of lambda in [0, max_phase]
self.nu = self.param(
"nu",
partial(nu_init, r_min=self.r_min, r_max=self.r_max, log=self.exp_param),
(self.d_hidden,),
) # norm of lambda in [r_min, r_max]
if self.gamma_norm:
self.gamma_log = self.param(
|
# Parallel scan operations
@jax.vmap
def binary_operator_diag(q_i, q_j):
"""Binary operator for parallel scan of linear recurrence"""
A_i, b_i = q_i
A_j, b_j = q_j
return A_j * A_i, A_j * b_i + b_j
@jax.vmap
def binary_operator_diag_spatial(q_i, q_j):
"""Same as above but stop the gradient for the recurrent connection"""
A_i, b_i = q_i
A_j, b_j = q_j
return A_j * A_i, jax.lax.stop_gradient(A_j * b_i) + b_j
class LRU(nn.Module):
"""
LRU layer that updates internal elegibility traces to allow online learning.
"""
d_hidden: int # hidden state dimension
d_model: int # input and output dimensions
seq_length: int # time sequence length
gamma_norm: bool = True # use gamma normalization
exp_param: bool = True # exponential parametrization for lambda
r_min: float = 0.0 # smallest eigenvalue norm
r_max: float = 1.0 # largest eigenvalue norm
max_phase: float = 6.28 # max phase eigenvalue
training_mode: str = "bptt" # which learning algorithm that will be used
training: bool = False # TODO remove, for debugging purposes
def get_diag_lambda(self, nu=None, theta=None):
"""
Transform parameters nu and theta into the diagonal of the recurrent
Lambda matrix.
Args:
nu, theta array[N]: when set to their default values, None, the
parameters will take the values of the Module.
NOTE: these arguments are added in order to backpropagate through this
transformation.
"""
if nu is None:
nu = self.nu
if theta is None:
theta = self.theta
if self.exp_param:
theta = jnp.exp(theta)
nu = jnp.exp(nu)
return jnp.exp(-nu + 1j * theta)
def get_diag_gamma(self):
"""
Transform parameters gamma_log into the diagonal terms of the modulation matrix gamma.
"""
if self.gamma_norm:
return jnp.exp(self.gamma_log)
else:
return jnp.ones((self.d_hidden,))
def get_B(self):
"""
Get input to hidden matrix B.
"""
return self.B_re + 1j * self.B_im
def get_B_norm(self):
"""
Get modulated input to hidden matrix gamma B.
"""
return self.get_B() * jnp.expand_dims(self.get_diag_gamma(), axis=-1)
def to_output(self, inputs, hidden_states):
"""
Compute output given inputs and hidden states.
Args:
inputs array[T, H].
hidden_states array[T, N].
"""
C = self.C_re + 1j * self.C_im
D = self.D
y = jax.vmap(lambda x, u: (C @ x).real + D * u)(hidden_states, inputs)
return y
def get_hidden_states(self, inputs):
"""
Compute the hidden states corresponding to inputs
Return:
hidden_states array[T, N]
"""
# Materializing the diagonal of Lambda and projections
diag_lambda = self.get_diag_lambda()
B_norm = self.get_B_norm()
# Running the LRU + output projection
# For details on parallel scan, check discussion in Smith et al (2022).
Lambda_elements = jnp.repeat(diag_lambda[None, ...], inputs.shape[0], axis=0)
Bu_elements = jax.vmap(lambda u: B_norm @ u)(inputs)
elements = (Lambda_elements, Bu_elements)
if self.training_mode == "bptt":
_, hidden_states = jax.lax.associative_scan(binary_operator_diag, elements)
else:
_, hidden_states = jax.lax.associative_scan(binary_operator_diag_spatial, elements)
return hidden_states
def setup(self):
# Check that desired approximation is handled
if self.training_mode == "online_snap1":
raise NotImplementedError("SnAp-1 not implemented for LRU")
assert self.training_mode in [
"bptt",
"online_full",
"online_full_rec",
"online_full_rec_simpleB",
"online_snap1", # same as online_full
"online_spatial",
"online_1truncated",
"online_reservoir",
]
self.online = "online" in self.training_mode # whether we compute the gradient online
if self.online:
self.approximation_type = self.training_mode[7:]
# NOTE if exp_param is true, self.theta and self.nu actually represent the log of nu and
# theta lambda is initialized uniformly in complex plane
self.theta = self.param(
"theta",
partial(theta_init, max_phase=self.max_phase, log=self.exp_param),
(self.d_hidden,),
) # phase of lambda in [0, max_phase]
self.nu = self.param(
"nu",
partial(nu_init, r_min=self.r_min, r_max=self.r_max, log=self.exp_param),
(self.d_hidden,),
) # norm of lambda in [r_min, r_max]
if self.gamma_norm:
self.gamma_log = self.param( | "gamma_log", partial(gamma_log_init, log=self.exp_param), (self.nu, self.theta) | 4 | 2023-11-01 13:18:32+00:00 | 4k |
uygarkurt/video-retalking | models/LNet.py | [
{
"identifier": "RETURNX",
"path": "models/transformer.py",
"snippet": "class RETURNX(nn.Module):\n def __init__(self,):\n super().__init__()\n\n def forward(self, x, y): # x is the cropped, y is the foreign reference \n return x"
},
{
"identifier": "Transformer",
"path": "models/transformer.py",
"snippet": "class Transformer(nn.Module):\n def __init__(self, dim, depth, heads, dim_head, mlp_dim, dropout = 0.):\n super().__init__()\n self.layers = nn.ModuleList([])\n for _ in range(depth):\n self.layers.append(nn.ModuleList([\n DualPreNorm(dim, Attention(dim, heads = heads, dim_head = dim_head, dropout = dropout)),\n PreNorm(dim, FeedForward(dim, mlp_dim, dropout = dropout))\n ]))\n\n\n def forward(self, x, y): # x is the cropped, y is the foreign reference\n bs,c,h,w = x.size()\n\n # img to embedding\n x = x.view(bs,c,-1).permute(0,2,1)\n y = y.view(bs,c,-1).permute(0,2,1)\n\n for attn, ff in self.layers:\n x = attn(x, y) + x\n x = ff(x) + x\n\n x = x.view(bs,h,w,c).permute(0,3,1,2)\n return x"
},
{
"identifier": "Conv2d",
"path": "models/base_blocks.py",
"snippet": "class Conv2d(nn.Module):\n def __init__(self, cin, cout, kernel_size, stride, padding, residual=False, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.conv_block = nn.Sequential(\n nn.Conv2d(cin, cout, kernel_size, stride, padding),\n nn.BatchNorm2d(cout)\n )\n self.act = nn.ReLU()\n self.residual = residual\n\n def forward(self, x):\n out = self.conv_block(x)\n if self.residual:\n out += x\n return self.act(out)"
},
{
"identifier": "LayerNorm2d",
"path": "models/base_blocks.py",
"snippet": "class LayerNorm2d(nn.Module):\n def __init__(self, n_out, affine=True):\n super(LayerNorm2d, self).__init__()\n self.n_out = n_out\n self.affine = affine\n\n if self.affine:\n self.weight = nn.Parameter(torch.ones(n_out, 1, 1))\n self.bias = nn.Parameter(torch.zeros(n_out, 1, 1))\n\n def forward(self, x):\n normalized_shape = x.size()[1:]\n if self.affine:\n return F.layer_norm(x, normalized_shape, \\\n self.weight.expand(normalized_shape), \n self.bias.expand(normalized_shape)) \n else:\n return F.layer_norm(x, normalized_shape) "
},
{
"identifier": "FirstBlock2d",
"path": "models/base_blocks.py",
"snippet": "class FirstBlock2d(nn.Module):\n def __init__(self, input_nc, output_nc, norm_layer=nn.BatchNorm2d, nonlinearity=nn.LeakyReLU(), use_spect=False):\n super(FirstBlock2d, self).__init__()\n kwargs = {'kernel_size': 7, 'stride': 1, 'padding': 3}\n conv = spectral_norm(nn.Conv2d(input_nc, output_nc, **kwargs), use_spect)\n\n if type(norm_layer) == type(None):\n self.model = nn.Sequential(conv, nonlinearity)\n else:\n self.model = nn.Sequential(conv, norm_layer(output_nc), nonlinearity)\n\n def forward(self, x):\n out = self.model(x)\n return out "
},
{
"identifier": "DownBlock2d",
"path": "models/base_blocks.py",
"snippet": "class DownBlock2d(nn.Module):\n def __init__(self, input_nc, output_nc, norm_layer=nn.BatchNorm2d, nonlinearity=nn.LeakyReLU(), use_spect=False):\n super(DownBlock2d, self).__init__()\n kwargs = {'kernel_size': 3, 'stride': 1, 'padding': 1}\n conv = spectral_norm(nn.Conv2d(input_nc, output_nc, **kwargs), use_spect)\n pool = nn.AvgPool2d(kernel_size=(2, 2))\n\n if type(norm_layer) == type(None):\n self.model = nn.Sequential(conv, nonlinearity, pool)\n else:\n self.model = nn.Sequential(conv, norm_layer(output_nc), nonlinearity, pool)\n\n def forward(self, x):\n out = self.model(x)\n return out "
},
{
"identifier": "UpBlock2d",
"path": "models/base_blocks.py",
"snippet": "class UpBlock2d(nn.Module):\n def __init__(self, input_nc, output_nc, norm_layer=nn.BatchNorm2d, nonlinearity=nn.LeakyReLU(), use_spect=False):\n super(UpBlock2d, self).__init__()\n kwargs = {'kernel_size': 3, 'stride': 1, 'padding': 1}\n conv = spectral_norm(nn.Conv2d(input_nc, output_nc, **kwargs), use_spect)\n if type(norm_layer) == type(None):\n self.model = nn.Sequential(conv, nonlinearity)\n else:\n self.model = nn.Sequential(conv, norm_layer(output_nc), nonlinearity)\n\n def forward(self, x):\n out = self.model(F.interpolate(x, scale_factor=2))\n return out"
},
{
"identifier": "FFCADAINResBlocks",
"path": "models/base_blocks.py",
"snippet": "class FFCADAINResBlocks(nn.Module):\n def __init__(self, num_block, input_nc, feature_nc, norm_layer=nn.BatchNorm2d, nonlinearity=nn.LeakyReLU(), use_spect=False):\n super(FFCADAINResBlocks, self).__init__() \n self.num_block = num_block\n for i in range(num_block):\n model = FFCResnetBlock(input_nc, feature_nc, norm_layer, nonlinearity, use_spect)\n setattr(self, 'res'+str(i), model)\n\n def forward(self, x, z):\n for i in range(self.num_block):\n model = getattr(self, 'res'+str(i))\n x = model(x, z)\n return x "
},
{
"identifier": "Jump",
"path": "models/base_blocks.py",
"snippet": "class Jump(nn.Module):\n def __init__(self, input_nc, norm_layer=nn.BatchNorm2d, nonlinearity=nn.LeakyReLU(), use_spect=False):\n super(Jump, self).__init__()\n kwargs = {'kernel_size': 3, 'stride': 1, 'padding': 1}\n conv = spectral_norm(nn.Conv2d(input_nc, input_nc, **kwargs), use_spect)\n if type(norm_layer) == type(None):\n self.model = nn.Sequential(conv, nonlinearity)\n else:\n self.model = nn.Sequential(conv, norm_layer(input_nc), nonlinearity)\n\n def forward(self, x):\n out = self.model(x)\n return out "
},
{
"identifier": "FinalBlock2d",
"path": "models/base_blocks.py",
"snippet": "class FinalBlock2d(nn.Module):\n def __init__(self, input_nc, output_nc, use_spect=False, tanh_or_sigmoid='tanh'):\n super(FinalBlock2d, self).__init__()\n kwargs = {'kernel_size': 7, 'stride': 1, 'padding':3}\n conv = spectral_norm(nn.Conv2d(input_nc, output_nc, **kwargs), use_spect)\n if tanh_or_sigmoid == 'sigmoid':\n out_nonlinearity = nn.Sigmoid()\n else:\n out_nonlinearity = nn.Tanh() \n self.model = nn.Sequential(conv, out_nonlinearity)\n\n def forward(self, x):\n out = self.model(x)\n return out "
}
] | import functools
import torch
import torch.nn as nn
from models.transformer import RETURNX, Transformer
from models.base_blocks import Conv2d, LayerNorm2d, FirstBlock2d, DownBlock2d, UpBlock2d, \
FFCADAINResBlocks, Jump, FinalBlock2d | 2,505 |
class Visual_Encoder(nn.Module):
def __init__(self, image_nc, ngf, img_f, layers, norm_layer=nn.BatchNorm2d, nonlinearity=nn.LeakyReLU(), use_spect=False):
super(Visual_Encoder, self).__init__()
self.layers = layers
self.first_inp = FirstBlock2d(image_nc, ngf, norm_layer, nonlinearity, use_spect)
self.first_ref = FirstBlock2d(image_nc, ngf, norm_layer, nonlinearity, use_spect)
for i in range(layers):
in_channels = min(ngf*(2**i), img_f)
out_channels = min(ngf*(2**(i+1)), img_f)
model_ref = DownBlock2d(in_channels, out_channels, norm_layer, nonlinearity, use_spect)
model_inp = DownBlock2d(in_channels, out_channels, norm_layer, nonlinearity, use_spect)
if i < 2:
ca_layer = RETURNX()
else:
ca_layer = Transformer(2**(i+1) * ngf,2,4,ngf,ngf*4)
setattr(self, 'ca' + str(i), ca_layer)
setattr(self, 'ref_down' + str(i), model_ref)
setattr(self, 'inp_down' + str(i), model_inp)
self.output_nc = out_channels * 2
def forward(self, maskGT, ref):
x_maskGT, x_ref = self.first_inp(maskGT), self.first_ref(ref)
out=[x_maskGT]
for i in range(self.layers):
model_ref = getattr(self, 'ref_down'+str(i))
model_inp = getattr(self, 'inp_down'+str(i))
ca_layer = getattr(self, 'ca'+str(i))
x_maskGT, x_ref = model_inp(x_maskGT), model_ref(x_ref)
x_maskGT = ca_layer(x_maskGT, x_ref)
if i < self.layers - 1:
out.append(x_maskGT)
else:
out.append(torch.cat([x_maskGT, x_ref], dim=1)) # concat ref features !
return out
class Decoder(nn.Module):
def __init__(self, image_nc, feature_nc, ngf, img_f, layers, num_block, norm_layer=nn.BatchNorm2d, nonlinearity=nn.LeakyReLU(), use_spect=False):
super(Decoder, self).__init__()
self.layers = layers
for i in range(layers)[::-1]:
if i == layers-1:
in_channels = ngf*(2**(i+1)) * 2
else:
in_channels = min(ngf*(2**(i+1)), img_f)
out_channels = min(ngf*(2**i), img_f)
up = UpBlock2d(in_channels, out_channels, norm_layer, nonlinearity, use_spect)
res = FFCADAINResBlocks(num_block, in_channels, feature_nc, norm_layer, nonlinearity, use_spect)
|
class Visual_Encoder(nn.Module):
def __init__(self, image_nc, ngf, img_f, layers, norm_layer=nn.BatchNorm2d, nonlinearity=nn.LeakyReLU(), use_spect=False):
super(Visual_Encoder, self).__init__()
self.layers = layers
self.first_inp = FirstBlock2d(image_nc, ngf, norm_layer, nonlinearity, use_spect)
self.first_ref = FirstBlock2d(image_nc, ngf, norm_layer, nonlinearity, use_spect)
for i in range(layers):
in_channels = min(ngf*(2**i), img_f)
out_channels = min(ngf*(2**(i+1)), img_f)
model_ref = DownBlock2d(in_channels, out_channels, norm_layer, nonlinearity, use_spect)
model_inp = DownBlock2d(in_channels, out_channels, norm_layer, nonlinearity, use_spect)
if i < 2:
ca_layer = RETURNX()
else:
ca_layer = Transformer(2**(i+1) * ngf,2,4,ngf,ngf*4)
setattr(self, 'ca' + str(i), ca_layer)
setattr(self, 'ref_down' + str(i), model_ref)
setattr(self, 'inp_down' + str(i), model_inp)
self.output_nc = out_channels * 2
def forward(self, maskGT, ref):
x_maskGT, x_ref = self.first_inp(maskGT), self.first_ref(ref)
out=[x_maskGT]
for i in range(self.layers):
model_ref = getattr(self, 'ref_down'+str(i))
model_inp = getattr(self, 'inp_down'+str(i))
ca_layer = getattr(self, 'ca'+str(i))
x_maskGT, x_ref = model_inp(x_maskGT), model_ref(x_ref)
x_maskGT = ca_layer(x_maskGT, x_ref)
if i < self.layers - 1:
out.append(x_maskGT)
else:
out.append(torch.cat([x_maskGT, x_ref], dim=1)) # concat ref features !
return out
class Decoder(nn.Module):
def __init__(self, image_nc, feature_nc, ngf, img_f, layers, num_block, norm_layer=nn.BatchNorm2d, nonlinearity=nn.LeakyReLU(), use_spect=False):
super(Decoder, self).__init__()
self.layers = layers
for i in range(layers)[::-1]:
if i == layers-1:
in_channels = ngf*(2**(i+1)) * 2
else:
in_channels = min(ngf*(2**(i+1)), img_f)
out_channels = min(ngf*(2**i), img_f)
up = UpBlock2d(in_channels, out_channels, norm_layer, nonlinearity, use_spect)
res = FFCADAINResBlocks(num_block, in_channels, feature_nc, norm_layer, nonlinearity, use_spect) | jump = Jump(out_channels, norm_layer, nonlinearity, use_spect) | 8 | 2023-11-02 18:25:51+00:00 | 4k |
fortelex/hiveline | hiveline/results/journeys.py | [
{
"identifier": "fptf",
"path": "hiveline/models/fptf.py",
"snippet": "def _remove_empty_keys(d):\ndef read_datetime(time_str):\ndef format_datetime(dt):\n def __init__(self, name=None, address=None, longitude=None, latitude=None, altitude=None):\n def to_dict(self):\n def to_json(self):\n def from_dict(json_str):\ndef location_from_json(data: dict | str | None):\n def __init__(self, id: str, name: str, location: Location = None, regions: list = None):\n def to_dict(self):\n def to_json(self):\n def from_json(json_str):\ndef station_from_json(data: dict | str | None):\n def __init__(self, id: str, station: Station, name: str, location: Location = None):\n def to_dict(self):\n def to_json(self):\n def from_json(json_str):\ndef stop_from_json(data: dict | str | None):\ndef place_from_json(data: dict | str | None):\n def __init__(self, id: str, name: str, stations: list[Station] = None):\n def to_dict(self):\n def to_json(self):\n def from_json(json_str):\ndef region_from_json(data: dict | str | None):\n def __init__(self, mode: str):\n def __str__(self):\n def __repr__(self):\n def to_string(self):\n def to_json(self):\n def from_string(mode):\n def __init__(self, id: str, name: str):\n def to_dict(self):\n def to_json(self):\n def from_json(json_str):\ndef operator_from_json(data: dict | str | None):\n def __init__(self, id: str, name: str, mode: Mode, routes: list, operator: Operator = None, sub_mode: str = None):\n def to_dict(self):\n def to_json(self):\n def from_json(json_str):\ndef line_from_json(data: dict | str | None):\n def __init__(self, id: str, line: Line, mode: Mode, stops: list[Station | Stop | Location], sub_mode: str = None):\n def to_dict(self):\n def to_json(self):\ndef route_from_json(data: dict | str | None):\n def __init__(self, arrival: int = None, departure: int = None):\n def to_dict(self):\n def to_json(self):\n def from_json(json_str):\n def __init__(self, id: str, route: Route, mode: Mode, sequence: list[ScheduleSequenceElement], starts,\n sub_mode=None):\n def to_dict(self):\n def to_json(self):\n def from_json(json_str):\ndef schedule_from_json(data: dict | str | None):\n def __init__(self, stop: Stop | Station | Location, arrival: datetime.datetime = None, arrival_delay: int = None,\n arrival_platform: str = None,\n departure: datetime.datetime = None, departure_delay: int = None, departure_platform: str = None):\n def to_dict(self):\n def to_json(self):\n def from_json(json_str):\ndef stopover_from_json(data: dict | str | None):\ndef get_location(place: Location | Station | Stop | Stopover) -> Location | None:\n def __init__(self, amount: float, currency: str):\n def to_dict(self):\n def to_json(self):\n def from_json(json_str):\ndef price_from_json(data: dict | str | None):\n def __init__(self, origin: Stop | Station | Location, destination: Stop | Station | Location,\n departure: datetime.datetime, arrival: datetime.datetime, mode: Mode, sub_mode: str = None,\n departure_delay: int = None,\n departure_platform: str = None,\n arrival_delay: int = None, arrival_platform: str = None, line: Line = None, direction: str = None,\n stopovers: list[Stopover] = None, schedule: Schedule = None, public: bool = True,\n operator: Operator = None,\n price: Price = None, polyline: str = None):\n def to_dict(self):\n def to_json(self):\n def from_json(json_str):\n def get_departure(self, realtime=True):\n def get_arrival(self, realtime=True):\n def duration(self, realtime=True):\ndef leg_from_json(data: dict | str | None):\n def __init__(self, id: str, legs: list[Leg], price: Price = None):\n def to_dict(self):\n def 
to_json(self):\n def from_json(json_str):\n def get_departure(self, realtime=True):\n def get_arrival(self, realtime=True):\n def duration(self, realtime=True):\n def get_trace(self) -> list[tuple[tuple[float, float], datetime.datetime, Mode, bool]]:\ndef journey_from_json(data: dict | str | None):\ndef from_json(data: dict | str | None):\nclass Location:\nclass Station:\nclass Stop:\nclass Region:\nclass Mode(Enum):\nclass Operator:\nclass Line:\nclass Route:\nclass ScheduleSequenceElement:\nclass Schedule:\nclass Stopover:\nclass Price:\nclass Leg:\nclass Journey:\n TRAIN = 'train'\n BUS = 'bus'\n WATERCRAFT = 'watercraft'\n TAXI = 'taxi'\n GONDOLA = 'gondola'\n AIRCRAFT = 'aircraft'\n CAR = 'car'\n BICYCLE = 'bicycle'\n WALKING = 'walking'\n UNKNOWN = ''"
},
{
"identifier": "Options",
"path": "hiveline/models/options.py",
"snippet": "class Options:\n def __init__(self, result):\n self.vc_id = result[\"vc-id\"]\n self.sim_id = result[\"sim-id\"]\n self.created = fptf.read_datetime(result[\"created\"])\n self.meta = result[\"meta\"]\n self.traveller = Traveller(result[\"traveller\"])\n self.options = [Option.from_dict(o) for o in result[\"options\"]]\n\n def get_option(self, option_id: str):\n for o in self.options:\n if o.id == option_id:\n return o\n return None\n\n def to_dict(self):\n return {\n \"vc-id\": self.vc_id,\n \"sim-id\": self.sim_id,\n \"created\": fptf.format_datetime(self.created),\n \"meta\": self.meta,\n \"traveller\": self.traveller.to_dict(),\n \"options\": [o.to_dict() for o in self.options]\n }"
},
{
"identifier": "Option",
"path": "hiveline/models/options.py",
"snippet": "class Option:\n def __init__(self, id: str, origin: fptf.Location, destination: fptf.Location, departure: datetime.datetime,\n modes: list[fptf.Mode], journey: fptf.Journey,\n trace: list[tuple[tuple[float, float], datetime.datetime, fptf.Mode, bool]] | None = None):\n self.id = id\n self.origin = origin\n self.destination = destination\n self.departure = departure\n self.modes = modes\n self.journey = journey\n self.trace = trace\n\n def to_dict(self):\n return {\n \"route-option-id\": self.id,\n \"origin\": [self.origin.longitude, self.origin.latitude],\n \"destination\": [self.destination.longitude, self.destination.latitude],\n \"departure\": fptf.format_datetime(self.departure),\n \"modes\": [m.to_string() for m in self.modes],\n \"journey\": self.journey.to_dict()\n }\n\n @staticmethod\n def from_dict(result):\n id = result[\"route-option-id\"]\n origin = fptf.Location(longitude=result[\"origin\"][0], latitude=result[\"origin\"][1])\n destination = fptf.Location(longitude=result[\"destination\"][0], latitude=result[\"destination\"][1])\n departure = fptf.read_datetime(result[\"departure\"])\n modes = [fptf.Mode.from_string(m) for m in result[\"modes\"]]\n journey = fptf.journey_from_json(result[\"journey\"])\n trace = None\n return Option(id, origin, destination, departure, modes, journey, trace)\n\n def has_car(self):\n \"\"\"\n Check if a route option has a car leg\n :return: True if the route option has a car leg, False otherwise\n \"\"\"\n\n for leg in self.journey.legs:\n mode = leg.mode\n\n if mode == fptf.Mode.CAR:\n return True\n\n return False\n\n def get_trace(self) -> list[tuple[tuple[float, float], datetime.datetime, fptf.Mode, bool]]:\n if self.trace is None:\n self.trace = self.journey.get_trace()\n return self.trace"
},
{
"identifier": "get_database",
"path": "hiveline/mongo/db.py",
"snippet": "def get_database():\n dotenv.load_dotenv()\n\n user = os.getenv(\"UP_MONGO_USER\")\n password = os.getenv(\"UP_MONGO_PASSWORD\")\n domain = os.getenv(\"UP_MONGO_DOMAIN\")\n database = os.getenv(\"UP_MONGO_DATABASE\")\n\n connection_string = \"mongodb://%s:%s@%s/%s?authSource=admin\" % (user, password, domain, database)\n\n client = MongoClient(connection_string)\n\n return client[database]"
},
{
"identifier": "ensure_directory",
"path": "hiveline/routing/util.py",
"snippet": "def ensure_directory(path):\n \"\"\"\n Ensures that the given directory exists. If it does not exist, it will be created.\n :param path: The path to the directory\n :return:\n \"\"\"\n if not os.path.isdir(path):\n pathlib.Path(path).mkdir(parents=True, exist_ok=True)"
}
] | import datetime
import json
import math
import os.path
from typing import Callable, Generator
from shapely import Polygon, Point
from hiveline.models import fptf
from hiveline.models.options import Options, Option
from hiveline.mongo.db import get_database
from hiveline.routing.util import ensure_directory | 2,452 |
rail_modes = [fptf.Mode.TRAIN, fptf.Mode.GONDOLA, fptf.Mode.WATERCRAFT]
class Journeys:
def __init__(self, sim_id: str, db=None, use_cache=True, cache="./cache"):
if db is None:
db = get_database()
self.db = db
self.sim_id = sim_id
if cache.endswith("/"):
cache = cache[:-1]
self.use_cache = use_cache
self.cache = cache + "/hiveline-journeys"
|
rail_modes = [fptf.Mode.TRAIN, fptf.Mode.GONDOLA, fptf.Mode.WATERCRAFT]
class Journeys:
def __init__(self, sim_id: str, db=None, use_cache=True, cache="./cache"):
if db is None:
db = get_database()
self.db = db
self.sim_id = sim_id
if cache.endswith("/"):
cache = cache[:-1]
self.use_cache = use_cache
self.cache = cache + "/hiveline-journeys" | ensure_directory(self.cache) | 4 | 2023-11-07 15:34:04+00:00 | 4k |
uhppoted/uhppoted-app-home-assistant | custom_components/uhppoted/event.py | [
{
"identifier": "DOMAIN",
"path": "custom_components/uhppoted/const.py",
"snippet": "DOMAIN = 'uhppoted'"
},
{
"identifier": "CONF_BIND_ADDR",
"path": "custom_components/uhppoted/const.py",
"snippet": "CONF_BIND_ADDR = 'bind_address'"
},
{
"identifier": "CONF_BROADCAST_ADDR",
"path": "custom_components/uhppoted/const.py",
"snippet": "CONF_BROADCAST_ADDR = 'broadcast_address'"
},
{
"identifier": "CONF_LISTEN_ADDR",
"path": "custom_components/uhppoted/const.py",
"snippet": "CONF_LISTEN_ADDR = 'listen_address'"
},
{
"identifier": "CONF_DEBUG",
"path": "custom_components/uhppoted/const.py",
"snippet": "CONF_DEBUG = 'debug'"
},
{
"identifier": "ATTR_ADDRESS",
"path": "custom_components/uhppoted/const.py",
"snippet": "ATTR_ADDRESS = 'address'"
},
{
"identifier": "ATTR_NETMASK",
"path": "custom_components/uhppoted/const.py",
"snippet": "ATTR_NETMASK = 'netmask'"
},
{
"identifier": "ATTR_GATEWAY",
"path": "custom_components/uhppoted/const.py",
"snippet": "ATTR_GATEWAY = 'gateway'"
},
{
"identifier": "ATTR_FIRMWARE",
"path": "custom_components/uhppoted/const.py",
"snippet": "ATTR_FIRMWARE = 'firmware'"
},
{
"identifier": "configure_controllers",
"path": "custom_components/uhppoted/config.py",
"snippet": "def configure_controllers(options, f):\n if CONF_CONTROLLERS in options:\n controllers = options[CONF_CONTROLLERS]\n\n for c in controllers:\n controller = f'{c[CONF_CONTROLLER_ID]}'.strip()\n serial_no = f'{c[CONF_CONTROLLER_SERIAL_NUMBER]}'.strip()\n address = f'{c[CONF_CONTROLLER_ADDR]}'.strip()\n\n f(controller, serial_no, address)"
},
{
"identifier": "configure_doors",
"path": "custom_components/uhppoted/config.py",
"snippet": "def configure_doors(options, g):\n if CONF_CONTROLLERS in options and CONF_DOORS in options:\n controllers = options[CONF_CONTROLLERS]\n doors = options[CONF_DOORS]\n\n for c in controllers:\n controller = f'{c[CONF_CONTROLLER_ID]}'.strip()\n serial_no = f'{c[CONF_CONTROLLER_SERIAL_NUMBER]}'.strip()\n address = f'{c[CONF_CONTROLLER_ADDR]}'.strip()\n\n for d in doors:\n door = f'{d[CONF_DOOR_ID]}'.strip()\n door_no = f'{d[CONF_DOOR_NUMBER]}'.strip()\n door_controller = f'{d[CONF_DOOR_CONTROLLER]}'.strip()\n\n if door_controller == controller:\n g(controller, serial_no, door, door_no)"
},
{
"identifier": "ControllerInfo",
"path": "custom_components/uhppoted/controller.py",
"snippet": "class ControllerInfo(SensorEntity):\n _attr_icon = 'mdi:identifier'\n _attr_has_entity_name: True\n _attr_translation_key = 'controller_id'\n\n def __init__(self, u, controller, serial_no):\n super().__init__()\n\n _LOGGER.debug(f'controller {controller} {serial_no}')\n\n self.uhppote = u\n self.controller = controller\n self.serial_no = int(f'{serial_no}')\n self._name = f'uhppoted.controller.{controller}.info'.lower()\n self._state = None\n self._attributes: Dict[str, Any] = {\n ATTR_ADDRESS: '',\n ATTR_NETMASK: '',\n ATTR_GATEWAY: '',\n ATTR_FIRMWARE: '',\n }\n self._available = False\n\n @property\n def unique_id(self) -> str:\n return f'uhppoted.controller.{self.controller}.info'.lower()\n\n @property\n def name(self) -> str:\n return self._name\n\n @property\n def available(self) -> bool:\n return self._available\n\n @property\n def state(self) -> Optional[str]:\n if self._state != None:\n return f'{self._state}'\n\n return None\n\n @property\n def extra_state_attributes(self) -> Dict[str, Any]:\n return self._attributes\n\n async def async_update(self):\n _LOGGER.debug(f'controller:{self.controller} update info')\n try:\n response = self.uhppote.get_controller(self.serial_no)\n\n if response.controller == self.serial_no:\n self._state = response.controller\n self._available = True\n self._attributes[ATTR_ADDRESS] = f'{response.ip_address}'\n self._attributes[ATTR_NETMASK] = f'{response.subnet_mask}'\n self._attributes[ATTR_GATEWAY] = f'{response.gateway}'\n self._attributes[ATTR_FIRMWARE] = f'{response.version} {response.date:%Y-%m-%d}'\n\n except (Exception):\n self._available = False\n _LOGGER.exception(f'error retrieving controller {self.controller} information')"
},
{
"identifier": "ControllerDoorOpened",
"path": "custom_components/uhppoted/door.py",
"snippet": "class ControllerDoorOpened(EventEntity):\n _attr_icon = 'mdi:door'\n _attr_has_entity_name: True\n _attr_event_types = ['OPEN', 'CLOSE']\n\n def __init__(self, u, controller, serial_no, door, door_id):\n super().__init__()\n\n _LOGGER.debug(f'controller {controller}: door:{door} open event')\n\n self.uhppote = u\n self.controller = controller\n self.serial_no = int(f'{serial_no}')\n self.door = door\n self.door_id = int(f'{door_id}')\n\n self._name = f'uhppoted.door.{door}.open.event'.lower()\n self._open = None\n\n @property\n def unique_id(self) -> str:\n return f'uhppoted.door.{self.door}.open.event'.lower()\n\n @property\n def name(self) -> str:\n return self._name\n\n async def async_update(self):\n _LOGGER.debug(f'controller:{self.controller} update door {self.door}.open.event state')\n try:\n response = self.uhppote.get_status(self.serial_no)\n last = self._open\n\n if response.controller == self.serial_no:\n if self.door_id == 1:\n self._open = response.door_1_open == True\n elif self.door_id == 2:\n self._open = response.door_2_open == True\n elif self.door_id == 3:\n self._open = response.door_3_open == True\n elif self.door_id == 4:\n self._open = response.door_4_open == True\n else:\n self._open = None\n\n if self._open != last and self._open:\n self._trigger_event('OPEN')\n elif self._open != last and not self._open:\n self._trigger_event('CLOSE')\n\n except (Exception):\n self._available = False\n _LOGGER.exception(f'error retrieving controller {self.controller} status')"
}
] | import datetime
import logging
from homeassistant.core import HomeAssistant
from homeassistant.config_entries import ConfigEntry
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.typing import ConfigType
from homeassistant.helpers.typing import DiscoveryInfoType
from homeassistant.components.sensor import SensorEntity
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from uhppoted import uhppote
from .const import DOMAIN
from .const import CONF_BIND_ADDR
from .const import CONF_BROADCAST_ADDR
from .const import CONF_LISTEN_ADDR
from .const import CONF_DEBUG
from .const import ATTR_ADDRESS
from .const import ATTR_NETMASK
from .const import ATTR_GATEWAY
from .const import ATTR_FIRMWARE
from .config import configure_controllers
from .config import configure_doors
from .controller import ControllerInfo
from .door import ControllerDoorOpened | 1,818 | from __future__ import annotations
_LOGGER = logging.getLogger(__name__)
# Configuration constants
# Attribute constants
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback):
options = entry.options
bind = options[CONF_BIND_ADDR]
broadcast = options[CONF_BROADCAST_ADDR]
listen = options[CONF_LISTEN_ADDR]
| from __future__ import annotations
_LOGGER = logging.getLogger(__name__)
# Configuration constants
# Attribute constants
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback):
options = entry.options
bind = options[CONF_BIND_ADDR]
broadcast = options[CONF_BROADCAST_ADDR]
listen = options[CONF_LISTEN_ADDR] | debug = options[CONF_DEBUG] | 4 | 2023-11-06 18:46:49+00:00 | 4k |
shixiaoyu0216/SAC4IR | train.py | [
{
"identifier": "env_sac",
"path": "sacd/env_sac.py",
"snippet": "class Env():\r\n def __init__(self, observation_data, I, max_item_id, each_user, K, item_ctr_dict, pop_dict):\r\n def reset(self, observation):\r\n def step(self, action, pass_item_list):\r"
},
{
"identifier": "SacdAgent",
"path": "sacd/agent/sacd.py",
"snippet": "class SacdAgent(BaseAgent):\n def __init__(self, env, log_dir, num_steps=100000, batch_size=64,\n lr=0.0003, memory_size=1000000, gamma=0.99, multi_step=1,\n target_entropy_ratio=0.98, start_steps=20000,\n update_interval=4, target_update_interval=8000,\n use_per=False, dueling_net=False, num_eval_steps=125000,\n max_episode_steps=27000, log_interval=10, eval_interval=1000,\n cuda=False, state_re=True, seed=0, K=10):\n super().__init__(\n env, log_dir, num_steps, batch_size, memory_size, gamma,\n multi_step, target_entropy_ratio, start_steps, update_interval,\n target_update_interval, use_per, num_eval_steps, max_episode_steps,\n log_interval, eval_interval, cuda, state_re, seed, K)\n\n self.actor = PolicyNetwork(self.env.n_observation, self.env.n_actions, self.env, self.batch_size,\n state_representation=state_re).to(self.device)\n self.main_critic = TwinnedQNetwork(self.env.n_observation, self.env.n_actions, self.batch_size,\n state_representation=state_re, dueling_net=dueling_net).to(\n device=self.device)\n self.target_critic = TwinnedQNetwork(self.env.n_observation, self.env.n_actions, self.batch_size,\n state_representation=state_re, dueling_net=dueling_net).to(\n device=self.device).eval()\n self.target_critic.load_state_dict(self.main_critic.state_dict())\n disable_gradients(self.target_critic)\n self.policy_optim = Adam(self.actor.parameters(), lr=lr)\n self.q1_optim = Adam(self.main_critic.Q1.parameters(), lr=lr)\n self.q2_optim = Adam(self.main_critic.Q2.parameters(), lr=lr)\n self.avg_prob = 1.0 / self.env.n_actions\n self.maximum_entropy = -np.log(self.avg_prob) * target_entropy_ratio\n self.log_alpha = torch.zeros(1, requires_grad=True, device=self.device)\n self.alpha = self.log_alpha.exp()\n self.alpha_optim = Adam([self.log_alpha], lr=lr)\n\n # 探索 随机动作\n def explore(self, state):\n state = np.array([int(i) for i in state[0].split('|')])\n state = torch.from_numpy(state).to(self.device)\n with torch.no_grad():\n action, _, _ = self.actor.sample(state)\n return action.item()\n\n def exploit(self, state, user_id):\n s = self.env.reset(np.array([int(i) for i in state.split('|')]))\n s = torch.from_numpy(s).to(self.device)\n with torch.no_grad():\n action = self.actor.act(s, user_id)\n return action\n\n def update_target_critic(self):\n self.target_critic.load_state_dict(self.main_critic.state_dict())\n\n def calc_current_q(self, states, actions, rewards, next_states, dones):\n curr_q1, curr_q2 = self.main_critic(states)\n curr_q1 = curr_q1.gather(1, actions.long() - 1)\n curr_q2 = curr_q2.gather(1, actions.long() - 1)\n return curr_q1, curr_q2\n\n def calc_target_q(self, states, actions, rewards, next_states, dones):\n with torch.no_grad():\n _, action_probs, log_action_probs = self.actor.sample(next_states)\n next_q1, next_q2 = self.target_critic(next_states)\n next_q = torch.min(next_q1, next_q2)\n next_v = (action_probs * (next_q - self.alpha * log_action_probs)).sum(dim=1, keepdim=True)\n return rewards + self.gamma_n * next_v\n\n def calc_critic_loss(self, batch):\n target_q = self.calc_target_q(*batch)\n curr_q1, curr_q2 = self.calc_current_q(*batch)\n errors = torch.abs(curr_q1.detach() - target_q)\n\n mean_q1 = curr_q1.detach().mean().item()\n mean_q2 = curr_q2.detach().mean().item()\n\n q1_loss = torch.mean((curr_q1 - target_q).pow(2))\n q2_loss = torch.mean((curr_q2 - target_q).pow(2))\n\n update_params(self.q1_optim, q1_loss)\n update_params(self.q2_optim, q2_loss)\n\n return q1_loss, q2_loss, errors, mean_q1, mean_q2\n\n def calc_policy_loss(self, 
batch):\n states, actions, rewards, next_states, dones = batch\n _, action_probs, log_action_probs = self.actor.sample(states)\n with torch.no_grad():\n q1, q2 = self.main_critic(states)\n q = torch.min(q1, q2)\n\n entropies = -torch.sum(action_probs * log_action_probs, dim=1, keepdim=True)\n q = torch.sum(action_probs * q, dim=1, keepdim=True)\n policy_loss = -(self.alpha * entropies + q).mean()\n update_params(self.policy_optim, policy_loss)\n\n return policy_loss, entropies.detach()\n\n def calc_entropy_loss(self, entropies):\n assert not entropies.requires_grad\n entropy_loss = torch.mean(self.log_alpha * (self.maximum_entropy - entropies))\n update_params(self.alpha_optim, entropy_loss)\n self.alpha = self.log_alpha.exp()\n return self.alpha\n\n def save_models(self, save_dir):\n super().save_models(save_dir)\n self.actor.save(os.path.join(save_dir, 'policy.pth'))\n self.main_critic.save(os.path.join(save_dir, 'online_critic.pth'))\n self.target_critic.save(os.path.join(save_dir, 'target_critic.pth'))"
},
{
"identifier": "rowdata_process_util",
"path": "util/rowdata_process_util.py",
"snippet": "class Dataset():\r\n def __init__(self, data_path, split=\"::\"):\r\n def loadData(self, dataset_path, split=\"::\"):\r\n def splitData(self):\r\n def getAllItem(self):\r\n def getPopular(self, data_dict_train):\r"
},
{
"identifier": "json_util",
"path": "util/json_util.py",
"snippet": "class JsonEncoder(json.JSONEncoder):\r\n def default(self, obj):\r\ndef save_dict(filename, dic):\r\ndef load_dict(filename):\r"
},
{
"identifier": "Gini",
"path": "util/metric/Gini.py",
"snippet": "def Gini(p_dic):\r\n p_list = list(p_dic.values())\r\n cum = np.cumsum(sorted(np.append(p_list, 0)))\r\n sum = cum[-1]\r\n x = np.array(range(len(cum))) / len(p_list)\r\n y = cum / sum\r\n B = np.trapz(y, x=x)\r\n A = 0.5 - B\r\n G = A / (A + B)\r\n return G\r"
},
{
"identifier": "HR",
"path": "util/metric/HR.py",
"snippet": "def HR(test_set, rec_list):\r\n if list(set(test_set) & set(rec_list)):\r\n return 1\r\n else:\r\n return 0\r"
},
{
"identifier": "ndcg_metric",
"path": "util/metric/NDCG.py",
"snippet": "def ndcg_metric(topN_dict, test_dict):\r\n ndcg = 0\r\n for key, topn_set in topN_dict.items():\r\n test_set = test_dict.get(key)\r\n dsct_list = [1 / np.log2(i + 1) for i in range(1, len(topn_set) + 1)]\r\n z_k = sum(dsct_list)\r\n if test_set is not None:\r\n mask = [0 if i not in test_set else 1 for i in topn_set]\r\n ndcg += sum(np.multiply(dsct_list, mask)) / z_k\r\n ndcg = ndcg / len(topN_dict.items())\r\n return ndcg\r"
}
] | import copy
import csv
import os
import yaml
from datetime import datetime
from sacd import env_sac
from sacd.agent.sacd import SacdAgent
from util import rowdata_process_util, json_util
from util.metric.Gini import Gini
from util.metric.HR import HR
from util.metric.NDCG import ndcg_metric | 2,818 |
def getHistory(each_user, dataset_name):
history_list = []
try:
with open('./dataset/' + dataset_name + '/transition/' + str(each_user) + '_transition.csv', 'r') as f:
reader = csv.reader(f)
for row in reader:
history_list.append(row)
except:
return history_list
return history_list
def run_sac():
path = "./config/sacd.yaml"
env_id = "Recommender"
with open(path, encoding='utf-8') as f:
config = yaml.load(f, Loader=yaml.SafeLoader)
name = path.split('/')[-1].rstrip('.yaml')
cur_time = datetime.now().strftime("%Y%m%d-%H%M")
log_dir = os.path.join('logs', env_id, f'{name}-seed{0}-{cur_time}')
dataset_name = "ml-1m"
obs_dataset = rowdata_process_util.Dataset('./dataset/' + dataset_name + '/ratings.dat')
train_dict = obs_dataset.splitData()
item_id_list, item_num, max_item_id = obs_dataset.getAllItem()
item_ctr_dict = json_util.load_dict('./dataset/' + dataset_name + '/click_through_rate.json')
pop_dict = obs_dataset.getPopular(train_dict)
K = config['K']
user_num = 0
precision_test = 0
hr_test = 0
ndcg_test = 0
pop_gini = copy.deepcopy(pop_dict)
for i in pop_gini.keys():
pop_gini[i] = 0
for each_user in train_dict:
user_history = getHistory(each_user, dataset_name)
history_list_train = user_history[:int(0.8 * len(user_history))]
history_list_test_set = user_history[int(0.8 * len(user_history)):]
history_list_test = history_list_test_set[0:1]
if len(history_list_train) <= 10 * 1.2:
continue
user_num += 1
pass_item_list = []
for user_id, item_id, ratings, __ in obs_dataset.data:
if user_id == each_user and ratings == 0:
pass_item_list.append(item_id)
test_set = list(set([int(i[1]) for i in history_list_test_set]) - set(pass_item_list))
observation_data = train_dict[each_user]
env = env_sac.Env(observation_data[-K:], list(set(item_id_list)), max_item_id, each_user, K, item_ctr_dict,
pop_dict)
agent = SacdAgent(env=env, log_dir=log_dir, cuda=False, state_re=True, dueling_net=False, **config)
agent.run_offpolicy(history_list_train)
state = history_list_test[0][0]
actions_list = agent.exploit(state, each_user)
precision_test += (len(set(actions_list) & set(test_set))) / (len(actions_list))
hr_test += HR(test_set, actions_list)
ndcg_test += ndcg_metric({each_user: actions_list}, {each_user: test_set})
for i in actions_list:
if i in pop_gini.keys():
pop_gini[i] += 1
if user_num != 0:
print("Precision: " + str(precision_test) + " / " + str(user_num) + " = " + str(precision_test / user_num))
print("HR: " + str(hr_test) + " / " + str(user_num) + " = " + str(hr_test / user_num))
print("NDCG: " + str(ndcg_test) + " / " + str(user_num) + " = " + str(ndcg_test / user_num))
for k in pop_gini.copy():
if pop_gini[k] == 0:
del pop_gini[k]
|
def getHistory(each_user, dataset_name):
history_list = []
try:
with open('./dataset/' + dataset_name + '/transition/' + str(each_user) + '_transition.csv', 'r') as f:
reader = csv.reader(f)
for row in reader:
history_list.append(row)
except:
return history_list
return history_list
def run_sac():
path = "./config/sacd.yaml"
env_id = "Recommender"
with open(path, encoding='utf-8') as f:
config = yaml.load(f, Loader=yaml.SafeLoader)
name = path.split('/')[-1].rstrip('.yaml')
cur_time = datetime.now().strftime("%Y%m%d-%H%M")
log_dir = os.path.join('logs', env_id, f'{name}-seed{0}-{cur_time}')
dataset_name = "ml-1m"
obs_dataset = rowdata_process_util.Dataset('./dataset/' + dataset_name + '/ratings.dat')
train_dict = obs_dataset.splitData()
item_id_list, item_num, max_item_id = obs_dataset.getAllItem()
item_ctr_dict = json_util.load_dict('./dataset/' + dataset_name + '/click_through_rate.json')
pop_dict = obs_dataset.getPopular(train_dict)
K = config['K']
user_num = 0
precision_test = 0
hr_test = 0
ndcg_test = 0
pop_gini = copy.deepcopy(pop_dict)
for i in pop_gini.keys():
pop_gini[i] = 0
for each_user in train_dict:
user_history = getHistory(each_user, dataset_name)
history_list_train = user_history[:int(0.8 * len(user_history))]
history_list_test_set = user_history[int(0.8 * len(user_history)):]
history_list_test = history_list_test_set[0:1]
if len(history_list_train) <= 10 * 1.2:
continue
user_num += 1
pass_item_list = []
for user_id, item_id, ratings, __ in obs_dataset.data:
if user_id == each_user and ratings == 0:
pass_item_list.append(item_id)
test_set = list(set([int(i[1]) for i in history_list_test_set]) - set(pass_item_list))
observation_data = train_dict[each_user]
env = env_sac.Env(observation_data[-K:], list(set(item_id_list)), max_item_id, each_user, K, item_ctr_dict,
pop_dict)
agent = SacdAgent(env=env, log_dir=log_dir, cuda=False, state_re=True, dueling_net=False, **config)
agent.run_offpolicy(history_list_train)
state = history_list_test[0][0]
actions_list = agent.exploit(state, each_user)
precision_test += (len(set(actions_list) & set(test_set))) / (len(actions_list))
hr_test += HR(test_set, actions_list)
ndcg_test += ndcg_metric({each_user: actions_list}, {each_user: test_set})
for i in actions_list:
if i in pop_gini.keys():
pop_gini[i] += 1
if user_num != 0:
print("Precision: " + str(precision_test) + " / " + str(user_num) + " = " + str(precision_test / user_num))
print("HR: " + str(hr_test) + " / " + str(user_num) + " = " + str(hr_test / user_num))
print("NDCG: " + str(ndcg_test) + " / " + str(user_num) + " = " + str(ndcg_test / user_num))
for k in pop_gini.copy():
if pop_gini[k] == 0:
del pop_gini[k] | print("Gini: " + str(Gini(pop_gini))) | 4 | 2023-11-02 07:35:57+00:00 | 4k |
fw-ai/fireworks_poe_bot | fireworks_poe_bot/fw_poe_qr_bot.py | [
{
"identifier": "PoeBot",
"path": "fireworks_poe_bot/fastapi_poe/base.py",
"snippet": "class PoeBot:\n # Override these for your bot\n\n async def get_response(\n self, query: QueryRequest\n ) -> AsyncIterable[Union[PartialResponse, ServerSentEvent]]:\n \"\"\"Override this to return a response to user queries.\"\"\"\n yield self.text_event(\"hello\")\n\n async def get_settings(self, setting: SettingsRequest) -> SettingsResponse:\n \"\"\"Override this to return non-standard settings.\"\"\"\n return SettingsResponse()\n\n async def on_feedback(self, feedback_request: ReportFeedbackRequest) -> None:\n \"\"\"Override this to record feedback from the user.\"\"\"\n pass\n\n async def on_error(self, error_request: ReportErrorRequest) -> None:\n \"\"\"Override this to record errors from the Poe server.\"\"\"\n logger.error(f\"Error from Poe server: {error_request}\")\n\n # Helpers for generating responses\n @staticmethod\n def text_event(text: str) -> ServerSentEvent:\n return ServerSentEvent(data=json.dumps({\"text\": text}), event=\"text\")\n\n @staticmethod\n def replace_response_event(text: str) -> ServerSentEvent:\n return ServerSentEvent(\n data=json.dumps({\"text\": text}), event=\"replace_response\"\n )\n\n @staticmethod\n def done_event() -> ServerSentEvent:\n return ServerSentEvent(data=\"{}\", event=\"done\")\n\n @staticmethod\n def suggested_reply_event(text: str) -> ServerSentEvent:\n return ServerSentEvent(data=json.dumps({\"text\": text}), event=\"suggested_reply\")\n\n @staticmethod\n def meta_event(\n *,\n content_type: ContentType = \"text/markdown\",\n refetch_settings: bool = False,\n linkify: bool = True,\n suggested_replies: bool = True,\n ) -> ServerSentEvent:\n return ServerSentEvent(\n data=json.dumps(\n {\n \"content_type\": content_type,\n \"refetch_settings\": refetch_settings,\n \"linkify\": linkify,\n \"suggested_replies\": suggested_replies,\n }\n ),\n event=\"meta\",\n )\n\n @staticmethod\n def error_event(\n text: Optional[str] = None, *, allow_retry: bool = True\n ) -> ServerSentEvent:\n data: Dict[str, Union[bool, str]] = {\"allow_retry\": allow_retry}\n if text is not None:\n data[\"text\"] = text\n return ServerSentEvent(data=json.dumps(data), event=\"error\")\n\n # Internal handlers\n\n async def handle_report_feedback(\n self, feedback_request: ReportFeedbackRequest\n ) -> JSONResponse:\n await self.on_feedback(feedback_request)\n return JSONResponse({})\n\n async def handle_report_error(\n self, error_request: ReportErrorRequest\n ) -> JSONResponse:\n await self.on_error(error_request)\n return JSONResponse({})\n\n async def handle_settings(self, settings_request: SettingsRequest) -> JSONResponse:\n settings = await self.get_settings(settings_request)\n return JSONResponse(settings.dict())\n\n async def handle_query(self, query: QueryRequest) -> AsyncIterable[ServerSentEvent]:\n try:\n async for event in self.get_response(query):\n if isinstance(event, ServerSentEvent):\n yield event\n elif isinstance(event, ErrorResponse):\n yield self.error_event(event.text, allow_retry=event.allow_retry)\n elif isinstance(event, MetaResponse):\n yield self.meta_event(\n content_type=event.content_type,\n refetch_settings=event.refetch_settings,\n linkify=event.linkify,\n suggested_replies=event.suggested_replies,\n )\n elif event.is_suggested_reply:\n yield self.suggested_reply_event(event.text)\n elif event.is_replace_response:\n yield self.replace_response_event(event.text)\n else:\n yield self.text_event(event.text)\n except Exception as e:\n logger.exception(\"Error responding to query\")\n yield self.error_event(repr(e), 
allow_retry=False)\n yield self.done_event()"
},
{
"identifier": "PartialResponse",
"path": "fireworks_poe_bot/fastapi_poe/types.py",
"snippet": "class PartialResponse(BaseModel):\n \"\"\"Representation of a (possibly partial) response from a bot.\"\"\"\n\n text: str\n \"\"\"Partial response text.\n\n If the final bot response is \"ABC\", you may see a sequence\n of PartialResponse objects like PartialResponse(text=\"A\"),\n PartialResponse(text=\"B\"), PartialResponse(text=\"C\").\n\n \"\"\"\n\n raw_response: object = None\n \"\"\"For debugging, the raw response from the bot.\"\"\"\n\n full_prompt: Optional[str] = None\n \"\"\"For debugging, contains the full prompt as sent to the bot.\"\"\"\n\n request_id: Optional[str] = None\n \"\"\"May be set to an internal identifier for the request.\"\"\"\n\n is_suggested_reply: bool = False\n \"\"\"If true, this is a suggested reply.\"\"\"\n\n is_replace_response: bool = False\n \"\"\"If true, this text should completely replace the previous bot text.\"\"\""
},
{
"identifier": "QueryRequest",
"path": "fireworks_poe_bot/fastapi_poe/types.py",
"snippet": "class QueryRequest(BaseRequest):\n \"\"\"Request parameters for a query request.\"\"\"\n\n query: List[ProtocolMessage]\n user_id: Identifier\n conversation_id: Identifier\n message_id: Identifier\n metadata: Identifier = \"\"\n api_key: str = \"<missing>\"\n access_key: str = \"<missing>\"\n temperature: float = 0.7\n skip_system_prompt: bool = False\n logit_bias: Dict[str, float] = {}\n stop_sequences: List[str] = []"
},
{
"identifier": "ReportErrorRequest",
"path": "fireworks_poe_bot/fastapi_poe/types.py",
"snippet": "class ReportErrorRequest(BaseRequest):\n \"\"\"Request parameters for a report_error request.\"\"\"\n\n message: str\n metadata: Dict[str, Any]"
},
{
"identifier": "ReportFeedbackRequest",
"path": "fireworks_poe_bot/fastapi_poe/types.py",
"snippet": "class ReportFeedbackRequest(BaseRequest):\n \"\"\"Request parameters for a report_feedback request.\"\"\"\n\n message_id: Identifier\n user_id: Identifier\n conversation_id: Identifier\n feedback_type: FeedbackType"
},
{
"identifier": "SettingsRequest",
"path": "fireworks_poe_bot/fastapi_poe/types.py",
"snippet": "class SettingsRequest(BaseRequest):\n \"\"\"Request parameters for a settings request.\"\"\""
},
{
"identifier": "SettingsResponse",
"path": "fireworks_poe_bot/fastapi_poe/types.py",
"snippet": "class SettingsResponse(BaseModel):\n context_clear_window_secs: Optional[int] = None # deprecated\n allow_user_context_clear: bool = True # deprecated\n server_bot_dependencies: Dict[str, int] = Field(default_factory=dict)\n allow_attachments: bool = False\n introduction_message: str = \"\""
},
{
"identifier": "ErrorResponse",
"path": "fireworks_poe_bot/fastapi_poe/types.py",
"snippet": "class ErrorResponse(PartialResponse):\n \"\"\"Communicate errors from server bots.\"\"\"\n\n allow_retry: bool = False"
},
{
"identifier": "log_error",
"path": "fireworks_poe_bot/plugin.py",
"snippet": "@abstractmethod\ndef log_error(self, payload: Dict[str, Any]):\n ..."
},
{
"identifier": "log_info",
"path": "fireworks_poe_bot/plugin.py",
"snippet": "@abstractmethod\ndef log_info(self, payload: Dict[str, Any]):\n ..."
},
{
"identifier": "log_warn",
"path": "fireworks_poe_bot/plugin.py",
"snippet": "@abstractmethod\ndef log_warn(self, payload: Dict[str, Any]):\n ..."
},
{
"identifier": "register_bot_plugin",
"path": "fireworks_poe_bot/plugin.py",
"snippet": "def register_bot_plugin(config_key: str, BotConfigClass: type = ModelConfig):\n def decorator(BotPluginClass: type):\n BOT_PLUGINS.append(_BotPlugin(\n BotPluginClass=BotPluginClass,\n BotConfigClass=BotConfigClass,\n config_key=config_key,\n ))\n\n return decorator"
},
{
"identifier": "ModelConfig",
"path": "fireworks_poe_bot/config.py",
"snippet": "class ModelConfig(BaseModel):\n model: str\n api_key: str\n\n SERVER_endpoint_account_override: Optional[str] = None\n SERVER_endpoint_model_override: Optional[str] = None\n\n @property\n def model_fqn(self):\n if (\n self.SERVER_endpoint_account_override is not None\n or self.SERVER_endpoint_model_override is not None\n ):\n _, account, _, model = self.model.split(\"/\")\n account = self.SERVER_endpoint_account_override or account\n model = self.SERVER_endpoint_model_override or model\n return f\"accounts/{account}/models/{model}\"\n else:\n return self.model"
}
] | import base64
import copy
import io
import fireworks.client
import time
import uuid
import requests
import qrcode
import traceback
from typing import AsyncIterable, Dict, List, Optional, Union
from .fastapi_poe import PoeBot
from sse_starlette.sse import ServerSentEvent
from .fastapi_poe.types import (
PartialResponse,
QueryRequest,
ReportErrorRequest,
ReportFeedbackRequest,
SettingsRequest,
SettingsResponse,
ErrorResponse,
)
from fireworks.client.api import ChatMessage
from fireworks.client.error import InvalidRequestError
from fireworks.client.image import ImageInference, Answer
from fireworks_poe_bot.plugin import log_error, log_info, log_warn, register_bot_plugin
from fireworks_poe_bot.config import ModelConfig
from itertools import groupby
from PIL import Image
from google.cloud import storage | 3,103 | def parse_input(input_string, default_qr_strength, default_prompt_strength):
# Parse initial prompt
prompt_end_index = input_string.find('--')
if prompt_end_index == -1:
prompt_end_index = len(input_string)
prompt = input_string[:prompt_end_index].strip() if prompt_end_index != -1 else input_string.strip()
input_string = input_string[prompt_end_index:].strip()
qr_prompt = None
qr_strength = default_qr_strength
prompt_strength = default_prompt_strength
model = "sdxl"
while len(input_string) > 0:
next_flag_idx = input_string.find('--', 2)
if next_flag_idx == -1:
next_flag_idx = len(input_string)
# Parse the flag and its arguments
if input_string.startswith('--qr-strength'):
qr_strength = float(input_string[len("--qr-strength"):next_flag_idx].strip())
input_string = input_string[next_flag_idx:].strip()
elif input_string.startswith('--qr'):
qr_prompt = input_string[len("--qr"):next_flag_idx].strip()
input_string = input_string[next_flag_idx:].strip()
elif input_string.startswith('--prompt-strength'):
prompt_strength = int(input_string[len("--prompt-strength"):next_flag_idx].strip())
input_string = input_string[next_flag_idx:].strip()
elif input_string.startswith('--model'):
model = input_string[len("--model"):next_flag_idx].strip()
input_string = input_string[next_flag_idx:].strip()
else:
raise ValueError(f'Unknown flag: {input_string[:next_flag_idx]}')
if qr_prompt is None:
raise ValueError('Please specify a QR prompt with a --qr flag.')
return prompt, qr_prompt, qr_strength, prompt_strength, model
def gen_qr_code(input_text: str) -> Image:
# Generate QR Code
qr = qrcode.QRCode(
version=1,
error_correction=qrcode.constants.ERROR_CORRECT_H,
box_size=16,
border=4,
)
qr.add_data(input_text)
qr.make(fit=True)
# Create QR Code image
img = qr.make_image(fill_color="black", back_color="white")
# Padding the image to be 768x768
width, height = img.size
new_width = new_height = 768
# Create a new image with white background
new_img = Image.new("RGB", (new_width, new_height), "white")
# Paste the original image onto the new image, centered
new_img.paste(img, ((new_width - width) // 2, (new_height - height) // 2))
return new_img
class QRCodeConfig(ModelConfig):
gcs_bucket_name: str
conditioning_scale: Optional[float] = None
default_cfg_scale: Optional[float] = None
@register_bot_plugin("qr_models", QRCodeConfig)
class FireworksPoeQRBot(PoeBot):
def __init__(
self,
model: str,
api_key: str,
environment: str,
deployment: str,
server_version: str,
gcs_bucket_name: str,
conditioning_scale: float,
default_cfg_scale: float,
):
super().__init__()
self.model = model
self.api_key = api_key
self.environment = environment
self.deployment = deployment
self.server_version = server_version
self.default_cfg_scale = default_cfg_scale if default_cfg_scale is not None else 8
model_atoms = model.split("/")
if len(model_atoms) != 4:
raise ValueError(
f"Expected model name to be in the form accounts/{{modelname}}/models/{{model}}, but got {model}"
)
if model_atoms[0] != "accounts" or model_atoms[2] != "models":
raise ValueError(
f"Expected model name to be in the form accounts/{{modelname}}/models/{{model}}, but got {model}"
)
self.account = model_atoms[1]
self.model = model_atoms[3]
self.client = ImageInference(account=self.account, model=self.model)
self.gcs_bucket_name = gcs_bucket_name
self.conditioning_scale = conditioning_scale
def _log_warn(self, payload: Dict):
payload = copy.copy(payload)
payload.update(
{
"severity": "WARNING",
"environment": self.environment,
"deployment": self.deployment,
"model": self.model,
"server_version": self.server_version,
}
)
|
def parse_input(input_string, default_qr_strength, default_prompt_strength):
# Parse initial prompt
prompt_end_index = input_string.find('--')
if prompt_end_index == -1:
prompt_end_index = len(input_string)
prompt = input_string[:prompt_end_index].strip() if prompt_end_index != -1 else input_string.strip()
input_string = input_string[prompt_end_index:].strip()
qr_prompt = None
qr_strength = default_qr_strength
prompt_strength = default_prompt_strength
model = "sdxl"
while len(input_string) > 0:
next_flag_idx = input_string.find('--', 2)
if next_flag_idx == -1:
next_flag_idx = len(input_string)
# Parse the flag and its arguments
if input_string.startswith('--qr-strength'):
qr_strength = float(input_string[len("--qr-strength"):next_flag_idx].strip())
input_string = input_string[next_flag_idx:].strip()
elif input_string.startswith('--qr'):
qr_prompt = input_string[len("--qr"):next_flag_idx].strip()
input_string = input_string[next_flag_idx:].strip()
elif input_string.startswith('--prompt-strength'):
prompt_strength = int(input_string[len("--prompt-strength"):next_flag_idx].strip())
input_string = input_string[next_flag_idx:].strip()
elif input_string.startswith('--model'):
model = input_string[len("--model"):next_flag_idx].strip()
input_string = input_string[next_flag_idx:].strip()
else:
raise ValueError(f'Unknown flag: {input_string[:next_flag_idx]}')
if qr_prompt is None:
raise ValueError('Please specify a QR prompt with a --qr flag.')
return prompt, qr_prompt, qr_strength, prompt_strength, model
def gen_qr_code(input_text: str) -> Image:
# Generate QR Code
qr = qrcode.QRCode(
version=1,
error_correction=qrcode.constants.ERROR_CORRECT_H,
box_size=16,
border=4,
)
qr.add_data(input_text)
qr.make(fit=True)
# Create QR Code image
img = qr.make_image(fill_color="black", back_color="white")
# Padding the image to be 768x768
width, height = img.size
new_width = new_height = 768
# Create a new image with white background
new_img = Image.new("RGB", (new_width, new_height), "white")
# Paste the original image onto the new image, centered
new_img.paste(img, ((new_width - width) // 2, (new_height - height) // 2))
return new_img
class QRCodeConfig(ModelConfig):
gcs_bucket_name: str
conditioning_scale: Optional[float] = None
default_cfg_scale: Optional[float] = None
@register_bot_plugin("qr_models", QRCodeConfig)
class FireworksPoeQRBot(PoeBot):
def __init__(
self,
model: str,
api_key: str,
environment: str,
deployment: str,
server_version: str,
gcs_bucket_name: str,
conditioning_scale: float,
default_cfg_scale: float,
):
super().__init__()
self.model = model
self.api_key = api_key
self.environment = environment
self.deployment = deployment
self.server_version = server_version
self.default_cfg_scale = default_cfg_scale if default_cfg_scale is not None else 8
model_atoms = model.split("/")
if len(model_atoms) != 4:
raise ValueError(
f"Expected model name to be in the form accounts/{{modelname}}/models/{{model}}, but got {model}"
)
if model_atoms[0] != "accounts" or model_atoms[2] != "models":
raise ValueError(
f"Expected model name to be in the form accounts/{{modelname}}/models/{{model}}, but got {model}"
)
self.account = model_atoms[1]
self.model = model_atoms[3]
self.client = ImageInference(account=self.account, model=self.model)
self.gcs_bucket_name = gcs_bucket_name
self.conditioning_scale = conditioning_scale
def _log_warn(self, payload: Dict):
payload = copy.copy(payload)
payload.update(
{
"severity": "WARNING",
"environment": self.environment,
"deployment": self.deployment,
"model": self.model,
"server_version": self.server_version,
}
) | log_warn(payload) | 10 | 2023-11-03 23:24:23+00:00 | 4k |
In-Network-Machine-Learning/QCMP | receive_queues.py | [
{
"identifier": "path_stats",
"path": "q_table.py",
"snippet": "class path_stats():\n def __init__(self, path_queues, path_weights=0):\n self.path_queues = path_queues\n self.path_weights = path_weights\n self.action = 2\n self.reward = 0\n\n def weighted_average(self):\n queue_difference = abs(self.path_queues[0]-self.path_queues[1])\n weight_avg_queue = sum([self.path_queues[i] * self.path_weights[i] for i in range(len(self.path_weights))]) / sum(self.path_weights)\n return (-queue_difference + 50) + weight_avg_queue\n\n def get_next_action(self, table, epsilon):\n if np.random.random() < epsilon:\n self.action = np.random.choice(np.arange(3))\n else:\n # print(q_table[:, math.ceil(paths.path_queues[0] / 10), math.ceil(paths.path_queues[1] / 10)])\n self.action = np.argmax(table.q_table[:, math.ceil(min(10, self.path_queues[0] / 10)), math.ceil(min(10, self.path_queues[1] / 10))])\n\n def get_new_weights(self, old_paths, action_weight):\n if self.action == 2:\n self.path_weights = old_paths.path_weights\n elif self.action == 0:\n weights = [old_paths.path_weights[0] + action_weight, old_paths.path_weights[1] - action_weight]\n weights = [max(0, min(num, 100)) for num in weights]\n self.path_weights = weights\n elif self.action == 1:\n weights = [old_paths.path_weights[0] - action_weight, old_paths.path_weights[1] + action_weight]\n weights = [max(0, min(num, 100)) for num in weights]\n self.path_weights = weights\n\n def get_reward(self, old_paths):\n # Calculate reward\n old_average = old_paths.weighted_average()\n new_average = self.weighted_average()\n if new_average < old_average - 0.5:\n self.reward = 1\n elif new_average > old_average + 0.5:\n self.reward = -1\n else:\n self.reward = 0\n # print(old_average, new_average, new_paths.reward)\n\n def change_path_weights(self, old_paths, p4info_helper, ingress_sw, nhop_dmacs, nhop_ipv4s, ports):\n if self.path_weights[0] > old_paths.path_weights[0]:\n for i in range(old_paths.path_weights[0], self.path_weights[0]):\n update_path_weights(p4info_helper, ingress_sw=ingress_sw, value=i,\n nhop_dmac=nhop_dmacs[0], nhop_ipv4=nhop_ipv4s[0], port=ports[0])\n elif self.path_weights[0] < old_paths.path_weights[0]:\n for i in range(self.path_weights[0], old_paths.path_weights[0]):\n update_path_weights(p4info_helper, ingress_sw=ingress_sw, value=i,\n nhop_dmac=nhop_dmacs[1], nhop_ipv4=nhop_ipv4s[1], port=ports[1])"
},
{
"identifier": "q_table",
"path": "q_table.py",
"snippet": "class q_table():\n def __init__(self):\n self.q_table = self.init_q_table()\n self.parameters = {'LEARNING_RATE': 0.2,\n 'DISCOUNT': 0.1,\n 'epsilon': 0.4,\n 'action_weight': 5,\n 'pkt_counter': 0}\n\n def init_q_table(self):\n actions = ('updown', 'downup', 'no_change')\n q_table = np.random.rand(len(actions), 11, 11) * 0.1 - 0.05\n q_table = np.round(q_table, decimals=3)\n return q_table\n\n def update_q_table(self, LEARNING_RATE, DISCOUNT, old_paths, new_paths):\n # Calculate new Q-value\n indices = [old_paths.action, math.ceil(min(10, old_paths.path_queues[0] / 10)), math.ceil(min(10, old_paths.path_queues[1] / 10))]\n max_future_q = np.argmax(self.q_table[:, indices[1], indices[2]])\n current_q = self.q_table[indices[0], indices[1], indices[2]]\n new_q = (1 - LEARNING_RATE)* current_q + LEARNING_RATE * (old_paths.reward + DISCOUNT * max_future_q)\n new_q = np.round(max(-1, min(new_q, 1)), decimals=3)\n self.q_table[indices[0], indices[1], indices[2]] = new_q\n # print(current_q, new_q)\n\n def update_parameters(self):\n self.parameters['pkt_counter'] += 1\n if self.parameters['pkt_counter'] % 80 == 0:\n if self.parameters['epsilon'] > 0.1:\n self.parameters['epsilon'] = np.round(self.parameters['epsilon'] - 0.1, decimals=1) # [0.4, 0.3, 0.2, 0.1]\n if self.parameters['LEARNING_RATE'] > 0.05:\n self.parameters['LEARNING_RATE'] *= 0.85\n if self.parameters['action_weight'] > 1:\n self.parameters['action_weight'] = math.ceil(self.parameters['action_weight'] * 0.5) # [5, 3, 2, 1]\n print(self.parameters)\n\n def reset_parameters(self, path, reset_params):\n reset_params.append(path.path_queues)\n if len(reset_params) == 10:\n if (all(lst[0] > 98 for lst in reset_params) and all(lst[1] < 2 for lst in reset_params)) or (all(lst[1] > 98 for lst in reset_params) and all(lst[0] < 2 for lst in reset_params)):\n self.parameters['LEARNING_RATE'] = 0.1\n self.parameters['epsilon'] = 0.2\n self.parameters['action_weight'] = 5\n self.parameters['pkt_counter'] = 0\n print('PARAMETERS HAVE BEEN RESET!!!')\n reset_params.clear()\n if len(reset_params) == 10:\n reset_params.pop(0)"
}
] | import os
import sys
import grpc
import math
import numpy as np
import p4runtime_lib.bmv2
import p4runtime_lib.helper
from scapy.all import *
from scapy.layers.inet import _IPOption_HDR
from q_table import (path_stats,
q_table) | 3,101 |
class IPOption_MRI(IPOption):
name = "MRI"
option = 31
fields_desc = [ _IPOption_HDR,
FieldLenField("length", None, fmt="B",
length_of="swtraces",
adjust=lambda pkt,l:l*2+4),
ShortField("count", 0),
PacketListField("swtraces",
[],
SwitchTrace,
count_from=lambda pkt:(pkt.count*1)) ]
def runthat(switch_q_table, switch, mri, path_dicts, counter, index1, index2, index3, diff_switches, nhop_dmacs, nhop_ipv4s, ports, reset_params):
# index1 : index for where switch queue data is stored in path_dicts (list of dicts)
# index2 : which switch trace contains the queue length
# index3 : swid for path defining switch
switch_q_table.update_parameters()
queue_length = mri.swtraces[index2].qdepth
# print(mri.swtraces[i].swid, mri.swtraces[i].qdepth)
if mri.swtraces[index3].swid == diff_switches[0]:
path_dicts[index1]['path1'] = int(queue_length/2)
counter[index1][0] += 1
elif mri.swtraces[index3].swid == diff_switches[1]:
path_dicts[index1]['path2'] = int(queue_length/2)
counter[index1][1] += 1
if 3 in counter[index1]:
zero_indices = [i for i, x in enumerate(counter[index1]) if x == 0]
for index in zero_indices:
path_dicts[index1]["path{0}".format(index + 1)] = 100 # max queue length
if len(path_dicts[index1]) == 2:
global old_paths
# print(path_dict)
new_paths = path_stats([path_dicts[index1]['path1'], path_dicts[index1]['path2']])
switch_q_table.update_q_table(switch_q_table.parameters['LEARNING_RATE'], switch_q_table.parameters['DISCOUNT'], old_paths[index1], new_paths)
# print(q_table)
new_paths.get_next_action(switch_q_table, switch_q_table.parameters['epsilon'])
new_paths.get_new_weights(old_paths[index1], switch_q_table.parameters['action_weight'])
new_paths.get_reward(old_paths[index1])
print('s{0}'.format(index1+1), new_paths.path_weights, new_paths.action, new_paths.path_queues[::-1])
p4info_file_path = os.path.join(os.getcwd(), 'build/load_balance_advanced.p4.p4info.txt')
p4info_helper = p4runtime_lib.helper.P4InfoHelper(p4info_file_path)
switch.MasterArbitrationUpdate()
new_paths.change_path_weights(old_paths[index1], p4info_helper, switch, nhop_dmacs, nhop_ipv4s, ports)
switch.shutdown()
switch_q_table.reset_parameters(new_paths, reset_params[index1])
old_paths[index1] = new_paths
path_dicts[index1].clear()
for i in range(len(counter[index1])):
counter[index1][i] = 0
def handle_pkt(pkt, s1_q_table, s2_q_table, s3_q_table, path_dicts, counter, reset_params):
# pkt.show2()
if pkt[IP]:
mri=pkt[IP][IPOption_MRI]
path_len = len(mri.swtraces)
if path_len == 3:
s1 = p4runtime_lib.bmv2.Bmv2SwitchConnection(
name='s1',
address='127.0.0.1:50051',
device_id=0)
nhop_dmacs = ["00:00:00:00:01:04", "00:00:00:00:01:05"]
nhop_ipv4s = ["10.0.2.0", "10.0.3.0"]
ports = [4, 5]
diff_switches = [2, 3]
runthat(s1_q_table, s1, mri, path_dicts, counter, 0, 2, 1, diff_switches, nhop_dmacs, nhop_ipv4s, ports, reset_params)
else:
if mri.swtraces[3].swid == 2:
s2 = p4runtime_lib.bmv2.Bmv2SwitchConnection(
name='s2',
address='127.0.0.1:50052',
device_id=1)
nhop_dmacs = ["00:00:00:00:02:03", "00:00:00:00:02:04"]
nhop_ipv4s = ["10.0.4.0", "10.0.5.0"]
ports = [3, 4]
diff_switches = [4, 5]
runthat(s2_q_table, s2, mri, path_dicts, counter, 1, 3, 2, diff_switches, nhop_dmacs, nhop_ipv4s, ports, reset_params)
elif mri.swtraces[3].swid == 3:
s3 = p4runtime_lib.bmv2.Bmv2SwitchConnection(
name='s3',
address='127.0.0.1:50053',
device_id=2)
nhop_dmacs = ["00:00:00:00:03:03", "00:00:00:00:03:04"]
nhop_ipv4s = ["10.0.4.0", "10.0.5.0"]
ports = [3, 4]
diff_switches = [4, 5]
runthat(s3_q_table, s3, mri, path_dicts, counter, 2, 3, 2, diff_switches, nhop_dmacs, nhop_ipv4s, ports, reset_params)
else:
print("cannot find IP header in the packet")
sys.stdout.flush()
def main():
| # This file is part of the Planter extend project: QCMP.
# This program is a free software tool, which does ensemble in-network reinforcement learning for load balancing.
# licensed under Apache-2.0
#
# Utility: This file is used to receive telemetry traffic and update q-table
#
# Copyright (c) 2022-2023 Benjamin Rienecker Modified by Changgang Zheng
# Copyright (c) Computing Infrastructure Group, Department of Engineering Science, University of Oxford
#!/usr/bin/env python3
sys.path.append(
os.path.join(os.path.dirname(os.path.abspath(__file__)),
'../utils/'))
old_paths = [path_stats([0, 0], [50, 50]), path_stats([0, 0], [50, 50]), path_stats([0, 0], [50, 50])]
class SwitchTrace(Packet):
fields_desc = [ IntField("swid", 0),
IntField("qdepth", 0)]
def extract_padding(self, p):
return "", p
class IPOption_MRI(IPOption):
name = "MRI"
option = 31
fields_desc = [ _IPOption_HDR,
FieldLenField("length", None, fmt="B",
length_of="swtraces",
adjust=lambda pkt,l:l*2+4),
ShortField("count", 0),
PacketListField("swtraces",
[],
SwitchTrace,
count_from=lambda pkt:(pkt.count*1)) ]
def runthat(switch_q_table, switch, mri, path_dicts, counter, index1, index2, index3, diff_switches, nhop_dmacs, nhop_ipv4s, ports, reset_params):
# index1 : index for where switch queue data is stored in path_dicts (list of dicts)
# index2 : which switch trace contains the queue length
# index3 : swid for path defining switch
switch_q_table.update_parameters()
queue_length = mri.swtraces[index2].qdepth
# print(mri.swtraces[i].swid, mri.swtraces[i].qdepth)
if mri.swtraces[index3].swid == diff_switches[0]:
path_dicts[index1]['path1'] = int(queue_length/2)
counter[index1][0] += 1
elif mri.swtraces[index3].swid == diff_switches[1]:
path_dicts[index1]['path2'] = int(queue_length/2)
counter[index1][1] += 1
if 3 in counter[index1]:
zero_indices = [i for i, x in enumerate(counter[index1]) if x == 0]
for index in zero_indices:
path_dicts[index1]["path{0}".format(index + 1)] = 100 # max queue length
if len(path_dicts[index1]) == 2:
global old_paths
# print(path_dict)
new_paths = path_stats([path_dicts[index1]['path1'], path_dicts[index1]['path2']])
switch_q_table.update_q_table(switch_q_table.parameters['LEARNING_RATE'], switch_q_table.parameters['DISCOUNT'], old_paths[index1], new_paths)
# print(q_table)
new_paths.get_next_action(switch_q_table, switch_q_table.parameters['epsilon'])
new_paths.get_new_weights(old_paths[index1], switch_q_table.parameters['action_weight'])
new_paths.get_reward(old_paths[index1])
print('s{0}'.format(index1+1), new_paths.path_weights, new_paths.action, new_paths.path_queues[::-1])
p4info_file_path = os.path.join(os.getcwd(), 'build/load_balance_advanced.p4.p4info.txt')
p4info_helper = p4runtime_lib.helper.P4InfoHelper(p4info_file_path)
switch.MasterArbitrationUpdate()
new_paths.change_path_weights(old_paths[index1], p4info_helper, switch, nhop_dmacs, nhop_ipv4s, ports)
switch.shutdown()
switch_q_table.reset_parameters(new_paths, reset_params[index1])
old_paths[index1] = new_paths
path_dicts[index1].clear()
for i in range(len(counter[index1])):
counter[index1][i] = 0
def handle_pkt(pkt, s1_q_table, s2_q_table, s3_q_table, path_dicts, counter, reset_params):
# pkt.show2()
if pkt[IP]:
mri=pkt[IP][IPOption_MRI]
path_len = len(mri.swtraces)
if path_len == 3:
s1 = p4runtime_lib.bmv2.Bmv2SwitchConnection(
name='s1',
address='127.0.0.1:50051',
device_id=0)
nhop_dmacs = ["00:00:00:00:01:04", "00:00:00:00:01:05"]
nhop_ipv4s = ["10.0.2.0", "10.0.3.0"]
ports = [4, 5]
diff_switches = [2, 3]
runthat(s1_q_table, s1, mri, path_dicts, counter, 0, 2, 1, diff_switches, nhop_dmacs, nhop_ipv4s, ports, reset_params)
else:
if mri.swtraces[3].swid == 2:
s2 = p4runtime_lib.bmv2.Bmv2SwitchConnection(
name='s2',
address='127.0.0.1:50052',
device_id=1)
nhop_dmacs = ["00:00:00:00:02:03", "00:00:00:00:02:04"]
nhop_ipv4s = ["10.0.4.0", "10.0.5.0"]
ports = [3, 4]
diff_switches = [4, 5]
runthat(s2_q_table, s2, mri, path_dicts, counter, 1, 3, 2, diff_switches, nhop_dmacs, nhop_ipv4s, ports, reset_params)
elif mri.swtraces[3].swid == 3:
s3 = p4runtime_lib.bmv2.Bmv2SwitchConnection(
name='s3',
address='127.0.0.1:50053',
device_id=2)
nhop_dmacs = ["00:00:00:00:03:03", "00:00:00:00:03:04"]
nhop_ipv4s = ["10.0.4.0", "10.0.5.0"]
ports = [3, 4]
diff_switches = [4, 5]
runthat(s3_q_table, s3, mri, path_dicts, counter, 2, 3, 2, diff_switches, nhop_dmacs, nhop_ipv4s, ports, reset_params)
else:
print("cannot find IP header in the packet")
sys.stdout.flush()
def main(): | s1_q_table = q_table() | 1 | 2023-11-01 09:37:28+00:00 | 4k |
Fsoft-AIC/LSDM | run/predict_contact.py | [
{
"identifier": "ProxDataset_txt",
"path": "posa/dataset.py",
"snippet": "class ProxDataset_txt(Dataset): # when jump_step=8, for a whole seq, dataset's max_frame is 165, max num_seg is 29\n def __init__(self, data_dir, fix_orientation=False, no_obj_classes=8, max_frame=220,\n ds_weights_path=\"posa/support_files/downsampled_weights.npy\", jump_step=8, step_multiplier=1, max_objs=8, pnt_size=1024, \n objs_data_dir='data/protext/objs', max_cats=13, **kwargs):\n '''\n data_dir: directory that stores processed PROXD dataset.\n fix_orientation: flag that specifies whether we always make the first pose in a motion sequence facing\n towards a canonical direction.\n no_obj_classes: number of contact object classes.\n max_frame: the maximum motion sequence length which the model accepts (after applying frame skipping).\n ds_weights_path: the saved downsampling matrix for downsampling body vertices.\n jump_step: for every jump_step frames, we only select the first frame for some sequence.\n step_multiplier: a dummy parameter used to control the number of examples seen in each epoch (You can\n ignore it if you don't know how to adjust it).\n '''\n self.data_dir = data_dir\n self.max_objs = max_objs\n self.pnt_size = pnt_size\n self.max_cats = max_cats\n \n # Setup handle case for dataset: 0 for training, 1 for testing\n is_train = self.data_dir.split('_')[1]\n self.handle = 0 if is_train == 'train' else 1\n self.objs_dir = objs_data_dir\n self.context_dir = os.path.join(data_dir, \"context\")\n self.reduced_verts_dir = os.path.join(data_dir, \"reduced_vertices\")\n self.seq_names = [f.split('.txt')[0] for f in os.listdir(self.context_dir)]\n\n # Setup reading object files and cases\n self._setup_static_objs()\n\n # Initialize for human sequences\n self.reduced_verts_dict = dict()\n self.context_dict = dict()\n\n self.total_frames = 0\n for seq_name in self.seq_names:\n self.reduced_verts_dict[seq_name] = torch.tensor(np.load(os.path.join(self.reduced_verts_dir, seq_name + \".npy\")), dtype=torch.float32)\n with open(os.path.join(self.context_dir, seq_name + \".txt\")) as f:\n text_prompt, given_objs, target_obj = f.readlines()\n text_prompt = text_prompt.strip('\\n')\n given_objs = given_objs.strip('\\n').split(' ')\n self.context_dict[seq_name] = (text_prompt, given_objs, target_obj)\n\n self.fix_orientation = fix_orientation\n self.no_obj_classes = no_obj_classes\n self.ds_weights_path = ds_weights_path\n self.ds_weights = None\n self.associated_joints = None\n if fix_orientation:\n self.ds_weights = torch.tensor(np.load(self.ds_weights_path))\n self.associated_joints = torch.argmax(self.ds_weights, dim=1)\n\n self.jump_step = jump_step\n self.step_multiplier = step_multiplier\n\n @property\n def _cat(self):\n return {\n \"chair\": 1,\n \"table\": 2,\n \"cabinet\": 3,\n \"sofa\": 4,\n \"bed\": 5,\n \"chest_of_drawers\": 6,\n \"chest\": 6,\n \"stool\": 7,\n \"tv_monitor\": 8,\n \"tv\": 8,\n \"lighting\": 9,\n \"shelving\": 10,\n \"seating\": 11,\n \"furniture\": 12,\n \"human\": 0,\n }\n\n def _setup_static_objs(self):\n self.scenes = os.listdir(self.objs_dir)\n self.objs = dict()\n self.cats = dict()\n for scene in self.scenes:\n self.objs[scene] = dict()\n self.cats[scene] = dict()\n \n objs_list = os.listdir(os.path.join(self.objs_dir, scene))\n for obj_file in objs_list:\n obj = obj_file[:-4]\n cat = obj.split('.')[0].split('_')[0]\n # Read vertices of objects\n with open(os.path.join(self.objs_dir, scene, obj_file), 'rb') as f:\n verts = np.load(f)\n self.objs[scene][obj] = verts\n self.cats[scene][obj] = self._cat[cat]\n \n def __len__(self):\n return 
len(self.seq_names)\n\n def __getitem__(self, idx):\n # seq_idx = torch.randint(len(self.seq_names), size=(1,))\n seq_idx = idx\n seq_name = self.seq_names[seq_idx]\n scene = seq_name.split('_')[0]\n all_objs = self.objs[scene]\n all_cats = self.cats[scene]\n text_prompt, given_objs, target_obj = self.context_dict[seq_name]\n human_verts = self.reduced_verts_dict[seq_name]\n\n # Initialize for objects, note that, the first object is human\n obj_verts = torch.zeros(self.max_objs+1, self.pnt_size, 3)\n obj_verts[0] = human_verts.clone().detach()\n obj_mask = torch.zeros(self.max_objs+1)\n obj_cats = torch.zeros(self.max_objs+1, self.max_cats)\n obj_cats[0][self._cat['human']] = 1\n for idx, obj in enumerate(given_objs):\n cat = obj.split('_')[0]\n obj_verts[idx+1] = torch.tensor(all_objs[obj])\n obj_mask[idx+1] = 1\n obj_cats[idx+1][self._cat[cat]] = 1\n\n # Retrieve information of target vertices\n target_verts = all_objs[target_obj]\n target_cat = target_obj.split('_')[0]\n target_num = self._cat[target_cat]\n target_cat = torch.zeros(self.max_cats)\n target_cat[target_num] = 1\n\n return obj_mask, obj_verts, obj_cats, target_verts, target_cat, text_prompt"
},
{
"identifier": "create_model_and_diffusion",
"path": "util/model_util.py",
"snippet": "def create_model_and_diffusion(datatype):\n # model = SceneDiffusionModel(**get_model_args(args, data))\n if datatype == \"proxd\":\n model = SceneDiffusionModel(**get_default_model_proxd())\n else:\n model = SceneDiffusionModel(**get_default_model_humanise())\n diffusion = create_gaussian_diffusion(get_default_diffusion())\n return model, diffusion"
}
] | import os
import numpy as np
import argparse
import torch
import posa.data_utils as du
from tqdm import tqdm
from torch.utils.data import DataLoader
from posa.dataset import ProxDataset_txt
from util.model_util import create_model_and_diffusion | 2,428 |
# Example usage
# python predict_contact.py ../data/amass --load_model ../training/contactformer/model_ckpt/best_model_recon_acc.pt --output_dir ../results/amass
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="")
parser.add_argument("data_dir", type=str,
help="path to POSA_temp dataset dir")
parser.add_argument("--load_model", type=str, default="../training/model_ckpt/epoch_0045.pt",
help="checkpoint path to load")
parser.add_argument("--encoder_mode", type=int, default=1,
help="different number represents different variants of encoder")
parser.add_argument("--decoder_mode", type=int, default=1,
help="different number represents different variants of decoder")
parser.add_argument("--n_layer", type=int, default=3, help="Number of layers in transformer")
parser.add_argument("--n_head", type=int, default=4, help="Number of heads in transformer")
parser.add_argument("--jump_step", type=int, default=8, help="Frame skip size for each input motion sequence")
parser.add_argument("--dim_ff", type=int, default=512,
help="Dimension of hidden layers in positionwise MLP in the transformer")
parser.add_argument("--f_vert", type=int, default=64, help="Dimension of the embeddings for body vertices")
parser.add_argument("--max_frame", type=int, default=256,
help="The maximum length of motion sequence (after frame skipping) which model accepts.")
parser.add_argument("--posa_path", type=str, default="../training/posa/model_ckpt/epoch_0349.pt",
help="The POSA model checkpoint that ContactFormer can pre-load")
parser.add_argument("--output_dir", type=str, default="../results/output")
parser.add_argument("--save_probability", dest='save_probability', action='store_const', const=True, default=False,
help="Save the probability of each contact labels, instead of the most possible contact label")
# Parse arguments and assign directories
args = parser.parse_args()
args_dict = vars(args)
data_dir = args_dict['data_dir']
ckpt_path = args_dict['load_model']
encoder_mode = args_dict['encoder_mode']
decoder_mode = args_dict['decoder_mode']
n_layer = args_dict['n_layer']
n_head = args_dict['n_head']
jump_step = args_dict['jump_step']
max_frame = args_dict['max_frame']
dim_ff = args_dict['dim_ff']
f_vert = args_dict['f_vert']
posa_path = args_dict['posa_path']
output_dir = args_dict['output_dir']
save_probability = args_dict['save_probability']
device = torch.device("cuda")
num_obj_classes = 8
pnt_size = 1024
# For fix_ori
fix_ori = True
ds_weights = torch.tensor(np.load("posa/support_files/downsampled_weights.npy"))
associated_joints = torch.argmax(ds_weights, dim=1)
os.makedirs(output_dir, exist_ok=True)
seq_name_list = sorted(os.listdir(os.path.join(data_dir, 'context')))
use_ddim = False # FIXME - hardcoded
clip_denoised = False # FIXME - hardcoded
# Setup names for output files
context_dir = os.path.join(data_dir, 'context')
files = os.listdir(context_dir)
    lookup_tab = dict()
    for file in files:
reduced_file = file.split('.')[0]
with open(os.path.join(context_dir, file), 'r') as f:
prompt = f.readlines()[0].strip()
lookup_tab[prompt] = reduced_file
# Load in model checkpoints and set up data stream
|
# Example usage
# python predict_contact.py ../data/amass --load_model ../training/contactformer/model_ckpt/best_model_recon_acc.pt --output_dir ../results/amass
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="")
parser.add_argument("data_dir", type=str,
help="path to POSA_temp dataset dir")
parser.add_argument("--load_model", type=str, default="../training/model_ckpt/epoch_0045.pt",
help="checkpoint path to load")
parser.add_argument("--encoder_mode", type=int, default=1,
help="different number represents different variants of encoder")
parser.add_argument("--decoder_mode", type=int, default=1,
help="different number represents different variants of decoder")
parser.add_argument("--n_layer", type=int, default=3, help="Number of layers in transformer")
parser.add_argument("--n_head", type=int, default=4, help="Number of heads in transformer")
parser.add_argument("--jump_step", type=int, default=8, help="Frame skip size for each input motion sequence")
parser.add_argument("--dim_ff", type=int, default=512,
help="Dimension of hidden layers in positionwise MLP in the transformer")
parser.add_argument("--f_vert", type=int, default=64, help="Dimension of the embeddings for body vertices")
parser.add_argument("--max_frame", type=int, default=256,
help="The maximum length of motion sequence (after frame skipping) which model accepts.")
parser.add_argument("--posa_path", type=str, default="../training/posa/model_ckpt/epoch_0349.pt",
help="The POSA model checkpoint that ContactFormer can pre-load")
parser.add_argument("--output_dir", type=str, default="../results/output")
parser.add_argument("--save_probability", dest='save_probability', action='store_const', const=True, default=False,
help="Save the probability of each contact labels, instead of the most possible contact label")
# Parse arguments and assign directories
args = parser.parse_args()
args_dict = vars(args)
data_dir = args_dict['data_dir']
ckpt_path = args_dict['load_model']
encoder_mode = args_dict['encoder_mode']
decoder_mode = args_dict['decoder_mode']
n_layer = args_dict['n_layer']
n_head = args_dict['n_head']
jump_step = args_dict['jump_step']
max_frame = args_dict['max_frame']
dim_ff = args_dict['dim_ff']
f_vert = args_dict['f_vert']
posa_path = args_dict['posa_path']
output_dir = args_dict['output_dir']
save_probability = args_dict['save_probability']
device = torch.device("cuda")
num_obj_classes = 8
pnt_size = 1024
# For fix_ori
fix_ori = True
ds_weights = torch.tensor(np.load("posa/support_files/downsampled_weights.npy"))
associated_joints = torch.argmax(ds_weights, dim=1)
os.makedirs(output_dir, exist_ok=True)
seq_name_list = sorted(os.listdir(os.path.join(data_dir, 'context')))
use_ddim = False # FIXME - hardcoded
clip_denoised = False # FIXME - hardcoded
# Setup names for output files
context_dir = os.path.join(data_dir, 'context')
files = os.listdir(context_dir)
    lookup_tab = dict()
    for file in files:
reduced_file = file.split('.')[0]
with open(os.path.join(context_dir, file), 'r') as f:
prompt = f.readlines()[0].strip()
lookup_tab[prompt] = reduced_file
# Load in model checkpoints and set up data stream | valid_dataset = ProxDataset_txt(data_dir, max_frame=max_frame, fix_orientation=fix_ori, | 0 | 2023-11-06 07:55:51+00:00 | 4k |
molML/traversing_chem_space | active_learning/data_prep.py | [
{
"identifier": "molecular_graph_featurizer",
"path": "active_learning/utils.py",
"snippet": "def molecular_graph_featurizer(smiles: str, y=None, structural_feats: bool = True, functional_feats: bool = True):\n\n y = torch.tensor([y]).to(torch.long)\n\n mol = Chem.MolFromSmiles(smiles, sanitize=True)\n Chem.AssignStereochemistry(mol, cleanIt=True, force=True)\n\n # RDKIT Atom featurization\n x = atom_featurizer(mol, structural_feats, functional_feats)\n\n # Edge featurization\n edge_indices, edge_attrs = [], []\n for bond in mol.GetBonds():\n i = bond.GetBeginAtomIdx()\n j = bond.GetEndAtomIdx()\n\n edge_indices += [[i, j], [j, i]]\n\n edge_index = torch.tensor(edge_indices)\n edge_index = edge_index.t().to(torch.long).view(2, -1)\n\n # Sort indices.\n if edge_index.numel() > 0:\n perm = (edge_index[0] * x.size(0) + edge_index[1]).argsort()\n edge_index = edge_index[:, perm]\n\n if torch.isnan(x).any():\n return smiles\n # raise ValueError(f\"Featurizing {smiles} gave nan(s)\")\n\n graph = Data(x=x, edge_index=edge_index, smiles=smiles, y=y)\n\n return graph"
},
{
"identifier": "smiles_to_ecfp",
"path": "active_learning/utils.py",
"snippet": "def smiles_to_ecfp(smiles: list[str], radius: int = 2, nbits: int = 1024, silent: bool = True, to_array: bool = True) \\\n -> np.ndarray:\n \"\"\" Get a Numpy array of ECFPs from a list of SMILES strings \"\"\"\n from rdkit.Chem.AllChem import GetMorganFingerprintAsBitVect\n from rdkit.Chem import MolFromSmiles\n from rdkit.DataStructs import ConvertToNumpyArray\n\n if type(smiles) is str:\n smiles = [smiles]\n\n fp = [GetMorganFingerprintAsBitVect(MolFromSmiles(s), radius, nBits=nbits) for s in tqdm(smiles, disable=silent)]\n\n if not to_array:\n return fp\n\n output = []\n for f in fp:\n arr = np.zeros((1,))\n ConvertToNumpyArray(f, arr)\n output.append(arr)\n\n return np.asarray(output)"
},
{
"identifier": "get_tanimoto_matrix",
"path": "active_learning/utils.py",
"snippet": "def get_tanimoto_matrix(smiles: list[str], radius: int = 2, nBits: int = 1024, verbose: bool = True,\n scaffolds: bool = False, zero_diag: bool = True, as_vector: bool = False):\n \"\"\" Calculates a matrix of Tanimoto similarity scores for a list of SMILES string\"\"\"\n from active_learning.data_prep import smi_to_scaff\n\n # Make a fingerprint database\n db_fp = {}\n for smi in smiles:\n if scaffolds:\n m = Chem.MolFromSmiles(smi_to_scaff(smi, includeChirality=False))\n else:\n m = Chem.MolFromSmiles(smi)\n fp = AllChem.GetMorganFingerprintAsBitVect(m, radius=radius, nBits=nBits)\n db_fp[smi] = fp\n\n smi_len = len(smiles)\n m = np.zeros([smi_len, smi_len], dtype=np.float16) # We use 16-bit floats to prevent giant matrices\n # Calculate upper triangle of matrix\n for i in tqdm(range(smi_len), disable=not verbose):\n for j in range(i, smi_len):\n m[i, j] = DataStructs.TanimotoSimilarity(db_fp[smiles[i]], db_fp[smiles[j]])\n # Fill in the lower triangle without having to loop (saves ~50% of time)\n m = m + m.T - np.diag(np.diag(m))\n # Fill the diagonal with 0's\n if zero_diag:\n np.fill_diagonal(m, 0)\n if as_vector:\n from scipy.spatial.distance import squareform\n m = squareform(m)\n\n return m"
},
{
"identifier": "check_featurizability",
"path": "active_learning/utils.py",
"snippet": "def check_featurizability(smiles: str):\n try:\n mol = Chem.MolFromSmiles(smiles, sanitize=True)\n Chem.AssignStereochemistry(mol, cleanIt=True, force=True)\n\n for atom in mol.GetAtoms():\n try:\n x_ = atom_props(atom)\n except:\n return False\n except:\n return False\n\n return True"
},
{
"identifier": "ROOT_DIR",
"path": "config.py",
"snippet": "ROOT_DIR = os.path.realpath(os.path.dirname(__file__))"
}
] | from active_learning.utils import molecular_graph_featurizer as smiles_to_graph
from active_learning.utils import smiles_to_ecfp, get_tanimoto_matrix, check_featurizability
from collections import OrderedDict
from rdkit.Chem.Scaffolds import MurckoScaffold
from rdkit import Chem
from tqdm import tqdm
from typing import Any
from config import ROOT_DIR
from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
import torch
import os
import sys
import h5py
import h5py
import h5py | 2,235 |
def canonicalize(smiles: str, sanitize: bool = True):
return Chem.MolToSmiles(Chem.MolFromSmiles(smiles, sanitize=sanitize))
def get_data(random_state: int = 42, dataset: str = 'ALDH1'):
# read smiles from file and canonicalize them
with open(os.path.join(ROOT_DIR, f'data/{dataset}/original/inactives.smi')) as f:
inactives = [canonicalize(smi.strip().split()[0]) for smi in f.readlines()]
with open(os.path.join(ROOT_DIR, f'data/{dataset}/original/actives.smi')) as f:
actives = [canonicalize(smi.strip().split()[0]) for smi in f.readlines()]
# remove duplicates:
inactives = list(set(inactives))
actives = list(set(actives))
# remove intersecting molecules:
intersecting_mols = np.intersect1d(inactives, actives)
inactives = [smi for smi in inactives if smi not in intersecting_mols]
actives = [smi for smi in actives if smi not in intersecting_mols]
# remove molecules that have scaffolds that cannot be kekulized or featurized
inactives_, actives_ = [], []
for smi in tqdm(actives):
try:
if Chem.MolFromSmiles(smi_to_scaff(smi, includeChirality=False)) is not None:
if check_featurizability(smi):
actives_.append(smi)
except:
pass
for smi in tqdm(inactives):
try:
if Chem.MolFromSmiles(smi_to_scaff(smi, includeChirality=False)) is not None:
if check_featurizability(smi):
inactives_.append(smi)
except:
pass
# add to df
df = pd.DataFrame({'smiles': inactives_ + actives_,
'y': [0] * len(inactives_) + [1] * len(actives_)})
# shuffle
df = df.sample(frac=1, random_state=random_state).reset_index(drop=True)
return df
def split_data(df: pd.DataFrame, random_state: int = 42, screen_size: int = 50000, test_size: int = 10000,
dataset: str = 'ALDH1') -> (pd.DataFrame, pd.DataFrame):
df_screen, df_test = train_test_split(df, stratify=df['y'].tolist(), train_size=screen_size, test_size=test_size,
random_state=random_state)
# write to csv
df_screen.to_csv(os.path.join(ROOT_DIR, f'data/{dataset}/original/screen.csv'), index=False)
df_test.to_csv(os.path.join(ROOT_DIR, f'data/{dataset}/original/test.csv'), index=False)
return df_screen, df_test
class MasterDataset:
""" Dataset that holds all data in an indexable way """
def __init__(self, name: str, df: pd.DataFrame = None, dataset: str = 'ALDH1', representation: str = 'ecfp', root: str = 'data',
overwrite: bool = False) -> None:
assert representation in ['ecfp', 'graph'], f"'representation' must be 'ecfp' or 'graph', not {representation}"
self.representation = representation
self.pth = os.path.join(ROOT_DIR, root, dataset, name)
# If not done already, process all data. Else just load it
if not os.path.exists(self.pth) or overwrite:
assert df is not None, "You need to supply a dataframe with 'smiles' and 'y' values"
os.makedirs(os.path.join(root, dataset, name), exist_ok=True)
self.process(df)
self.smiles_index, self.index_smiles, self.smiles, self.x, self.y, self.graphs = self.load()
else:
self.smiles_index, self.index_smiles, self.smiles, self.x, self.y, self.graphs = self.load()
def process(self, df: pd.DataFrame) -> None:
print('Processing data ... ', flush=True, file=sys.stderr)
index_smiles = OrderedDict({i: smi for i, smi in enumerate(df.smiles)})
smiles_index = OrderedDict({smi: i for i, smi in enumerate(df.smiles)})
smiles = np.array(df.smiles.tolist())
|
def canonicalize(smiles: str, sanitize: bool = True):
return Chem.MolToSmiles(Chem.MolFromSmiles(smiles, sanitize=sanitize))
def get_data(random_state: int = 42, dataset: str = 'ALDH1'):
# read smiles from file and canonicalize them
with open(os.path.join(ROOT_DIR, f'data/{dataset}/original/inactives.smi')) as f:
inactives = [canonicalize(smi.strip().split()[0]) for smi in f.readlines()]
with open(os.path.join(ROOT_DIR, f'data/{dataset}/original/actives.smi')) as f:
actives = [canonicalize(smi.strip().split()[0]) for smi in f.readlines()]
# remove duplicates:
inactives = list(set(inactives))
actives = list(set(actives))
# remove intersecting molecules:
intersecting_mols = np.intersect1d(inactives, actives)
inactives = [smi for smi in inactives if smi not in intersecting_mols]
actives = [smi for smi in actives if smi not in intersecting_mols]
# remove molecules that have scaffolds that cannot be kekulized or featurized
inactives_, actives_ = [], []
for smi in tqdm(actives):
try:
if Chem.MolFromSmiles(smi_to_scaff(smi, includeChirality=False)) is not None:
if check_featurizability(smi):
actives_.append(smi)
except:
pass
for smi in tqdm(inactives):
try:
if Chem.MolFromSmiles(smi_to_scaff(smi, includeChirality=False)) is not None:
if check_featurizability(smi):
inactives_.append(smi)
except:
pass
# add to df
df = pd.DataFrame({'smiles': inactives_ + actives_,
'y': [0] * len(inactives_) + [1] * len(actives_)})
# shuffle
df = df.sample(frac=1, random_state=random_state).reset_index(drop=True)
return df
def split_data(df: pd.DataFrame, random_state: int = 42, screen_size: int = 50000, test_size: int = 10000,
dataset: str = 'ALDH1') -> (pd.DataFrame, pd.DataFrame):
df_screen, df_test = train_test_split(df, stratify=df['y'].tolist(), train_size=screen_size, test_size=test_size,
random_state=random_state)
# write to csv
df_screen.to_csv(os.path.join(ROOT_DIR, f'data/{dataset}/original/screen.csv'), index=False)
df_test.to_csv(os.path.join(ROOT_DIR, f'data/{dataset}/original/test.csv'), index=False)
return df_screen, df_test
class MasterDataset:
""" Dataset that holds all data in an indexable way """
def __init__(self, name: str, df: pd.DataFrame = None, dataset: str = 'ALDH1', representation: str = 'ecfp', root: str = 'data',
overwrite: bool = False) -> None:
assert representation in ['ecfp', 'graph'], f"'representation' must be 'ecfp' or 'graph', not {representation}"
self.representation = representation
self.pth = os.path.join(ROOT_DIR, root, dataset, name)
# If not done already, process all data. Else just load it
if not os.path.exists(self.pth) or overwrite:
assert df is not None, "You need to supply a dataframe with 'smiles' and 'y' values"
os.makedirs(os.path.join(root, dataset, name), exist_ok=True)
self.process(df)
self.smiles_index, self.index_smiles, self.smiles, self.x, self.y, self.graphs = self.load()
else:
self.smiles_index, self.index_smiles, self.smiles, self.x, self.y, self.graphs = self.load()
def process(self, df: pd.DataFrame) -> None:
print('Processing data ... ', flush=True, file=sys.stderr)
index_smiles = OrderedDict({i: smi for i, smi in enumerate(df.smiles)})
smiles_index = OrderedDict({smi: i for i, smi in enumerate(df.smiles)})
smiles = np.array(df.smiles.tolist()) | x = smiles_to_ecfp(smiles, silent=False) | 1 | 2023-11-10 08:53:40+00:00 | 4k |
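A minimal sketch of the cleanup steps in get_data above (canonicalization, de-duplication, and removal of molecules that occur in both classes), run on a hypothetical three-molecule list; it assumes only that rdkit and numpy are installed:

from rdkit import Chem
import numpy as np

def canonicalize(smiles: str, sanitize: bool = True):
    return Chem.MolToSmiles(Chem.MolFromSmiles(smiles, sanitize=sanitize))

# "CCO" and "OCC" are the same molecule; benzene appears in both classes
inactives = list(set(canonicalize(s) for s in ["CCO", "OCC", "c1ccccc1"]))
actives = list(set(canonicalize(s) for s in ["c1ccccc1", "CC(=O)O"]))

intersecting = np.intersect1d(inactives, actives)             # ['c1ccccc1']
inactives = [s for s in inactives if s not in intersecting]   # ['CCO']
actives = [s for s in actives if s not in intersecting]       # ['CC(=O)O']
print(inactives, actives)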
yunik1004/SAiD | script/inference.py | [
{
"identifier": "SAID_UNet1D",
"path": "said/model/diffusion.py",
"snippet": "class SAID_UNet1D(SAID):\n \"\"\"SAiD model implemented using U-Net 1D model\"\"\"\n\n def __init__(\n self,\n audio_config: Optional[Wav2Vec2Config] = None,\n audio_processor: Optional[Wav2Vec2Processor] = None,\n noise_scheduler: Type[SchedulerMixin] = DDIMScheduler,\n in_channels: int = 32,\n feature_dim: int = -1,\n diffusion_steps: int = 1000,\n latent_scale: float = 1,\n prediction_type: str = \"epsilon\",\n ):\n \"\"\"Constructor of SAID_UNet1D\n\n Parameters\n ----------\n audio_config : Optional[Wav2Vec2Config], optional\n Wav2Vec2Config object, by default None\n audio_processor : Optional[Wav2Vec2Processor], optional\n Wav2Vec2Processor object, by default None\n noise_scheduler: Type[SchedulerMixin]\n Noise scheduler, by default DDIMScheduler\n in_channels : int\n Dimension of the input, by default 32\n feature_dim : int\n Dimension of the latent feature, by default -1\n diffusion_steps : int\n The number of diffusion steps, by default 1000\n latent_scale : float\n Scaling the latent, by default 1\n prediction_type: str\n Prediction type of the scheduler function, \"epsilon\", \"sample\", or \"v_prediction\", by default \"epsilon\"\n \"\"\"\n super().__init__(\n audio_config=audio_config,\n audio_processor=audio_processor,\n in_channels=in_channels,\n feature_dim=feature_dim,\n diffusion_steps=diffusion_steps,\n latent_scale=latent_scale,\n prediction_type=prediction_type,\n )\n\n # Denoiser\n self.denoiser = UNet1DConditionModel(\n in_channels=in_channels,\n out_channels=in_channels,\n cross_attention_dim=self.feature_dim\n if self.feature_dim > 0\n else self.audio_config.hidden_size,\n )"
},
{
"identifier": "fit_audio_unet",
"path": "said/util/audio.py",
"snippet": "def fit_audio_unet(\n waveform: torch.FloatTensor, sampling_rate: int, fps: int, divisor_unet: int\n) -> FittedWaveform:\n \"\"\"Fit the intput audio waveform into UNet1D\n\n Parameters\n ----------\n waveform : torch.FloatTensor\n (T_a), Mono waveform\n sampling_rate : int\n Sampling rate of the audio model\n fps : int\n The number of frames per second\n divisor_unet : int\n Length of the blendshape coefficients sequence should be divided by this number\n\n Returns\n -------\n FittedWaveform\n Fitted waveform with the window\n \"\"\"\n gcd = math.gcd(sampling_rate, fps)\n divisor_waveform = sampling_rate // gcd * divisor_unet\n\n waveform_len = waveform.shape[0]\n window_len = int(waveform_len / sampling_rate * fps)\n waveform_len_fit = math.ceil(waveform_len / divisor_waveform) * divisor_waveform\n\n if waveform_len_fit > waveform_len:\n tmp = torch.zeros(waveform_len_fit)\n tmp[:waveform_len] = waveform[:]\n waveform = tmp\n\n return FittedWaveform(waveform=waveform, window_size=window_len)"
},
{
"identifier": "load_audio",
"path": "said/util/audio.py",
"snippet": "def load_audio(audio_path: str, sampling_rate: int) -> torch.FloatTensor:\n \"\"\"Load the audio file\n\n Parameters\n ----------\n audio_path : str\n Path of the audio file\n sampling_rate : int\n Sampling rate of the output audio wave\n\n Returns\n -------\n torch.FloatTensor\n (T_a), Mono waveform\n \"\"\"\n waveform, sr = torchaudio.load(audio_path)\n if sr != sampling_rate:\n waveform = torchaudio.functional.resample(waveform, sr, sampling_rate)\n waveform_mono = torch.mean(waveform, dim=0)\n return waveform_mono"
},
{
"identifier": "load_blendshape_coeffs",
"path": "said/util/blendshape.py",
"snippet": "def load_blendshape_coeffs(coeffs_path: str) -> torch.FloatTensor:\n \"\"\"Load the blendshape coefficients file\n\n Parameters\n ----------\n coeffs_path : str\n Path of the blendshape coefficients file (csv format)\n\n Returns\n -------\n torch.FloatTensor\n (T_b, num_classes), Blendshape coefficients\n \"\"\"\n df = pd.read_csv(coeffs_path)\n coeffs = torch.FloatTensor(df.values)\n return coeffs"
},
{
"identifier": "save_blendshape_coeffs",
"path": "said/util/blendshape.py",
"snippet": "def save_blendshape_coeffs(\n coeffs: np.ndarray, classes: List[str], output_path: str\n) -> None:\n \"\"\"Save the blendshape coefficients into the file\n\n Parameters\n ----------\n coeffs : np.ndarray\n (T_b, num_classes), Blendshape coefficients\n classes : List[str]\n List of the class names of the coefficients\n output_path : str\n Path of the output file\n \"\"\"\n pout = pd.DataFrame(coeffs, columns=classes)\n pout.to_csv(output_path, index=False)"
},
{
"identifier": "save_blendshape_coeffs_image",
"path": "said/util/blendshape.py",
"snippet": "def save_blendshape_coeffs_image(coeffs: np.ndarray, output_path: str) -> None:\n \"\"\"Save the blendshape coefficients into the image file\n\n Parameters\n ----------\n coeffs : np.ndarray\n (T_b, num_classes), Blendshape coefficients\n output_path : str\n Path of the output file\n \"\"\"\n orig = (255 * coeffs.transpose()).round()\n img = Image.fromarray(orig).convert(\"L\")\n img.save(output_path)"
}
] | import argparse
import os
import torch
from diffusers import DDIMScheduler
from said.model.diffusion import SAID_UNet1D
from said.util.audio import fit_audio_unet, load_audio
from said.util.blendshape import (
load_blendshape_coeffs,
save_blendshape_coeffs,
save_blendshape_coeffs_image,
)
from dataset.dataset_voca import BlendVOCADataset | 2,487 | )
parser.add_argument(
"--intermediate_dir",
type=str,
default="../interm",
help="Saving directory of the intermediate outputs",
)
parser.add_argument(
"--prediction_type",
type=str,
default="epsilon",
help="Prediction type of the scheduler function, 'epsilon', 'sample', or 'v_prediction'",
)
parser.add_argument(
"--save_image",
type=bool,
default=False,
help="Save the output blendshape coefficients as an image",
)
parser.add_argument(
"--save_intermediate",
type=bool,
default=False,
help="Save the intermediate outputs",
)
parser.add_argument(
"--num_steps", type=int, default=1000, help="Number of inference steps"
)
parser.add_argument("--strength", type=float, default=1.0, help="How much to paint")
parser.add_argument(
"--guidance_scale", type=float, default=2.0, help="Guidance scale"
)
parser.add_argument(
"--guidance_rescale", type=float, default=0.0, help="Guidance scale"
)
parser.add_argument(
"--eta", type=float, default=0.0, help="Eta for DDIMScheduler, between [0, 1]"
)
parser.add_argument(
"--fps",
type=int,
default=60,
help="FPS of the blendshape coefficients sequence",
)
parser.add_argument(
"--divisor_unet",
type=int,
default=1,
help="Length of the blendshape coefficients sequence should be divided by this number",
)
parser.add_argument(
"--unet_feature_dim",
type=int,
default=-1,
help="Dimension of the latent feature of the UNet",
)
parser.add_argument(
"--device",
type=str,
default="cuda:0",
help="GPU/CPU device",
)
parser.add_argument(
"--init_sample_path",
type=str,
help="Path of the initial sample file (csv format)",
)
parser.add_argument(
"--mask_path",
type=str,
help="Path of the mask file (csv format)",
)
args = parser.parse_args()
weights_path = args.weights_path
audio_path = args.audio_path
output_path = args.output_path
output_image_path = args.output_image_path
intermediate_dir = args.intermediate_dir
prediction_type = args.prediction_type
num_steps = args.num_steps
strength = args.strength
guidance_scale = args.guidance_scale
guidance_rescale = args.guidance_rescale
eta = args.eta
fps = args.fps
divisor_unet = args.divisor_unet
unet_feature_dim = args.unet_feature_dim
device = args.device
save_image = args.save_image
save_intermediate = args.save_intermediate
show_process = True
# Load init sample
init_sample_path = args.init_sample_path
init_samples = None
if init_sample_path is not None:
init_samples = load_blendshape_coeffs(init_sample_path).unsqueeze(0).to(device)
# Load mask
mask_path = args.mask_path
mask = None
if mask_path is not None:
mask = load_blendshape_coeffs(mask_path).unsqueeze(0).to(device)
# Load model
said_model = SAID_UNet1D(
noise_scheduler=DDIMScheduler,
feature_dim=unet_feature_dim,
prediction_type=prediction_type,
)
said_model.load_state_dict(torch.load(weights_path, map_location=device))
said_model.to(device)
said_model.eval()
# Load data
waveform = load_audio(audio_path, said_model.sampling_rate)
# waveform = torch.zeros_like(waveform)
# Fit the size of waveform
| """Inference using the SAID_UNet1D model
"""
def main():
"""Main function"""
# Arguments
parser = argparse.ArgumentParser(
description="Inference the lipsync using the SAiD model"
)
parser.add_argument(
"--weights_path",
type=str,
default="../BlendVOCA/SAiD.pth",
help="Path of the weights of SAiD model",
)
parser.add_argument(
"--audio_path",
type=str,
default="../BlendVOCA/audio/FaceTalk_170731_00024_TA/sentence01.wav",
help="Path of the audio file",
)
parser.add_argument(
"--output_path",
type=str,
default="../out.csv",
help="Path of the output blendshape coefficients file (csv format)",
)
parser.add_argument(
"--output_image_path",
type=str,
default="../out.png",
help="Path of the image of the output blendshape coefficients",
)
parser.add_argument(
"--intermediate_dir",
type=str,
default="../interm",
help="Saving directory of the intermediate outputs",
)
parser.add_argument(
"--prediction_type",
type=str,
default="epsilon",
help="Prediction type of the scheduler function, 'epsilon', 'sample', or 'v_prediction'",
)
parser.add_argument(
"--save_image",
type=bool,
default=False,
help="Save the output blendshape coefficients as an image",
)
parser.add_argument(
"--save_intermediate",
type=bool,
default=False,
help="Save the intermediate outputs",
)
parser.add_argument(
"--num_steps", type=int, default=1000, help="Number of inference steps"
)
parser.add_argument("--strength", type=float, default=1.0, help="How much to paint")
parser.add_argument(
"--guidance_scale", type=float, default=2.0, help="Guidance scale"
)
parser.add_argument(
"--guidance_rescale", type=float, default=0.0, help="Guidance scale"
)
parser.add_argument(
"--eta", type=float, default=0.0, help="Eta for DDIMScheduler, between [0, 1]"
)
parser.add_argument(
"--fps",
type=int,
default=60,
help="FPS of the blendshape coefficients sequence",
)
parser.add_argument(
"--divisor_unet",
type=int,
default=1,
help="Length of the blendshape coefficients sequence should be divided by this number",
)
parser.add_argument(
"--unet_feature_dim",
type=int,
default=-1,
help="Dimension of the latent feature of the UNet",
)
parser.add_argument(
"--device",
type=str,
default="cuda:0",
help="GPU/CPU device",
)
parser.add_argument(
"--init_sample_path",
type=str,
help="Path of the initial sample file (csv format)",
)
parser.add_argument(
"--mask_path",
type=str,
help="Path of the mask file (csv format)",
)
args = parser.parse_args()
weights_path = args.weights_path
audio_path = args.audio_path
output_path = args.output_path
output_image_path = args.output_image_path
intermediate_dir = args.intermediate_dir
prediction_type = args.prediction_type
num_steps = args.num_steps
strength = args.strength
guidance_scale = args.guidance_scale
guidance_rescale = args.guidance_rescale
eta = args.eta
fps = args.fps
divisor_unet = args.divisor_unet
unet_feature_dim = args.unet_feature_dim
device = args.device
save_image = args.save_image
save_intermediate = args.save_intermediate
show_process = True
# Load init sample
init_sample_path = args.init_sample_path
init_samples = None
if init_sample_path is not None:
init_samples = load_blendshape_coeffs(init_sample_path).unsqueeze(0).to(device)
# Load mask
mask_path = args.mask_path
mask = None
if mask_path is not None:
mask = load_blendshape_coeffs(mask_path).unsqueeze(0).to(device)
# Load model
said_model = SAID_UNet1D(
noise_scheduler=DDIMScheduler,
feature_dim=unet_feature_dim,
prediction_type=prediction_type,
)
said_model.load_state_dict(torch.load(weights_path, map_location=device))
said_model.to(device)
said_model.eval()
# Load data
waveform = load_audio(audio_path, said_model.sampling_rate)
# waveform = torch.zeros_like(waveform)
# Fit the size of waveform | fit_output = fit_audio_unet(waveform, said_model.sampling_rate, fps, divisor_unet) | 1 | 2023-11-03 06:38:51+00:00 | 4k |
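A short worked example of the padding arithmetic inside fit_audio_unet (see the context snippet above), assuming a 16 kHz model sampling rate and the script defaults fps=60 and divisor_unet=1; the clip length is hypothetical:

import math

sampling_rate, fps, divisor_unet = 16000, 60, 1           # 16 kHz is an assumption here
gcd = math.gcd(sampling_rate, fps)                         # 20
divisor_waveform = sampling_rate // gcd * divisor_unet     # 800 samples
waveform_len = 35000                                       # hypothetical clip length
window_size = int(waveform_len / sampling_rate * fps)      # 131 blendshape frames
padded_len = math.ceil(waveform_len / divisor_waveform) * divisor_waveform  # 35200
print(divisor_waveform, window_size, padded_len)           # waveform is zero-padded to 35200 samples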
Harvard-Ophthalmology-AI-Lab/FairSeg | SAMed/segment_anything/utils/onnx.py | [
{
"identifier": "Sam",
"path": "SAMed/segment_anything/modeling/sam.py",
"snippet": "class Sam(nn.Module):\n mask_threshold: float = 0.0\n image_format: str = \"RGB\"\n\n def __init__(\n self,\n image_encoder: ImageEncoderViT,\n prompt_encoder: PromptEncoder,\n mask_decoder: MaskDecoder,\n pixel_mean: List[float] = [123.675, 116.28, 103.53],\n pixel_std: List[float] = [58.395, 57.12, 57.375],\n ) -> None:\n \"\"\"\n SAM predicts object masks from an image and input prompts.\n\n Arguments:\n image_encoder (ImageEncoderViT): The backbone used to encode the\n image into image embeddings that allow for efficient mask prediction.\n prompt_encoder (PromptEncoder): Encodes various types of input prompts.\n mask_decoder (MaskDecoder): Predicts masks from the image embeddings\n and encoded prompts.\n pixel_mean (list(float)): Mean values for normalizing pixels in the input image.\n pixel_std (list(float)): Std values for normalizing pixels in the input image.\n \"\"\"\n super().__init__()\n self.image_encoder = image_encoder\n self.prompt_encoder = prompt_encoder\n self.mask_decoder = mask_decoder\n self.register_buffer(\"pixel_mean\", torch.Tensor(pixel_mean).view(-1, 1, 1), False)\n self.register_buffer(\"pixel_std\", torch.Tensor(pixel_std).view(-1, 1, 1), False)\n\n @property\n def device(self) -> Any:\n return self.pixel_mean.device\n\n def forward(self, batched_input, multimask_output, image_size):\n if isinstance(batched_input, list):\n outputs = self.forward_test(batched_input, multimask_output)\n else:\n outputs = self.forward_train(batched_input, multimask_output, image_size)\n return outputs\n\n def forward_train(self, batched_input, multimask_output, image_size):\n input_images = self.preprocess(batched_input)\n image_embeddings = self.image_encoder(input_images)\n sparse_embeddings, dense_embeddings = self.prompt_encoder(\n points=None, boxes=None, masks=None\n )\n low_res_masks, iou_predictions = self.mask_decoder(\n image_embeddings=image_embeddings,\n image_pe=self.prompt_encoder.get_dense_pe(),\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n multimask_output=multimask_output\n )\n masks = self.postprocess_masks(\n low_res_masks,\n input_size=(image_size, image_size),\n original_size=(image_size, image_size)\n )\n outputs = {\n 'masks': masks,\n 'iou_predictions': iou_predictions,\n 'low_res_logits': low_res_masks\n }\n return outputs\n\n @torch.no_grad()\n def forward_test(\n self,\n batched_input: List[Dict[str, Any]],\n multimask_output: bool,\n ) -> List[Dict[str, torch.Tensor]]:\n \"\"\"\n Predicts masks end-to-end from provided images and prompts.\n If prompts are not known in advance, using SamPredictor is\n recommended over calling the model directly.\n\n Arguments:\n batched_input (list(dict)): A list over input images, each a\n dictionary with the following keys. A prompt key can be\n excluded if it is not present.\n 'image': The image as a torch tensor in 3xHxW format,\n already transformed for input to the model.\n 'original_size': (tuple(int, int)) The original size of\n the image before transformation, as (H, W).\n 'point_coords': (torch.Tensor) Batched point prompts for\n this image, with shape BxNx2. 
Already transformed to the\n input frame of the model.\n 'point_labels': (torch.Tensor) Batched labels for point prompts,\n with shape BxN.\n 'boxes': (torch.Tensor) Batched box inputs, with shape Bx4.\n Already transformed to the input frame of the model.\n 'mask_inputs': (torch.Tensor) Batched mask inputs to the model,\n in the form Bx1xHxW.\n multimask_output (bool): Whether the model should predict multiple\n disambiguating masks, or return a single mask.\n\n Returns:\n (list(dict)): A list over input images, where each element is\n as dictionary with the following keys.\n 'masks': (torch.Tensor) Batched binary mask predictions,\n with shape BxCxHxW, where B is the number of input promts,\n C is determiend by multimask_output, and (H, W) is the\n original size of the image.\n 'iou_predictions': (torch.Tensor) The model's predictions\n of mask quality, in shape BxC.\n 'low_res_logits': (torch.Tensor) Low resolution logits with\n shape BxCxHxW, where H=W=256. Can be passed as mask input\n to subsequent iterations of prediction.\n \"\"\"\n input_images = torch.stack([self.preprocess(x[\"image\"]) for x in batched_input], dim=0)\n image_embeddings = self.image_encoder(input_images)\n\n outputs = []\n for image_record, curr_embedding in zip(batched_input, image_embeddings):\n if \"point_coords\" in image_record:\n points = (image_record[\"point_coords\"], image_record[\"point_labels\"])\n else:\n points = None\n sparse_embeddings, dense_embeddings = self.prompt_encoder(\n points=points,\n boxes=image_record.get(\"boxes\", None),\n masks=image_record.get(\"mask_inputs\", None),\n )\n low_res_masks, iou_predictions = self.mask_decoder(\n image_embeddings=curr_embedding.unsqueeze(0),\n image_pe=self.prompt_encoder.get_dense_pe(),\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n multimask_output=multimask_output,\n )\n masks = self.postprocess_masks(\n low_res_masks,\n input_size=image_record[\"image\"].shape[-2:],\n original_size=image_record[\"original_size\"],\n )\n masks = masks > self.mask_threshold\n outputs.append(\n {\n \"masks\": masks,\n \"iou_predictions\": iou_predictions,\n \"low_res_logits\": low_res_masks,\n }\n )\n return outputs\n\n def postprocess_masks(\n self,\n masks: torch.Tensor,\n input_size: Tuple[int, ...],\n original_size: Tuple[int, ...],\n ) -> torch.Tensor:\n \"\"\"\n Remove padding and upscale masks to the original image size.\n\n Arguments:\n masks (torch.Tensor): Batched masks from the mask_decoder,\n in BxCxHxW format.\n input_size (tuple(int, int)): The size of the image input to the\n model, in (H, W) format. Used to remove padding.\n original_size (tuple(int, int)): The original size of the image\n before resizing for input to the model, in (H, W) format.\n\n Returns:\n (torch.Tensor): Batched masks in BxCxHxW format, where (H, W)\n is given by original_size.\n \"\"\"\n masks = F.interpolate(\n masks,\n (self.image_encoder.img_size, self.image_encoder.img_size),\n mode=\"bilinear\",\n align_corners=False,\n )\n masks = masks[..., : input_size[0], : input_size[1]]\n masks = F.interpolate(masks, original_size, mode=\"bilinear\", align_corners=False)\n return masks\n\n def preprocess(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Normalize pixel values and pad to a square input.\"\"\"\n # Normalize colors\n x = (x - self.pixel_mean) / self.pixel_std\n\n # Pad\n h, w = x.shape[-2:]\n padh = self.image_encoder.img_size - h\n padw = self.image_encoder.img_size - w\n x = F.pad(x, (0, padw, 0, padh))\n return x"
},
{
"identifier": "calculate_stability_score",
"path": "SAMed/segment_anything/utils/amg.py",
"snippet": "def calculate_stability_score(\n masks: torch.Tensor, mask_threshold: float, threshold_offset: float\n) -> torch.Tensor:\n \"\"\"\n Computes the stability score for a batch of masks. The stability\n score is the IoU between the binary masks obtained by thresholding\n the predicted mask logits at high and low values.\n \"\"\"\n # One mask is always contained inside the other.\n # Save memory by preventing unnecesary cast to torch.int64\n intersections = (\n (masks > (mask_threshold + threshold_offset))\n .sum(-1, dtype=torch.int16)\n .sum(-1, dtype=torch.int32)\n )\n unions = (\n (masks > (mask_threshold - threshold_offset))\n .sum(-1, dtype=torch.int16)\n .sum(-1, dtype=torch.int32)\n )\n return intersections / unions"
}
] | import torch
import torch.nn as nn
from torch.nn import functional as F
from typing import Tuple
from ..modeling import Sam
from .amg import calculate_stability_score | 3,262 | # All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
class SamOnnxModel(nn.Module):
"""
This model should not be called directly, but is used in ONNX export.
It combines the prompt encoder, mask decoder, and mask postprocessing of Sam,
with some functions modified to enable model tracing. It also supports extra
options controlling what information is returned. See the ONNX export script for details.
"""
def __init__(
self,
model: Sam,
return_single_mask: bool,
use_stability_score: bool = False,
return_extra_metrics: bool = False,
) -> None:
super().__init__()
self.mask_decoder = model.mask_decoder
self.model = model
self.img_size = model.image_encoder.img_size
self.return_single_mask = return_single_mask
self.use_stability_score = use_stability_score
self.stability_score_offset = 1.0
self.return_extra_metrics = return_extra_metrics
@staticmethod
def resize_longest_image_size(
input_image_size: torch.Tensor, longest_side: int
) -> torch.Tensor:
input_image_size = input_image_size.to(torch.float32)
scale = longest_side / torch.max(input_image_size)
transformed_size = scale * input_image_size
transformed_size = torch.floor(transformed_size + 0.5).to(torch.int64)
return transformed_size
def _embed_points(self, point_coords: torch.Tensor, point_labels: torch.Tensor) -> torch.Tensor:
point_coords = point_coords + 0.5
point_coords = point_coords / self.img_size
point_embedding = self.model.prompt_encoder.pe_layer._pe_encoding(point_coords)
point_labels = point_labels.unsqueeze(-1).expand_as(point_embedding)
point_embedding = point_embedding * (point_labels != -1)
point_embedding = point_embedding + self.model.prompt_encoder.not_a_point_embed.weight * (
point_labels == -1
)
for i in range(self.model.prompt_encoder.num_point_embeddings):
point_embedding = point_embedding + self.model.prompt_encoder.point_embeddings[
i
].weight * (point_labels == i)
return point_embedding
def _embed_masks(self, input_mask: torch.Tensor, has_mask_input: torch.Tensor) -> torch.Tensor:
mask_embedding = has_mask_input * self.model.prompt_encoder.mask_downscaling(input_mask)
mask_embedding = mask_embedding + (
1 - has_mask_input
) * self.model.prompt_encoder.no_mask_embed.weight.reshape(1, -1, 1, 1)
return mask_embedding
def mask_postprocessing(self, masks: torch.Tensor, orig_im_size: torch.Tensor) -> torch.Tensor:
masks = F.interpolate(
masks,
size=(self.img_size, self.img_size),
mode="bilinear",
align_corners=False,
)
prepadded_size = self.resize_longest_image_size(orig_im_size, self.img_size)
masks = masks[..., : int(prepadded_size[0]), : int(prepadded_size[1])]
orig_im_size = orig_im_size.to(torch.int64)
h, w = orig_im_size[0], orig_im_size[1]
masks = F.interpolate(masks, size=(h, w), mode="bilinear", align_corners=False)
return masks
def select_masks(
self, masks: torch.Tensor, iou_preds: torch.Tensor, num_points: int
) -> Tuple[torch.Tensor, torch.Tensor]:
# Determine if we should return the multiclick mask or not from the number of points.
# The reweighting is used to avoid control flow.
score_reweight = torch.tensor(
[[1000] + [0] * (self.model.mask_decoder.num_mask_tokens - 1)]
).to(iou_preds.device)
score = iou_preds + (num_points - 2.5) * score_reweight
best_idx = torch.argmax(score, dim=1)
masks = masks[torch.arange(masks.shape[0]), best_idx, :, :].unsqueeze(1)
iou_preds = iou_preds[torch.arange(masks.shape[0]), best_idx].unsqueeze(1)
return masks, iou_preds
@torch.no_grad()
def forward(
self,
image_embeddings: torch.Tensor,
point_coords: torch.Tensor,
point_labels: torch.Tensor,
mask_input: torch.Tensor,
has_mask_input: torch.Tensor,
orig_im_size: torch.Tensor,
):
sparse_embedding = self._embed_points(point_coords, point_labels)
dense_embedding = self._embed_masks(mask_input, has_mask_input)
masks, scores = self.model.mask_decoder.predict_masks(
image_embeddings=image_embeddings,
image_pe=self.model.prompt_encoder.get_dense_pe(),
sparse_prompt_embeddings=sparse_embedding,
dense_prompt_embeddings=dense_embedding,
)
if self.use_stability_score:
| # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
class SamOnnxModel(nn.Module):
"""
This model should not be called directly, but is used in ONNX export.
It combines the prompt encoder, mask decoder, and mask postprocessing of Sam,
with some functions modified to enable model tracing. It also supports extra
options controlling what information is returned. See the ONNX export script for details.
"""
def __init__(
self,
model: Sam,
return_single_mask: bool,
use_stability_score: bool = False,
return_extra_metrics: bool = False,
) -> None:
super().__init__()
self.mask_decoder = model.mask_decoder
self.model = model
self.img_size = model.image_encoder.img_size
self.return_single_mask = return_single_mask
self.use_stability_score = use_stability_score
self.stability_score_offset = 1.0
self.return_extra_metrics = return_extra_metrics
@staticmethod
def resize_longest_image_size(
input_image_size: torch.Tensor, longest_side: int
) -> torch.Tensor:
input_image_size = input_image_size.to(torch.float32)
scale = longest_side / torch.max(input_image_size)
transformed_size = scale * input_image_size
transformed_size = torch.floor(transformed_size + 0.5).to(torch.int64)
return transformed_size
def _embed_points(self, point_coords: torch.Tensor, point_labels: torch.Tensor) -> torch.Tensor:
point_coords = point_coords + 0.5
point_coords = point_coords / self.img_size
point_embedding = self.model.prompt_encoder.pe_layer._pe_encoding(point_coords)
point_labels = point_labels.unsqueeze(-1).expand_as(point_embedding)
point_embedding = point_embedding * (point_labels != -1)
point_embedding = point_embedding + self.model.prompt_encoder.not_a_point_embed.weight * (
point_labels == -1
)
for i in range(self.model.prompt_encoder.num_point_embeddings):
point_embedding = point_embedding + self.model.prompt_encoder.point_embeddings[
i
].weight * (point_labels == i)
return point_embedding
def _embed_masks(self, input_mask: torch.Tensor, has_mask_input: torch.Tensor) -> torch.Tensor:
mask_embedding = has_mask_input * self.model.prompt_encoder.mask_downscaling(input_mask)
mask_embedding = mask_embedding + (
1 - has_mask_input
) * self.model.prompt_encoder.no_mask_embed.weight.reshape(1, -1, 1, 1)
return mask_embedding
def mask_postprocessing(self, masks: torch.Tensor, orig_im_size: torch.Tensor) -> torch.Tensor:
masks = F.interpolate(
masks,
size=(self.img_size, self.img_size),
mode="bilinear",
align_corners=False,
)
prepadded_size = self.resize_longest_image_size(orig_im_size, self.img_size)
masks = masks[..., : int(prepadded_size[0]), : int(prepadded_size[1])]
orig_im_size = orig_im_size.to(torch.int64)
h, w = orig_im_size[0], orig_im_size[1]
masks = F.interpolate(masks, size=(h, w), mode="bilinear", align_corners=False)
return masks
def select_masks(
self, masks: torch.Tensor, iou_preds: torch.Tensor, num_points: int
) -> Tuple[torch.Tensor, torch.Tensor]:
# Determine if we should return the multiclick mask or not from the number of points.
# The reweighting is used to avoid control flow.
score_reweight = torch.tensor(
[[1000] + [0] * (self.model.mask_decoder.num_mask_tokens - 1)]
).to(iou_preds.device)
score = iou_preds + (num_points - 2.5) * score_reweight
best_idx = torch.argmax(score, dim=1)
masks = masks[torch.arange(masks.shape[0]), best_idx, :, :].unsqueeze(1)
iou_preds = iou_preds[torch.arange(masks.shape[0]), best_idx].unsqueeze(1)
return masks, iou_preds
@torch.no_grad()
def forward(
self,
image_embeddings: torch.Tensor,
point_coords: torch.Tensor,
point_labels: torch.Tensor,
mask_input: torch.Tensor,
has_mask_input: torch.Tensor,
orig_im_size: torch.Tensor,
):
sparse_embedding = self._embed_points(point_coords, point_labels)
dense_embedding = self._embed_masks(mask_input, has_mask_input)
masks, scores = self.model.mask_decoder.predict_masks(
image_embeddings=image_embeddings,
image_pe=self.model.prompt_encoder.get_dense_pe(),
sparse_prompt_embeddings=sparse_embedding,
dense_prompt_embeddings=dense_embedding,
)
if self.use_stability_score: | scores = calculate_stability_score( | 1 | 2023-11-03 17:05:40+00:00 | 4k |
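A small self-contained sketch of calculate_stability_score from the context above, applied to one toy row of mask logits (the threshold and offset values are illustrative):

import torch

def calculate_stability_score(masks, mask_threshold, threshold_offset):
    # IoU between the masks thresholded at (t + offset) and (t - offset)
    intersections = ((masks > (mask_threshold + threshold_offset))
                     .sum(-1, dtype=torch.int16).sum(-1, dtype=torch.int32))
    unions = ((masks > (mask_threshold - threshold_offset))
              .sum(-1, dtype=torch.int16).sum(-1, dtype=torch.int32))
    return intersections / unions

logits = torch.tensor([[[2.0, 0.5, -0.5, -2.0]]])   # one 1x4 map of mask logits
score = calculate_stability_score(logits, mask_threshold=0.0, threshold_offset=1.0)
print(score)   # tensor([0.3333]): 1 pixel survives the high threshold, 3 survive the low one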
anand2312/quill-server | quill_server/realtime/events.py | [
{
"identifier": "User",
"path": "quill_server/db/models.py",
"snippet": "class User(Base):\n __tablename__ = \"user\"\n\n id: Mapped[UUID] = mapped_column(pg_UUID(as_uuid=True), primary_key=True, default=uuid4) # noqa: A003\n username: Mapped[str] = mapped_column(unique=True)\n password: Mapped[str]\n created_at: Mapped[datetime] = mapped_column(DateTime, default=func.now())\n\n def __repr__(self) -> str:\n return f\"<User(id={self.id} username={self.username})>\""
},
{
"identifier": "GameMember",
"path": "quill_server/realtime/room.py",
"snippet": "class GameMember(BaseModel):\n \"\"\"Represents a user currently playing in a Quill room.\"\"\"\n\n user_id: str\n username: str"
},
{
"identifier": "Room",
"path": "quill_server/realtime/room.py",
"snippet": "class Room(BaseModel):\n \"\"\"Represents a Quill game room.\"\"\"\n\n room_id: str\n owner: GameMember\n users: list[GameMember]\n status: GameStatus\n\n @classmethod\n def new(cls: type[\"Room\"], owner: User) -> \"Room\":\n return cls(\n room_id=str(uuid4()),\n owner=_db_user_to_game_member(owner),\n users=[],\n status=GameStatus.LOBBY,\n )\n\n async def start(self) -> None:\n \"\"\"Start the game in this room.\"\"\"\n self.status = GameStatus.ONGOING\n logger.info(f\"Setting room:{self.room_id}:status = ONGOING\")\n await cache.client.set(f\"room:{self.room_id}:status\", str(self.status))\n\n async def end(self) -> None:\n \"\"\"End the game in this room.\"\"\"\n self.status = GameStatus.ENDED\n logger.info(f\"Setting room:{self.room_id}:status = ENDED\")\n await cache.client.set(f\"room:{self.room_id}:status\", str(self.status))\n\n async def join(self, user: User) -> None:\n \"\"\"Add a user to this room.\"\"\"\n # reject connection if the user is already in the room...\n if any([u.user_id == str(user.id) for u in self.users]):\n raise ValueError(\"User is already in this room\")\n # or if the game isn't in the lobby state anymore...\n elif self.status != GameStatus.LOBBY:\n raise ValueError(\"Room is no longer accepting members\")\n # or if the room already has 8 members\n elif len(self.users) == 8:\n raise ValueError(\"Maximum room capacity reached\")\n data = _db_user_to_game_member(user)\n self.users.append(data)\n logger.info(f\"Adding {data.username} to room:{self.room_id}\")\n await typing.cast(\n typing.Awaitable[int],\n cache.client.rpush(f\"room:{self.room_id}:users\", data.model_dump_json()),\n )\n\n async def leave(self, user: User) -> None:\n \"\"\"Remove a user from this room.\"\"\"\n data = _db_user_to_game_member(user)\n self.users.remove(data)\n logger.info(f\"Removing {data.username} from room:{self.room_id}\")\n res = await typing.cast(\n typing.Awaitable[int],\n cache.client.lrem(f\"room:{self.room_id}:users\", 1, data.model_dump_json()),\n )\n if res != 1:\n logger.warning(\n f\"Attempted removing {data.username} from room:{self.room_id} \"\n f\"but Redis gave a response != 1 ({res=})\"\n )\n\n async def to_redis(self) -> None:\n \"\"\"Writes the room to Redis.\"\"\"\n # all the dictionaries are being dumped to redis as JSON strings\n # room:id:users will be a list of JSON strings\n key = f\"room:{self.room_id}\"\n owner = self.owner.model_dump_json()\n users = [i.model_dump_json() for i in self.users]\n status = str(self.status)\n logger.info(f\"Writing {key} to Redis\")\n async with cache.client.pipeline(transaction=True) as pipe:\n pipe.set(f\"{key}:owner\", owner)\n pipe.set(f\"{key}:status\", str(status))\n if len(users) > 0:\n pipe.rpush(f\"{key}:users\", *users)\n await pipe.execute()\n logger.info(f\"Saved {key} to Redis\")\n\n @classmethod\n async def from_redis(cls: type[\"Room\"], room_id: str) -> typing.Optional[\"Room\"]:\n key = f\"room:{room_id}\"\n logger.info(f\"Fetching {key} from Redis\")\n status = await cache.client.get(f\"{key}:status\")\n if not status:\n logger.warning(f\"{key} does not exist in cache\")\n return\n owner_res = await cache.client.get(f\"{key}:owner\")\n owner = loads(owner_res)\n # redis-py has incorrect return types set, so we need to cast here\n # https://github.com/redis/redis-py/issues/2933\n users_res = await typing.cast(\n typing.Awaitable[list[bytes]], cache.client.lrange(f\"{key}:users\", 0, -1)\n )\n users = [loads(i) for i in users_res]\n return cls(room_id=room_id, owner=owner, users=users, 
status=status.decode())"
},
{
"identifier": "ChatMessage",
"path": "quill_server/realtime/room.py",
"snippet": "class ChatMessage(BaseModel):\n \"\"\"Represents a message sent by a Quill player.\"\"\"\n\n username: str\n message: str\n has_guessed: bool"
},
{
"identifier": "_db_user_to_game_member",
"path": "quill_server/realtime/room.py",
"snippet": "def _db_user_to_game_member(user: User) -> GameMember:\n return GameMember(user_id=str(user.id), username=user.username)"
},
{
"identifier": "MessageResponse",
"path": "quill_server/schema.py",
"snippet": "class MessageResponse(BaseModel):\n message: str"
}
] | from enum import StrEnum, auto
from functools import partial
from typing import Any, Generic, TypeVar
from collections.abc import Awaitable
from loguru import logger
from pydantic import BaseModel
from redis.asyncio import Redis
from quill_server.db.models import User
from quill_server.realtime.room import GameMember, Room, ChatMessage, _db_user_to_game_member
from quill_server.schema import MessageResponse
import typing | 2,061 |
DataT = TypeVar("DataT", bound=BaseModel)
# the excalidraw element event contains many fields
# https://github.com/excalidraw/excalidraw/blob/master/src/element/types.ts#L27-L141
ExcalidrawElement = dict[str, Any]
class Drawing(BaseModel):
user: GameMember
elements: list[ExcalidrawElement]
class EventType(StrEnum):
START = auto() # sent by the user to the server to trigger a game start
CONNECT = auto() # sent to the newly joined user
MEMBER_JOIN = auto() # sent to all connected users when a new user joins
MEMBER_LEAVE = auto() # sent to all connected users when a user disconnects from the room
OWNER_CHANGE = auto() # sent when the room owner changes
GAME_STATE_CHANGE = auto() # sent when the game starts or ends
MESSAGE = auto() # sent when any user sends a message in the chat
CORRECT_GUESS = auto() # sent when any user makes a correct guess
DRAWING = auto() # sent when a user is drawing on the board
TURN_START = auto() # sent when a new turn starts
TURN_END = auto() # sent when a turn ends
ERROR = auto() # sent to a user that attempts an illegal action
class Event(BaseModel, Generic[DataT]):
"""An event to be broadcasted."""
event_type: EventType
data: DataT
ConnectEvent = partial(Event[Room], event_type=EventType.CONNECT)
MemberJoinEvent = partial(Event[GameMember], event_type=EventType.MEMBER_JOIN)
MemberLeaveEvent = partial(Event[GameMember], event_type=EventType.MEMBER_LEAVE)
ChatMessageEvent = partial(Event[ChatMessage], event_type=EventType.MESSAGE)
CorrectGuessEvent = partial(Event[ChatMessage], event_type=EventType.CORRECT_GUESS)
GameStateChangeEvent = partial(Event[Room], event_type=EventType.GAME_STATE_CHANGE)
DrawingEvent = partial(Event[Drawing], event_type=EventType.DRAWING)
async def process_message(msg: dict[str, Any], room: Room, user: User, conn: Redis) -> Event:
event_type = msg.get("event_type")
event_data = msg.get("data")
if not event_type:
raise ValueError("Malformed message - no event_type found")
if not event_data:
raise ValueError("Malformed message - no event data found")
match EventType(event_type):
case EventType.START:
if str(user.id) == room.owner.user_id:
await room.start()
return GameStateChangeEvent(data=room)
else:
# user is not the room owner
|
DataT = TypeVar("DataT", bound=BaseModel)
# the excalidraw element event contains many fields
# https://github.com/excalidraw/excalidraw/blob/master/src/element/types.ts#L27-L141
ExcalidrawElement = dict[str, Any]
class Drawing(BaseModel):
user: GameMember
elements: list[ExcalidrawElement]
class EventType(StrEnum):
START = auto() # sent by the user to the server to trigger a game start
CONNECT = auto() # sent to the newly joined user
MEMBER_JOIN = auto() # sent to all connected users when a new user joins
MEMBER_LEAVE = auto() # sent to all connected users when a user disconnects from the room
OWNER_CHANGE = auto() # sent when the room owner changes
GAME_STATE_CHANGE = auto() # sent when the game starts or ends
MESSAGE = auto() # sent when any user sends a message in the chat
CORRECT_GUESS = auto() # sent when any user makes a correct guess
DRAWING = auto() # sent when a user is drawing on the board
TURN_START = auto() # sent when a new turn starts
TURN_END = auto() # sent when a turn ends
ERROR = auto() # sent to a user that attempts an illegal action
class Event(BaseModel, Generic[DataT]):
"""An event to be broadcasted."""
event_type: EventType
data: DataT
ConnectEvent = partial(Event[Room], event_type=EventType.CONNECT)
MemberJoinEvent = partial(Event[GameMember], event_type=EventType.MEMBER_JOIN)
MemberLeaveEvent = partial(Event[GameMember], event_type=EventType.MEMBER_LEAVE)
ChatMessageEvent = partial(Event[ChatMessage], event_type=EventType.MESSAGE)
CorrectGuessEvent = partial(Event[ChatMessage], event_type=EventType.CORRECT_GUESS)
GameStateChangeEvent = partial(Event[Room], event_type=EventType.GAME_STATE_CHANGE)
DrawingEvent = partial(Event[Drawing], event_type=EventType.DRAWING)
async def process_message(msg: dict[str, Any], room: Room, user: User, conn: Redis) -> Event:
event_type = msg.get("event_type")
event_data = msg.get("data")
if not event_type:
raise ValueError("Malformed message - no event_type found")
if not event_data:
raise ValueError("Malformed message - no event data found")
match EventType(event_type):
case EventType.START:
if str(user.id) == room.owner.user_id:
await room.start()
return GameStateChangeEvent(data=room)
else:
# user is not the room owner | data = MessageResponse(message="You do not own this room") | 5 | 2023-11-03 12:43:18+00:00 | 4k |
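A hedged usage sketch for the Event wrappers defined above, assuming the module paths shown in this record (quill_server.realtime.events and quill_server.realtime.room) are importable and pydantic v2 is in use:

from quill_server.realtime.events import ChatMessageEvent
from quill_server.realtime.room import ChatMessage

msg = ChatMessage(username="alice", message="is it a cat?", has_guessed=False)
event = ChatMessageEvent(data=msg)   # the partial fills in event_type=EventType.MESSAGE
print(event.model_dump_json())
# roughly: {"event_type":"message","data":{"username":"alice","message":"is it a cat?","has_guessed":false}}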
microsoft/PLEX | PLEX/models/trajectory_models/model.py | [
{
"identifier": "GaussianHead",
"path": "PLEX/models/heads/distributions.py",
"snippet": "class GaussianHead(nn.Module):\n def __init__(self, input_dim, output_dim, std_bounds,\n hidden_dim=None, squash=False):\n super().__init__()\n self.input_dim = input_dim\n self.output_dim = output_dim\n self.std_bounds = std_bounds\n self.squash = squash\n self.mean_head = _head(input_dim, output_dim, hidden_dim=hidden_dim)\n self.std_head = _head(input_dim, output_dim, hidden_dim=hidden_dim)\n\n def forward(self, x):\n mean = self.mean_head(x)\n std = _rescale(self.std_head(x), *self.std_bounds)\n dist = D.Normal(loc=mean, scale=std)\n # dist = D.Independent(dist, 1) # diagonal\n\n if self.squash:\n dist = TanhWrappedDistribution(dist)\n\n return dist"
},
{
"identifier": "GaussianMixtureHead",
"path": "PLEX/models/heads/distributions.py",
"snippet": "class GaussianMixtureHead(nn.Module):\n def __init__(self, num_components, input_dim, output_dim, std_bounds,\n hidden_dim=None, squash=False):\n super().__init__()\n self.num_components = num_components\n self.input_dim = input_dim\n self.output_dim = output_dim\n self.squash = squash\n self.std_bounds = std_bounds\n self.mean_heads = nn.ModuleList([\n _head(input_dim, output_dim, hidden_dim=hidden_dim)\n for _ in range(num_components)\n ])\n self.std_heads = nn.ModuleList([\n _head(input_dim, output_dim, hidden_dim=hidden_dim)\n for _ in range(num_components)\n ])\n self.logits_head = _head(input_dim, num_components, hidden_dim=hidden_dim)\n\n def forward(self, x):\n # mixture dim will come right after other batch dims\n batch_shape = tuple(x.shape[:-1])\n mixture_dim = len(batch_shape)\n\n # unnormalized logits to categorical distribution for mixing the modes\n logits = self.logits_head(x)\n mixture = D.Categorical(logits=logits)\n\n means = torch.stack([head(x) for head in self.mean_heads], dim=mixture_dim)\n stds = _rescale(\n torch.stack([head(x) for head in self.std_heads], dim=mixture_dim),\n *self.std_bounds\n )\n dists = D.Normal(loc=means, scale=stds)\n dists = D.Independent(dists, 1) # diagonal\n dist = D.MixtureSameFamily(mixture_distribution=mixture, component_distribution=dists)\n\n if self.squash:\n dist = TanhWrappedDistribution(dist)\n\n return dist"
},
{
"identifier": "R3M_Module",
"path": "PLEX/models/encoders/vision.py",
"snippet": "class R3M_Module(Module):\n def __init__(self, R3M_obj):\n super().__init__()\n self.R3M_obj = R3M_obj\n self.bn = nn.BatchNorm1d(self.R3M_obj.outdim)\n\n def forward(self, x, **kwargs):\n # \"Unprocess\" images so that they are in [0, 255] and upsample them to 224x224.\n x *= 255\n x = x.int()\n if (x.shape[-1] != 224 or x.shape[-2] != 224):\n preprocess = nn.Sequential(\n transforms.Resize(224)\n )\n x = preprocess(x)\n x = self.R3M_obj.forward(x, **kwargs)\n x = self.bn(x)\n return x\n\n def output_shape(self, input_shape=None):\n # The return dim of a BN layer is the same is its input dim (R3M's output dim)\n return [self.R3M_obj.outdim]"
}
] | import torch
import torch.nn as nn
import torch.distributions as D
import math
import torchvision
import os
import PLEX.util.globals as globals
import robomimic.utils.obs_utils as ObsUtils
from robomimic.models.base_nets import SpatialSoftmax, SpatialMeanPool, Module
from robomimic.models.obs_nets import obs_encoder_factory, ObservationEncoder
from torchvision.models.resnet import BasicBlock, Bottleneck
from PLEX.models.heads.distributions import GaussianHead, GaussianMixtureHead
from PLEX.models.encoders.vision import R3M_Module
from r3m.models.models_r3m import R3M
from r3m import load_r3m_from_path
from r3m import load_r3m | 1,972 |
def _action_loss(action_preds, action_targets, mask):
if isinstance(action_preds, D.Distribution):
# minimize negative log-likelihood, i.e. maximize likelihood
unmasked_losses = -action_preds.log_prob(action_targets)
elif torch.is_tensor(action_preds):
# minimize mean squared error
unmasked_losses = torch.mean((action_preds - action_targets)**2, dim=-1)
else:
raise RuntimeError(f'Invalid action_preds: {action_preds}')
# consider loss only in positions where mask = 1
assert unmasked_losses.shape == mask.shape
selected_losses = unmasked_losses[mask.bool()]
return selected_losses.mean()
class TrajectoryModel(nn.Module):
def __init__(self, camera_names, obs_dims,
proprio_dim, act_dim, hidden_dim,
image_encoder_arch='resnet18',
image_encoder_load=None,
use_random_crops=True,
pool_type='SpatialSoftmax',
action_output_type='gaussian',
action_tanh=True,
std_bounds=None,
impute_style=None,
data_dir=None,
history_len=None,
modalities_to_mask=['action'],
bc_mode=True):
super().__init__()
self.camera_names = camera_names
self.obs_dims = obs_dims
self.proprio_dim = proprio_dim
self.act_dim = act_dim
self.hidden_dim = hidden_dim
self.image_encoder_arch = image_encoder_arch
self.image_encoder_load = image_encoder_load
self.use_random_crops = use_random_crops
self.pool_type = pool_type
self.action_output_type = action_output_type
self.data_dir = data_dir
self.history_len = history_len
self.bc_mode = bc_mode
assert type(modalities_to_mask) == list
self.modalities_to_mask = modalities_to_mask
# In behavior cloning mode, we don't condition on context return.
# To implement this, we will map context return to a fixed embedding in this mode.
if self.bc_mode and 'return' not in self.modalities_to_mask:
self.modalities_to_mask.append('return')
self.action_tanh = action_tanh
self.std_bounds = std_bounds
assert len(std_bounds) == 2 and std_bounds[0] < std_bounds[1]
# For embedding inputs
self.return_encoder = nn.Linear(1, hidden_dim)
self.action_encoder = nn.Linear(act_dim, hidden_dim)
# If we are in image-based mode, we will need image and proprio encoders.
if not globals.full_state_mode:
self.proprio_encoder = nn.Linear(proprio_dim, hidden_dim)
# For Robomimic's resnet18 encoder, we have to tell the encoder what its output dim should be.
if self.image_encoder_arch == 'resnet18':
self.image_encoder_feature_dim = 64
self.image_encoder = self._create_image_encoder()
# For R3M, we just take the output dim from R3M itself.
if self.image_encoder_arch.startswith('r3m'):
self.image_encoder_feature_dim = int(self.image_encoder.output_shape()[0] / len(camera_names))
# For combining embeddings of images into single state
self.image_obs_combiner = nn.Linear(
self.image_encoder_feature_dim * len(camera_names),
hidden_dim
)
self.image_and_proprio_emb_combiner = nn.Linear(
hidden_dim + hidden_dim,
hidden_dim
)
# For combining embeddings of proprio data and images into single state
self.obs_combiner = nn.Linear(
hidden_dim + self.image_encoder_feature_dim * len(camera_names),
hidden_dim
)
self.context_encoder = self.image_encoder
else: # Otherwise we are in low-dimensional mode and we will need full state encoders.
assert type(self.obs_dims) == int
self.state_encoder = nn.Linear(self.obs_dims, hidden_dim)
self.context_encoder = self.state_encoder
# For predicting outputs
action_input_dim = hidden_dim
self.predict_proprio = torch.nn.Linear(hidden_dim, self.proprio_dim)
if self.action_output_type == 'gaussian_mixture':
num_components = 5
|
def _action_loss(action_preds, action_targets, mask):
if isinstance(action_preds, D.Distribution):
# minimize negative log-likelihood, i.e. maximize likelihood
unmasked_losses = -action_preds.log_prob(action_targets)
elif torch.is_tensor(action_preds):
# minimize mean squared error
unmasked_losses = torch.mean((action_preds - action_targets)**2, dim=-1)
else:
raise RuntimeError(f'Invalid action_preds: {action_preds}')
# consider loss only in positions where mask = 1
assert unmasked_losses.shape == mask.shape
selected_losses = unmasked_losses[mask.bool()]
return selected_losses.mean()
class TrajectoryModel(nn.Module):
def __init__(self, camera_names, obs_dims,
proprio_dim, act_dim, hidden_dim,
image_encoder_arch='resnet18',
image_encoder_load=None,
use_random_crops=True,
pool_type='SpatialSoftmax',
action_output_type='gaussian',
action_tanh=True,
std_bounds=None,
impute_style=None,
data_dir=None,
history_len=None,
modalities_to_mask=['action'],
bc_mode=True):
super().__init__()
self.camera_names = camera_names
self.obs_dims = obs_dims
self.proprio_dim = proprio_dim
self.act_dim = act_dim
self.hidden_dim = hidden_dim
self.image_encoder_arch = image_encoder_arch
self.image_encoder_load = image_encoder_load
self.use_random_crops = use_random_crops
self.pool_type = pool_type
self.action_output_type = action_output_type
self.data_dir = data_dir
self.history_len = history_len
self.bc_mode = bc_mode
assert type(modalities_to_mask) == list
self.modalities_to_mask = modalities_to_mask
# In behavior cloning mode, we don't condition on context return.
# To implement this, we will map context return to a fixed embedding in this mode.
if self.bc_mode and 'return' not in self.modalities_to_mask:
self.modalities_to_mask.append('return')
self.action_tanh = action_tanh
self.std_bounds = std_bounds
assert len(std_bounds) == 2 and std_bounds[0] < std_bounds[1]
# For embedding inputs
self.return_encoder = nn.Linear(1, hidden_dim)
self.action_encoder = nn.Linear(act_dim, hidden_dim)
# If we are in image-based mode, we will need image and proprio encoders.
if not globals.full_state_mode:
self.proprio_encoder = nn.Linear(proprio_dim, hidden_dim)
# For Robomimic's resnet18 encoder, we have to tell the encoder what its output dim should be.
if self.image_encoder_arch == 'resnet18':
self.image_encoder_feature_dim = 64
self.image_encoder = self._create_image_encoder()
# For R3M, we just take the output dim from R3M itself.
if self.image_encoder_arch.startswith('r3m'):
self.image_encoder_feature_dim = int(self.image_encoder.output_shape()[0] / len(camera_names))
# For combining embeddings of images into single state
self.image_obs_combiner = nn.Linear(
self.image_encoder_feature_dim * len(camera_names),
hidden_dim
)
self.image_and_proprio_emb_combiner = nn.Linear(
hidden_dim + hidden_dim,
hidden_dim
)
# For combining embeddings of proprio data and images into single state
self.obs_combiner = nn.Linear(
hidden_dim + self.image_encoder_feature_dim * len(camera_names),
hidden_dim
)
self.context_encoder = self.image_encoder
else: # Otherwise we are in low-dimensional mode and we will need full state encoders.
assert type(self.obs_dims) == int
self.state_encoder = nn.Linear(self.obs_dims, hidden_dim)
self.context_encoder = self.state_encoder
# For predicting outputs
action_input_dim = hidden_dim
self.predict_proprio = torch.nn.Linear(hidden_dim, self.proprio_dim)
if self.action_output_type == 'gaussian_mixture':
num_components = 5 | self.predict_action = GaussianMixtureHead( | 1 | 2023-11-06 09:38:09+00:00 | 4k |
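A tiny sketch of the masked mean-squared-error branch of _action_loss above, on hypothetical tensors (batch of one, two timesteps, two action dimensions):

import torch

action_preds = torch.tensor([[[0.0, 0.0], [1.0, 1.0]]])     # (1, 2, act_dim=2)
action_targets = torch.tensor([[[1.0, 1.0], [1.0, 1.0]]])
mask = torch.tensor([[1.0, 0.0]])                           # only the first timestep is real

unmasked_losses = torch.mean((action_preds - action_targets) ** 2, dim=-1)  # [[1., 0.]]
loss = unmasked_losses[mask.bool()].mean()
print(loss)   # tensor(1.) -- the masked-out second step does not dilute the loss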
S3raphimCS/Hackathon_telehack | backend/SPO_KROT/metrics/views.py | [
{
"identifier": "ExcelFile",
"path": "backend/SPO_KROT/metrics/models.py",
"snippet": "class ExcelFile(models.Model):\n file = models.FileField(\n upload_to='metrics',\n unique=True,\n blank=True, null=True,\n validators=[FileExtensionValidator(['xlsx', 'xls', 'xlsm'])],\n )\n\n @property\n def filename(self):\n return self.file.name.split('/')[-1:][0]"
},
{
"identifier": "Measurements",
"path": "backend/SPO_KROT/metrics/models.py",
"snippet": "class Measurements(models.Model):\n \"\"\"Модель записи измерений из отчета, которые будут изменяться.\"\"\"\n class Meta:\n verbose_name = \"Измерение\"\n verbose_name_plural = \"Измерения\"\n\n operator = models.ForeignKey(\n Operator,\n on_delete=models.CASCADE,\n )\n report = models.ForeignKey(\n Report,\n on_delete=models.CASCADE,\n )\n voice_service_non_accessibility = models.FloatField(\n _(\"Доля неуспешных попыток установления голосового соединения\"),\n validators=PERCENTAGE_VALIDATOR\n )\n voice_service_cut_off = models.FloatField(\n _(\"Доля обрывов голосовых соединений\"),\n validators=PERCENTAGE_VALIDATOR\n )\n speech_quality_on_call = models.FloatField(\n _(\"Средняя разборчивость речи на соединение\"),\n )\n negative_mos_samples_ratio = models.FloatField(\n _(\"Доля голосовых соединений с низкой разборчивостью речи\"),\n validators=PERCENTAGE_VALIDATOR\n )\n undelivered_messages = models.FloatField(\n _(\"Доля недоставленных SMS сообщений\"),\n validators=PERCENTAGE_VALIDATOR\n )\n avg_sms_delivery_time = models.FloatField(\n _(\"Среднее время доставки SMS сообщений\"),\n )\n http_failure_session = models.FloatField(\n _(\"Доля неуспешных сессий по протоколу HTTP\"),\n validators=PERCENTAGE_VALIDATOR\n )\n http_ul_mean_userdata_rate = models.FloatField(\n _(\"Среднее значение скорости передачи данных от абонента\"),\n )\n http_dl_mean_userdata_rate = models.FloatField(\n _(\"Среднее значение скорости передачи данных к абоненту\"),\n )\n http_session_time = models.FloatField(\n _(\"Продолжительность успешной сессии\"),\n )\n number_of_test_voice_connections = models.IntegerField(\n _(\"Общее количество тестовых голосовых соединений \"),\n )\n number_of_voice_sequences = models.IntegerField(\n _(\"Общее количество голосовых последовательностей в оцениваемых соединениях\"),\n )\n voice_connections_with_low_intelligibility = models.IntegerField(\n _(\"Количество голосовых соединений с низкой разборчивостью\"),\n )\n number_of_sms_messages = models.IntegerField(\n _(\"Общее количество отправленных SMS - сообщений\"),\n )\n number_of_connections_attempts_http = models.IntegerField(\n _(\"Общее количество попыток соединений с сервером передачи данных HTTP\"),\n )\n number_of_test_sessions_http = models.IntegerField(\n _(\"Общее количество тестовых сессий по протоколу HTTP\"),\n )\n\n def __str__(self):\n return f\"Метрика {self.operator} из отчета {self.report}\""
},
{
"identifier": "Operator",
"path": "backend/SPO_KROT/metrics/models.py",
"snippet": "class Operator(models.Model):\n \"\"\"Модель операторов связи для возможности добавления новых.\"\"\"\n class Meta:\n verbose_name = \"Оператор\"\n verbose_name_plural = \"Операторы\"\n\n name = models.CharField(\n _(\"Название оператора\"),\n max_length=50,\n blank=False, null=False,\n unique=True,\n )\n\n def __str__(self) -> models.CharField:\n return self.name"
},
{
"identifier": "Report",
"path": "backend/SPO_KROT/metrics/models.py",
"snippet": "class Report(models.Model):\n \"\"\"Модель отчетов для потенциального хранения информации об отчетах в БД.\"\"\"\n class Meta:\n verbose_name = \"Отчет\"\n verbose_name_plural = \"Отчеты\"\n\n title = models.CharField(\n _(\"Название отчета\"),\n max_length=200,\n blank=False, null=False,\n )\n region = models.CharField(\n _(\"Регион\"),\n max_length=50,\n blank=True, null=True,\n )\n city = models.CharField(\n _(\"Город\"),\n max_length=100,\n blank=True, null=True\n )\n start_date = models.DateField(\n _(\"Дата начала измерений\"),\n blank=True, null=True,\n )\n end_date = models.DateField(\n _(\"Дата конца измерений\"),\n blank=True, null=True,\n )\n publisher = models.ForeignKey(\n get_user_model(),\n on_delete=models.SET_NULL,\n null=True,\n )\n published = models.DateTimeField(\n auto_now_add=True,\n )\n\n def __str__(self):\n return f\"с {self.start_date} по {self.end_date} Отчет: {self.title}\""
},
{
"identifier": "ExcelUploadSerializer",
"path": "backend/SPO_KROT/metrics/serializers.py",
"snippet": "class ExcelUploadSerializer(serializers.ModelSerializer):\n class Meta:\n model = ExcelFile\n fields = ('file',)"
},
{
"identifier": "ReportDetailSerializer",
"path": "backend/SPO_KROT/metrics/serializers.py",
"snippet": "class ReportDetailSerializer(serializers.ModelSerializer):\n class Meta:\n model = Report\n fields = ('publisher', \"title\", \"region\", \"city\", \"start_date\", \"end_date\", \"published\", 'measurements_set')\n depth = 2\n\n publisher = serializers.CharField()"
},
{
"identifier": "ReportListSerializer",
"path": "backend/SPO_KROT/metrics/serializers.py",
"snippet": "class ReportListSerializer(serializers.ModelSerializer):\n class Meta:\n model = Report\n fields = \"__all__\""
}
] | from re import split as resplit
from datefinder import find_dates
from django.db.models import CharField, DateField, Q
from django.utils.translation import gettext_lazy as _
from drf_yasg import openapi
from drf_yasg.utils import swagger_auto_schema
from openpyxl import load_workbook
from rest_framework import generics, status
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework.views import APIView
from users.models import CustomUser
from .models import ExcelFile, Measurements, Operator, Report
from .serializers import (ExcelUploadSerializer, ReportDetailSerializer,
ReportListSerializer) | 2,065 |
update_metrics_example = [
{
"id": 13,
"voice_service_non_accessibility": "0.3",
"voice_service_cut_off": "0.8",
"speech_quality_on_call": "4.2",
"negative_mos_samples_ratio": "0.3",
"undelivered_messages": "2.4",
"avg_sms_delivery_time": "6.3",
"http_failure_session": "2.2",
"http_ul_mean_userdata_rate": "2488.1",
"http_dl_mean_userdata_rate": "9700.9",
"http_session_time": "10.9",
"number_of_test_voice_connections": 7818,
"number_of_voice_sequences": 147909,
"voice_connections_with_low_intelligibility": 374,
"number_of_sms_messages": 500,
"number_of_connections_attempts_http": 1729,
"number_of_test_sessions_http": 2204
},
{
"id": 13,
"voice_service_non_accessibility": "0.1",
"voice_service_cut_off": "0.5",
"speech_quality_on_call": "4.5",
"negative_mos_samples_ratio": "0.1",
"undelivered_messages": "2.8",
"avg_sms_delivery_time": "6.6",
"http_failure_session": "2.6",
"http_ul_mean_userdata_rate": "2488.9",
"http_dl_mean_userdata_rate": "9700.2",
"http_session_time": "10.3",
"number_of_test_voice_connections": 7818,
"number_of_voice_sequences": 147909,
"voice_connections_with_low_intelligibility": 374,
"number_of_sms_messages": 500,
"number_of_connections_attempts_http": 1729,
"number_of_test_sessions_http": 2204
}
]
class ReportDetailView(generics.RetrieveAPIView):
"""Возвращает информацию о конретном отчет по ID."""
lookup_field = 'pk'
|
update_metrics_example = [
{
"id": 13,
"voice_service_non_accessibility": "0.3",
"voice_service_cut_off": "0.8",
"speech_quality_on_call": "4.2",
"negative_mos_samples_ratio": "0.3",
"undelivered_messages": "2.4",
"avg_sms_delivery_time": "6.3",
"http_failure_session": "2.2",
"http_ul_mean_userdata_rate": "2488.1",
"http_dl_mean_userdata_rate": "9700.9",
"http_session_time": "10.9",
"number_of_test_voice_connections": 7818,
"number_of_voice_sequences": 147909,
"voice_connections_with_low_intelligibility": 374,
"number_of_sms_messages": 500,
"number_of_connections_attempts_http": 1729,
"number_of_test_sessions_http": 2204
},
{
"id": 13,
"voice_service_non_accessibility": "0.1",
"voice_service_cut_off": "0.5",
"speech_quality_on_call": "4.5",
"negative_mos_samples_ratio": "0.1",
"undelivered_messages": "2.8",
"avg_sms_delivery_time": "6.6",
"http_failure_session": "2.6",
"http_ul_mean_userdata_rate": "2488.9",
"http_dl_mean_userdata_rate": "9700.2",
"http_session_time": "10.3",
"number_of_test_voice_connections": 7818,
"number_of_voice_sequences": 147909,
"voice_connections_with_low_intelligibility": 374,
"number_of_sms_messages": 500,
"number_of_connections_attempts_http": 1729,
"number_of_test_sessions_http": 2204
}
]
class ReportDetailView(generics.RetrieveAPIView):
"""Возвращает информацию о конретном отчет по ID."""
lookup_field = 'pk' | queryset = Report.objects.all() | 3 | 2023-11-09 12:55:04+00:00 | 4k |
lz1oceani/LLM-As-Hierarchical-Policy | hlm/utils/answer_utils.py | [
{
"identifier": "filter_stripped_lines",
"path": "hlm/utils/text_utils.py",
"snippet": "def filter_stripped_lines(lines):\n return [_.strip() for _ in lines if len(_.strip()) > 0]"
},
{
"identifier": "unique_texts",
"path": "hlm/utils/text_utils.py",
"snippet": "def unique_texts(texts):\n return list(dict.fromkeys(texts))"
},
{
"identifier": "all_matched_pos",
"path": "hlm/utils/text_utils.py",
"snippet": "def all_matched_pos(pattern, text):\n if isinstance(pattern, (list, tuple)):\n pattern = \"(\" + \"|\".join(pattern) + \")\"\n return sorted([match.start() for match in re.finditer(pattern, text)])"
}
] | import numpy as np, time, re, signal, math, os, warnings
from numbers import Number
from sympy import Symbol, Eq, simplify, solve
from sympy.parsing.latex import parse_latex
from math import *
from .text_utils import filter_stripped_lines, unique_texts, all_matched_pos
from .math_answer_utils import normalize_numbers, unwrap_latex_env, clean_up_latex_answer
from .metric_utils import compare_items | 1,794 | NO_ANSWER_TEMPLATE = [
"we can(not|n't)(?:.*)answer(?:.*)(?:the|this) question",
"we do( not|n't)(?:.*)answer(?:.*)(?:the|this) question",
"we can(not|n't) (determine|find)",
"there (are|is) no (solutions|answer|answers)" "the answer(.*?)is unknown",
"Finally,(.*?)to find the answer.$",
]
def get_answer_type(dataset_name):
dataset_name = dataset_name.lower()
num_names = ["gsm"]
latex_names = ["math"]
check = lambda name, keywords: any([_ in name for _ in keywords])
if check(dataset_name, num_names):
return "number"
elif check(dataset_name, latex_names):
return "latex"
else:
raise NotImplementedError
def get_re_templates(answer_type, choices=None):
templates = {
"number": ["(-?\(\d+\/\d+\)\/\d+|-?\d+\/\d+)", "(-?\d[\d,\. ]*)"],
"latex": [],
}
return templates.get(answer_type, None)
def extract_all_numbers(text):
templates = get_re_templates("number", None)
for template in templates:
nums = re.findall(template, text)
if len(nums) > 0:
return nums
return []
def extract_all_expressions(text):
if "$" in text:
text = text.replace("$\$", "$")
num = text.count("$")
if num % 2 == 0:
return list(re.findall(r"\$([^\$]*)\$", text))
else:
return []
pairs = [[r"\[", r"\]"], [r"\\begin\{align\}", r"\\end\{align\}"], [r"\\begin\{align\*\}", r"\\end\{align\*\}"]]
ret = []
for start, end in pairs:
sign = re.search(start, text) is not None and re.search(end, text) is not None
if sign:
ret += re.findall(rf"{start}([^{start}{end}]*){end}", text)
return ret
def extract_text_answer(text, answer_type=None, final_answer=None):
templates = get_re_templates(answer_type, None)
split_words = ["Therefore", ", so", "is"]
def remove_equal(nums):
if answer_type == "number" or "=" not in final_answer:
tmp = []
for num in nums:
if "=" in num:
num = num.split("=")[-1].strip()
if "\equiv" in num:
num = re.split(r"\\equiv", num)[-1].strip()
tmp.append(num)
nums = tmp
return nums
if "\\boxed" in text:
text = unwrap_latex_env(text, "boxed", is_single=True)
text = unwrap_latex_env(text, "textsf", is_single=False)
return remove_equal(clean_up_latex_answer(text))[0]
check = lambda _: ("$" in _ or "\[" in _) and answer_type == "latex"
clean_up_fn = clean_up_latex_answer if answer_type == "latex" else normalize_numbers
nums = []
for pos in all_matched_pos(split_words, text)[::-1]:
extract_fn = extract_all_expressions if check(text[pos:]) else extract_all_numbers
nums = extract_fn(text[pos:])
if len(nums) > 0:
break
if len(nums) == 0:
extract_fn = extract_all_expressions if check(text) else extract_all_numbers
nums = extract_fn(text)
if len(nums) >= 1:
nums = remove_equal(nums)
for num in nums:
if compare_items(num, final_answer, answer_type if answer_type == "number" else "text"): # About %1 in GSM
return clean_up_fn(num)
ret = nums[0]
return clean_up_fn(ret)
else:
return None
def extract_answer_from_sentence(sentence):
ret = sentence
for pattern in ANSWER_SPLIT_PATTERNS:
indices = list(re.finditer(pattern, sentence, flags=re.IGNORECASE))
if len(indices) > 0:
tmp = sentence[indices[-1].start() :]
if len(tmp) < len(ret):
ret = tmp
return ret
def extract_answers(responses, answer_type=None, max_num_lines=3, final_answer=None, **kwargs):
if isinstance(responses, list):
return [extract_answers(_, answer_type, max_num_lines, final_answer, **kwargs) for _ in responses]
sentences = re.split(r"\n", responses) # Split text by new line or latex expression \]
sentences = [re.sub(r"^#?\d+\. ", "", _) for _ in sentences] # remove starting #1, #2, ...
sentences = [_ for _ in sentences if not _.strip("#").lower().startswith("reference:")] # remove reference lines in Natural Program
|
warnings.simplefilter("ignore", SyntaxWarning)
class TimeoutException(Exception):
pass
ANSWER_SPLIT_PATTERNS = [
"answer is:?",
"answer:",
"answer to (?:the|this) question is",
# last letters
"concatenated letters are",
"concatenate the letters -",
"The answer of ",
]
NEGATIVE_PATTERNS = [
"is not needed to answer the question",
]
ANSWER_PREFIX = [
"answer: ",
"Therefore, there will be ",
"Therefore, \w+ have ",
"Therefore, \w+ and \w+ have ",
"Therefore, \w+ has ",
"Therefore,(.*?)is ",
"there (are|is) ",
"answer to(.*?)is ",
"answer to(.*?)will be ",
"answer to(.*?)would be ",
"answer to(.*?)becomes ",
"Therefore,(.*?)will be ",
"Therefore,(.*?)would be ",
"Therefore,(.*?)cost ",
"Therefore,(.*?)costs ",
"Therefore,(.*?)a total of ",
"There will be ",
"Therefore, ",
"[A-Z]\w+ will have ",
"[A-Z]\w+ have ",
"[A-Z]\w+ has ",
"\w+ still has ",
"^[A-Z]\w+ \w+ ",
" is ",
]
NUMBER_FIX_MAP = {
" zero ": " 0 ",
" no ": " 0 ",
" a ": " 1 ",
" one ": " 1 ",
" two ": " 2 ",
" three ": " 3 ",
" four ": " 4 ",
" five ": " 5 ",
" six ": " 6 ",
" seven ": " 7 ",
" eight ": " 8 ",
" nine ": " 9 ",
" ten ": " 10 ",
"\u2013": "-",
"hundred": "*100",
"thousand": "*1000",
"million": "*(10**6)",
"billion": "*(10**9)",
"trillion": "*(10**12)",
}
NO_ANSWER_TEMPLATE = [
"we can(not|n't)(?:.*)answer(?:.*)(?:the|this) question",
"we do( not|n't)(?:.*)answer(?:.*)(?:the|this) question",
"we can(not|n't) (determine|find)",
"there (are|is) no (solutions|answer|answers)" "the answer(.*?)is unknown",
"Finally,(.*?)to find the answer.$",
]
def get_answer_type(dataset_name):
dataset_name = dataset_name.lower()
num_names = ["gsm"]
latex_names = ["math"]
check = lambda name, keywords: any([_ in name for _ in keywords])
if check(dataset_name, num_names):
return "number"
elif check(dataset_name, latex_names):
return "latex"
else:
raise NotImplementedError
def get_re_templates(answer_type, choices=None):
templates = {
"number": ["(-?\(\d+\/\d+\)\/\d+|-?\d+\/\d+)", "(-?\d[\d,\. ]*)"],
"latex": [],
}
return templates.get(answer_type, None)
def extract_all_numbers(text):
templates = get_re_templates("number", None)
for template in templates:
nums = re.findall(template, text)
if len(nums) > 0:
return nums
return []
def extract_all_expressions(text):
if "$" in text:
text = text.replace("$\$", "$")
num = text.count("$")
if num % 2 == 0:
return list(re.findall(r"\$([^\$]*)\$", text))
else:
return []
pairs = [[r"\[", r"\]"], [r"\\begin\{align\}", r"\\end\{align\}"], [r"\\begin\{align\*\}", r"\\end\{align\*\}"]]
ret = []
for start, end in pairs:
sign = re.search(start, text) is not None and re.search(end, text) is not None
if sign:
ret += re.findall(rf"{start}([^{start}{end}]*){end}", text)
return ret
def extract_text_answer(text, answer_type=None, final_answer=None):
templates = get_re_templates(answer_type, None)
split_words = ["Therefore", ", so", "is"]
def remove_equal(nums):
if answer_type == "number" or "=" not in final_answer:
tmp = []
for num in nums:
if "=" in num:
num = num.split("=")[-1].strip()
if "\equiv" in num:
num = re.split(r"\\equiv", num)[-1].strip()
tmp.append(num)
nums = tmp
return nums
if "\\boxed" in text:
text = unwrap_latex_env(text, "boxed", is_single=True)
text = unwrap_latex_env(text, "textsf", is_single=False)
return remove_equal(clean_up_latex_answer(text))[0]
check = lambda _: ("$" in _ or "\[" in _) and answer_type == "latex"
clean_up_fn = clean_up_latex_answer if answer_type == "latex" else normalize_numbers
nums = []
for pos in all_matched_pos(split_words, text)[::-1]:
extract_fn = extract_all_expressions if check(text[pos:]) else extract_all_numbers
nums = extract_fn(text[pos:])
if len(nums) > 0:
break
if len(nums) == 0:
extract_fn = extract_all_expressions if check(text) else extract_all_numbers
nums = extract_fn(text)
if len(nums) >= 1:
nums = remove_equal(nums)
for num in nums:
if compare_items(num, final_answer, answer_type if answer_type == "number" else "text"): # About %1 in GSM
return clean_up_fn(num)
ret = nums[0]
return clean_up_fn(ret)
else:
return None
def extract_answer_from_sentence(sentence):
ret = sentence
for pattern in ANSWER_SPLIT_PATTERNS:
indices = list(re.finditer(pattern, sentence, flags=re.IGNORECASE))
if len(indices) > 0:
tmp = sentence[indices[-1].start() :]
if len(tmp) < len(ret):
ret = tmp
return ret
def extract_answers(responses, answer_type=None, max_num_lines=3, final_answer=None, **kwargs):
if isinstance(responses, list):
return [extract_answers(_, answer_type, max_num_lines, final_answer, **kwargs) for _ in responses]
sentences = re.split(r"\n", responses) # Split text by new line or latex expression \]
sentences = [re.sub(r"^#?\d+\. ", "", _) for _ in sentences] # remove starting #1, #2, ...
sentences = [_ for _ in sentences if not _.strip("#").lower().startswith("reference:")] # remove reference lines in Natural Program | sentences = filter_stripped_lines(sentences) | 0 | 2023-11-01 17:15:42+00:00 | 4k |
mitre/arlin | tests/test_analysis/test_visualization/test_visualization.py | [
{
"identifier": "COLORS",
"path": "arlin/analysis/visualization/colors.py",
"snippet": "COLORS = [\n base[\"b\"],\n tableau[\"tab:orange\"],\n base[\"g\"],\n base[\"r\"],\n base[\"c\"],\n base[\"m\"],\n base[\"y\"],\n base[\"k\"],\n tableau[\"tab:blue\"],\n tableau[\"tab:green\"],\n tableau[\"tab:red\"],\n tableau[\"tab:purple\"],\n tableau[\"tab:brown\"],\n tableau[\"tab:pink\"],\n tableau[\"tab:gray\"],\n css4[\"brown\"],\n css4[\"salmon\"],\n css4[\"chocolate\"],\n css4[\"burlywood\"],\n css4[\"darkgoldenrod\"],\n css4[\"gold\"],\n css4[\"khaki\"],\n css4[\"yellow\"],\n css4[\"darkolivegreen\"],\n css4[\"chartreuse\"],\n css4[\"lime\"],\n css4[\"turquoise\"],\n css4[\"darkslategray\"],\n css4[\"cadetblue\"],\n css4[\"powderblue\"],\n css4[\"steelblue\"],\n css4[\"dodgerblue\"],\n css4[\"royalblue\"],\n css4[\"navy\"],\n css4[\"mediumblue\"],\n css4[\"slateblue\"],\n css4[\"blueviolet\"],\n css4[\"mediumorchid\"],\n css4[\"darkmagenta\"],\n css4[\"magenta\"],\n css4[\"deeppink\"],\n css4[\"palevioletred\"],\n]"
},
{
"identifier": "GraphData",
"path": "arlin/analysis/visualization/visualization.py",
"snippet": "class GraphData:\n \"\"\"Class to save data that can be graphed in matplotlib.\"\"\"\n\n def __init__(\n self,\n x: np.ndarray,\n y: np.ndarray,\n title: str,\n colors: Optional[List[str]] = None,\n legend: Optional[Dict] = None,\n cmap: Optional[str] = None,\n error_bars: Optional[List[float]] = None,\n xlabel: Optional[str] = None,\n ylabel: Optional[str] = None,\n showall: bool = False,\n ):\n \"\"\"Initialize a GraphData object.\n\n Args:\n x (np.ndarray): X axis data\n y (np.ndarray): Y axis data\n title (str): Title of the graph\n colors (Optional[List[str]], optional): Point color for each datapoint.\n Defaults to None.\n legend (Optional[Dict], optional): Add a legend to the side of the graph.\n Defaults to None.\n cmap (Optional[str], optional): Add a colorbar to the side of the graph.\n Defaults to None.\n error_bars (Optional[List[float]], optional): Error bars for each datapoint.\n Defaults to None.\n xlabel (Optional[str], optional): Xlabels for the graph. Defaults to None.\n ylabel (Optional[str], optional): Ylabels for the graph. Defaults to None.\n showall (bool, optional): Show all axis in the figure. Defaults to False.\n \"\"\"\n self.x = x\n self.y = y\n self.title = title\n self.colors = colors\n self.legend = legend\n self.cmap = cmap\n self.error_bars = error_bars\n self.xlabel = xlabel\n self.ylabel = ylabel\n self.showall = showall\n\n def get_data(self) -> Dict[str, Any]:\n \"\"\"Get the data from within this GraphData.\n\n Returns:\n Dict[str, Any]: Dictionary with all stored class information.\n \"\"\"\n data = {\n \"x\": self.x,\n \"y\": self.y,\n \"title\": self.title,\n \"colors\": self.colors,\n \"legend\": self.legend,\n \"cmap\": self.cmap,\n \"error_bars\": self.error_bars,\n \"xlabel\": self.xlabel,\n \"ylabel\": self.ylabel,\n \"showall\": self.showall,\n }\n\n return data"
},
{
"identifier": "graph_individual_data",
"path": "arlin/analysis/visualization/visualization.py",
"snippet": "def graph_individual_data(\n filename: str,\n data: GraphData,\n) -> None:\n \"\"\"Graph given GraphData to a single plot and save a PNG to the given file.\n\n Args:\n data (GraphData): Data necessary to graph and individual plot.\n filename (str): Name for the PNG file.\n \"\"\"\n _ = plt.scatter(data.x, data.y, c=data.colors, cmap=data.cmap, s=1)\n\n if not data.showall:\n plt.axis(\"off\")\n else:\n plt.xticks(data.x)\n plt.xticks(rotation=90)\n plt.xlabel(data.xlabel)\n plt.ylabel(data.ylabel)\n\n plt.title(data.title)\n\n if data.legend is not None:\n data.legend.update({\"bbox_to_anchor\": (1.05, 1.0), \"loc\": \"upper left\"})\n plt.legend(**data.legend)\n\n if data.cmap is not None:\n plt.colorbar()\n\n if data.error_bars is not None:\n for i in range(len(data.x)):\n plt.errorbar(\n data.x[i],\n data.y[i],\n yerr=data.error_bars[i],\n ecolor=data.colors[i],\n mec=data.colors[i],\n mfc=data.colors[i],\n fmt=\"o\",\n capsize=5,\n )\n\n plt.tight_layout()\n\n logging.info(f\"Saving individual graph png to {filename}...\")\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n plt.savefig(filename, bbox_inches=\"tight\")\n plt.close()"
},
{
"identifier": "graph_multiple_data",
"path": "arlin/analysis/visualization/visualization.py",
"snippet": "def graph_multiple_data(\n file_path: str,\n figure_title: str,\n graph_datas: List[GraphData],\n horizontal: bool = True,\n) -> None:\n \"\"\"Graph multiple GraphDatas in the same figure.\n\n Args:\n file_path (str): Path to save figure to.\n figure_title (str): Title of the combination graph.\n graph_datas (List[GraphData]): GraphDatast to graph together.\n horizontal (bool, optional): Whether the figure should be wider than it is tall.\n Defaults to True.\n \"\"\"\n num_plots = len(graph_datas)\n nrows, ncols = _find_subplot_dims(num_plots, horizontal)\n\n fig, axs = plt.subplots(int(nrows), int(ncols))\n fig.set_size_inches(6 * ncols, 4 * nrows)\n fig.suptitle(figure_title)\n\n cur_num = 0\n for irow in range(int(nrows)):\n for icol in range(int(ncols)):\n data = graph_datas[cur_num]\n\n if horizontal and nrows == 1:\n axis = axs[icol]\n elif not horizontal and ncols == 1:\n axis = axs[irow]\n else:\n axis = axs[irow, icol]\n\n scp = axis.scatter(data.x, data.y, c=data.colors, cmap=data.cmap, s=1)\n\n axis.set_title(data.title)\n axis.title.set_size(10)\n\n if not data.showall:\n axis.axis(\"off\")\n else:\n axis.set_xticks(data.x)\n axis.set_xticklabels(axis.get_xticks(), rotation=90)\n axis.set_xlabel(data.xlabel)\n axis.set_ylabel(data.ylabel)\n\n if data.legend is not None: # and len(data.legend[\"labels\"]) < 4:\n extra_legends = {\"bbox_to_anchor\": (1.05, 1.0), \"loc\": \"upper left\"}\n data.legend.update(extra_legends)\n axis.legend(**data.legend)\n\n if data.cmap is not None:\n plt.colorbar(scp, ax=axis)\n\n if data.error_bars is not None:\n for i in range(len(data.x)):\n axis.errorbar(\n data.x[i],\n data.y[i],\n yerr=data.error_bars[i],\n ecolor=data.colors[i],\n mec=data.colors[i],\n mfc=data.colors[i],\n fmt=\"o\",\n capsize=5,\n )\n\n plt.tight_layout()\n\n cur_num += 1\n\n logging.info(f\"Saving combination graph png to {file_path}...\")\n os.makedirs(os.path.dirname(file_path), exist_ok=True)\n plt.savefig(file_path, bbox_inches=\"tight\")\n plt.close()"
},
{
"identifier": "_find_subplot_dims",
"path": "arlin/analysis/visualization/visualization.py",
"snippet": "def _find_subplot_dims(num_plots: int, horizontal: bool) -> Tuple[int, int]:\n \"\"\"Find the optimal dimensions needed for the subplot.\n\n Args:\n num_plots (int): Number of plots to graph.\n horizontal (bool): Should the figure be wider or taller?\n\n Returns:\n Tuple[int, int]: Height dimension, Width dimension\n \"\"\"\n # if number is a square number\n if num_plots == isqrt(num_plots) ** 2:\n return sqrt(num_plots), sqrt(num_plots)\n\n if num_plots == 2:\n dim_long = 2\n dim_short = 1\n elif num_plots % 2 == 0:\n dim_long = int(num_plots / 2)\n dim_short = 2\n else:\n dim_long = num_plots\n dim_short = 1\n\n if horizontal:\n return dim_short, dim_long\n else:\n return dim_long, dim_short"
}
] | import os
import numpy as np
import pytest
from matplotlib.patches import Patch
from arlin.analysis.visualization import (
COLORS,
GraphData,
graph_individual_data,
graph_multiple_data,
)
from arlin.analysis.visualization.visualization import _find_subplot_dims | 2,481 |
@pytest.fixture
def graph_data():
x = np.array([0, 1, 2, 3, 4])
y = np.array([2, 4, 6, 8, 10])
title = "Test"
colors = COLORS[0:5]
handles = [Patch(color=COLORS[i], label=str(i)) for i in range(5)]
labels = [f"Test {i}" for i in range(5)]
leg_title = "Test Groups"
legend = {"handles": handles, "labels": labels, "title": leg_title}
cmap = "viridis"
error_bars = [0.5, 0.5, 0.5, 0.5, 0.5]
xlabel = "Time Steps"
ylabel = "Values"
showall = True
|
@pytest.fixture
def graph_data():
x = np.array([0, 1, 2, 3, 4])
y = np.array([2, 4, 6, 8, 10])
title = "Test"
colors = COLORS[0:5]
handles = [Patch(color=COLORS[i], label=str(i)) for i in range(5)]
labels = [f"Test {i}" for i in range(5)]
leg_title = "Test Groups"
legend = {"handles": handles, "labels": labels, "title": leg_title}
cmap = "viridis"
error_bars = [0.5, 0.5, 0.5, 0.5, 0.5]
xlabel = "Time Steps"
ylabel = "Values"
showall = True
| graphdata = GraphData( | 1 | 2023-11-08 13:57:45+00:00 | 4k |
Giftify-Bot/Giftify-Bot | cogs/donations/donation_category.py | [
{
"identifier": "Giftify",
"path": "bot.py",
"snippet": "class Giftify(GiftifyHelper, commands.AutoShardedBot):\r\n user: discord.ClientUser\r\n\r\n colour: int = 0xCB3045\r\n __version_info__ = \"1.1.4\"\r\n\r\n def __init__(\r\n self,\r\n *,\r\n log_handler: LogHandler,\r\n pool: asyncpg.Pool,\r\n session: aiohttp.ClientSession,\r\n amari_client: AmariClient,\r\n ) -> None:\r\n self._log_handler = log_handler\r\n self._pool = pool\r\n self._session = session\r\n self._amari_client = amari_client\r\n\r\n intents = discord.Intents(messages=True, emojis=True, guilds=True)\r\n allowed_mentions = discord.AllowedMentions(everyone=False, roles=False, users=True, replied_user=False)\r\n member_cache_flags = discord.MemberCacheFlags.from_intents(intents=intents)\r\n\r\n sentry_sdk.init(\r\n dsn=os.environ[\"SENTRY_DSN\"],\r\n integrations=[\r\n LoggingIntegration(\r\n level=logging.INFO,\r\n event_level=logging.ERROR,\r\n )\r\n ],\r\n traces_sample_rate=1.0,\r\n )\r\n\r\n super().__init__(\r\n command_prefix=commands.when_mentioned,\r\n tree_cls=CommandTree,\r\n help_command=None,\r\n description=\"A giveaway bot for hosting giveaways.\",\r\n intents=intents,\r\n allowed_mentions=allowed_mentions,\r\n chunk_guilds_at_startup=False,\r\n max_messages=None,\r\n activity=discord.CustomActivity(name=\"\\N{LINK SYMBOL} https://giftifybot.vercel.app\"),\r\n member_cache_flags=member_cache_flags,\r\n owner_ids=OWNER_IDS,\r\n )\r\n\r\n @property\r\n def log_handler(self) -> LogHandler:\r\n return self._log_handler\r\n\r\n @property\r\n def pool(self) -> asyncpg.Pool:\r\n return self._pool\r\n\r\n @property\r\n def session(self) -> aiohttp.ClientSession:\r\n return self._session\r\n\r\n @property\r\n def amari_client(self) -> AmariClient:\r\n return self._amari_client\r\n\r\n @property\r\n def timer_cog(self) -> TimerManager:\r\n return self.get_cog(\"TimerManager\") # type: ignore\r\n\r\n def run(self) -> None:\r\n raise NotImplementedError(\"Please use `.start()` instead.\")\r\n\r\n async def on_ready(self) -> None:\r\n self.log_handler.log.info(\"%s got a ready event at %s\", self.user.name, datetime.datetime.now())\r\n\r\n async def on_resume(self) -> None:\r\n self.log_handler.log.info(\"%s got a resume event at %s\", self.user.name, datetime.datetime.now())\r\n\r\n async def on_command_error(self, ctx: commands.Context, error: commands.CommandError) -> None:\r\n if isinstance(error, commands.CommandInvokeError):\r\n origin_ = error.original\r\n assert ctx.command is not None\r\n if not isinstance(origin_, discord.HTTPException):\r\n print(f\"In {ctx.command.qualified_name}:\", file=sys.stderr)\r\n traceback.print_tb(origin_.__traceback__)\r\n print(f\"{origin_.__class__.__name__}: {origin_}\", file=sys.stderr)\r\n sentry_sdk.capture_exception(error)\r\n\r\n async def start(self) -> None:\r\n await super().start(token=os.environ[\"TOKEN\"], reconnect=True)\r\n\r\n async def setup_hook(self) -> None:\r\n self.start_time: datetime.datetime = datetime.datetime.now(datetime.timezone.utc)\r\n\r\n self.bot_app_info = await self.application_info()\r\n self.owner_ids = OWNER_IDS\r\n\r\n async def get_or_fetch_user(self, user_id: int) -> Optional[discord.User]:\r\n \"\"\"Looks up a user in cache or fetches if not found.\r\n\r\n Parameters\r\n -----------\r\n user_id: int\r\n The user ID to search for.\r\n\r\n Returns\r\n ---------\r\n Optional[User]\r\n The user or None if not found.\r\n \"\"\"\r\n\r\n user = self.get_user(user_id)\r\n if user is not None:\r\n return user\r\n\r\n try:\r\n user = await self.fetch_user(user_id)\r\n except 
discord.HTTPException:\r\n return None\r\n else:\r\n return user\r\n\r\n async def get_or_fetch_member(self, guild: discord.Guild, member_id: int) -> Optional[discord.Member]:\r\n \"\"\"Looks up a member in cache or fetches if not found.\r\n\r\n Parameters\r\n -----------\r\n guild: Guild\r\n The guild to look in.\r\n member_id: int\r\n The member ID to search for.\r\n\r\n Returns\r\n ---------\r\n Optional[Member]\r\n The member or None if not found.\r\n \"\"\"\r\n\r\n member = guild.get_member(member_id)\r\n if member is not None:\r\n return member\r\n\r\n shard: discord.ShardInfo = self.get_shard(guild.shard_id) # type: ignore # will never be None\r\n if shard.is_ws_ratelimited():\r\n try:\r\n member = await guild.fetch_member(member_id)\r\n except discord.HTTPException:\r\n return None\r\n else:\r\n return member\r\n\r\n members = await guild.query_members(limit=1, user_ids=[member_id], cache=True)\r\n if not members:\r\n return None\r\n return members[0]\r"
},
{
"identifier": "GuildDonationConfig",
"path": "models/donation_settings.py",
"snippet": "class GuildDonationConfig:\n \"\"\"Represents the donation configuration settings for a guild.\n\n Parameters\n ----------\n bot: Giftify\n The bot instance handling the configuration.\n guild discord.Guild\n The guild associated with the configuration.\n category: str\n The category or name of the donation configuration.\n symbol: str\n The symbol or identifier of the donation category.\n roles: Dict[int, discord.Role]\n A dictionary mapping of amount to `discord.Role`.\n managers: List[discord.Role]\n A list of `discord.Role` objects representing the roles with donation management permissions.\n logging: Optional[discord.TextChannel]\n An optional `discord.TextChannel` object used for logging donation events.\n \"\"\"\n\n __slots__: Tuple[str, ...] = (\n \"bot\",\n \"guild\",\n \"category\",\n \"symbol\",\n \"roles\",\n \"managers\",\n \"logging\",\n )\n\n def __init__(\n self,\n bot: Giftify,\n *,\n guild: discord.Guild,\n category: str,\n symbol: str,\n roles: Dict[int, discord.Role],\n managers: List[discord.Role],\n logging: Optional[discord.TextChannel] = None,\n ):\n self.bot = bot\n self.guild = guild\n self.category = category\n self.symbol = symbol\n self.roles = roles\n self.managers = managers\n self.logging = logging\n\n def __str__(self):\n return self.category\n\n def __repr__(self):\n return f\"<GuildDonationConfig guild={self.guild!r}> category={self.category}\"\n\n @classmethod\n async def create(\n cls, guild_id: int, category: str, bot: Giftify, *, symbol: Optional[str] = None\n ) -> \"GuildDonationConfig\":\n record = await bot.pool.fetchrow(\n \"INSERT INTO donation_configs (guild, category, symbol) VALUES ($1, $2, $3) RETURNING *\",\n guild_id,\n category,\n symbol,\n )\n instance = await cls.from_record(bot, record=record)\n assert instance is not None\n return instance\n\n @classmethod\n async def from_record(\n cls, bot: Giftify, *, record: asyncpg.Record\n ) -> Optional[\"GuildDonationConfig\"]:\n guild = bot.get_guild(record[\"guild\"])\n if not guild:\n return None\n\n category = record[\"category\"]\n symbol = record[\"symbol\"]\n roles = {}\n managers = []\n logging: Optional[discord.TextChannel] = (\n guild.get_channel(record[\"logging\"]) if record[\"logging\"] else None\n ) # type: ignore\n\n for amount, role_id in record[\"roles\"].items():\n if role := guild.get_role(role_id):\n roles[int(amount)] = role\n\n for role_id in record[\"managers\"]:\n if role := guild.get_role(role_id):\n managers.append(role)\n\n return cls(\n bot,\n guild=guild,\n category=category,\n symbol=symbol,\n roles=roles,\n managers=managers,\n logging=logging,\n )\n\n async def update(\n self,\n key: str,\n value: Union[\n str, discord.TextChannel, Dict[int, discord.Role], List[discord.Role]\n ],\n ) -> None:\n \"\"\"\n Update a specific attribute of the GuildDonationConfig.\n\n Parameters\n ----------\n key: str\n The attribute name to be updated. Should be one of \"category\", \"symbol\", \"logging\", \"roles\", or \"managers\".\n value: Union[str, discord.TextChannel, Dict[int, discord.Role], List[discord.Role]]\n The new value for the attribute.\n\n Raises\n ------\n ValueError\n If an invalid key is provided.\n If the value is not of the expected type for the specified key.\n\n Returns\n -------\n None\n \"\"\"\n if key not in [\"category\", \"symbol\", \"logging\", \"roles\", \"managers\"]:\n raise ValueError(\n \"Invalid key provided. 
Valid keys are 'category', 'symbol', 'logging', 'roles', and 'managers'.\"\n )\n\n if key in [\"category\", \"symbol\"]:\n await self._update_config(key, str(value))\n setattr(self, key, value)\n elif key == \"logging\":\n if not isinstance(value, discord.TextChannel):\n raise ValueError(\"Value for 'logging' must be a discord.TextChannel.\")\n self.logging = value\n\n await self._update_config(key, value.id)\n elif key == \"roles\":\n if not isinstance(value, dict):\n raise ValueError(\"Value for 'roles' must be a dictionary.\")\n self.roles = value\n role_values = {amount: role.id for amount, role in value.items()}\n await self._update_config(key, role_values)\n elif key == \"managers\":\n if not isinstance(value, list):\n raise ValueError(\"Value for 'managers' must be a list.\")\n self.managers = value\n role_ids = [role.id for role in value]\n await self._update_config(key, role_ids)\n\n async def _update_config(\n self, key: str, value: Union[str, int, List[int], Dict[int, int]]\n ) -> None:\n await self.bot.pool.execute(\n f\"UPDATE donation_configs SET {key} = $1 WHERE guild = $2 AND category = $3\",\n value,\n self.guild.id,\n self.category,\n )\n\n async def delete(self):\n await self.bot.pool.execute(\n \"DELETE FROM donation_configs WHERE guild = $1 AND category = $2\",\n self.guild.id,\n self.category,\n )\n\n async def reset(self):\n await self.bot.pool.execute(\n \"DELETE FROM donations WHERE guild = $1 AND category = $2\",\n self.guild.id,\n self.category,\n )"
},
{
"identifier": "DonationCategoryTransformer",
"path": "utils/transformers.py",
"snippet": "class DonationCategoryTransformer(app_commands.Transformer):\r\n async def transform(\r\n self, interaction: Interaction, value: str\r\n ) -> GuildDonationConfig:\r\n assert interaction.guild is not None\r\n\r\n config = interaction.client.get_donation_config(interaction.guild, value)\r\n if not config:\r\n raise InvalidDonationCategoryError(\r\n f\"The donation category of name {value} does not exist!\",\r\n )\r\n\r\n return config\r\n\r\n async def autocomplete(\r\n self,\r\n interaction: Interaction,\r\n current: str,\r\n ) -> List[app_commands.Choice[str]]:\r\n assert interaction.guild is not None\r\n\r\n return [\r\n app_commands.Choice(name=category, value=category)\r\n for category in interaction.client.get_guild_donation_categories(\r\n interaction.guild\r\n )\r\n if current.lower() in category.lower()\r\n ]\r"
},
{
"identifier": "Interaction",
"path": "utils/tree.py",
"snippet": "class CommandTree(app_commands.CommandTree):\r\n async def on_error(\r\n self,\r\n interaction: Interaction,\r\n error: app_commands.AppCommandError,\r\n ) -> None:\r"
}
] | import datetime
import discord
from discord import app_commands
from discord.app_commands import Range, Transform
from discord.ext import commands
from bot import Giftify
from models.donation_settings import GuildDonationConfig
from utils.transformers import DonationCategoryTransformer
from utils.tree import Interaction | 3,494 |
class DonationCategory(commands.GroupCog):
"""Cog for creating/deleting donation category."""
bot: Giftify
category_command = app_commands.Group(
name="category",
description="Commands for creating or deleting donation categories.",
guild_only=True,
)
@category_command.command(name="create")
@app_commands.describe(
category="The unique name of the donation category.",
symbol="The symbol to represent the category.",
)
@app_commands.checks.has_permissions(manage_guild=True)
@app_commands.checks.cooldown(1, 5, key=lambda i: (i.guild, i.user.id))
async def donation_category_create(
self,
interaction: Interaction,
category: Range[str, 3, 50],
symbol: Range[str, 1, 1] = "$",
) -> None:
"""The command to create a new donation category."""
await interaction.response.defer()
assert interaction.guild is not None
config = self.bot.get_donation_config(interaction.guild, category)
if config:
return await interaction.client.send(
interaction,
f"The donation category of name {category} already exists!",
"warn",
)
if len(self.bot.get_guild_donation_categories(interaction.guild)) >= 25:
return await interaction.client.send(
interaction,
"You cannot create more than `25` donation categories.",
"warn",
)
config = await GuildDonationConfig.create(
interaction.guild.id, category, self.bot, symbol=symbol
)
self.bot.donation_configs.append(config)
await interaction.client.send(
interaction,
f"Successfully created the donation category of name {category} and symbol {symbol}!",
)
@category_command.command(name="delete")
@app_commands.describe(category="The unique name of the donation category.")
@app_commands.checks.has_permissions(manage_guild=True)
@app_commands.checks.cooldown(1, 25, key=lambda i: (i.guild, i.user.id))
async def donation_category_delete(
self,
interaction: Interaction,
|
class DonationCategory(commands.GroupCog):
"""Cog for creating/deleting donation category."""
bot: Giftify
category_command = app_commands.Group(
name="category",
description="Commands for creating or deleting donation categories.",
guild_only=True,
)
@category_command.command(name="create")
@app_commands.describe(
category="The unique name of the donation category.",
symbol="The symbol to represent the category.",
)
@app_commands.checks.has_permissions(manage_guild=True)
@app_commands.checks.cooldown(1, 5, key=lambda i: (i.guild, i.user.id))
async def donation_category_create(
self,
interaction: Interaction,
category: Range[str, 3, 50],
symbol: Range[str, 1, 1] = "$",
) -> None:
"""The command to create a new donation category."""
await interaction.response.defer()
assert interaction.guild is not None
config = self.bot.get_donation_config(interaction.guild, category)
if config:
return await interaction.client.send(
interaction,
f"The donation category of name {category} already exists!",
"warn",
)
if len(self.bot.get_guild_donation_categories(interaction.guild)) >= 25:
return await interaction.client.send(
interaction,
"You cannot create more than `25` donation categories.",
"warn",
)
config = await GuildDonationConfig.create(
interaction.guild.id, category, self.bot, symbol=symbol
)
self.bot.donation_configs.append(config)
await interaction.client.send(
interaction,
f"Successfully created the donation category of name {category} and symbol {symbol}!",
)
@category_command.command(name="delete")
@app_commands.describe(category="The unique name of the donation category.")
@app_commands.checks.has_permissions(manage_guild=True)
@app_commands.checks.cooldown(1, 25, key=lambda i: (i.guild, i.user.id))
async def donation_category_delete(
self,
interaction: Interaction, | category: Transform[GuildDonationConfig, DonationCategoryTransformer], | 2 | 2023-11-09 15:00:15+00:00 | 4k |
Zjy0401/CoCoFormer | utilities/run_model.py | [
{
"identifier": "get_device",
"path": "utilities/device.py",
"snippet": "def get_device():\n\n if((not USE_CUDA) or (TORCH_CUDA_DEVICE is None)):\n return TORCH_CPU_DEVICE\n else:\n return TORCH_CUDA_DEVICE"
},
{
"identifier": "get_lr",
"path": "utilities/lr_scheduling.py",
"snippet": "def get_lr(optimizer):\n\n for param_group in optimizer.param_groups:\n return param_group['lr']"
},
{
"identifier": "parse_train_args",
"path": "utilities/argument_funcs.py",
"snippet": "def parse_train_args():\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"-input_dir\", type=str, default=\"./dataset/dataset/JSF_SATB\", help=\"Folder of preprocessed and pickled midi files\")\n parser.add_argument(\"-output_dir\", type=str, default=\"./baseline_3loss\", help=\"Folder to save model weights. Saves one every epoch\")\n parser.add_argument(\"-weight_modulus\", type=int, default=1, help=\"How often to save epoch weights (ex: value of 10 means save every 10 epochs)\")\n parser.add_argument(\"-print_modulus\", type=int, default=1, help=\"How often to print train results for a batch (batch loss, learn rate, etc.)\")\n parser.add_argument(\"-word2event\", type=str, default='./dataset/word2event.pkl', help='word table location: *.pkl')\n parser.add_argument(\"-n_workers\", type=int, default=2, help=\"Number of threads for the dataloader\")\n parser.add_argument(\"--force_cpu\", action=\"store_true\", help=\"Forces model to run on a cpu even when gpu is available\")\n parser.add_argument(\"--gpu\", default=[2], nargs='+', type=int, help=\"For Multi-GPUs training\")\n parser.add_argument(\"--no_tensorboard\", action=\"store_true\", help=\"Turns off tensorboard result reporting\")\n parser.add_argument('--scheduled_sampling', default=False, help='False means use teacher forcing, True means use scheduled_sampling')\n parser.add_argument(\"--scheduled_sampling_change_ratio\", default=0.5, type=int, help='ratio about mix golden target with output')\n parser.add_argument(\"-continue_weights\", type=str, default=None, help=\"Model weights to continue training based on\")\n parser.add_argument(\"-continue_epoch\", type=int, default=None, help=\"Epoch the continue_weights model was at\")\n\n parser.add_argument(\"-lr\", type=float, default=None, help=\"Constant learn rate. Leave as None for a custom scheduler.\")\n parser.add_argument(\"-ce_smoothing\", type=float, default=None, help=\"Smoothing parameter for smoothed cross entropy loss (defaults to no smoothing)\")\n parser.add_argument(\"-batch_size\", type=int, default=2, help=\"Batch size per gpu to use\")\n parser.add_argument(\"-epochs\", type=int, default=300, help=\"Number of epochs to use\")\n\n parser.add_argument(\"-adv_train\", default=True, help='add discriminator loss')\n parser.add_argument(\"-only_Transformer\", default=False, help='use pure Transformer, default set to false, True only for test')\n parser.add_argument(\"-loss\", default=[0.4, 0.2, 0.8], nargs='+', type=float, help='weights of loss, the last element effect when adv train is True')\n\n parser.add_argument(\"--rpr\", action=\"store_true\", help=\"Use a modified Transformer for Relative Position Representations\")\n parser.add_argument(\"-max_sequence\", type=int, default=2048, help=\"Maximum midi sequence to consider\")\n parser.add_argument(\"-n_layers\", type=int, default=6, help=\"Number of decoder layers to use\")\n parser.add_argument(\"-num_heads\", type=int, default=8, help=\"Number of heads to use for multi-head attention\")\n parser.add_argument(\"-d_model\", type=int, default=512, help=\"Dimension of the model (output dim of embedding layers, etc.)\")\n\n parser.add_argument(\"-dim_feedforward\", type=int, default=1024, help=\"Dimension of the feedforward layer\")\n\n parser.add_argument(\"-dropout\", type=float, default=0.1, help=\"Dropout rate\")\n\n parser.add_argument(\"--metrics\", default=False, help=\"evaluate TER(token error rate)\")\n\n return parser.parse_args()"
},
{
"identifier": "parse_eval_args",
"path": "utilities/argument_funcs.py",
"snippet": "def parse_eval_args():\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"-dataset_dir\", type=str, default=\"./dataset/dataset/JSF_SATB\", help=\"Folder of preprocessed and pickled midi files\")\n parser.add_argument(\"-model_weights\", type=str, default=\"./baseline_loss3_CBSATBoutput_0.4_0.2_1/weights/epoch_0110.pickle\", help=\"Pickled model weights file saved with torch.save and model.state_dict()\")\n parser.add_argument(\"-n_workers\", type=int, default=16, help=\"Number of threads for the dataloader\")\n parser.add_argument(\"--gpu\", default=[0], nargs='+', type=int, help=\"For Multi-GPUs testing\")\n parser.add_argument(\"--force_cpu\", action=\"store_true\", help=\"Forces model to run on a cpu even when gpu is available\")\n parser.add_argument(\"-word2event\", type=str, default='./dataset/word2event.pkl', help='word table location: *.pkl')\n parser.add_argument(\"-batch_size\", type=int, default=8, help=\"Batch size to use\")\n\n parser.add_argument(\"--rpr\", action=\"store_true\", help=\"Use a modified Transformer for Relative Position Representations\")\n parser.add_argument(\"-max_sequence\", type=int, default=2048, help=\"Maximum midi sequence to consider in the model\")\n parser.add_argument(\"-n_layers\", type=int, default=6, help=\"Number of decoder layers to use\")\n parser.add_argument(\"-num_heads\", type=int, default=8, help=\"Number of heads to use for multi-head attention\")\n parser.add_argument(\"-d_model\", type=int, default=512, help=\"Dimension of the model (output dim of embedding layers, etc.)\")\n\n parser.add_argument(\"-dim_feedforward\", type=int, default=1024, help=\"Dimension of the feedforward layer\")\n\n return parser.parse_args()"
}
] | import torch
import time
import numpy as np
import tqdm
import torch.nn as nn
from .constants import *
from utilities.device import get_device
from .lr_scheduling import get_lr
from dataset.jsf import *
from utilities.argument_funcs import parse_train_args, parse_eval_args
from thop import profile | 2,362 | # torch.set_printoptions(profile="full")
# train_epoch
def params(dataloader, model, model_disc):
args = parse_train_args()
model.eval()
for batch_num, batch in enumerate(dataloader):
flops, params = profile(model.module, (batch[0][0][0].cuda(args.gpu[0]),
batch[0][0][1].cuda(args.gpu[0]),
batch[0][0][2].cuda(args.gpu[0]))
)
print('flops:', flops, 'params:', params)
break
def train_with_adv(cur_epoch, model, model_disc, dataloader, loss, opt, opt_disc,
lr_scheduler=None, lr_disc_scheduler=None, print_modulus=1):
args = parse_train_args()
out = -1
start_epoch = 5
model.train()
model_disc.train()
for batch_num, batch in enumerate(dataloader):
time_before = time.time()
opt.zero_grad()
opt_disc.zero_grad()
x = batch[0]
tgt = batch[1]
for i in range(len(batch[0])):
if args.gpu[0] != -1:
if isinstance(x[i], list):
for j in range(len(x[i])):
x[i][j] = x[i][j].cuda(device=args.gpu[0])
if isinstance(x[i], torch.Tensor):
x[i] = x[i].cuda(device=args.gpu[0])
if isinstance(tgt[i], list):
for j in range(len(tgt[i])):
tgt[i][j] = tgt[i][j].cuda(device=args.gpu[0])
if isinstance(tgt[i], torch.Tensor):
tgt[i] = tgt[i].cuda(device=args.gpu[0])
else:
if isinstance(x[i], list):
for j in range(len(x[i])):
x[i][j] = x[i][j].cpu()
tgt[i][j] = tgt[i][j].cpu()
tgt = tgt[0][0]
tgt = tgt.flatten()
with torch.no_grad():
y1 = model.module(x[1][0], x[1][1], x[1][2])
y1 = y1.reshape(y1.shape[0] * y1.shape[1], -1)
loss1 = loss.forward(y1, tgt)
y2 = model.module(x[0][0], x[0][1], x[0][2])
# discriminator model loss:
if args.gpu[0] != -1:
real_disc_label = torch.ones(len(batch[0]), batch[1][0][0].shape[1], 1).to(args.gpu[0])
fake_disc_label = torch.zeros(len(batch[0]), y2.shape[1], 1).to(args.gpu[0])
else:
real_disc_label = torch.ones(len(batch[0]), batch[1][0][0].shape[1], 1)
fake_disc_label = torch.zeros(len(batch[0]), y2.shape[1], 1)
softmax = nn.Softmax(dim=-1)
d_fake_loss, d_fake_logits = model_disc(torch.argmax(softmax(y2), dim=-1), fake_disc_label)
d_real_loss, d_real_logits = model_disc(batch[1][0][0], real_disc_label)
loss3 = d_fake_loss + d_real_loss
# y3 = model(x[2])
# train for only CT
# y = model(x)
y2 = y2.reshape(y2.shape[0] * y2.shape[1], -1)
loss2 = loss.forward(y2, tgt)
# tgt = tgt.flatten()
# add scheduled sampling
# out = loss.forward(y, tgt)
# out = loss3
out = args.loss[0] * loss1 + args.loss[1] * loss2 + args.loss[2] * loss3
out.backward()
opt.step()
opt_disc.step()
if lr_scheduler is not None:
lr_scheduler.step()
if lr_disc_scheduler is not None:
lr_disc_scheduler.step()
time_after = time.time()
time_took = time_after - time_before
if (batch_num + 1) % print_modulus == 0:
| # torch.set_printoptions(profile="full")
# train_epoch
def params(dataloader, model, model_disc):
args = parse_train_args()
model.eval()
for batch_num, batch in enumerate(dataloader):
flops, params = profile(model.module, (batch[0][0][0].cuda(args.gpu[0]),
batch[0][0][1].cuda(args.gpu[0]),
batch[0][0][2].cuda(args.gpu[0]))
)
print('flops:', flops, 'params:', params)
break
def train_with_adv(cur_epoch, model, model_disc, dataloader, loss, opt, opt_disc,
lr_scheduler=None, lr_disc_scheduler=None, print_modulus=1):
args = parse_train_args()
out = -1
start_epoch = 5
model.train()
model_disc.train()
for batch_num, batch in enumerate(dataloader):
time_before = time.time()
opt.zero_grad()
opt_disc.zero_grad()
x = batch[0]
tgt = batch[1]
for i in range(len(batch[0])):
if args.gpu[0] != -1:
if isinstance(x[i], list):
for j in range(len(x[i])):
x[i][j] = x[i][j].cuda(device=args.gpu[0])
if isinstance(x[i], torch.Tensor):
x[i] = x[i].cuda(device=args.gpu[0])
if isinstance(tgt[i], list):
for j in range(len(tgt[i])):
tgt[i][j] = tgt[i][j].cuda(device=args.gpu[0])
if isinstance(tgt[i], torch.Tensor):
tgt[i] = tgt[i].cuda(device=args.gpu[0])
else:
if isinstance(x[i], list):
for j in range(len(x[i])):
x[i][j] = x[i][j].cpu()
tgt[i][j] = tgt[i][j].cpu()
tgt = tgt[0][0]
tgt = tgt.flatten()
with torch.no_grad():
y1 = model.module(x[1][0], x[1][1], x[1][2])
y1 = y1.reshape(y1.shape[0] * y1.shape[1], -1)
loss1 = loss.forward(y1, tgt)
y2 = model.module(x[0][0], x[0][1], x[0][2])
# discriminator model loss:
if args.gpu[0] != -1:
real_disc_label = torch.ones(len(batch[0]), batch[1][0][0].shape[1], 1).to(args.gpu[0])
fake_disc_label = torch.zeros(len(batch[0]), y2.shape[1], 1).to(args.gpu[0])
else:
real_disc_label = torch.ones(len(batch[0]), batch[1][0][0].shape[1], 1)
fake_disc_label = torch.zeros(len(batch[0]), y2.shape[1], 1)
softmax = nn.Softmax(dim=-1)
d_fake_loss, d_fake_logits = model_disc(torch.argmax(softmax(y2), dim=-1), fake_disc_label)
d_real_loss, d_real_logits = model_disc(batch[1][0][0], real_disc_label)
loss3 = d_fake_loss + d_real_loss
# y3 = model(x[2])
# train for only CT
# y = model(x)
y2 = y2.reshape(y2.shape[0] * y2.shape[1], -1)
loss2 = loss.forward(y2, tgt)
# tgt = tgt.flatten()
# add scheduled sampling
# out = loss.forward(y, tgt)
# out = loss3
out = args.loss[0] * loss1 + args.loss[1] * loss2 + args.loss[2] * loss3
out.backward()
opt.step()
opt_disc.step()
if lr_scheduler is not None:
lr_scheduler.step()
if lr_disc_scheduler is not None:
lr_disc_scheduler.step()
time_after = time.time()
time_took = time_after - time_before
if (batch_num + 1) % print_modulus == 0: | print("Epoch", cur_epoch, " Batch", batch_num + 1, "/", len(dataloader), "LR:", get_lr(opt_disc), | 1 | 2023-11-01 08:33:08+00:00 | 4k |
NWPlayer123/AnimalCrossing-dtk | configure.py | [
{
"identifier": "Object",
"path": "tools/project.py",
"snippet": "class Object:\n def __init__(self, completed, name, **options):\n self.name = name\n self.completed = completed\n self.options = {\n \"add_to_all\": True,\n \"cflags\": None,\n \"mw_version\": None,\n \"shiftjis\": True,\n \"source\": name,\n }\n self.options.update(options)"
},
{
"identifier": "ProjectConfig",
"path": "tools/project.py",
"snippet": "class ProjectConfig:\n def __init__(self):\n # Paths\n self.build_dir = Path(\"build\")\n self.src_dir = Path(\"src\")\n self.tools_dir = Path(\"tools\")\n\n # Tooling\n self.dtk_tag = None # Git tag\n self.build_dtk_path = None # If None, download\n self.compilers_tag = None # 1\n self.compilers_path = None # If None, download\n self.wibo_tag = None # Git tag\n self.wrapper = None # If None, download wibo on Linux\n self.sjiswrap_tag = None # Git tag\n self.sjiswrap_path = None # If None, download\n\n # Project config\n self.build_rels = True # Build REL files\n self.check_sha_path = None # Path to version.sha1\n self.config_path = None # Path to config.yml\n self.debug = False # Build with debug info\n self.generate_map = False # Generate map file(s)\n self.ldflags = None # Linker flags\n self.libs = None # List of libraries\n self.linker_version = None # mwld version\n self.version = None # Version name\n self.warn_missing_config = False # Warn on missing unit configuration\n self.warn_missing_source = False # Warn on missing source file\n\n # Progress output and progress.json config\n self.progress_all = True # Include combined \"all\" category\n self.progress_modules = True # Include combined \"modules\" category\n self.progress_each_module = (\n True # Include individual modules, disable for large numbers of modules\n )\n\n def validate(self):\n required_attrs = [\n \"build_dir\",\n \"src_dir\",\n \"tools_dir\",\n \"check_sha_path\",\n \"config_path\",\n \"ldflags\",\n \"linker_version\",\n \"libs\",\n \"version\",\n ]\n for attr in required_attrs:\n if getattr(self, attr) is None:\n sys.exit(f\"ProjectConfig.{attr} missing\")\n\n def find_object(self, name):\n for lib in self.libs:\n for obj in lib[\"objects\"]:\n if obj.name == name:\n return [lib, obj]\n return None\n\n def out_path(self):\n return self.build_dir / self.version"
},
{
"identifier": "calculate_progress",
"path": "tools/project.py",
"snippet": "def calculate_progress(config):\n out_path = config.out_path()\n build_config = load_build_config(config, out_path / \"config.json\")\n if not build_config:\n return\n\n class ProgressUnit:\n def __init__(self, name):\n self.name = name\n self.code_total = 0\n self.code_progress = 0\n self.data_total = 0\n self.data_progress = 0\n self.objects_progress = 0\n self.objects_total = 0\n self.objects = set()\n\n def add(self, build_obj):\n self.code_total += build_obj[\"code_size\"]\n self.data_total += build_obj[\"data_size\"]\n\n # Avoid counting the same object in different modules twice\n include_object = build_obj[\"name\"] not in self.objects\n if include_object:\n self.objects.add(build_obj[\"name\"])\n self.objects_total += 1\n\n if build_obj[\"autogenerated\"]:\n # Skip autogenerated objects\n return\n\n result = config.find_object(build_obj[\"name\"])\n if not result:\n return\n\n _, obj = result\n if not obj.completed:\n return\n\n self.code_progress += build_obj[\"code_size\"]\n self.data_progress += build_obj[\"data_size\"]\n if include_object:\n self.objects_progress += 1\n\n def code_frac(self):\n return self.code_progress / self.code_total\n\n def data_frac(self):\n return self.data_progress / self.data_total\n\n # Add DOL units\n all_progress = ProgressUnit(\"All\") if config.progress_all else None\n dol_progress = ProgressUnit(\"DOL\")\n for unit in build_config[\"units\"]:\n if all_progress:\n all_progress.add(unit)\n dol_progress.add(unit)\n\n # Add REL units\n rels_progress = ProgressUnit(\"Modules\") if config.progress_modules else None\n modules_progress = []\n for module in build_config[\"modules\"]:\n progress = ProgressUnit(module[\"name\"])\n modules_progress.append(progress)\n for unit in module[\"units\"]:\n if all_progress:\n all_progress.add(unit)\n if rels_progress:\n rels_progress.add(unit)\n progress.add(unit)\n\n # Print human-readable progress\n print(\"Progress:\")\n\n def print_category(unit):\n code_frac = unit.code_frac()\n data_frac = unit.data_frac()\n print(\n f\" {unit.name}: {code_frac:.2%} code, {data_frac:.2%} data ({unit.objects_progress} / {unit.objects_total} files)\"\n )\n print(f\" Code: {unit.code_progress} / {unit.code_total} bytes\")\n print(f\" Data: {unit.data_progress} / {unit.data_total} bytes\")\n\n if all_progress:\n print_category(all_progress)\n print_category(dol_progress)\n module_count = len(build_config[\"modules\"])\n if module_count > 0:\n print_category(rels_progress)\n if config.progress_each_module:\n for progress in modules_progress:\n print_category(progress)\n\n # Generate and write progress.json\n progress_json = {}\n\n def add_category(category, unit):\n progress_json[category] = {\n \"code\": unit.code_progress,\n \"code/total\": unit.code_total,\n \"data\": unit.data_progress,\n \"data/total\": unit.data_total,\n }\n\n if all_progress:\n add_category(\"all\", all_progress)\n add_category(\"dol\", dol_progress)\n if len(build_config[\"modules\"]) > 0:\n if rels_progress:\n add_category(\"modules\", rels_progress)\n if config.progress_each_module:\n for progress in modules_progress:\n add_category(progress.name, progress)\n with open(out_path / \"progress.json\", \"w\", encoding=\"utf-8\") as w:\n json.dump(progress_json, w, indent=4)"
},
{
"identifier": "generate_build",
"path": "tools/project.py",
"snippet": "def generate_build(config):\n build_config = load_build_config(config, config.out_path() / \"config.json\")\n generate_build_ninja(config, build_config)\n generate_objdiff_config(config, build_config)"
},
{
"identifier": "is_windows",
"path": "tools/project.py",
"snippet": "def is_windows():\n return os.name == \"nt\""
}
] | import sys
import argparse
from pathlib import Path
from tools.project import (
Object,
ProjectConfig,
calculate_progress,
generate_build,
is_windows,
) | 3,021 | config.compilers_path = args.compilers
config.debug = args.debug
config.generate_map = args.map
config.sjiswrap_path = args.sjiswrap
if not is_windows():
config.wrapper = args.wrapper
# Tool versions
config.compilers_tag = "20231018"
config.dtk_tag = "v0.5.8"
config.sjiswrap_tag = "v1.1.1"
config.wibo_tag = "0.6.9"
# Project
config.config_path = Path("config") / config.version / "config.yml"
config.check_sha_path = Path("config") / config.version / "build.sha1"
config.ldflags = [
"-proc gekko",
"-fp hardware",
"-nodefaults",
"-nostdlib",
]
# Base flags, common to most GC/Wii games.
# Generally leave untouched, with overrides added below.
cflags_base = [
"-nodefaults",
"-proc gekko",
"-align powerpc",
"-enum int",
"-fp hardware",
"-Cpp_exceptions off",
# "-W all",
"-O4,p",
"-inline auto",
'-pragma "cats off"',
'-pragma "warn_notinlined off"',
"-maxerrors 1",
"-nosyspath",
"-RTTI off",
"-fp_contract on",
"-str reuse",
"-i include",
"-i libc",
"-multibyte",
f"-DVERSION={version_num}",
]
# Debug flags
if config.debug:
cflags_base.extend(["-sym on", "-DDEBUG=1"])
else:
cflags_base.append("-DNDEBUG=1")
# Metrowerks library flags
cflags_runtime = [
*cflags_base,
"-use_lmw_stmw on",
"-str reuse,pool,readonly",
"-gccinc",
"-common off",
"-inline auto",
]
# REL flags
cflags_rel = [
*cflags_base,
"-sdata 0",
"-sdata2 0",
]
config.linker_version = "GC/1.3.2r"
# Helper function for Dolphin libraries
def DolphinLib(lib_name, objects):
return {
"lib": lib_name,
"mw_version": "GC/1.2.5n",
"cflags": cflags_base,
"host": False,
"objects": objects,
}
# Helper function for REL script objects
def Rel(lib_name, objects):
return {
"lib": lib_name,
"mw_version": "GC/1.3.2r",
"cflags": cflags_rel,
"host": True,
"objects": objects,
}
Matching = True
NonMatching = False
config.warn_missing_config = False
config.warn_missing_source = False
config.libs = [
{
"lib": "Runtime.PPCEABI.H",
"mw_version": config.linker_version,
"cflags": cflags_runtime,
"host": False,
"objects": [
Object(NonMatching, "Runtime.PPCEABI.H/global_destructor_chain.c"),
Object(NonMatching, "Runtime.PPCEABI.H/__init_cpp_exceptions.cpp"),
],
},
]
if args.mode == "configure":
# Write build.ninja and objdiff.json
generate_build(config)
elif args.mode == "progress":
# Print progress and write progress.json
config.progress_each_module = args.verbose
| #!/usr/bin/env python3
###
# Generates build files for the project.
# This file also includes the project configuration,
# such as compiler flags and the object matching status.
#
# Usage:
# python3 configure.py
# ninja
#
# Append --help to see available options.
###
# Game versions
DEFAULT_VERSION = 0
VERSIONS = [
"GAFE01", # USA
]
if len(VERSIONS) > 1:
versions_str = ", ".join(VERSIONS[:-1]) + f" or {VERSIONS[-1]}"
else:
versions_str = VERSIONS[0]
parser = argparse.ArgumentParser()
parser.add_argument(
"mode",
default="configure",
help="configure or progress (default: configure)",
nargs="?",
)
parser.add_argument(
"--version",
dest="version",
default=VERSIONS[DEFAULT_VERSION],
help=f"version to build ({versions_str})",
)
parser.add_argument(
"--build-dir",
dest="build_dir",
type=Path,
default=Path("build"),
help="base build directory (default: build)",
)
parser.add_argument(
"--compilers",
dest="compilers",
type=Path,
help="path to compilers (optional)",
)
parser.add_argument(
"--map",
dest="map",
action="store_true",
help="generate map file(s)",
)
parser.add_argument(
"--debug",
dest="debug",
action="store_true",
help="build with debug info (non-matching)",
)
if not is_windows():
parser.add_argument(
"--wrapper",
dest="wrapper",
type=Path,
help="path to wibo or wine (optional)",
)
parser.add_argument(
"--build-dtk",
dest="build_dtk",
type=Path,
help="path to decomp-toolkit source (optional)",
)
parser.add_argument(
"--sjiswrap",
dest="sjiswrap",
type=Path,
help="path to sjiswrap.exe (optional)",
)
parser.add_argument(
"--verbose",
dest="verbose",
action="store_true",
help="print verbose output",
)
args = parser.parse_args()
config = ProjectConfig()
config.version = args.version.upper()
if config.version not in VERSIONS:
sys.exit(f"Invalid version '{config.version}', expected {versions_str}")
version_num = VERSIONS.index(config.version)
# Apply arguments
config.build_dir = args.build_dir
config.build_dtk_path = args.build_dtk
config.compilers_path = args.compilers
config.debug = args.debug
config.generate_map = args.map
config.sjiswrap_path = args.sjiswrap
if not is_windows():
config.wrapper = args.wrapper
# Tool versions
config.compilers_tag = "20231018"
config.dtk_tag = "v0.5.8"
config.sjiswrap_tag = "v1.1.1"
config.wibo_tag = "0.6.9"
# Project
config.config_path = Path("config") / config.version / "config.yml"
config.check_sha_path = Path("config") / config.version / "build.sha1"
config.ldflags = [
"-proc gekko",
"-fp hardware",
"-nodefaults",
"-nostdlib",
]
# Base flags, common to most GC/Wii games.
# Generally leave untouched, with overrides added below.
cflags_base = [
"-nodefaults",
"-proc gekko",
"-align powerpc",
"-enum int",
"-fp hardware",
"-Cpp_exceptions off",
# "-W all",
"-O4,p",
"-inline auto",
'-pragma "cats off"',
'-pragma "warn_notinlined off"',
"-maxerrors 1",
"-nosyspath",
"-RTTI off",
"-fp_contract on",
"-str reuse",
"-i include",
"-i libc",
"-multibyte",
f"-DVERSION={version_num}",
]
# Debug flags
if config.debug:
cflags_base.extend(["-sym on", "-DDEBUG=1"])
else:
cflags_base.append("-DNDEBUG=1")
# Metrowerks library flags
cflags_runtime = [
*cflags_base,
"-use_lmw_stmw on",
"-str reuse,pool,readonly",
"-gccinc",
"-common off",
"-inline auto",
]
# REL flags
cflags_rel = [
*cflags_base,
"-sdata 0",
"-sdata2 0",
]
config.linker_version = "GC/1.3.2r"
# Helper function for Dolphin libraries
def DolphinLib(lib_name, objects):
return {
"lib": lib_name,
"mw_version": "GC/1.2.5n",
"cflags": cflags_base,
"host": False,
"objects": objects,
}
# Helper function for REL script objects
def Rel(lib_name, objects):
return {
"lib": lib_name,
"mw_version": "GC/1.3.2r",
"cflags": cflags_rel,
"host": True,
"objects": objects,
}
Matching = True
NonMatching = False
config.warn_missing_config = False
config.warn_missing_source = False
config.libs = [
{
"lib": "Runtime.PPCEABI.H",
"mw_version": config.linker_version,
"cflags": cflags_runtime,
"host": False,
"objects": [
Object(NonMatching, "Runtime.PPCEABI.H/global_destructor_chain.c"),
Object(NonMatching, "Runtime.PPCEABI.H/__init_cpp_exceptions.cpp"),
],
},
]
if args.mode == "configure":
# Write build.ninja and objdiff.json
generate_build(config)
elif args.mode == "progress":
# Print progress and write progress.json
config.progress_each_module = args.verbose | calculate_progress(config) | 2 | 2023-11-09 04:40:59+00:00 | 4k |
elenacliu/GraspStudio | grasp/grasp.py | [
{
"identifier": "Camera",
"path": "cameras/camera.py",
"snippet": "class Camera:\n config: CameraConfig\n\n def __init__(self, config : CameraConfig):\n self.config = config\n \n def rgb(self) -> NDArray:\n raise NotImplementedError('You should use a specified subclass!')\n\n def rgbd(self) -> Tuple[NDArray, NDArray]:\n raise NotImplementedError('You should use a specified subclass!')\n\n def depth_to_point_cloud(self, organized=False) -> Tuple[NDArray, NDArray]:\n \"\"\"\n organized: bool\n whether to keep the cloud in image shape (H,W,3)\n \"\"\"\n color_img, depth_img = self.rgbd()\n color_img = np.array(color_img, dtype=np.float32) / 255.0\n depth_img = np.array(depth_img / 1000, dtype=np.float32)\n\n # depth image resize to the color image size\n # just use the original size of depth image and color image\n # depth_img = cv2.resize(depth_img, (self.config.image_size_w, self.config.image_size_h), interpolation=cv2.INTER_NEAREST)\n # color_img = cv2.resize(color_img, (self.config.image_size_w, self.config.image_size_h), interpolation=cv2.INTER_LINEAR)\n # the scale should be considering again\n h, w = depth_img.shape\n\n # scale camera parameters\n scale_x = w / self.config.depth_w\n scale_y = h / self.config.depth_h\n\n fx = self.config.depth_fx * scale_x\n fy = self.config.depth_fy * scale_y\n\n x_offset = self.config.depth_ppx * scale_x\n y_offset = self.config.depth_ppy * scale_y\n\n indices = torch.from_numpy(np.indices((h, w), dtype=np.float32).transpose(1,2,0))\n \n z_e = torch.from_numpy(depth_img)\n x_e = (indices[..., 1] - x_offset) * z_e / fx\n y_e = (indices[..., 0] - y_offset) * z_e / fy\n point_cloud = torch.stack([x_e, y_e, z_e], axis=-1).numpy() # Shape: [H x W x 3]\n\n if not organized:\n color_img = color_img.reshape(-1, 3)\n point_cloud = point_cloud.reshape(-1, 3)\n return color_img, point_cloud\n\n @property\n def intrinsic(self):\n return {\n 'fx': self.config.fx,\n 'fy': self.config.fy,\n 'cx': self.config.ppx,\n 'cy': self.config.ppy,\n 'w': self.config.w,\n 'h': self.config.h\n }\n\n @property\n def depth_intrinsic(self):\n return {\n 'fx': self.config.depth_fx,\n 'fy': self.config.depth_fy,\n 'cx': self.config.depth_ppx,\n 'cy': self.config.depth_ppy,\n 'w': self.config.depth_w,\n 'h': self.config.depth_h\n }"
},
{
"identifier": "CameraConfig",
"path": "cameras/camera.py",
"snippet": "class CameraConfig(InstantiateConfig):\n \"\"\"Camera Config\"\"\"\n _target: Type = field(default_factory=lambda : Camera)\n # focal length of x axis\n fx: float = 0.0\n # focal length of y axis\n fy: float = 0.0\n # optical center of x\n ppx: float = 0.0\n # optical center of y\n ppy: float = 0.0\n # resolution x (width)\n w: int = 0.0\n # resolution y (height)\n h: int = 0.0\n # image size\n image_size_w: int = 1280\n image_size_h: int = 720\n # calibration matrix (camera on hand or camera on base)\n calibration: NDArray[np.float64] = None\n # depth camera focal length of x axis (optional)\n depth_fx: Optional[float] = None\n # depth camera focal length of y axis (optional)\n depth_fy: Optional[float] = None\n # depth camera ppx\n depth_ppx: Optional[float] = None\n # depth camera ppy\n depth_ppy: Optional[float] = None\n # depth resolution x (width)\n depth_w: Optional[int] = None\n # depth esolution y (height)\n depth_h: Optional[int] = None"
},
{
"identifier": "RealSenseCameraConfig",
"path": "cameras/realsense.py",
"snippet": "class RealSenseCameraConfig(CameraConfig):\n _target: Type = field(default_factory=lambda : RealSenseCamera)\n exposure: float = 500.0\n max_depth_value: float = 800.0"
},
{
"identifier": "MotionSolver",
"path": "motion_solver/solver.py",
"snippet": "class MotionSolver:\n config: MotionSolverConfig\n def __init__(self, config) -> None:\n self.config = config\n \n def ik(self, current_joints, target_pose : np.array) -> list:\n raise NotImplementedError"
},
{
"identifier": "PybulletMotionSolverConfig",
"path": "motion_solver/solver.py",
"snippet": "class PybulletMotionSolverConfig(MotionSolverConfig):\n _target: Type = field(default_factory=lambda : PybulletMotionSolver)\n upperlimits: Tuple = FrankaConstants.JOINT_LIMITS_UPPER.value\n lowerlimits: Tuple = FrankaConstants.JOINT_LIMITS_LOWER.value\n jointranges: Tuple = FrankaConstants.JOINT_RANGES.value\n robot_file: str = 'franka_panda/panda.urdf'"
},
{
"identifier": "InstantiateConfig",
"path": "config/base_config.py",
"snippet": "class InstantiateConfig(PrintableConfig):\n \"\"\"Config class for instantiating an the class specified in the _target attribute.\"\"\"\n\n _target: Type\n\n def setup(self, **kwargs) -> Any:\n \"\"\"Returns the instantiated object using the config.\"\"\"\n return self._target(self, **kwargs)"
}
] | import numpy as np
import copy
import time
from dataclasses import dataclass, field
from typing import Type, Optional, List
from numpy.typing import NDArray
from cameras import CameraConfig, RealSenseCameraConfig, Camera
from motion_solver import PybulletMotionSolverConfig, MotionSolver
from config import InstantiateConfig | 1,909 | # Copyright 2023 Chang Liu. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A base class GraspBot for diifferent implementations of grasps."""
@dataclass
class GraspConfig(InstantiateConfig):
_target: Type = field(default_factory=lambda : Grasp)
max_gripper_width: float = 0.08
initial_camera2robot_transformation: NDArray = np.array([[-0.08920106, -0.99592763, 0.01308891, 0.33658066],
[-0.99519613, 0.08965247, 0.03933318, 0.02753368],
[-0.04034645, -0.00951747, -0.99914042, 0.6019472],
[ 0., 0. , 0. , 1. ]])
initial_joints: NDArray = np.array([-0.02159332, -0.80462398, 0.00235787, -2.16951674, 0.0373164, 1.35462832, 0.8590827])
place_joints: NDArray = np.array([1.8, -0.7855447937431189, 0.0003260311383163978, -2.3561892689822015, 0.000589521053350634, 1.5704794415504568, 0.7849731242977285])
| # Copyright 2023 Chang Liu. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A base class GraspBot for diifferent implementations of grasps."""
@dataclass
class GraspConfig(InstantiateConfig):
_target: Type = field(default_factory=lambda : Grasp)
max_gripper_width: float = 0.08
initial_camera2robot_transformation: NDArray = np.array([[-0.08920106, -0.99592763, 0.01308891, 0.33658066],
[-0.99519613, 0.08965247, 0.03933318, 0.02753368],
[-0.04034645, -0.00951747, -0.99914042, 0.6019472],
[ 0., 0. , 0. , 1. ]])
initial_joints: NDArray = np.array([-0.02159332, -0.80462398, 0.00235787, -2.16951674, 0.0373164, 1.35462832, 0.8590827])
place_joints: NDArray = np.array([1.8, -0.7855447937431189, 0.0003260311383163978, -2.3561892689822015, 0.000589521053350634, 1.5704794415504568, 0.7849731242977285]) | camera_config: CameraConfig = field(default_factory=lambda : RealSenseCameraConfig) | 2 | 2023-11-08 09:44:22+00:00 | 4k |
emadeldeen24/ECGTransForm | trainer.py | [
{
"identifier": "ecgTransForm",
"path": "models.py",
"snippet": "class ecgTransForm(nn.Module):\r\n def __init__(self, configs, hparams):\r\n super(ecgTransForm, self).__init__()\r\n\r\n filter_sizes = [5, 9, 11]\r\n self.conv1 = nn.Conv1d(configs.input_channels, configs.mid_channels, kernel_size=filter_sizes[0],\r\n stride=configs.stride, bias=False, padding=(filter_sizes[0] // 2))\r\n self.conv2 = nn.Conv1d(configs.input_channels, configs.mid_channels, kernel_size=filter_sizes[1],\r\n stride=configs.stride, bias=False, padding=(filter_sizes[1] // 2))\r\n self.conv3 = nn.Conv1d(configs.input_channels, configs.mid_channels, kernel_size=filter_sizes[2],\r\n stride=configs.stride, bias=False, padding=(filter_sizes[2] // 2))\r\n\r\n self.bn = nn.BatchNorm1d(configs.mid_channels)\r\n self.relu = nn.ReLU()\r\n self.mp = nn.MaxPool1d(kernel_size=2, stride=2, padding=1)\r\n self.do = nn.Dropout(configs.dropout)\r\n\r\n\r\n self.conv_block2 = nn.Sequential(\r\n nn.Conv1d(configs.mid_channels, configs.mid_channels * 2, kernel_size=8, stride=1, bias=False, padding=4),\r\n nn.BatchNorm1d(configs.mid_channels * 2),\r\n nn.ReLU(),\r\n nn.MaxPool1d(kernel_size=2, stride=2, padding=1)\r\n )\r\n\r\n self.conv_block3 = nn.Sequential(\r\n nn.Conv1d(configs.mid_channels * 2, configs.final_out_channels, kernel_size=8, stride=1, bias=False,\r\n padding=4),\r\n nn.BatchNorm1d(configs.final_out_channels),\r\n nn.ReLU(),\r\n nn.MaxPool1d(kernel_size=2, stride=2, padding=1),\r\n )\r\n \r\n self.inplanes = 128\r\n self.crm = self._make_layer(SEBasicBlock, 128, 3)\r\n\r\n self.encoder_layer = nn.TransformerEncoderLayer(d_model=configs.trans_dim, nhead=configs.num_heads, batch_first=True)\r\n self.transformer_encoder = nn.TransformerEncoder(self.encoder_layer, num_layers=3)\r\n\r\n self.aap = nn.AdaptiveAvgPool1d(1)\r\n self.clf = nn.Linear(hparams[\"feature_dim\"], configs.num_classes)\r\n\r\n def _make_layer(self, block, planes, blocks, stride=1): # makes residual SE block\r\n downsample = None\r\n if stride != 1 or self.inplanes != planes * block.expansion:\r\n downsample = nn.Sequential(\r\n nn.Conv1d(self.inplanes, planes * block.expansion,\r\n kernel_size=1, stride=stride, bias=False),\r\n nn.BatchNorm1d(planes * block.expansion),\r\n )\r\n\r\n layers = []\r\n layers.append(block(self.inplanes, planes, stride, downsample))\r\n self.inplanes = planes * block.expansion\r\n for i in range(1, blocks):\r\n layers.append(block(self.inplanes, planes))\r\n\r\n return nn.Sequential(*layers)\r\n\r\n def forward(self, x_in):\r\n\r\n # Multi-scale Convolutions\r\n x1 = self.conv1(x_in)\r\n x2 = self.conv2(x_in)\r\n x3 = self.conv3(x_in)\r\n\r\n x_concat = torch.mean(torch.stack([x1, x2, x3],2), 2)\r\n x_concat = self.do(self.mp(self.relu(self.bn(x_concat))))\r\n\r\n x = self.conv_block2(x_concat)\r\n x = self.conv_block3(x)\r\n\r\n # Channel Recalibration Module\r\n x = self.crm(x)\r\n\r\n # Bi-directional Transformer\r\n x1 = self.transformer_encoder(x)\r\n x2 = self.transformer_encoder(torch.flip(x,[2]))\r\n x = x1+x2\r\n\r\n x = self.aap(x)\r\n x_flat = x.reshape(x.shape[0], -1)\r\n x_out = self.clf(x_flat)\r\n return x_out\r"
},
{
"identifier": "data_generator",
"path": "dataloader.py",
"snippet": "def data_generator(data_path, data_type, hparams):\r\n # original\r\n train_dataset = torch.load(os.path.join(data_path, data_type, f\"train.pt\"))\r\n val_dataset = torch.load(os.path.join(data_path, data_type, f\"val.pt\"))\r\n test_dataset = torch.load(os.path.join(data_path, data_type, f\"test.pt\"))\r\n\r\n # Loading datasets\r\n train_dataset = Load_Dataset(train_dataset)\r\n val_dataset = Load_Dataset(val_dataset)\r\n test_dataset = Load_Dataset(test_dataset)\r\n\r\n cw = train_dataset.y_data.numpy().tolist()\r\n cw_dict = {}\r\n for i in range(len(np.unique(train_dataset.y_data.numpy()))):\r\n cw_dict[i] = cw.count(i)\r\n # print(cw_dict)\r\n\r\n # Dataloaders\r\n batch_size = hparams[\"batch_size\"]\r\n train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size,\r\n shuffle=True, drop_last=True, num_workers=0)\r\n val_loader = torch.utils.data.DataLoader(dataset=val_dataset, batch_size=batch_size,\r\n shuffle=False, drop_last=True, num_workers=0)\r\n test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=batch_size,\r\n shuffle=False, drop_last=False, num_workers=0)\r\n return train_loader, val_loader, test_loader, cw_dict\r"
},
{
"identifier": "get_dataset_class",
"path": "configs/data_configs.py",
"snippet": "def get_dataset_class(dataset_name):\r\n \"\"\"Return the algorithm class with the given name.\"\"\"\r\n if dataset_name not in globals():\r\n raise NotImplementedError(\"Dataset not found: {}\".format(dataset_name))\r\n return globals()[dataset_name]\r"
},
{
"identifier": "get_hparams_class",
"path": "configs/hparams.py",
"snippet": "def get_hparams_class(dataset_name):\r\n \"\"\"Return the algorithm class with the given name.\"\"\"\r\n if dataset_name not in globals():\r\n raise NotImplementedError(\"Algorithm not found: {}\".format(dataset_name))\r\n return globals()[dataset_name]\r"
},
{
"identifier": "AverageMeter",
"path": "utils.py",
"snippet": "class AverageMeter(object):\r\n \"\"\"Computes and stores the average and current value\"\"\"\r\n\r\n def __init__(self):\r\n self.reset()\r\n\r\n def reset(self):\r\n self.val = 0\r\n self.avg = 0\r\n self.sum = 0\r\n self.count = 0\r\n\r\n def update(self, val, n=1):\r\n self.val = val\r\n self.sum += val * n\r\n self.count += n\r\n self.avg = self.sum / self.count\r"
},
{
"identifier": "to_device",
"path": "utils.py",
"snippet": "def to_device(input, device):\r\n if torch.is_tensor(input):\r\n return input.to(device=device)\r\n elif isinstance(input, str):\r\n return input\r\n elif isinstance(input, collections.Mapping):\r\n return {k: to_device(sample, device=device) for k, sample in input.items()}\r\n elif isinstance(input, collections.Sequence):\r\n return [to_device(sample, device=device) for sample in input]\r\n else:\r\n raise TypeError(\"Input must contain tensor, dict or list, found {type(input)}\")\r"
},
{
"identifier": "_save_metrics",
"path": "utils.py",
"snippet": "def _save_metrics(pred_labels, true_labels, log_dir, home_path, classes_names):\r\n pred_labels = np.array(pred_labels).astype(int)\r\n true_labels = np.array(true_labels).astype(int)\r\n\r\n r = classification_report(true_labels, pred_labels, digits=6, output_dict=True)\r\n\r\n df = pd.DataFrame(r)\r\n accuracy = accuracy_score(true_labels, pred_labels)\r\n df[\"accuracy\"] = accuracy\r\n df = df * 100\r\n\r\n # save classification report\r\n file_name = \"classification_report.xlsx\"\r\n report_Save_path = os.path.join(home_path, log_dir, file_name)\r\n df.to_excel(report_Save_path)\r"
},
{
"identifier": "copy_Files",
"path": "utils.py",
"snippet": "def copy_Files(destination):\r\n destination_dir = os.path.join(destination, \"MODEL_BACKUP_FILES\")\r\n os.makedirs(destination_dir, exist_ok=True)\r\n copy(\"main.py\", os.path.join(destination_dir, \"main.py\"))\r\n copy(\"dataloader.py\", os.path.join(destination_dir, \"dataloader.py\"))\r\n copy(f\"models.py\", os.path.join(destination_dir, f\"models.py\"))\r\n copy(f\"configs/data_configs.py\", os.path.join(destination_dir, f\"data_configs.py\"))\r\n copy(f\"configs/hparams.py\", os.path.join(destination_dir, f\"hparams.py\"))\r\n copy(f\"trainer.py\", os.path.join(destination_dir, f\"trainer.py\"))\r\n copy(\"utils.py\", os.path.join(destination_dir, \"utils.py\"))\r"
},
{
"identifier": "_plot_umap",
"path": "utils.py",
"snippet": "def _plot_umap(model, data_loader, device, save_dir):\r\n import umap\r\n import umap.plot\r\n from matplotlib.colors import ListedColormap\r\n classes_names = ['N','S','V','F','Q']\r\n \r\n font = {'family' : 'Times New Roman',\r\n 'weight' : 'bold',\r\n 'size' : 17}\r\n plt.rc('font', **font)\r\n \r\n with torch.no_grad():\r\n # Source flow\r\n data = data_loader.dataset.x_data.float().to(device)\r\n labels = data_loader.dataset.y_data.view((-1)).long()\r\n out = model[0](data)\r\n features = model[1](out)\r\n\r\n\r\n if not os.path.exists(os.path.join(save_dir, \"umap_plots\")):\r\n os.mkdir(os.path.join(save_dir, \"umap_plots\"))\r\n \r\n #cmaps = plt.get_cmap('jet')\r\n model_reducer = umap.UMAP() #n_neighbors=3, min_dist=0.3, metric='correlation', random_state=42)\r\n embedding = model_reducer.fit_transform(features.detach().cpu().numpy())\r\n \r\n # Normalize the labels to [0, 1] for colormap\r\n norm_labels = labels / 4.0\r\n \r\n\r\n # Create a new colormap by extracting the first 5 colors from \"Paired\"\r\n paired = plt.cm.get_cmap('Paired', 12) # 12 distinct colors\r\n new_colors = [paired(0), paired(1), paired(2), paired(4), paired(6)] # Skip every second color, but take both from the first pair\r\n new_cmap = ListedColormap(new_colors)\r\n\r\n print(\"Plotting UMAP ...\")\r\n plt.figure(figsize=(16, 10))\r\n # scatter = plt.scatter(embedding[:, 0], embedding[:, 1], c=labels, s=10, cmap='Spectral')\r\n scatter = plt.scatter(embedding[:, 0], embedding[:, 1], c=norm_labels, cmap=new_cmap, s=15)\r\n\r\n handles, _ = scatter.legend_elements(prop='colors')\r\n plt.legend(handles, classes_names, title=\"Classes\")\r\n file_name = \"umap_.png\"\r\n fig_save_name = os.path.join(save_dir, \"umap_plots\", file_name)\r\n plt.xticks([])\r\n plt.yticks([])\r\n plt.savefig(fig_save_name, bbox_inches='tight')\r\n plt.close()\r"
},
{
"identifier": "fix_randomness",
"path": "utils.py",
"snippet": "def fix_randomness(SEED):\r\n random.seed(SEED)\r\n np.random.seed(SEED)\r\n torch.manual_seed(SEED)\r\n torch.cuda.manual_seed(SEED)\r\n torch.backends.cudnn.deterministic = True\r\n torch.backends.cudnn.benchmark = False\r"
},
{
"identifier": "starting_logs",
"path": "utils.py",
"snippet": "def starting_logs(data_type, exp_log_dir, seed_id):\r\n log_dir = os.path.join(exp_log_dir, \"_seed_\" + str(seed_id))\r\n os.makedirs(log_dir, exist_ok=True)\r\n log_file_name = os.path.join(log_dir, f\"logs_{datetime.now().strftime('%d_%m_%Y_%H_%M_%S')}.log\")\r\n logger = _logger(log_file_name)\r\n logger.debug(\"=\" * 45)\r\n logger.debug(f'Dataset: {data_type}')\r\n logger.debug(\"=\" * 45)\r\n logger.debug(f'Seed: {seed_id}')\r\n logger.debug(\"=\" * 45)\r\n return logger, log_dir\r"
},
{
"identifier": "save_checkpoint",
"path": "utils.py",
"snippet": "def save_checkpoint(home_path, model, dataset, dataset_configs, log_dir, hparams):\r\n save_dict = {\r\n \"dataset\": dataset,\r\n \"configs\": dataset_configs.__dict__,\r\n \"hparams\": dict(hparams),\r\n \"model\": model[0].state_dict(),\r\n \"clf\": model[1].state_dict()\r\n }\r\n # save classification report\r\n save_path = os.path.join(home_path, log_dir, \"checkpoint.pt\")\r\n\r\n torch.save(save_dict, save_path)\r"
},
{
"identifier": "_calc_metrics",
"path": "utils.py",
"snippet": "def _calc_metrics(pred_labels, true_labels, classes_names):\r\n pred_labels = np.array(pred_labels).astype(int)\r\n true_labels = np.array(true_labels).astype(int)\r\n\r\n r = classification_report(true_labels, pred_labels, target_names=classes_names, digits=6, output_dict=True)\r\n accuracy = accuracy_score(true_labels, pred_labels)\r\n\r\n return accuracy * 100, r[\"macro avg\"][\"f1-score\"] * 100\r"
}
] | import torch
import torch.nn.functional as F
import os
import collections
import numpy as np
import warnings
import sklearn.exceptions
from models import ecgTransForm
from dataloader import data_generator
from configs.data_configs import get_dataset_class
from configs.hparams import get_hparams_class
from utils import AverageMeter, to_device, _save_metrics, copy_Files, _plot_umap
from utils import fix_randomness, starting_logs, save_checkpoint, _calc_metrics | 3,417 |
warnings.filterwarnings("ignore", category=sklearn.exceptions.UndefinedMetricWarning)
warnings.simplefilter(action='ignore', category=FutureWarning)
class trainer(object):
def __init__(self, args):
# dataset parameters
self.dataset = args.dataset
self.seed_id = args.seed_id
self.device = torch.device(args.device)
# Exp Description
self.run_description = args.run_description
self.experiment_description = args.experiment_description
# paths
self.home_path = os.getcwd()
self.save_dir = os.path.join(os.getcwd(), "experiments_logs")
self.exp_log_dir = os.path.join(self.save_dir, self.experiment_description, self.run_description)
os.makedirs(self.exp_log_dir, exist_ok=True)
self.data_path = args.data_path
# Specify runs
self.num_runs = args.num_runs
# get dataset and base model configs
self.dataset_configs, self.hparams_class = self.get_configs()
# Specify hparams
self.hparams = self.hparams_class.train_params
def get_configs(self):
dataset_class = get_dataset_class(self.dataset)
hparams_class = get_hparams_class("supervised")
return dataset_class(), hparams_class()
def load_data(self, data_type):
self.train_dl, self.val_dl, self.test_dl, self.cw_dict = \
|
warnings.filterwarnings("ignore", category=sklearn.exceptions.UndefinedMetricWarning)
warnings.simplefilter(action='ignore', category=FutureWarning)
class trainer(object):
def __init__(self, args):
# dataset parameters
self.dataset = args.dataset
self.seed_id = args.seed_id
self.device = torch.device(args.device)
# Exp Description
self.run_description = args.run_description
self.experiment_description = args.experiment_description
# paths
self.home_path = os.getcwd()
self.save_dir = os.path.join(os.getcwd(), "experiments_logs")
self.exp_log_dir = os.path.join(self.save_dir, self.experiment_description, self.run_description)
os.makedirs(self.exp_log_dir, exist_ok=True)
self.data_path = args.data_path
# Specify runs
self.num_runs = args.num_runs
# get dataset and base model configs
self.dataset_configs, self.hparams_class = self.get_configs()
# Specify hparams
self.hparams = self.hparams_class.train_params
def get_configs(self):
dataset_class = get_dataset_class(self.dataset)
hparams_class = get_hparams_class("supervised")
return dataset_class(), hparams_class()
def load_data(self, data_type):
self.train_dl, self.val_dl, self.test_dl, self.cw_dict = \ | data_generator(self.data_path, data_type, self.hparams) | 1 | 2023-11-06 14:11:19+00:00 | 4k |
WMD-group/CrystalSpace | app.py | [
{
"identifier": "get_plotly_embedding",
"path": "visualize_app/visualize_embedding.py",
"snippet": "def get_plotly_embedding(\n df: pd.DataFrame = None,\n opacity: float = 0.2,\n **kwargs,\n) -> go.Figure:\n \"\"\"\n Plot the embedding of a dataframe with plotly.\n\n Args:\n df: dataframe with columns x, y, z, smact_allowed, mp_data.\n opacity: opacity of the markers. Default is 0.8.\n kwargs: additional keyword arguments.\n Returns:\n fig: plotly figure object.\n \"\"\"\n # check if the dataframe is empty\n if df is None:\n return go.Figure()\n\n fig = px.scatter_3d(\n df,\n x=\"x\",\n y=\"y\",\n z=\"z\",\n template=\"plotly_white\",\n color=\"label\",\n color_discrete_map={\n \"0\": \"#D9D9D9\",\n \"1\": \"rgba(34, 224, 0, 0.8)\",\n \"2\": \"rgba(255, 18, 1, 0.8)\",\n \"3\": \"rgba(0, 47, 255, 0.8)\",\n # \"0\": px.colors.qualitative.Vivid[-1], # \"#D9D9D9\"\n # \"1\": px.colors.qualitative.Vivid[0], # \"#22E000\",\n # \"2\": px.colors.qualitative.Vivid[1], # \"#FF1201\",\n # \"3\": px.colors.qualitative.Vivid[2], # \"#002FFF\",\n },\n opacity=opacity,\n hover_data=[\n \"formula\",\n ],\n )\n\n # update hovertemplate\n fig.update_traces(\n hovertemplate=\"<br>\".join(\n [\n \"formula: %{customdata[0]}\",\n ]\n )\n )\n\n # remove the background grid and axes and ticks and tick labels\n fig.update_layout(\n scene=dict(\n xaxis=dict(\n showticklabels=False,\n title=\"\",\n ),\n yaxis=dict(\n showticklabels=False,\n title=\"\",\n ),\n zaxis=dict(\n showticklabels=False,\n title=\"\",\n ),\n ),\n )\n\n # set title\n if \"title\" in kwargs:\n fig.update_layout(\n title=dict(\n text=kwargs[\"title\"],\n font=dict(size=20),\n x=0.5,\n y=0.95,\n xanchor=\"center\",\n yanchor=\"top\",\n )\n )\n\n # update the legend labels\n legend_label_map = {\n \"0\": \"Unlikely (False, False)\",\n \"1\": \"Interesting (False, True)\",\n \"2\": \"Missing (True, False)\",\n \"3\": \"Standard (True, True)\",\n }\n\n for trace in fig.data:\n trace.name = legend_label_map[trace.name]\n\n # update the marker\n\n fig.update_traces(\n marker=dict(\n size=5,\n # line=dict(width=0.5, color=\"Grey\"),\n ),\n selector=dict(mode=\"markers\"),\n )\n\n # update the legend title\n fig.update_layout(\n legend_title_text=\" click legend 👆 <br>(smact_allowed, mp_data)\",\n )\n return fig"
},
{
"identifier": "get_plotly_structure",
"path": "visualize_app/visualize_structure.py",
"snippet": "def get_plotly_structure(structure: Structure = None) -> go.Figure:\n \"\"\"\n Plot a pymatgen structure with its unit cell using plotly.\n Args:\n structure: pymatgen structure object.\n kwargs: additional keyword arguments.\n Returns:\n fig: plotly figure object.\n \"\"\"\n if structure is None:\n return px.scatter_3d()\n\n # Getting atomic positions and species using list comprehension\n positions = [site.coords for site in structure]\n species = [str(site.specie) for site in structure]\n\n # Getting atomic colors\n atomic_colors = [jmol_colors[Element(specie).Z] for specie in species]\n\n # Getting atomic radii\n # atomic_radii = [float(Element(specie).atomic_radius) for specie in species]\n\n # Extracting x, y, and z coordinates\n x, y, z = zip(*positions)\n\n # Getting lattice vectors\n a, b, c = structure.lattice.matrix\n\n # Define lines for the unit cell\n lines = [\n [[0, 0, 0], a],\n [[0, 0, 0], b],\n [[0, 0, 0], c],\n [a, a + b],\n [a, a + c],\n [b, b + a],\n [b, b + c],\n [c, c + a],\n [c, c + b],\n [a + b, a + b + c],\n [a + c, a + c + b],\n [b + c, b + c + a],\n ]\n\n # scatter atoms\n trace_atoms = go.Scatter3d(\n x=x,\n y=y,\n z=z,\n mode=\"markers\",\n text=species,\n hoverinfo=\"text\",\n marker=dict(\n symbol=\"circle\",\n sizemode=\"diameter\",\n color=atomic_colors,\n # size=[20 * r for r in atomic_radii],\n size=20,\n line=dict(color=\"black\", width=5),\n ),\n )\n\n # draw unit cell\n trace_lines = []\n for line in lines:\n x_values, y_values, z_values = zip(*line)\n trace_lines.append(\n go.Scatter3d(\n x=x_values,\n y=y_values,\n z=z_values,\n mode=\"lines\",\n line=dict(color=\"black\"),\n )\n )\n\n # remove the background grid\n layout = go.Layout(\n scene=dict(\n xaxis=dict(\n showticklabels=False,\n title=\"\",\n showgrid=False,\n zeroline=False,\n showline=False,\n visible=False,\n ),\n yaxis=dict(\n showticklabels=False,\n title=\"\",\n showgrid=False,\n zeroline=False,\n showline=False,\n visible=False,\n ),\n zaxis=dict(\n showticklabels=False,\n title=\"\",\n showgrid=False,\n zeroline=False,\n showline=False,\n visible=False,\n ),\n ),\n showlegend=False,\n )\n\n fig = go.Figure(data=[trace_atoms, *trace_lines], layout=layout)\n return fig"
},
{
"identifier": "fn_chemical_check",
"path": "visualize_app/utils.py",
"snippet": "def fn_chemical_check(\n df_embedding: pd.DataFrame, species_1: str, species_2: str\n) -> np.array:\n \"\"\"\n Check if the chemical system contains the specified species.\n\n Args:\n df_embedding (pd.DataFrame): Embedding dataframe.\n species_1 (str): Chemical species 1.\n species_2 (str): Chemical species 2.\n\n Returns:\n np.array: Boolean array for the chemical systems that contain the specified species.\n \"\"\"\n\n chemicals = np.array(df_embedding.index)\n\n # regular expression patterns\n pattern_1 = r\"{}(?:(?={})|(?![a-zA-Z]))\".format(species_1, species_2)\n pattern_2 = r\"{}(?:(?={})|(?![a-zA-Z]))\".format(species_2, species_1)\n # get the mask\n mask = np.array(\n [\n True\n if re.search(pattern_1, chemical)\n and re.search(pattern_2, chemical)\n else True\n if re.search(pattern_1, chemical) and species_2 == \"default\"\n else True\n if re.search(pattern_2, chemical) and species_1 == \"default\"\n else True\n if species_1 == \"default\" and species_2 == \"default\"\n else False\n for chemical in chemicals\n ]\n )\n\n return mask"
},
{
"identifier": "blank_fig",
"path": "visualize_app/utils.py",
"snippet": "def blank_fig():\n fig = go.Figure(go.Scatter(x=[], y=[]))\n fig.update_layout(template=None)\n fig.update_xaxes(showgrid=False, showticklabels=False, zeroline=False)\n fig.update_yaxes(showgrid=False, showticklabels=False, zeroline=False)\n\n return fig"
}
] | import os
import ase
import pandas as pd
import dash_bootstrap_components as dbc
from pathlib import Path
from fire import Fire
from pymatgen.core import Structure
from dash import Dash, html, Input, Output, dcc, dash_table, no_update
from visualize_app.visualize_embedding import get_plotly_embedding
from visualize_app.visualize_structure import get_plotly_structure
from visualize_app.utils import fn_chemical_check, blank_fig | 3,080 | # set the app title
dbc.Row(
[
html.H1(
"Crystal Space for Binary Compounds 🔮",
style={
"textAlign": "center",
"color": "black",
},
),
html.Hr(),
]
),
# set selector for methods
dbc.Row(
[
# set selector for dimension reduction method
dbc.Col(
dbc.Select(
id="reduction-method-select",
options=[
{"label": "t-SNE", "value": "tsne"},
{"label": "UMAP", "value": "umap"},
{"label": "PCA", "value": "pca"},
],
value="umap",
),
width=3,
),
# set selector for embedding method
dbc.Col(
dbc.Select(
id="embedding-method-select",
options=[
{"label": "magpie", "value": "magpie"},
{"label": "mat2vec", "value": "mat2vec"},
{"label": "megnet16", "value": "megnet16"},
{"label": "oliynyk", "value": "oliynyk"},
{"label": "skipatom", "value": "skipatom"},
{"label": "random_200", "value": "random_200"},
],
value="magpie",
),
width=3,
),
],
justify="start",
),
html.Br(),
# set selector for chemical systems
dbc.Row(
[
# set selector for chemical system 1
dbc.Col(
dbc.Select(
id="chemical-system-select-1",
options=[
{
"label": ase.data.chemical_symbols[i],
"value": ase.data.chemical_symbols[i],
}
if i != 0
else {"label": "species 1", "value": "default"}
for i in range(104)
],
value="default",
),
width=2,
),
# set selector for chemical system 2
dbc.Col(
dbc.Select(
id="chemical-system-select-2",
options=[
{
"label": ase.data.chemical_symbols[i],
"value": ase.data.chemical_symbols[i],
}
if i != 0
else {"label": "species 2", "value": "default"}
for i in range(104)
],
value="default",
),
width=2,
),
],
justify="start",
),
dcc.Store(id="embedding-data-store", data=None),
html.Br(),
# set scatter and crystal structure
dbc.Row(
[
# set the scatter plot
dbc.Col(
dbc.Card(
[
dbc.CardHeader(
html.H4(
"Crystal Space",
style={
"textAlign": "center",
"color": "black",
},
)
),
dbc.CardBody(
[
dcc.Markdown(
id="method-name",
children="",
style={
"textAlign": "center",
"color": "black",
"fontSize": 20,
},
),
dcc.Graph(
id="3d-scatter-plot",
|
PARENT_DIR = Path(os.path.dirname(__file__))
# load label data
LABEL_DATA = pd.read_pickle(PARENT_DIR / "visualize_app/assets/df_binary_label.pkl")
LABEL_DATA["label"] = LABEL_DATA["label"].astype(str)
# load materials project data
MP_DATA = pd.read_pickle(PARENT_DIR / "visualize_app/assets/df_binary_mp.pkl")
def main(
debug: bool = False,
host: str = "0.0.0.0",
port: int = 8050,
):
"""Visualize the embedding of binary compounds.
:param debug: Debug mode, defaults to False
:param host: host address, defaults to "0.0.0.0"
:param port: port number, defaults to 8050
"""
# initialize the app - incorporate a Dash Bootstrap theme
external_stylesheets = [dbc.themes.MINTY]
app = Dash(__name__, external_stylesheets=external_stylesheets)
# app layout
app.layout = dbc.Container(
[
# set the app title
dbc.Row(
[
html.H1(
"Crystal Space for Binary Compounds 🔮",
style={
"textAlign": "center",
"color": "black",
},
),
html.Hr(),
]
),
# set selector for methods
dbc.Row(
[
# set selector for dimension reduction method
dbc.Col(
dbc.Select(
id="reduction-method-select",
options=[
{"label": "t-SNE", "value": "tsne"},
{"label": "UMAP", "value": "umap"},
{"label": "PCA", "value": "pca"},
],
value="umap",
),
width=3,
),
# set selector for embedding method
dbc.Col(
dbc.Select(
id="embedding-method-select",
options=[
{"label": "magpie", "value": "magpie"},
{"label": "mat2vec", "value": "mat2vec"},
{"label": "megnet16", "value": "megnet16"},
{"label": "oliynyk", "value": "oliynyk"},
{"label": "skipatom", "value": "skipatom"},
{"label": "random_200", "value": "random_200"},
],
value="magpie",
),
width=3,
),
],
justify="start",
),
html.Br(),
# set selector for chemical systems
dbc.Row(
[
# set selector for chemical system 1
dbc.Col(
dbc.Select(
id="chemical-system-select-1",
options=[
{
"label": ase.data.chemical_symbols[i],
"value": ase.data.chemical_symbols[i],
}
if i != 0
else {"label": "species 1", "value": "default"}
for i in range(104)
],
value="default",
),
width=2,
),
# set selector for chemical system 2
dbc.Col(
dbc.Select(
id="chemical-system-select-2",
options=[
{
"label": ase.data.chemical_symbols[i],
"value": ase.data.chemical_symbols[i],
}
if i != 0
else {"label": "species 2", "value": "default"}
for i in range(104)
],
value="default",
),
width=2,
),
],
justify="start",
),
dcc.Store(id="embedding-data-store", data=None),
html.Br(),
# set scatter and crystal structure
dbc.Row(
[
# set the scatter plot
dbc.Col(
dbc.Card(
[
dbc.CardHeader(
html.H4(
"Crystal Space",
style={
"textAlign": "center",
"color": "black",
},
)
),
dbc.CardBody(
[
dcc.Markdown(
id="method-name",
children="",
style={
"textAlign": "center",
"color": "black",
"fontSize": 20,
},
),
dcc.Graph(
id="3d-scatter-plot", | figure=blank_fig(), | 3 | 2023-11-07 17:10:38+00:00 | 4k |
serl-robot/serl | serl/agents/sac/sac_learner.py | [
{
"identifier": "Agent",
"path": "serl/agents/agent.py",
"snippet": "class Agent(struct.PyTreeNode):\n actor: TrainState\n rng: PRNGKey\n\n def eval_actions(self, observations: np.ndarray) -> np.ndarray:\n actions = _eval_actions(self.actor.apply_fn, self.actor.params, observations)\n return np.asarray(actions)\n\n def sample_actions(self, observations: np.ndarray) -> np.ndarray:\n actions, new_rng = _sample_actions(\n self.rng, self.actor.apply_fn, self.actor.params, observations\n )\n return np.asarray(actions), self.replace(rng=new_rng)"
},
{
"identifier": "Temperature",
"path": "serl/agents/sac/temperature.py",
"snippet": "class Temperature(nn.Module):\n initial_temperature: float = 1.0\n\n @nn.compact\n def __call__(self) -> jnp.ndarray:\n log_temp = self.param(\n \"log_temp\",\n init_fn=lambda key: jnp.full((), jnp.log(self.initial_temperature)),\n )\n return jnp.exp(log_temp)"
},
{
"identifier": "DatasetDict",
"path": "serl/data/dataset.py",
"snippet": "def _check_lengths(dataset_dict: DatasetDict, dataset_len: Optional[int] = None) -> int:\ndef _subselect(dataset_dict: DatasetDict, index: np.ndarray) -> DatasetDict:\ndef _sample(\n dataset_dict: Union[np.ndarray, DatasetDict], indx: np.ndarray\n) -> DatasetDict:\n def __init__(self, dataset_dict: DatasetDict, seed: Optional[int] = None):\n def np_random(self) -> np.random.RandomState:\n def seed(self, seed: Optional[int] = None) -> list:\n def __len__(self) -> int:\n def sample(\n self,\n batch_size: int,\n keys: Optional[Iterable[str]] = None,\n indx: Optional[np.ndarray] = None,\n ) -> frozen_dict.FrozenDict:\n def sample_jax(self, batch_size: int, keys: Optional[Iterable[str]] = None):\n def _sample_jax(rng, src, max_indx: int):\n def split(self, ratio: float) -> Tuple[\"Dataset\", \"Dataset\"]:\n def _trajectory_boundaries_and_returns(self) -> Tuple[list, list, list]:\n def filter(\n self, take_top: Optional[float] = None, threshold: Optional[float] = None\n ):\n def normalize_returns(self, scaling: float = 1000):\nclass Dataset(object):"
},
{
"identifier": "TanhNormal",
"path": "serl/distributions/tanh_normal.py",
"snippet": "class Normal(nn.Module):\n def __call__(self, inputs, *args, **kwargs) -> tfd.Distribution:"
},
{
"identifier": "Ensemble",
"path": "serl/networks/ensemble.py",
"snippet": "class Ensemble(nn.Module):\n net_cls: Type[nn.Module]\n num: int = 2\n\n @nn.compact\n def __call__(self, *args):\n ensemble = nn.vmap(\n self.net_cls,\n variable_axes={\"params\": 0},\n split_rngs={\"params\": True, \"dropout\": True},\n in_axes=None,\n out_axes=0,\n axis_size=self.num,\n )\n return ensemble()(*args)"
},
{
"identifier": "subsample_ensemble",
"path": "serl/networks/ensemble.py",
"snippet": "def subsample_ensemble(key: jax.random.PRNGKey, params, num_sample: int, num_qs: int):\n if num_sample is not None:\n all_indx = jnp.arange(0, num_qs)\n indx = jax.random.choice(key, a=all_indx, shape=(num_sample,), replace=False)\n\n if \"Ensemble_0\" in params:\n ens_params = jax.tree_util.tree_map(\n lambda param: param[indx], params[\"Ensemble_0\"]\n )\n params = params.copy(add_or_replace={\"Ensemble_0\": ens_params})\n else:\n params = jax.tree_util.tree_map(lambda param: param[indx], params)\n return params"
},
{
"identifier": "MLP",
"path": "serl/networks/mlp.py",
"snippet": "class MLP(nn.Module):\n hidden_dims: Sequence[int]\n activations: Callable[[jnp.ndarray], jnp.ndarray] = nn.relu\n activate_final: bool = False\n use_layer_norm: bool = False\n scale_final: Optional[float] = None\n dropout_rate: Optional[float] = None\n spectral_norm: bool = False\n\n @nn.compact\n def __call__(self, x: jnp.ndarray, training: bool = False) -> jnp.ndarray:\n\n for i, size in enumerate(self.hidden_dims):\n if i + 1 == len(self.hidden_dims) and self.scale_final is not None:\n x = nn.Dense(size, kernel_init=default_init(self.scale_final))(x)\n else:\n x = nn.Dense(size, kernel_init=default_init())(x)\n\n if i + 1 < len(self.hidden_dims) or self.activate_final:\n if self.dropout_rate is not None and self.dropout_rate > 0:\n x = nn.Dropout(rate=self.dropout_rate)(\n x, deterministic=not training\n )\n if self.use_layer_norm:\n x = nn.LayerNorm()(x)\n x = self.activations(x)\n return x"
},
{
"identifier": "StateActionValue",
"path": "serl/networks/state_action_value.py",
"snippet": "class StateActionValue(nn.Module):\n base_cls: nn.Module\n\n @nn.compact\n def __call__(\n self, observations: jnp.ndarray, actions: jnp.ndarray, *args, **kwargs\n ) -> jnp.ndarray:\n inputs = jnp.concatenate([observations, actions], axis=-1)\n outputs = self.base_cls()(inputs, *args, **kwargs)\n\n value = nn.Dense(1, kernel_init=default_init())(outputs)\n\n return jnp.squeeze(value, -1)"
}
] | from functools import partial
from typing import Dict, Optional, Sequence, Tuple
from flax import struct
from flax.training.train_state import TrainState
from serl.agents.agent import Agent
from serl.agents.sac.temperature import Temperature
from serl.data.dataset import DatasetDict
from serl.distributions import TanhNormal
from serl.networks import MLP, Ensemble, StateActionValue, subsample_ensemble
import gym
import jax
import jax.numpy as jnp
import optax | 2,238 | """Implementations of algorithms for continuous control."""
class SACLearner(Agent):
critic: TrainState
target_critic: TrainState
temp: TrainState
tau: float
discount: float
target_entropy: float
num_qs: int = struct.field(pytree_node=False)
num_min_qs: Optional[int] = struct.field(
pytree_node=False
) # See M in RedQ https://arxiv.org/abs/2101.05982
backup_entropy: bool = struct.field(pytree_node=False)
@classmethod
def create(
cls,
seed: int,
observation_space: gym.Space,
action_space: gym.Space,
actor_lr: float = 3e-4,
critic_lr: float = 3e-4,
temp_lr: float = 3e-4,
hidden_dims: Sequence[int] = (256, 256),
discount: float = 0.99,
tau: float = 0.005,
num_qs: int = 2,
num_min_qs: Optional[int] = None,
critic_dropout_rate: Optional[float] = None,
critic_layer_norm: bool = False,
target_entropy: Optional[float] = None,
init_temperature: float = 1.0,
backup_entropy: bool = True,
):
"""
An implementation of the version of Soft-Actor-Critic described in https://arxiv.org/abs/1812.05905
"""
action_dim = action_space.shape[-1]
observations = observation_space.sample()
actions = action_space.sample()
if target_entropy is None:
target_entropy = -action_dim / 2
rng = jax.random.PRNGKey(seed)
rng, actor_key, critic_key, temp_key = jax.random.split(rng, 4)
actor_base_cls = partial(MLP, hidden_dims=hidden_dims, activate_final=True)
actor_def = TanhNormal(actor_base_cls, action_dim)
actor_params = actor_def.init(actor_key, observations)["params"]
actor = TrainState.create(
apply_fn=actor_def.apply,
params=actor_params,
tx=optax.adam(learning_rate=actor_lr),
)
critic_base_cls = partial(
MLP,
hidden_dims=hidden_dims,
activate_final=True,
dropout_rate=critic_dropout_rate,
use_layer_norm=critic_layer_norm,
)
critic_cls = partial(StateActionValue, base_cls=critic_base_cls)
critic_def = Ensemble(critic_cls, num=num_qs)
critic_params = critic_def.init(critic_key, observations, actions)["params"]
critic = TrainState.create(
apply_fn=critic_def.apply,
params=critic_params,
tx=optax.adam(learning_rate=critic_lr),
)
target_critic_def = Ensemble(critic_cls, num=num_min_qs or num_qs)
target_critic = TrainState.create(
apply_fn=target_critic_def.apply,
params=critic_params,
tx=optax.GradientTransformation(lambda _: None, lambda _: None),
)
temp_def = Temperature(init_temperature)
temp_params = temp_def.init(temp_key)["params"]
temp = TrainState.create(
apply_fn=temp_def.apply,
params=temp_params,
tx=optax.adam(learning_rate=temp_lr),
)
return cls(
rng=rng,
actor=actor,
critic=critic,
target_critic=target_critic,
temp=temp,
target_entropy=target_entropy,
tau=tau,
discount=discount,
num_qs=num_qs,
num_min_qs=num_min_qs,
backup_entropy=backup_entropy,
)
| """Implementations of algorithms for continuous control."""
class SACLearner(Agent):
critic: TrainState
target_critic: TrainState
temp: TrainState
tau: float
discount: float
target_entropy: float
num_qs: int = struct.field(pytree_node=False)
num_min_qs: Optional[int] = struct.field(
pytree_node=False
) # See M in RedQ https://arxiv.org/abs/2101.05982
backup_entropy: bool = struct.field(pytree_node=False)
@classmethod
def create(
cls,
seed: int,
observation_space: gym.Space,
action_space: gym.Space,
actor_lr: float = 3e-4,
critic_lr: float = 3e-4,
temp_lr: float = 3e-4,
hidden_dims: Sequence[int] = (256, 256),
discount: float = 0.99,
tau: float = 0.005,
num_qs: int = 2,
num_min_qs: Optional[int] = None,
critic_dropout_rate: Optional[float] = None,
critic_layer_norm: bool = False,
target_entropy: Optional[float] = None,
init_temperature: float = 1.0,
backup_entropy: bool = True,
):
"""
An implementation of the version of Soft-Actor-Critic described in https://arxiv.org/abs/1812.05905
"""
action_dim = action_space.shape[-1]
observations = observation_space.sample()
actions = action_space.sample()
if target_entropy is None:
target_entropy = -action_dim / 2
rng = jax.random.PRNGKey(seed)
rng, actor_key, critic_key, temp_key = jax.random.split(rng, 4)
actor_base_cls = partial(MLP, hidden_dims=hidden_dims, activate_final=True)
actor_def = TanhNormal(actor_base_cls, action_dim)
actor_params = actor_def.init(actor_key, observations)["params"]
actor = TrainState.create(
apply_fn=actor_def.apply,
params=actor_params,
tx=optax.adam(learning_rate=actor_lr),
)
critic_base_cls = partial(
MLP,
hidden_dims=hidden_dims,
activate_final=True,
dropout_rate=critic_dropout_rate,
use_layer_norm=critic_layer_norm,
)
critic_cls = partial(StateActionValue, base_cls=critic_base_cls)
critic_def = Ensemble(critic_cls, num=num_qs)
critic_params = critic_def.init(critic_key, observations, actions)["params"]
critic = TrainState.create(
apply_fn=critic_def.apply,
params=critic_params,
tx=optax.adam(learning_rate=critic_lr),
)
target_critic_def = Ensemble(critic_cls, num=num_min_qs or num_qs)
target_critic = TrainState.create(
apply_fn=target_critic_def.apply,
params=critic_params,
tx=optax.GradientTransformation(lambda _: None, lambda _: None),
)
temp_def = Temperature(init_temperature)
temp_params = temp_def.init(temp_key)["params"]
temp = TrainState.create(
apply_fn=temp_def.apply,
params=temp_params,
tx=optax.adam(learning_rate=temp_lr),
)
return cls(
rng=rng,
actor=actor,
critic=critic,
target_critic=target_critic,
temp=temp,
target_entropy=target_entropy,
tau=tau,
discount=discount,
num_qs=num_qs,
num_min_qs=num_min_qs,
backup_entropy=backup_entropy,
)
| def update_actor(self, batch: DatasetDict) -> Tuple[Agent, Dict[str, float]]: | 2 | 2023-11-02 23:32:24+00:00 | 4k |
daily-demos/ai-meeting-assistant | server/call/operator.py | [
{
"identifier": "BotConfig",
"path": "server/config.py",
"snippet": "class BotConfig:\n _openai_api_key: str = None\n _openai_model_name: str = None\n _log_dir_path: str = None\n _daily_room_url: str = None\n _daily_meeting_token: str = None\n\n def __init__(self,\n openai_api_key: str,\n openai_model_name: str,\n daily_room_url: str = None,\n daily_meeting_token: str = None,\n log_dir_path: str = None):\n self._openai_api_key = openai_api_key\n self._openai_model_name = openai_model_name\n self._log_dir_path = log_dir_path\n self._daily_room_url = daily_room_url\n self._daily_meeting_token = daily_meeting_token\n\n @property\n def openai_model_name(self) -> str:\n return self._openai_model_name\n\n @property\n def openai_api_key(self) -> str:\n return self._openai_api_key\n\n @property\n def log_dir_path(self) -> str:\n return self._log_dir_path\n\n @property\n def daily_room_url(self) -> str:\n return self._daily_room_url\n\n @property\n def daily_meeting_token(self) -> str:\n return self._daily_meeting_token\n\n def get_log_file_path(self, room_name: str) -> str | None:\n \"\"\"Returns the log file for the given room name\"\"\"\n if not self.log_dir_path:\n return None\n return os.path.join(self.log_dir_path, f\"{room_name}.log\")\n\n def ensure_dirs(self):\n \"\"\"Creates required file directories if they do not already exist.\"\"\"\n if self.log_dir_path:\n ensure_dir(self.log_dir_path)"
},
{
"identifier": "Session",
"path": "server/call/session.py",
"snippet": "class Session(EventHandler):\n \"\"\"Class representing a single meeting happening within a Daily room.\"\"\"\n\n _config: BotConfig\n _assistant: Assistant\n _summary: Summary | None\n\n # Daily-related properties\n _id: str | None\n _call_client: CallClient | None\n _room: Room\n\n # Shutdown-related properties\n _is_destroyed: bool\n _shutdown_timer: threading.Timer | None = None\n\n def __init__(self, config: BotConfig):\n super().__init__()\n self._is_destroyed = False\n self._config = config\n self._summary = None\n self._id = None\n self._room = self._get_room_config(self._config.daily_room_url)\n self._logger = self.create_logger(self._room.name)\n self._assistant = OpenAIAssistant(\n config.openai_api_key,\n config.openai_model_name,\n self._logger)\n self._logger.info(\"Initialized session %s\", self._room.name)\n\n def start(self):\n # Start session on new thread\n task = threading.Thread(target=self._run)\n task.start()\n while not self.is_destroyed:\n time.sleep(1)\n\n @property\n def room_url(self) -> str:\n return self._room.url\n\n @property\n def id(self) -> str:\n return self._id\n\n @property\n def is_destroyed(self) -> bool:\n return self._is_destroyed\n\n def _get_room_config(self, room_url: str = None) -> Room:\n \"\"\"Creates a Daily room and uses it to start a session\"\"\"\n parsed_url = urlparse(room_url)\n room_name = os.path.basename(parsed_url.path)\n token = self._config.daily_meeting_token\n room = Room(url=room_url, name=room_name, token=token)\n return room\n\n def _run(self):\n \"\"\"Waits for at least one person to join the associated Daily room,\n then joins, starts transcription, and begins registering context.\"\"\"\n call_client = CallClient(event_handler=self)\n self._call_client = call_client\n room = self._room\n self._logger.info(\"Joining Daily room %s\", room.url)\n call_client.join(\n room.url,\n room.token,\n completion=self.on_joined_meeting)\n\n async def _generate_clean_transcript(self) -> bool:\n \"\"\"Generates a clean transcript from the raw context.\"\"\"\n if self._is_destroyed:\n return True\n try:\n await self._assistant.cleanup_transcript()\n except Exception as e:\n self._logger.warning(\n \"Failed to generate clean transcript: %s\", e)\n return False\n\n async def _query_assistant(self, custom_query: str = None) -> Future[str]:\n \"\"\"Queries the configured assistant with either the given query, or the\n configured assistant's default\"\"\"\n\n want_cached_summary = not bool(custom_query)\n answer = None\n\n # If we want a generic summary, and we have a cached one that's less than 15 seconds old,\n # just return that.\n if want_cached_summary and self._summary:\n seconds_since_generation = time.time() - self._summary.retrieved_at\n if seconds_since_generation < 15:\n self._logger.info(\"Returning cached summary\")\n answer = self._summary.content\n\n # If we don't have a cached summary, or it's too old, query the\n # assistant.\n if not answer:\n self._logger.info(\"Querying assistant\")\n try:\n answer = await self._assistant.query(custom_query)\n # If there was no custom query provided, save this as cached\n # summary.\n if want_cached_summary:\n self._logger.info(\"Saving general summary\")\n self._summary = Summary(\n content=answer, retrieved_at=time.time())\n except NoContextError:\n answer = (\"I don't have any context saved yet. 
Please speak to add some context or \"\n \"confirm that transcription is enabled.\")\n except Exception as e:\n self._logger.error(\n \"Failed to query assistant: %s\", e)\n answer = (\"Something went wrong while generating the summary. Please check the server logs.\")\n\n return answer\n\n def on_app_message_sent(self, _, error: str = None):\n \"\"\"Callback invoked when an app message is sent.\"\"\"\n if error:\n self._logger.error(\"Failed to send app message: %s\", error)\n\n def on_app_message(self,\n message: str,\n sender: str):\n \"\"\"Callback invoked when a Daily app message is received.\"\"\"\n # TODO message appears to be a dict when our docs say str.\n # For now dumping it to a JSON string and parsing it back out,\n # until designed behavior is clarified.\n jsonMsg = json.dumps(message)\n data = json.loads(jsonMsg)\n kind = data.get(\"kind\")\n if kind != \"assist\":\n return\n\n query = data.get(\"query\")\n\n recipient = sender\n\n # If this is a broadcast, set recipient to all participants\n if bool(data.get(\"broadcast\")):\n recipient = None\n\n task = data.get(\"task\")\n\n answer: str = None\n if task == \"summary\" or task == \"query\":\n answer = asyncio.run(self._query_assistant(query))\n elif task == \"transcript\":\n answer = self._assistant.get_clean_transcript()\n\n self._call_client.send_app_message({\n \"kind\": f\"ai-{task}\",\n \"data\": answer\n }, participant=recipient, completion=self.on_app_message_sent)\n\n def on_left_meeting(self, _, error: str = None):\n \"\"\"Cancels any ongoing shutdown timer and marks this session as destroyed\"\"\"\n if error:\n self._logger.error(\n \"Encountered error while leaving meeting: %s\", error)\n\n # There's a chance of a shutdown timer being ongoing at the time the bot\n # is kicked or leaves for other reasons. Clean up the shutdown timer if\n # that is the case.\n if self._shutdown_timer:\n self._logger.info(\n \"Participant left meeting - cancelling shutdown.\")\n self.cancel_shutdown_timer()\n\n # Similar to above, if this session has already been destroyed for any other reason,\n # Don't do this again.\n if self._is_destroyed:\n self._logger.info(\"Session %s already destroyed.\", self._room.url)\n return\n\n self._logger.info(\"Left meeting %s\", self._room.url)\n self._assistant.destroy()\n self._is_destroyed = True\n\n def on_joined_meeting(self, join_data, error):\n \"\"\"Callback invoked when the bot has joined the Daily room.\"\"\"\n if error:\n raise Exception(\"failed to join meeting\", error)\n self._logger.info(\"Bot joined meeting %s\", self._room.url)\n self._id = join_data[\"participants\"][\"local\"][\"id\"]\n\n # TODO (Liza): Remove this when transcription started events are invoked\n # as expected\n threading.Thread(\n target=self.start_transcript_polling,\n daemon=True).start()\n\n self._call_client.set_user_name(\"Daily AI Assistant\")\n\n # Check whether the bot is actually the only one in the call, in which case\n # the shutdown timer should start. 
The shutdown will be cancelled if\n # daily-python detects someone new joining.\n self.maybe_start_shutdown()\n\n def on_error(self, message):\n \"\"\"Callback invoked when an error is received.\"\"\"\n self._logger.error(\"Received meeting error: %s\", message)\n\n async def poll_async_func(self, async_func, interval):\n while True:\n await async_func()\n if self._is_destroyed:\n return\n await asyncio.sleep(interval)\n\n def start_transcript_polling(self):\n \"\"\"Starts an asyncio event loop and schedules generate_clean_transcript to run every 15 seconds.\"\"\"\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n loop.run_until_complete(\n self.poll_async_func(\n self._generate_clean_transcript, 15))\n\n # TODO: (Liza) Uncomment this when transcription events are properly invoked\n # if the transcription is starte before the bot joins.\n # def on_transcription_started(self, status):\n # self._logger.info(\"Transcription started: %s\", status)\n # threading.Thread(target=self.start_transcript_polling, daemon=True).start()\n\n def on_transcription_stopped(self, stopped_by: str, stopped_by_error: str):\n self._logger.info(\n \"Transcription stopped: %s (%s)\",\n stopped_by,\n stopped_by_error)\n\n def on_transcription_error(self, message):\n \"\"\"Callback invoked when a transcription error is received.\"\"\"\n self._logger.error(\"Received transcription error: %s\", message)\n\n def on_transcription_message(self, message):\n \"\"\"Callback invoked when a transcription message is received.\"\"\"\n user_name = message[\"user_name\"]\n text = message[\"text\"]\n timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')\n metadata = [user_name, 'voice', timestamp]\n self._assistant.register_new_context(text, metadata)\n\n def on_participant_joined(self, participant):\n # As soon as someone joins, stop shutdown process if one is in progress\n if self._shutdown_timer:\n self._logger.info(\"Participant joined - cancelling shutdown.\")\n self.cancel_shutdown_timer()\n\n def on_participant_left(self,\n participant,\n reason):\n \"\"\"Callback invoked when a participant leaves the Daily room.\"\"\"\n self.maybe_start_shutdown()\n\n def on_call_state_updated(self, state: Mapping[str, Any]) -> None:\n \"\"\"Invoked when the Daily call state has changed\"\"\"\n self._logger.info(\n \"Call state updated for session %s: %s\",\n self._room.url,\n state)\n if state == \"left\" and not self._is_destroyed:\n self._logger.info(\"Call state left, destroying immediately\")\n self.on_left_meeting(None)\n\n def maybe_start_shutdown(self) -> bool:\n \"\"\"Checks if the session should be shut down, and if so, starts the shutdown process.\"\"\"\n count = self._call_client.participant_counts()['present']\n self._logger.info(\n \"Participant count: %s\", count)\n\n # If there is at least one present participant, do nothing.\n if count > 1:\n return False\n\n self._logger.info(\"Starting shutdown timer\")\n\n # If there are no present participants left, wait 1 minute and\n # start shutdown.\n self._shutdown_timer = threading.Timer(60.0, self.shutdown)\n self._shutdown_timer.start()\n return True\n\n def shutdown(self):\n \"\"\"Shuts down the session, leaving the Daily room, invoking the shutdown callback,\n and cancelling any pending Futures\"\"\"\n self._logger.info(\n f\"Session {self._id} shutting down. 
Active threads: %s\",\n threading.active_count())\n\n self.cancel_shutdown_timer()\n self._call_client.leave(self.on_left_meeting)\n\n def cancel_shutdown_timer(self):\n \"\"\"Cancels the live shutdown timer\"\"\"\n if self._shutdown_timer:\n self._shutdown_timer.cancel()\n self._shutdown_timer = None\n\n def create_logger(self, name) -> Logger:\n \"\"\"Creates a logger for this session\"\"\"\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG)\n\n formatter = logging.Formatter(\n '%(asctime)s -[%(threadName)s-%(thread)s] - %(levelname)s - %(message)s')\n\n log_file_path = self._config.get_log_file_path(self._room.name)\n if log_file_path:\n file_handler = logging.FileHandler(\n self._config.get_log_file_path(self._room.name))\n file_handler.setFormatter(formatter)\n logger.addHandler(file_handler)\n else:\n stream_handler = logging.StreamHandler(sys.stdout)\n stream_handler.setFormatter(formatter)\n logger.addHandler(stream_handler)\n\n return logger"
}
] | import threading
import polling2
from daily import Daily
from server.config import BotConfig
from server.call.session import Session | 3,499 | """Module which keeps track of all ongoing sessions and provides
querying functionality to HTTP requesters."""
class Operator():
_sessions: list[Session]
_is_shutting_down: bool
_lock: threading.Lock
def __init__(self):
self._is_shutting_down = False
self._lock = threading.Lock()
self._sessions = []
Daily.init()
t = threading.Thread(target=self.cleanup)
t.start()
| """Module which keeps track of all ongoing sessions and provides
querying functionality to HTTP requesters."""
class Operator():
_sessions: list[Session]
_is_shutting_down: bool
_lock: threading.Lock
def __init__(self):
self._is_shutting_down = False
self._lock = threading.Lock()
self._sessions = []
Daily.init()
t = threading.Thread(target=self.cleanup)
t.start()
| def create_session(self, bot_config: BotConfig) -> Session: | 0 | 2023-11-02 11:17:16+00:00 | 4k |
Kushalhk/AutoFilter | plugins/gfilters.py | [
{
"identifier": "add_gfilter",
"path": "database/gfilters_mdb.py",
"snippet": "async def add_gfilter(gfilters, text, reply_text, btn, file, alert):\n mycol = mydb[str(gfilters)]\n # mycol.create_index([('text', 'text')])\n\n data = {\n 'text':str(text),\n 'reply':str(reply_text),\n 'btn':str(btn),\n 'file':str(file),\n 'alert':str(alert)\n }\n\n try:\n mycol.update_one({'text': str(text)}, {\"$set\": data}, upsert=True)\n except:\n logger.exception('Some error occured!', exc_info=True)"
},
{
"identifier": "get_gfilters",
"path": "database/gfilters_mdb.py",
"snippet": "async def get_gfilters(gfilters):\n mycol = mydb[str(gfilters)]\n\n texts = []\n query = mycol.find()\n try:\n for file in query:\n text = file['text']\n texts.append(text)\n except:\n pass\n return texts"
},
{
"identifier": "delete_gfilter",
"path": "database/gfilters_mdb.py",
"snippet": "async def delete_gfilter(message, text, gfilters):\n mycol = mydb[str(gfilters)]\n \n myquery = {'text':text }\n query = mycol.count_documents(myquery)\n if query == 1:\n mycol.delete_one(myquery)\n await message.reply_text(\n f\"'`{text}`' deleted. I'll not respond to that gfilter anymore.\",\n quote=True,\n parse_mode=enums.ParseMode.MARKDOWN\n )\n else:\n await message.reply_text(\"Couldn't find that gfilter!\", quote=True)"
},
{
"identifier": "count_gfilters",
"path": "database/gfilters_mdb.py",
"snippet": "async def count_gfilters(gfilters):\n mycol = mydb[str(gfilters)]\n\n count = mycol.count()\n return False if count == 0 else count"
},
{
"identifier": "active_connection",
"path": "database/connections_mdb.py",
"snippet": "async def active_connection(user_id):\n\n query = mycol.find_one(\n { \"_id\": user_id },\n { \"_id\": 0, \"group_details\": 0 }\n )\n if not query:\n return None\n\n group_id = query['active_group']\n return int(group_id) if group_id != None else None"
},
{
"identifier": "get_file_id",
"path": "utils.py",
"snippet": "def get_file_id(msg: Message):\n if msg.media:\n for message_type in (\n \"photo\",\n \"animation\",\n \"audio\",\n \"document\",\n \"video\",\n \"video_note\",\n \"voice\",\n \"sticker\"\n ):\n obj = getattr(msg, message_type)\n if obj:\n setattr(obj, \"message_type\", message_type)\n return obj"
},
{
"identifier": "gfilterparser",
"path": "utils.py",
"snippet": "def gfilterparser(text, keyword):\n if \"buttonalert\" in text:\n text = (text.replace(\"\\n\", \"\\\\n\").replace(\"\\t\", \"\\\\t\"))\n buttons = []\n note_data = \"\"\n prev = 0\n i = 0\n alerts = []\n for match in BTN_URL_REGEX.finditer(text):\n # Check if btnurl is escaped\n n_escapes = 0\n to_check = match.start(1) - 1\n while to_check > 0 and text[to_check] == \"\\\\\":\n n_escapes += 1\n to_check -= 1\n\n # if even, not escaped -> create button\n if n_escapes % 2 == 0:\n note_data += text[prev:match.start(1)]\n prev = match.end(1)\n if match.group(3) == \"buttonalert\":\n # create a thruple with button label, url, and newline status\n if bool(match.group(5)) and buttons:\n buttons[-1].append(InlineKeyboardButton(\n text=match.group(2),\n callback_data=f\"gfilteralert:{i}:{keyword}\"\n ))\n else:\n buttons.append([InlineKeyboardButton(\n text=match.group(2),\n callback_data=f\"gfilteralert:{i}:{keyword}\"\n )])\n i += 1\n alerts.append(match.group(4))\n elif bool(match.group(5)) and buttons:\n buttons[-1].append(InlineKeyboardButton(\n text=match.group(2),\n url=match.group(4).replace(\" \", \"\")\n ))\n else:\n buttons.append([InlineKeyboardButton(\n text=match.group(2),\n url=match.group(4).replace(\" \", \"\")\n )])\n\n else:\n note_data += text[prev:to_check]\n prev = match.start(1) - 1\n else:\n note_data += text[prev:]\n\n try:\n return note_data, buttons, alerts\n except:\n return note_data, buttons, None"
},
{
"identifier": "split_quotes",
"path": "utils.py",
"snippet": "def split_quotes(text: str) -> List:\n if not any(text.startswith(char) for char in START_CHAR):\n return text.split(None, 1)\n counter = 1 # ignore first char -> is some kind of quote\n while counter < len(text):\n if text[counter] == \"\\\\\":\n counter += 1\n elif text[counter] == text[0] or (text[0] == SMART_OPEN and text[counter] == SMART_CLOSE):\n break\n counter += 1\n else:\n return text.split(None, 1)\n\n # 1 to avoid starting quote, and counter is exclusive so avoids ending\n key = remove_escapes(text[1:counter].strip())\n # index will be in range, or `else` would have been executed and returned\n rest = text[counter + 1:].strip()\n if not key:\n key = text[0] + text[0]\n return list(filter(None, [key, rest]))"
},
{
"identifier": "ADMINS",
"path": "info.py",
"snippet": "ADMINS = [int(admin) if id_pattern.search(admin) else admin for admin in environ.get('ADMINS', '').split()]"
}
] | import io
from pyrogram import filters, Client, enums
from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup
from database.gfilters_mdb import(
add_gfilter,
get_gfilters,
delete_gfilter,
count_gfilters
)
from database.connections_mdb import active_connection
from utils import get_file_id, gfilterparser, split_quotes
from info import ADMINS | 2,063 |
@Client.on_message(filters.command(['gfilter', 'addg']) & filters.incoming & filters.user(ADMINS))
async def addgfilter(client, message):
args = message.text.html.split(None, 1)
if len(args) < 2:
await message.reply_text("Command Incomplete :(", quote=True)
return
extracted = split_quotes(args[1])
text = extracted[0].lower()
if not message.reply_to_message and len(extracted) < 2:
await message.reply_text("Add some content to save your filter!", quote=True)
return
if (len(extracted) >= 2) and not message.reply_to_message:
reply_text, btn, alert = gfilterparser(extracted[1], text)
fileid = None
if not reply_text:
await message.reply_text("You cannot have buttons alone, give some text to go with it!", quote=True)
return
elif message.reply_to_message and message.reply_to_message.reply_markup:
try:
rm = message.reply_to_message.reply_markup
btn = rm.inline_keyboard
msg = get_file_id(message.reply_to_message)
if msg:
fileid = msg.file_id
reply_text = message.reply_to_message.caption.html
else:
reply_text = message.reply_to_message.text.html
fileid = None
alert = None
except:
reply_text = ""
btn = "[]"
fileid = None
alert = None
elif message.reply_to_message and message.reply_to_message.media:
try:
msg = get_file_id(message.reply_to_message)
fileid = msg.file_id if msg else None
reply_text, btn, alert = gfilterparser(extracted[1], text) if message.reply_to_message.sticker else gfilterparser(message.reply_to_message.caption.html, text)
except:
reply_text = ""
btn = "[]"
alert = None
elif message.reply_to_message and message.reply_to_message.text:
try:
fileid = None
reply_text, btn, alert = gfilterparser(message.reply_to_message.text.html, text)
except:
reply_text = ""
btn = "[]"
alert = None
else:
return
await add_gfilter('gfilters', text, reply_text, btn, fileid, alert)
await message.reply_text(
f"GFilter for `{text}` added",
quote=True,
parse_mode=enums.ParseMode.MARKDOWN
)
@Client.on_message(filters.command(['viewgfilters', 'gfilters']) & filters.incoming & filters.user(ADMINS))
async def get_all_gfilters(client, message):
|
@Client.on_message(filters.command(['gfilter', 'addg']) & filters.incoming & filters.user(ADMINS))
async def addgfilter(client, message):
args = message.text.html.split(None, 1)
if len(args) < 2:
await message.reply_text("Command Incomplete :(", quote=True)
return
extracted = split_quotes(args[1])
text = extracted[0].lower()
if not message.reply_to_message and len(extracted) < 2:
await message.reply_text("Add some content to save your filter!", quote=True)
return
if (len(extracted) >= 2) and not message.reply_to_message:
reply_text, btn, alert = gfilterparser(extracted[1], text)
fileid = None
if not reply_text:
await message.reply_text("You cannot have buttons alone, give some text to go with it!", quote=True)
return
elif message.reply_to_message and message.reply_to_message.reply_markup:
try:
rm = message.reply_to_message.reply_markup
btn = rm.inline_keyboard
msg = get_file_id(message.reply_to_message)
if msg:
fileid = msg.file_id
reply_text = message.reply_to_message.caption.html
else:
reply_text = message.reply_to_message.text.html
fileid = None
alert = None
except:
reply_text = ""
btn = "[]"
fileid = None
alert = None
elif message.reply_to_message and message.reply_to_message.media:
try:
msg = get_file_id(message.reply_to_message)
fileid = msg.file_id if msg else None
reply_text, btn, alert = gfilterparser(extracted[1], text) if message.reply_to_message.sticker else gfilterparser(message.reply_to_message.caption.html, text)
except:
reply_text = ""
btn = "[]"
alert = None
elif message.reply_to_message and message.reply_to_message.text:
try:
fileid = None
reply_text, btn, alert = gfilterparser(message.reply_to_message.text.html, text)
except:
reply_text = ""
btn = "[]"
alert = None
else:
return
await add_gfilter('gfilters', text, reply_text, btn, fileid, alert)
await message.reply_text(
f"GFilter for `{text}` added",
quote=True,
parse_mode=enums.ParseMode.MARKDOWN
)
@Client.on_message(filters.command(['viewgfilters', 'gfilters']) & filters.incoming & filters.user(ADMINS))
async def get_all_gfilters(client, message): | texts = await get_gfilters('gfilters') | 1 | 2023-11-03 12:21:26+00:00 | 4k |
tiendatnguyen-vision/Orbit-symmetrize | RotatedMNIST/LPS/emlp-pytorch/emlp_pytorch/reps/linear_operator_base.py | [
{
"identifier": "torch_dtype",
"path": "RotatedMNIST/LPS/emlp-pytorch/emlp_pytorch/reps/utils.py",
"snippet": "def torch_dtype(dtype):\n \"\"\" Convert a string representation of a torch dtype to the actual\n torch dtype. \"\"\"\n if isinstance(dtype, torch.dtype):\n return dtype\n if dtype in _TORCH_DTYPES:\n return _TORCH_DTYPES[dtype]\n return torch.empty(0).to(dtype).dtype"
},
{
"identifier": "dtype_cast",
"path": "RotatedMNIST/LPS/emlp-pytorch/emlp_pytorch/reps/utils.py",
"snippet": "def dtype_cast(A, B):\n \"\"\" Casts A and B to the same dtype, preferring complex dtypes over real dtypes. \"\"\"\n if A.dtype in (torch.complex64, torch.complex128):\n B = B.to(A.dtype)\n if B.dtype in (torch.complex64, torch.complex128):\n A = A.to(B.dtype)\n return A, B"
},
{
"identifier": "torch_device",
"path": "RotatedMNIST/LPS/emlp-pytorch/emlp_pytorch/reps/utils.py",
"snippet": "def torch_device(device):\n \"\"\" Get the device from a string or torch.device \"\"\"\n if device is None:\n device = 'cpu'\n return torch.empty(0, device=device).device"
},
{
"identifier": "device_cast",
"path": "RotatedMNIST/LPS/emlp-pytorch/emlp_pytorch/reps/utils.py",
"snippet": "def device_cast(A, B):\n \"\"\" Casts A and B to the same device, preferring GPU over CPU. \"\"\"\n if A.device.type == 'cuda':\n B = B.to(A.device)\n if B.device.type == 'cuda':\n A = A.to(B.device)\n return A, B"
},
{
"identifier": "get_dtype",
"path": "RotatedMNIST/LPS/emlp-pytorch/emlp_pytorch/reps/utils.py",
"snippet": "def get_dtype(operators, dtypes=None):\n \"\"\" Returns the dtype of the first operator that has a dtype attribute. \"\"\"\n if dtypes is None:\n dtypes = []\n for obj in operators:\n if obj is not None and hasattr(obj, 'dtype'):\n dtypes.append(obj.dtype)\n return dtypes[0]"
}
] | import warnings
import torch
from torch import nn
from .utils import torch_dtype, dtype_cast, torch_device, device_cast, get_dtype | 2,797 |
def _matvec(self, v):
return self.A._rmatvec(v)
def _rmatvec(self, v):
return self.A._matvec(v)
def _matmat(self, V):
return self.A._rmatmat(V)
def _rmatmat(self, V):
return self.A._matmat(V)
def to(self, device):
self.A = self.A.to(device)
self.device = self.A.device
return self
class _TransposedLinearOperator(LinearOperator):
"""Transposition of arbitrary linear operator"""
def __init__(self, A):
super().__init__()
self.A = A
self.init(dtype=A.dtype, shape=(A.size(1), A.size(0)), device=A.device)
def _matvec(self, v):
# torch.conj works also on sparse matrices
return torch.conj(self.A._rmatvec(torch.conj(v)))
def _rmatvec(self, v):
return torch.conj(self.A._matvec(torch.conj(v)))
def _matmat(self, V):
# torch.conj works also on sparse matrices
return torch.conj(self.A._rmatmat(torch.conj(V)))
def _rmatmat(self, V):
return torch.conj(self.A._matmat(torch.conj(V)))
def to(self, device):
self.A = self.A.to(device)
self.device = self.A.device
return self
class _SumLinearOperator(LinearOperator):
""" Sum of two Linear Operators """
def __init__(self, A, B):
super().__init__()
if not isinstance(A, LinearOperator) or not isinstance(B, LinearOperator):
raise ValueError('both operands have to be a LinearOperator')
if A.shape != B.shape:
raise ValueError(f'cannot add {A} and {B}: shape mismatch')
self.A = A
self.B = B
self.init(get_dtype([A, B]), A.shape, A.device)
def _matvec(self, v):
return self.A.matvec(v) + self.B.matvec(v)
def _rmatvec(self, v):
return self.A.rmatvec(v) + self.B.rmatvec(v)
def _rmatmat(self, V):
return self.A.rmatmat(V) + self.B.rmatmat(V)
def _matmat(self, V):
return self.A.matmat(V) + self.B.matmat(V)
def _adjoint(self):
return self.A.H() + self.B.H()
def invt(self):
""" Inverse transpose this linear operator. """
return self.A.invt() + self.B.invt()
def to(self, device):
self.A = self.A.to(device)
self.B = self.B.to(device)
self.device = self.A.device
return self
class _ProductLinearOperator(LinearOperator):
""" Product of two Linear Operators """
def __init__(self, A, B):
super().__init__()
if not isinstance(A, LinearOperator) or not isinstance(B, LinearOperator):
raise ValueError('both operands have to be a LinearOperator')
if A.size(1) != B.size(0):
raise ValueError(f'cannot multiply {A} and {B}: shape mismatch')
self.A = A
self.B = B
self.init(get_dtype([A, B]), (A.size(0), B.size(1)), A.device)
def _matvec(self, v):
return self.A.matvec(self.B.matvec(v))
def _rmatvec(self, v):
return self.B.rmatvec(self.A.rmatvec(v))
def _rmatmat(self, V):
return self.B.rmatmat(self.A.rmatmat(V))
def _matmat(self, V):
return self.A.matmat(self.B.matmat(V))
def _adjoint(self):
return self.B.H() * self.A.H()
def invt(self):
return self.A.invt()*self.B.invt()
def to_dense(self):
A = self.A.to_dense() if isinstance(self.A, LinearOperator) else self.A
B = self.B.to_dense() if isinstance(self.B, LinearOperator) else self.B
| # pylint: disable=W0212:protected-access
""" Abstract linear algebra library.
This module defines a class hierarchy that implements a kind of "lazy"
matrix representation, called the ``LinearOperator``. It can be used to do
linear algebra with extremely large sparse or structured matrices, without
representing those explicitly in memory. Such matrices can be added,
multiplied, transposed, etc.
As a motivating example, suppose you have a matrix where almost all of
the elements have the value one. The standard sparse matrix representation
skips the storage of zeros, but not ones. By contrast, a LinearOperator is
able to represent such matrices efficiently. First, we need a compact way to
represent an all-ones matrix::
>>> import torch
>>> class Ones(LinearOperator):
... def __init__(self, shape):
... super(Ones, self).__init__(dtype=None, shape=shape)
... def _matvec(self, v):
... return v.sum().repeat(self.size(0))
Instances of this class emulate ``torch.ones(shape)``, but using a constant
amount of storage, independent of ``shape``. The ``_matvec`` method specifies
how this linear operator multiplies with (operates on) a vector. We can now
add this operator to a sparse matrix that stores only offsets from one::
>>> offsets = torch.tensor([[1, 0, 2], [0, -1, 0], [0, 0, 3]]).to_sparse()
>>> A = aslinearoperator(offsets) + Ones(offsets.shape)
>>> A.dot(torch.tensor([1, 2, 3]))
tensor([13, 4, 15])
The result is the same as that given by its dense, explicitly-stored
counterpart::
>>> (torch.ones(A.shape, A.dtype) + offsets.to_dense()).dot(torch.tensor([1, 2, 3]))
tensor([13, 4, 15])
Several algorithms in the ``torch.sparse`` library are able to operate on
``LinearOperator`` instances.
"""
def isscalar(x):
""" Is x a scalar? """
return isinstance(x, (int, float, complex))
def isintlike(x):
""" Is x an integer-like object? """
return isinstance(x, int)
def isshape(x, nonneg=False):
"""Is x a valid 2-tuple of dimensions?
If nonneg, also checks that the dimensions are non-negative.
"""
try:
# Assume it's a tuple of matrix dimensions (M, N)
(M, N) = x
except Exception:
return False
else:
if (isscalar(M) and isscalar(N)) or (isintlike(M) and isintlike(N)):
if not nonneg or (M >= 0 and N >= 0):
return True
return False
class LinearOperator(nn.Module):
""" Common interface for performing matrix vector products
Many iterative methods (e.g. cg, gmres) do not need to know the
individual entries of a matrix to solve a linear system A*x=b.
Such solvers only require the computation of matrix vector
products, A*v where v is a dense vector. This class serves as
an abstract interface between iterative solvers and matrix-like
objects.
To construct a concrete LinearOperator, either pass appropriate
callables to the constructor of this class, or subclass it.
A subclass must implement either one of the methods ``_matvec``
and ``_matmat``, and the attributes/properties ``shape`` (pair of
integers) and ``dtype`` (may be None). It may call the ``__init__``
on this class to have these attributes validated. Implementing
``_matvec`` automatically implements ``_matmat`` (using a naive
algorithm) and vice-versa.
Optionally, a subclass may implement ``_rmatvec`` or ``_adjoint``
to implement the Hermitian adjoint (conjugate transpose). As with
``_matvec`` and ``_matmat``, implementing either ``_rmatvec`` or
``_adjoint`` implements the other automatically. Implementing
``_adjoint`` is preferable; ``_rmatvec`` is mostly there for
backwards compatibility.
Parameters
----------
shape : tuple
Matrix dimensions (M, N).
matvec : callable f(v)
Returns A * v.
rmatvec : callable f(v)
Returns A^H * v, where A^H is the conjugate transpose of A.
matmat : callable f(V)
Returns A * V, where V is a dense matrix with dimensions (N, K).
dtype : dtype
Data type of the matrix.
rmatmat : callable f(V)
Returns A^H * V, where V is a dense matrix with dimensions (M, K).
Attributes
----------
args : tuple
For linear operators describing products etc. of other linear
operators, the operands of the binary operation.
ndim : int
Number of dimensions (this is always 2)
See Also
--------
aslinearoperator : Construct LinearOperators
Notes
-----
The user-defined matvec() function must properly handle the case
where v has shape (N,) as well as the (N,1) case. The shape of
the return type is handled internally by LinearOperator.
LinearOperator instances can also be multiplied, added with each
other and exponentiated, all lazily: the result of these operations
is always a new, composite LinearOperator, that defers linear
operations to the original operators and combines the results.
More details regarding how to subclass a LinearOperator and several
examples of concrete LinearOperator instances can be found in the
external project `PyLops <https://pylops.readthedocs.io>`_.
Examples
--------
>>> def mv(v):
... return torch.tensor([2*v[0], 3*v[1]])
...
>>> A = LinearOperator((2,2), matvec=mv)
>>> A
<2x2 _CustomLinearOperator with dtype=torch.float32>
>>> A.matvec(torch.ones(2))
tensor([ 2., 3.])
>>> A * torch.ones(2)
tensor([ 2., 3.])
"""
def __new__(cls, *args, **kwargs):
if cls is LinearOperator:
# Operate as _CustomLinearOperator factory.
return super(LinearOperator, cls).__new__(_CustomLinearOperator)
obj = super(LinearOperator, cls).__new__(cls)
if (type(obj)._matvec == LinearOperator._matvec
and type(obj)._matmat == LinearOperator._matmat):
warnings.warn("LinearOperator subclass should implement"
" at least one of _matvec and _matmat.",
category=RuntimeWarning, stacklevel=2)
return obj
def __init__(self):
super().__init__()
self.ndim = 2
self.dtype = None
self.shape = None
self.device = None
def init(self, dtype, shape, device):
""" Initialize this LinearOperator.
To be called by subclasses. ``dtype`` may be None; ``shape`` should
be convertible to a length-2 tuple.
Called from subclasses at the end of the __init__ routine.
"""
if dtype is None:
dtype = torch.float # force float 32
else:
if not isinstance(dtype, torch.dtype):
dtype = torch_dtype(dtype)
shape = tuple(shape)
if not isshape(shape):
raise ValueError(f"invalid shape {(shape,)} (must be 2-d)")
self.dtype = dtype
self.shape = torch.Size(shape)
self.device = torch_device(device)
def size(self, dim=None):
""" Return the size of this LinearOperator.
This is a synonym for ``shape``.
"""
return self.shape if dim is None else self.shape[dim]
def _matmat(self, V):
""" Default matrix-matrix multiplication handler.
Falls back on the user-defined _matvec method, so defining that will
define matrix multiplication (though in a very suboptimal way).
"""
return torch.hstack([self.matvec(col.reshape(-1, 1)) for col in V.T])
def _matvec(self, v):
""" Default matrix-vector multiplication handler.
If self is a linear operator of shape (M, N), then this method will
be called on a shape (N,) or (N, 1) ndarray, and should return a
shape (M,) or (M, 1) ndarray.
This default implementation falls back on _matmat, so defining that
will define matrix-vector multiplication as well.
"""
return self.matmat(v.reshape(-1, 1))
def matvec(self, v):
""" Matrix-vector multiplication.
Performs the operation y=A*v where A is an MxN linear
operator and v is a column vector or 1-d array.
Parameters
----------
v : {matrix, ndarray}
An array with shape (N,) or (N,1).
Returns
-------
y : {matrix, ndarray}
A matrix or ndarray with shape (M,) or (M,1) depending
on the type and shape of the x argument.
Notes
-----
This matvec wraps the user-specified matvec routine or overridden
_matvec method to ensure that y has the correct shape and type.
"""
M, N = self.shape
if v.shape != (N,) and v.shape != (N, 1):
raise ValueError('dimension mismatch')
y = self._matvec(v)
if v.ndim == 1:
y = y.reshape(M)
elif v.ndim == 2:
y = y.reshape(M, 1)
else:
raise ValueError('invalid shape returned by user-defined matvec()')
return y
def rmatvec(self, v):
""" Adjoint matrix-vector multiplication.
Performs the operation y = A^H * v where A is an MxN linear
operator and v is a column vector or 1-d array.
Parameters
----------
v : {matrix, ndarray}
An array with shape (M,) or (M,1).
Returns
-------
y : {matrix, ndarray}
A matrix or ndarray with shape (N,) or (N,1) depending
on the type and shape of the v argument.
Notes
-----
This rmatvec wraps the user-specified rmatvec routine or overridden
_rmatvec method to ensure that y has the correct shape and type.
"""
M, N = self.shape
if v.shape != (M,) and v.shape != (M, 1):
raise ValueError('dimension mismatch')
y = self._rmatvec(v)
if v.ndim == 1:
y = y.reshape(N)
elif v.ndim == 2:
y = y.reshape(N, 1)
else:
raise ValueError('invalid shape returned by user-defined rmatvec()')
return y
def _rmatvec(self, v):
""" Default implementation of _rmatvec; defers to adjoint. """
if type(self)._adjoint == LinearOperator._adjoint:
# _adjoint not overridden, prevent infinite recursion
raise NotImplementedError
return self.H().matvec(v)
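    # Editor's note (illustrative, not from the original source): ``_rmatvec`` and
    # the default adjoint defer to each other (with the guard above preventing
    # infinite recursion), so a subclass must override at least one of
    # ``_rmatvec``/``_adjoint`` before ``rmatvec`` can be used.  Hypothetical
    # example supplying ``_rmatvec`` directly:
    #
    #     class Shift(LinearOperator):          # circular shift, a real matrix
    #         def __init__(self, n):
    #             super().__init__()
    #             self.init(torch.float32, (n, n), 'cpu')
    #         def _matvec(self, v):
    #             return torch.roll(v.reshape(-1), 1)
    #         def _rmatvec(self, v):
    #             return torch.roll(v.reshape(-1), -1)
    #
    #     S = Shift(4)
    #     v = torch.arange(4.)
    #     torch.equal(S.rmatvec(v), S.H().matvec(v))   # True by construction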
def matmat(self, V):
""" Matrix-matrix multiplication.
Performs the operation y=A*V where A is an MxN linear
operator and V dense N*K matrix or ndarray.
Parameters
----------
V : {matrix, ndarray}
An array with shape (N,K).
Returns
-------
Y : {matrix, ndarray}
A matrix or ndarray with shape (M,K) depending on
the type of the V argument.
Notes
-----
This matmat wraps any user-specified matmat routine or overridden
_matmat method to ensure that y has the correct type.
"""
if V.ndim != 2:
raise ValueError(f'expected 2-d ndarray or matrix, not {V.ndim}-d')
if V.size(0) != self.size(1):
raise ValueError(f'dimension mismatch: {self.shape}, {V.shape}')
Y = self._matmat(V)
return Y
def rmatmat(self, V):
""" Adjoint matrix-matrix multiplication.
Performs the operation y = A^H * V where A is an MxN linear
operator and V is a column vector or 1-d array, or 2-d array.
The default implementation defers to the adjoint.
Parameters
----------
V : {matrix, ndarray}
A matrix or 2D array.
Returns
-------
Y : {matrix, ndarray}
A matrix or 2D array depending on the type of the input.
Notes
-----
This rmatmat wraps the user-specified rmatmat routine.
"""
if V.ndim != 2:
raise ValueError(f'expected 2-d matrix, not {V.ndim}-d')
if V.size(0) != self.size(0):
raise ValueError(f'dimension mismatch: {self.shape}, {V.shape}')
Y = self._rmatmat(V)
return Y
def _rmatmat(self, V):
""" Default implementation of _rmatmat defers to rmatvec or adjoint. """
if type(self)._adjoint == LinearOperator._adjoint:
return torch.hstack([self.rmatvec(col.reshape(-1, 1)) for col in V.T])
return self.H().matmat(V)
def forward(self, v):
""" Matrix-vector or matrix-matrix multiplication. """
return self*v
def __mul__(self, v):
return self.dot(v)
def dot(self, v):
""" Matrix-matrix or matrix-vector multiplication.
Parameters
----------
v : array_like
1-d or 2-d array, representing a vector or matrix.
Returns
-------
Av : array
1-d or 2-d array (depending on the shape of x) that represents
the result of applying this linear operator on x.
"""
if isinstance(v, LinearOperator):
return _ProductLinearOperator(self, v)
if torch.is_tensor(v):
if v.ndim == 0:
return _ScaledLinearOperator(self, v)
if v.ndim == 1 or v.ndim == 2 and v.size(1) == 1:
return self.matvec(v)
if v.ndim == 2:
return self.matmat(v)
raise ValueError(f'expected 1-d or 2-d array or matrix, got {v}')
def __matmul__(self, other):
if isscalar(other):
raise ValueError("Scalar operands are not allowed, use '*' instead")
return self.__mul__(other)
def __rmatmul__(self, other):
if isscalar(other):
raise ValueError("Scalar operands are not allowed, use '*' instead")
return self.__rmul__(other)
def __rmul__(self, x):
if isscalar(x):
return _ScaledLinearOperator(self, x)
return NotImplemented
def __pow__(self, p):
if isscalar(p):
return _PowerLinearOperator(self, p)
return NotImplemented
def __add__(self, x):
if isinstance(x, LinearOperator):
return _SumLinearOperator(self, x)
if torch.is_tensor(x) and x.ndim == 2:
return _SumLinearOperator(self, Lazy(x))
return NotImplemented
def __radd__(self, x):
return self.__add__(x)
def __neg__(self):
return _ScaledLinearOperator(self, -1)
def __sub__(self, x):
return self.__add__(-x)
def __repr__(self):
M, N = self.shape
if self.dtype is None:
dtype = 'unspecified dtype'
else:
dtype = 'dtype=' + str(self.dtype)
return f'<{M}x{N} {self.__class__.__name__} with {dtype}>'
def adjoint(self):
""" Hermitian adjoint.
Returns the Hermitian adjoint of self, aka the Hermitian
conjugate or Hermitian transpose. For a complex matrix, the
Hermitian adjoint is equal to the conjugate transpose.
Can be abbreviated self.H instead of self.adjoint().
Returns
-------
A_H : LinearOperator
Hermitian adjoint of self.
"""
return self._adjoint()
def H(self):
""" Hermitian adjoint. """
return self.adjoint()
def transpose(self):
""" Transpose this linear operator.
Returns a LinearOperator that represents the transpose of this one.
Can be abbreviated self.T instead of self.transpose().
"""
return self._transpose()
def t(self):
""" Transpose this linear operator. """
return self.transpose()
def _adjoint(self):
""" Default implementation of _adjoint; defers to rmatvec. """
return _AdjointLinearOperator(self)
def _transpose(self):
""" Default implementation of _transpose; defers to rmatvec + conj"""
return _TransposedLinearOperator(self)
def invt(self):
""" Default implementation of inverse transpose; defers to inv + T """
return (self ** -1).transpose()
def to_dense(self):
""" Default implementation of to_dense which produces the dense
matrix corresponding to the given lazy matrix. Defaults to
multiplying by the identity """
return [email protected](self.size(-1), device=self.device)
def to(self, device):
""" Move this linear operator to a new device. """
self.device = torch.empty(0).to(device).device
return self
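# --- Illustrative sketch (editor addition, not part of the original module) --
# The arithmetic dunders above never materialise a matrix: they return the
# composite wrappers referenced in ``dot``/``__add__``/``__rmul__``
# (_ProductLinearOperator, _SumLinearOperator, _ScaledLinearOperator), and work
# only happens when a vector is applied.  Assuming ``A`` and ``B`` are square
# LinearOperators of the same size:
#
#     C = 2 * A + A.H() * B              # still lazy: a tree of wrapper nodes
#     y = C @ torch.randn(C.size(1))     # matvecs of A, A.H() and B run here
#     D = C.to_dense()                   # densify explicitly only when needed
# ------------------------------------------------------------------------------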
class _CustomLinearOperator(LinearOperator):
"""Linear operator defined in terms of user-specified operations."""
def __init__(self, shape, matvec, rmatvec=None, matmat=None,
dtype=None, device=None, rmatmat=None):
super().__init__()
self.__matvec_impl = matvec
self.__rmatvec_impl = rmatvec
self.__rmatmat_impl = rmatmat
self.__matmat_impl = matmat
self.init(dtype, shape, device)
def _matmat(self, V):
if self.__matmat_impl is not None:
return self.__matmat_impl(V)
return super()._matmat(V)
def _matvec(self, v):
return self.__matvec_impl(v)
def _rmatvec(self, v):
func = self.__rmatvec_impl
if func is None:
raise NotImplementedError("rmatvec is not defined")
return self.__rmatvec_impl(v)
def _rmatmat(self, V):
if self.__rmatmat_impl is not None:
return self.__rmatmat_impl(V)
return super()._rmatmat(V)
def _adjoint(self):
return _CustomLinearOperator(shape=(self.size(1), self.size(0)),
matvec=self.__rmatvec_impl,
rmatvec=self.__matvec_impl,
matmat=self.__rmatmat_impl,
rmatmat=self.__matmat_impl,
dtype=self.dtype,
device=self.device)
class _AdjointLinearOperator(LinearOperator):
"""Adjoint of arbitrary linear operator"""
def __init__(self, A):
super().__init__()
self.A = A
self.init(dtype=A.dtype, shape=(A.size(1), A.size(0)), device=A.device)
def _matvec(self, v):
return self.A._rmatvec(v)
def _rmatvec(self, v):
return self.A._matvec(v)
def _matmat(self, V):
return self.A._rmatmat(V)
def _rmatmat(self, V):
return self.A._matmat(V)
def to(self, device):
self.A = self.A.to(device)
self.device = self.A.device
return self
class _TransposedLinearOperator(LinearOperator):
"""Transposition of arbitrary linear operator"""
def __init__(self, A):
super().__init__()
self.A = A
self.init(dtype=A.dtype, shape=(A.size(1), A.size(0)), device=A.device)
def _matvec(self, v):
# torch.conj works also on sparse matrices
return torch.conj(self.A._rmatvec(torch.conj(v)))
def _rmatvec(self, v):
return torch.conj(self.A._matvec(torch.conj(v)))
def _matmat(self, V):
# torch.conj works also on sparse matrices
return torch.conj(self.A._rmatmat(torch.conj(V)))
def _rmatmat(self, V):
return torch.conj(self.A._matmat(torch.conj(V)))
def to(self, device):
self.A = self.A.to(device)
self.device = self.A.device
return self
class _SumLinearOperator(LinearOperator):
""" Sum of two Linear Operators """
def __init__(self, A, B):
super().__init__()
if not isinstance(A, LinearOperator) or not isinstance(B, LinearOperator):
raise ValueError('both operands have to be a LinearOperator')
if A.shape != B.shape:
raise ValueError(f'cannot add {A} and {B}: shape mismatch')
self.A = A
self.B = B
self.init(get_dtype([A, B]), A.shape, A.device)
def _matvec(self, v):
return self.A.matvec(v) + self.B.matvec(v)
def _rmatvec(self, v):
return self.A.rmatvec(v) + self.B.rmatvec(v)
def _rmatmat(self, V):
return self.A.rmatmat(V) + self.B.rmatmat(V)
def _matmat(self, V):
return self.A.matmat(V) + self.B.matmat(V)
def _adjoint(self):
return self.A.H() + self.B.H()
def invt(self):
""" Inverse transpose this linear operator. """
return self.A.invt() + self.B.invt()
def to(self, device):
self.A = self.A.to(device)
self.B = self.B.to(device)
self.device = self.A.device
return self
class _ProductLinearOperator(LinearOperator):
""" Product of two Linear Operators """
def __init__(self, A, B):
super().__init__()
if not isinstance(A, LinearOperator) or not isinstance(B, LinearOperator):
raise ValueError('both operands have to be a LinearOperator')
if A.size(1) != B.size(0):
raise ValueError(f'cannot multiply {A} and {B}: shape mismatch')
self.A = A
self.B = B
self.init(get_dtype([A, B]), (A.size(0), B.size(1)), A.device)
def _matvec(self, v):
return self.A.matvec(self.B.matvec(v))
def _rmatvec(self, v):
return self.B.rmatvec(self.A.rmatvec(v))
def _rmatmat(self, V):
return self.B.rmatmat(self.A.rmatmat(V))
def _matmat(self, V):
return self.A.matmat(self.B.matmat(V))
def _adjoint(self):
return self.B.H() * self.A.H()
def invt(self):
return self.A.invt()*self.B.invt()
def to_dense(self):
A = self.A.to_dense() if isinstance(self.A, LinearOperator) else self.A
B = self.B.to_dense() if isinstance(self.B, LinearOperator) else self.B | A, B = device_cast(A, B) | 3 | 2023-11-01 07:19:02+00:00 | 4k |
AnonCatalyst/Scavenger | scavenger.py | [
{
"identifier": "get_system_info",
"path": "src/inf.py",
"snippet": "def get_system_info(target_text_widget=None):\n if target_text_widget is None:\n return \"Error: target_text_widget not provided.\"\n\n # System information\n system_info_text = \"System Information:\\n\"\n system_info_text += f\" Operating System: {platform.system()} {platform.version()}\\n\"\n system_info_text += f\" Processor: {platform.processor()} ({os.cpu_count()} cores)\\n\"\n system_info_text += f\" Architecture: {platform.architecture()[0]}\\n\"\n system_info_text += get_memory_info() + \"\\n\"\n system_info_text += get_disk_info() + \"\\n\"\n\n # Network information\n system_info_text += \"\\nNetwork Information:\\n\"\n system_info_text += f\" Hostname: {socket.gethostname()}\\n\"\n system_info_text += f\" Internal IP: {socket.gethostbyname(socket.gethostname())}\\n\"\n system_info_text += f\" External IP: {get_external_ip()}\\n\\n\"\n\n target_text_widget.setPlainText(system_info_text)"
},
{
"identifier": "check_user_in_urls",
"path": "src/usr.py",
"snippet": "def check_user_in_urls(target_username, urls):\n results = {}\n\n for url in urls:\n full_url = urljoin(url, target_username)\n\n try:\n response = requests.get(full_url, timeout=10)\n if response.status_code == 200:\n results[full_url] = \"Username Found\"\n else:\n results[full_url] = \"Username Not Found\"\n except requests.RequestException:\n results[full_url] = \"Error\"\n\n return results"
}
] | import sys
import re
import json
import httpx
import urllib3
import urllib.parse
import asyncio
import serpapi
import time
import psutil
import subprocess
import os
import requests
import logging
import warnings
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
from PyQt5.QtWidgets import QApplication, QWidget, QVBoxLayout, QLabel, QTabWidget, QPlainTextEdit
from PyQt5.QtCore import Qt
from PyQt5 import QtGui
from PyQt5.QtGui import QPalette
from PyQt5.QtWidgets import QStackedWidget, QStackedLayout, QSizePolicy
from PyQt5.QtWebEngineWidgets import QWebEngineView
from PyQt5.QtCore import QUrl
from PyQt5.QtWidgets import QSpacerItem, QSizePolicy
from PyQt5.QtWidgets import QMessageBox
from PyQt5.QtWidgets import QPushButton, QHBoxLayout
from PyQt5.QtCore import QThread, pyqtSignal
from PyQt5.QtWebEngineWidgets import QWebEngineSettings
from PyQt5.QtWebEngineWidgets import QWebEngineView, QWebEnginePage
from PyQt5.QtCore import QUrl
from PyQt5.QtWidgets import QTextEdit
from PyQt5.QtWidgets import QLineEdit
from src.inf import get_system_info
from PyQt5.QtCore import QObject, pyqtSignal
from PyQt5.QtWidgets import QTextBrowser
from PyQt5.QtGui import QPixmap
from PyQt5.QtWidgets import QMainWindow, QDesktopWidget
from src.usr import check_user_in_urls
from requests.exceptions import RequestException, ConnectionError, TooManyRedirects, SSLError
from colorama import Fore
from datetime import datetime
from PyQt5.QtWidgets import QApplication, QWidget, QVBoxLayout, QPushButton, QGraphicsBlurEffect
from PyQt5.QtCore import QTimer
from PyQt5.QtWidgets import QTextEdit
from PyQt5.QtWidgets import QFrame
from PyQt5.QtGui import QColor
from PyQt5.QtCore import QTimer, QPropertyAnimation, QEasingCurve, QPoint
from PyQt5.QtWidgets import QWidget, QVBoxLayout, QPushButton, QStackedWidget, QLabel, QDesktopWidget, QMainWindow, QApplication
from PyQt5.QtGui import QPixmap, QPalette, QBrush, QImage, QDesktopServices
from PyQt5.QtWidgets import QMenu, QAction
from PyQt5.QtWidgets import QGridLayout | 3,235 | # Add widgets to the layouts
results_layout.addWidget(self.result_text)
errors_layout.addWidget(self.error_text)
log_layout.addWidget(self.log_text)
# Set the background color for the text boxes in all tabs
for text_edit in [self.result_text, self.error_text, self.log_text]:
text_edit.setStyleSheet("background-color: #303030; color: white; border: 1px solid #cyan;")
# Add layouts to the corresponding tabs
results_tab.setLayout(results_layout)
errors_tab.setLayout(errors_layout)
log_tab.setLayout(log_layout)
# Add the tab widget to the main layout
layout.addWidget(tabs)
self.setLayout(layout)
for widget in [self.username_input, self.result_text, self.error_text, self.log_text]:
widget.setStyleSheet("background-color: #303030; color: white; border: 1px solid #cyan;")
def run_user_search(self):
target_username = self.username_input.text()
if not target_username:
QMessageBox.warning(self, "Warning", "Please enter a target username.")
return
# Create an instance of the username search thread and pass the target_username and url_list
url_list = self.load_urls_from_file()
self.search_thread = UserSearchThread(target_username, url_list)
self.search_thread.search_result.connect(self.display_username_search_result)
self.search_thread.error.connect(self.display_error)
self.search_thread.log.connect(self.display_log)
# Start the search thread
self.search_thread.start()
self.display_username_search_result("Searching for user in URLs...")
def display_username_search_result(self, result):
self.result_text.append(result)
def display_error(self, error):
self.error_text.append(error)
def display_log(self, log):
self.log_text.append(log)
def load_urls_from_file(self):
try:
with open("src/urls.txt", "r") as f:
return [x.strip() for x in f.readlines()]
except FileNotFoundError:
QMessageBox.warning(self, "Warning", "URLs file (src/urls.txt) not found.")
return []
class HomeWindow(QWidget):
def __init__(self):
super().__init__()
self.init_ui()
def init_ui(self):
main_layout = QVBoxLayout()
# Create a QHBoxLayout for the widgets on the right side (image, name, and bio)
right_layout = QHBoxLayout()
# Create a QLabel for displaying the image
image_label = QLabel(self)
pixmap = QPixmap('img/discord.jpg') # Replace 'img/profile_image.jpg' with the actual path to your image
pixmap = pixmap.scaledToWidth(100) # Set the desired width
image_label.setPixmap(pixmap)
image_label.setAlignment(Qt.AlignCenter) # Center the image
# Create a QVBoxLayout for the right side (name and bio)
text_layout = QVBoxLayout()
# Create a QLabel for displaying the name
name_label = QLabel('Scavenger Osint GUI')
name_label.setAlignment(Qt.AlignCenter) # Center the text
# Create a QTextEdit for displaying the bio
bio_box = QTextEdit()
bio_box.setReadOnly(True)
bio_box.setStyleSheet("background-color: #303030; color: white; border: 1px solid #cyan;")
# Read content from bio.txt file and set it to the bio box
try:
with open('src/bio.txt', 'r') as file:
bio_text = file.read()
bio_box.setPlainText(bio_text)
except FileNotFoundError:
bio_box.setPlainText("Bio file not found.")
# Add name and bio widgets to the text layout
text_layout.addWidget(name_label)
text_layout.addWidget(bio_box)
# Add image and text layout to the right layout
right_layout.addWidget(image_label)
right_layout.addLayout(text_layout)
# Add the right layout to the main layout
main_layout.addLayout(right_layout)
# Create a scrollable box for displaying system information
info_box = QPlainTextEdit()
info_box.setReadOnly(True)
info_box.setStyleSheet("background-color: #303030; color: white; border: 1px solid #cyan;")
# Add the info box to the main layout
main_layout.addWidget(info_box)
self.setLayout(main_layout)
# Get and display system information
|
# Add these lines before the class definitions where the warnings occur
warnings.filterwarnings("ignore", category=DeprecationWarning)
# Configure the logging module
logging.basicConfig(filename='src/maigret.log', level=logging.INFO, format='%(asctime)s [%(levelname)s]: %(message)s')
logger = logging.getLogger(__name__)
os.system("clear")
# Initialize UserAgent object
user_agent = UserAgent()
# Define headers with a fake user agent
headers = {
'User-Agent': user_agent.random,
'Accept-Language': 'en-US,en;q=0.5',
# Add any other headers you may need
}
# Set up the 'header' variable
header = headers
# Disable urllib3 warnings
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# Load social platform patterns from a JSON file
with open("src/social_platforms.json", "r") as json_file:
social_platforms = json.load(json_file)
found_social_profiles = set()
found_forum_pages = set()
class GoogleSearchError(Exception):
pass
class MaigretSearchThread(QThread):
maigret_finished = pyqtSignal(str)
log_message = pyqtSignal(str)
def __init__(self, username):
super().__init__()
self.username = username
self.start_time = None
def run(self):
self.start_time = datetime.now()
# Log the start of the Maigret process
self.log_message.emit(f"Maigret process started for username: {self.username}")
try:
# Run the Maigret command with the inputted username
command = f"python3 src/maigret/maigret.py {self.username} -a"
result = os.popen(command).read()
# Log the end of the Maigret process
self.log_message.emit(f"Maigret process ended for username: {self.username}")
# Log the duration of the Maigret process
end_time = datetime.now()
duration = end_time - self.start_time
self.log_message.emit(f"Maigret process took {duration}")
self.maigret_finished.emit(result)
except Exception as e:
error_message = f"Error in MaigretSearchThread: {str(e)}"
self.log_message.emit(error_message)
self.maigret_finished.emit(error_message)
class MaigretSearchGUI(QWidget):
def __init__(self):
super().__init__()
self.username_input = QLineEdit()
self.maigret_result_text = QTextEdit()
self.log_text = QTextEdit()
self.maigret_thread = None # Initialize maigret_thread as None
self.maigret_timer = QTimer()
self.maigret_timer.timeout.connect(self.update_maigret_status)
# Set the interval to 15 seconds (15000 milliseconds)
self.maigret_timer.start(15000)
self.init_ui()
def init_ui(self):
layout = QVBoxLayout()
tab_widget = QTabWidget()
# Create tabs
maigret_tab = QWidget()
log_tab = QWidget()
tab_widget.addTab(maigret_tab, "Maigret Results")
tab_widget.addTab(log_tab, "Logs")
# Layouts for each tab
maigret_layout = QVBoxLayout(maigret_tab)
log_layout = QVBoxLayout(log_tab)
# Maigret tab content
label_username = QLabel("Enter target username:")
maigret_layout.addWidget(label_username)
maigret_layout.addWidget(self.username_input)
search_button = QPushButton("- ᴄʟɪᴄᴋ ʜᴇʀᴇ ᴛᴏ ꜱᴛᴀʀᴛ -")
search_button.clicked.connect(self.run_maigret_search)
maigret_layout.addWidget(search_button)
maigret_layout.addWidget(self.maigret_result_text)
# Log tab content
log_layout.addWidget(self.log_text)
# Set the background color and border style for the input boxes and result box
for widget in [self.username_input, self.maigret_result_text, self.log_text]:
widget.setStyleSheet("background-color: #303030; color: white; border: 1px solid #cyan;")
layout.addWidget(tab_widget)
self.setLayout(layout)
def run_maigret_search(self):
username = self.username_input.text()
if not username:
QMessageBox.warning(self, "Warning", "Please enter a username.")
return
# Create an instance of the Maigret search thread and pass the username
self.maigret_thread = MaigretSearchThread(username)
self.maigret_thread.maigret_finished.connect(self.display_maigret_results)
self.maigret_thread.log_message.connect(self.display_log)
# Start the Maigret search thread
self.maigret_thread.start()
# Start the timer to update the Maigret status in the log every 15 seconds
self.maigret_timer.start()
self.display_maigret_results("""Searching with Maigret...
~~~~~~~~~~~~~~~~~~~~~~~~~
This can take a while depending on your network speed;
the estimated wait time is around 5 to 7 minutes.""")
print("""
[Scavenger-Osint-GUI] User Interaction: (Maigret Usersearch) Started...
- Estimated wait time is about 5 to 7 minutes!""")
def update_maigret_status(self):
if self.maigret_thread and self.maigret_thread.isRunning():
# Calculate the duration and notify the user
current_time = datetime.now()
duration = current_time - self.maigret_thread.start_time
self.display_log(f"Maigret is still running. Please wait. Duration: {duration}")
else:
# If the thread is not running, stop the timer
self.maigret_timer.stop()
def display_maigret_results(self, result):
# Display the result in the Maigret results tab
self.maigret_result_text.setPlainText(result)
def display_log(self, log_message):
# Display log messages in the "Logs" tab
self.log_text.append(log_message)
def closeEvent(self, event):
# Save the Maigret results when the window is closed
maigret_results = self.maigret_result_text.toPlainText()
with open("reports/maigret_results.txt", "w") as f:
f.write(maigret_results)
event.accept()
def showEvent(self, event):
# Load the saved Maigret results when the window is shown
try:
with open("reports/maigret_results.txt", "r") as f:
maigret_results = f.read()
self.maigret_result_text.setPlainText(maigret_results)
except FileNotFoundError:
pass
event.accept()
os.system("rm -rf reports")
class UserSearchThread(QThread):
# Add error signal
search_result = pyqtSignal(str)
error = pyqtSignal(str)
log = pyqtSignal(str)
def __init__(self, username, url_list):
super().__init__()
self.username = username
self.url_list = url_list
def run(self):
for url in self.url_list:
url = urllib.parse.urljoin(url, self.username)
try:
s = requests.Session()
s.headers.update(headers)
response = s.get(url, allow_redirects=False, timeout=5)
if response.status_code == 200 and self.username.lower() in response.text.lower():
result = f"• {self.username} | [✓] URL: {url} {response.status_code}"
# Emit the search result through the signal
self.search_result.emit(result)
except (ConnectionError, TooManyRedirects, RequestException, SSLError, TimeoutError) as e:
# Emit the error through the signal
self.error.emit(f"Error during search for user in {url}: {str(e)}")
except Exception as e:
# Emit the error through the signal
self.error.emit(f"Unexpected error during search for user in {url}: {str(e)}")
finally:
# Emit log message
self.log.emit(f"Search for user in {url} completed.")
class UserSearchGUI(QWidget):
def __init__(self):
super().__init__()
self.username_input = QLineEdit()
self.result_text = QTextEdit()
self.error_text = QTextEdit()
self.log_text = QTextEdit()
self.search_thread = None # Initialize search_thread as None
self.init_ui()
def init_ui(self):
layout = QVBoxLayout()
label_username = QLabel("Enter target username:")
layout.addWidget(label_username)
layout.addWidget(self.username_input)
search_button = QPushButton("- ᴄʟɪᴄᴋ ʜᴇʀᴇ ᴛᴏ ꜱᴛᴀʀᴛ -")
search_button.clicked.connect(self.run_user_search)
layout.addWidget(search_button)
# Create a tab widget
tabs = QTabWidget()
# Create tabs for results, errors, and logging
results_tab = QWidget()
errors_tab = QWidget()
log_tab = QWidget()
# Add the tabs to the tab widget
tabs.addTab(results_tab, "Results")
tabs.addTab(errors_tab, "Errors")
tabs.addTab(log_tab, "Logging")
# Set layouts for tabs
results_layout = QVBoxLayout(results_tab)
errors_layout = QVBoxLayout(errors_tab)
log_layout = QVBoxLayout(log_tab)
# Add widgets to the layouts
results_layout.addWidget(self.result_text)
errors_layout.addWidget(self.error_text)
log_layout.addWidget(self.log_text)
# Set the background color for the text boxes in all tabs
for text_edit in [self.result_text, self.error_text, self.log_text]:
text_edit.setStyleSheet("background-color: #303030; color: white; border: 1px solid #cyan;")
# Add layouts to the corresponding tabs
results_tab.setLayout(results_layout)
errors_tab.setLayout(errors_layout)
log_tab.setLayout(log_layout)
# Add the tab widget to the main layout
layout.addWidget(tabs)
self.setLayout(layout)
for widget in [self.username_input, self.result_text, self.error_text, self.log_text]:
widget.setStyleSheet("background-color: #303030; color: white; border: 1px solid #cyan;")
def run_user_search(self):
target_username = self.username_input.text()
if not target_username:
QMessageBox.warning(self, "Warning", "Please enter a target username.")
return
# Create an instance of the username search thread and pass the target_username and url_list
url_list = self.load_urls_from_file()
self.search_thread = UserSearchThread(target_username, url_list)
self.search_thread.search_result.connect(self.display_username_search_result)
self.search_thread.error.connect(self.display_error)
self.search_thread.log.connect(self.display_log)
# Start the search thread
self.search_thread.start()
self.display_username_search_result("Searching for user in URLs...")
def display_username_search_result(self, result):
self.result_text.append(result)
def display_error(self, error):
self.error_text.append(error)
def display_log(self, log):
self.log_text.append(log)
def load_urls_from_file(self):
try:
with open("src/urls.txt", "r") as f:
return [x.strip() for x in f.readlines()]
except FileNotFoundError:
QMessageBox.warning(self, "Warning", "URLs file (src/urls.txt) not found.")
return []
class HomeWindow(QWidget):
def __init__(self):
super().__init__()
self.init_ui()
def init_ui(self):
main_layout = QVBoxLayout()
# Create a QHBoxLayout for the widgets on the right side (image, name, and bio)
right_layout = QHBoxLayout()
# Create a QLabel for displaying the image
image_label = QLabel(self)
pixmap = QPixmap('img/discord.jpg') # Replace 'img/profile_image.jpg' with the actual path to your image
pixmap = pixmap.scaledToWidth(100) # Set the desired width
image_label.setPixmap(pixmap)
image_label.setAlignment(Qt.AlignCenter) # Center the image
# Create a QVBoxLayout for the right side (name and bio)
text_layout = QVBoxLayout()
# Create a QLabel for displaying the name
name_label = QLabel('Scavenger Osint GUI')
name_label.setAlignment(Qt.AlignCenter) # Center the text
# Create a QTextEdit for displaying the bio
bio_box = QTextEdit()
bio_box.setReadOnly(True)
bio_box.setStyleSheet("background-color: #303030; color: white; border: 1px solid #cyan;")
# Read content from bio.txt file and set it to the bio box
try:
with open('src/bio.txt', 'r') as file:
bio_text = file.read()
bio_box.setPlainText(bio_text)
except FileNotFoundError:
bio_box.setPlainText("Bio file not found.")
# Add name and bio widgets to the text layout
text_layout.addWidget(name_label)
text_layout.addWidget(bio_box)
# Add image and text layout to the right layout
right_layout.addWidget(image_label)
right_layout.addLayout(text_layout)
# Add the right layout to the main layout
main_layout.addLayout(right_layout)
# Create a scrollable box for displaying system information
info_box = QPlainTextEdit()
info_box.setReadOnly(True)
info_box.setStyleSheet("background-color: #303030; color: white; border: 1px solid #cyan;")
# Add the info box to the main layout
main_layout.addWidget(info_box)
self.setLayout(main_layout)
# Get and display system information | get_system_info(info_box) | 0 | 2023-11-02 06:46:11+00:00 | 4k |
xenxxxx/BitPay-Crypto-Signal-Trading-Bot | tests/test_configuration.py | [
{
"identifier": "CURRENT_TEST_STRATEGY",
"path": "tests/conftest.py",
"snippet": "CURRENT_TEST_STRATEGY = 'StrategyTestV3'"
},
{
"identifier": "log_has",
"path": "tests/conftest.py",
"snippet": "def log_has(line, logs):\n \"\"\"Check if line is found on some caplog's message.\"\"\"\n return any(line == message for message in logs.messages)"
},
{
"identifier": "log_has_re",
"path": "tests/conftest.py",
"snippet": "def log_has_re(line, logs):\n \"\"\"Check if line matches some caplog's message.\"\"\"\n return any(re.match(line, message) for message in logs.messages)"
},
{
"identifier": "patched_configuration_load_config_file",
"path": "tests/conftest.py",
"snippet": "def patched_configuration_load_config_file(mocker, config) -> None:\n mocker.patch(\n 'freqtrade.configuration.load_config.load_config_file',\n lambda *args, **kwargs: config\n )"
}
] | import json
import warnings
import pytest
from copy import deepcopy
from pathlib import Path
from unittest.mock import MagicMock
from jsonschema import ValidationError
from freqtrade.commands import Arguments
from freqtrade.configuration import Configuration, validate_config_consistency
from freqtrade.configuration.config_validation import validate_config_schema
from freqtrade.configuration.deprecated_settings import (check_conflicting_settings,
process_deprecated_setting,
process_removed_setting,
process_temporary_deprecated_settings)
from freqtrade.configuration.environment_vars import flat_vars_to_nested_dict
from freqtrade.configuration.load_config import (load_config_file, load_file, load_from_files,
log_config_error_range)
from freqtrade.constants import DEFAULT_DB_DRYRUN_URL, DEFAULT_DB_PROD_URL, ENV_VAR_PREFIX
from freqtrade.enums import RunMode
from freqtrade.exceptions import OperationalException
from tests.conftest import (CURRENT_TEST_STRATEGY, log_has, log_has_re,
patched_configuration_load_config_file) | 2,864 |
conf = deepcopy(default_conf)
conf['order_types']['exit'] = 'market'
with pytest.raises(OperationalException,
match='Market exit orders require exit_pricing.price_side = "other".'):
validate_config_consistency(conf)
# Validate inversed case
conf = deepcopy(default_conf)
conf['order_types']['exit'] = 'market'
conf['order_types']['entry'] = 'market'
conf['exit_pricing']['price_side'] = 'bid'
conf['entry_pricing']['price_side'] = 'ask'
validate_config_consistency(conf)
def test_validate_tsl(default_conf):
default_conf['stoploss'] = 0.0
with pytest.raises(OperationalException, match='The config stoploss needs to be different '
'from 0 to avoid problems with sell orders.'):
validate_config_consistency(default_conf)
default_conf['stoploss'] = -0.10
default_conf['trailing_stop'] = True
default_conf['trailing_stop_positive'] = 0
default_conf['trailing_stop_positive_offset'] = 0
default_conf['trailing_only_offset_is_reached'] = True
with pytest.raises(OperationalException,
match=r'The config trailing_only_offset_is_reached needs '
'trailing_stop_positive_offset to be more than 0 in your config.'):
validate_config_consistency(default_conf)
default_conf['trailing_stop_positive_offset'] = 0.01
default_conf['trailing_stop_positive'] = 0.015
with pytest.raises(OperationalException,
match=r'The config trailing_stop_positive_offset needs '
'to be greater than trailing_stop_positive in your config.'):
validate_config_consistency(default_conf)
default_conf['trailing_stop_positive'] = 0.01
default_conf['trailing_stop_positive_offset'] = 0.015
validate_config_consistency(default_conf)
# 0 trailing stop positive - results in "Order would trigger immediately"
default_conf['trailing_stop_positive'] = 0
default_conf['trailing_stop_positive_offset'] = 0.02
default_conf['trailing_only_offset_is_reached'] = False
with pytest.raises(OperationalException,
match='The config trailing_stop_positive needs to be different from 0 '
'to avoid problems with sell orders'):
validate_config_consistency(default_conf)
def test_validate_edge2(edge_conf):
edge_conf.update({
"use_exit_signal": True,
})
# Passes test
validate_config_consistency(edge_conf)
edge_conf.update({
"use_exit_signal": False,
})
with pytest.raises(OperationalException, match="Edge requires `use_exit_signal` to be True, "
"otherwise no sells will happen."):
validate_config_consistency(edge_conf)
def test_validate_whitelist(default_conf):
default_conf['runmode'] = RunMode.DRY_RUN
# Test regular case - has whitelist and uses StaticPairlist
validate_config_consistency(default_conf)
conf = deepcopy(default_conf)
del conf['exchange']['pair_whitelist']
# Test error case
with pytest.raises(OperationalException,
match="StaticPairList requires pair_whitelist to be set."):
validate_config_consistency(conf)
conf = deepcopy(default_conf)
conf.update({"pairlists": [{
"method": "VolumePairList",
}]})
# Dynamic whitelist should not care about pair_whitelist
validate_config_consistency(conf)
del conf['exchange']['pair_whitelist']
validate_config_consistency(conf)
@pytest.mark.parametrize('protconf,expected', [
([], None),
([{"method": "StoplossGuard", "lookback_period": 2000, "stop_duration_candles": 10}], None),
([{"method": "StoplossGuard", "lookback_period_candles": 20, "stop_duration": 10}], None),
([{"method": "StoplossGuard", "lookback_period_candles": 20, "lookback_period": 2000,
"stop_duration": 10}], r'Protections must specify either `lookback_period`.*'),
([{"method": "StoplossGuard", "lookback_period": 20, "stop_duration": 10,
"stop_duration_candles": 10}], r'Protections must specify either `stop_duration`.*'),
])
def test_validate_protections(default_conf, protconf, expected):
conf = deepcopy(default_conf)
conf['protections'] = protconf
if expected:
with pytest.raises(OperationalException, match=expected):
validate_config_consistency(conf)
else:
validate_config_consistency(conf)
def test_validate_ask_orderbook(default_conf, caplog) -> None:
conf = deepcopy(default_conf)
conf['exit_pricing']['use_order_book'] = True
conf['exit_pricing']['order_book_min'] = 2
conf['exit_pricing']['order_book_max'] = 2
validate_config_consistency(conf)
| # pragma pylint: disable=missing-docstring, protected-access, invalid-name
@pytest.fixture(scope="function")
def all_conf():
config_file = Path(__file__).parents[1] / "config_examples/config_full.example.json"
conf = load_config_file(str(config_file))
return conf
def test_load_config_missing_attributes(default_conf) -> None:
conf = deepcopy(default_conf)
conf.pop('exchange')
with pytest.raises(ValidationError, match=r".*'exchange' is a required property.*"):
validate_config_schema(conf)
conf = deepcopy(default_conf)
conf.pop('stake_currency')
conf['runmode'] = RunMode.DRY_RUN
with pytest.raises(ValidationError, match=r".*'stake_currency' is a required property.*"):
validate_config_schema(conf)
def test_load_config_incorrect_stake_amount(default_conf) -> None:
default_conf['stake_amount'] = 'fake'
with pytest.raises(ValidationError, match=r".*'fake' does not match 'unlimited'.*"):
validate_config_schema(default_conf)
def test_load_config_file(default_conf, mocker, caplog) -> None:
del default_conf['user_data_dir']
default_conf['datadir'] = str(default_conf['datadir'])
file_mock = mocker.patch('freqtrade.configuration.load_config.Path.open', mocker.mock_open(
read_data=json.dumps(default_conf)
))
validated_conf = load_config_file('somefile')
assert file_mock.call_count == 1
assert validated_conf.items() >= default_conf.items()
def test_load_config_file_error(default_conf, mocker, caplog) -> None:
del default_conf['user_data_dir']
default_conf['datadir'] = str(default_conf['datadir'])
filedata = json.dumps(default_conf).replace(
'"stake_amount": 0.001,', '"stake_amount": .001,')
mocker.patch('freqtrade.configuration.load_config.Path.open',
mocker.mock_open(read_data=filedata))
mocker.patch.object(Path, "read_text", MagicMock(return_value=filedata))
with pytest.raises(OperationalException, match=r".*Please verify the following segment.*"):
load_config_file('somefile')
def test_load_config_file_error_range(default_conf, mocker, caplog) -> None:
del default_conf['user_data_dir']
default_conf['datadir'] = str(default_conf['datadir'])
filedata = json.dumps(default_conf).replace(
'"stake_amount": 0.001,', '"stake_amount": .001,')
mocker.patch.object(Path, "read_text", MagicMock(return_value=filedata))
x = log_config_error_range('somefile', 'Parse error at offset 64: Invalid value.')
assert isinstance(x, str)
assert (x == '{"max_open_trades": 1, "stake_currency": "BTC", '
'"stake_amount": .001, "fiat_display_currency": "USD", '
'"timeframe": "5m", "dry_run": true, "cance')
filedata = json.dumps(default_conf, indent=2).replace(
'"stake_amount": 0.001,', '"stake_amount": .001,')
mocker.patch.object(Path, "read_text", MagicMock(return_value=filedata))
x = log_config_error_range('somefile', 'Parse error at offset 4: Invalid value.')
assert isinstance(x, str)
assert (x == ' "max_open_trades": 1,\n "stake_currency": "BTC",\n'
' "stake_amount": .001,')
x = log_config_error_range('-', '')
assert x == ''
def test_load_file_error(tmpdir):
testpath = Path(tmpdir) / 'config.json'
with pytest.raises(OperationalException, match=r"File .* not found!"):
load_file(testpath)
def test__args_to_config(caplog):
arg_list = ['trade', '--strategy-path', 'TestTest']
args = Arguments(arg_list).get_parsed_arg()
configuration = Configuration(args)
config = {}
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
# No warnings ...
configuration._args_to_config(config, argname="strategy_path", logstring="DeadBeef")
assert len(w) == 0
assert log_has("DeadBeef", caplog)
assert config['strategy_path'] == "TestTest"
configuration = Configuration(args)
config = {}
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
# Deprecation warnings!
configuration._args_to_config(config, argname="strategy_path", logstring="DeadBeef",
deprecated_msg="Going away soon!")
assert len(w) == 1
assert issubclass(w[-1].category, DeprecationWarning)
assert "DEPRECATED: Going away soon!" in str(w[-1].message)
assert log_has("DeadBeef", caplog)
assert config['strategy_path'] == "TestTest"
def test_load_config_max_open_trades_zero(default_conf, mocker, caplog) -> None:
default_conf['max_open_trades'] = 0
patched_configuration_load_config_file(mocker, default_conf)
args = Arguments(['trade']).get_parsed_arg()
configuration = Configuration(args)
validated_conf = configuration.load_config()
assert validated_conf['max_open_trades'] == 0
assert 'internals' in validated_conf
def test_load_config_combine_dicts(default_conf, mocker, caplog) -> None:
conf1 = deepcopy(default_conf)
conf2 = deepcopy(default_conf)
del conf1['exchange']['key']
del conf1['exchange']['secret']
del conf2['exchange']['name']
conf2['exchange']['pair_whitelist'] += ['NANO/BTC']
config_files = [conf1, conf2]
configsmock = MagicMock(side_effect=config_files)
mocker.patch(
'freqtrade.configuration.load_config.load_config_file',
configsmock
)
arg_list = ['trade', '-c', 'test_conf.json', '--config', 'test2_conf.json', ]
args = Arguments(arg_list).get_parsed_arg()
configuration = Configuration(args)
validated_conf = configuration.load_config()
exchange_conf = default_conf['exchange']
assert validated_conf['exchange']['name'] == exchange_conf['name']
assert validated_conf['exchange']['key'] == exchange_conf['key']
assert validated_conf['exchange']['secret'] == exchange_conf['secret']
assert validated_conf['exchange']['pair_whitelist'] != conf1['exchange']['pair_whitelist']
assert validated_conf['exchange']['pair_whitelist'] == conf2['exchange']['pair_whitelist']
assert 'internals' in validated_conf
def test_from_config(default_conf, mocker, caplog) -> None:
conf1 = deepcopy(default_conf)
conf2 = deepcopy(default_conf)
del conf1['exchange']['key']
del conf1['exchange']['secret']
del conf2['exchange']['name']
conf2['exchange']['pair_whitelist'] += ['NANO/BTC']
conf2['fiat_display_currency'] = "EUR"
config_files = [conf1, conf2]
mocker.patch('freqtrade.configuration.configuration.create_datadir', lambda c, x: x)
configsmock = MagicMock(side_effect=config_files)
mocker.patch('freqtrade.configuration.load_config.load_config_file', configsmock)
validated_conf = Configuration.from_files(['test_conf.json', 'test2_conf.json'])
exchange_conf = default_conf['exchange']
assert validated_conf['exchange']['name'] == exchange_conf['name']
assert validated_conf['exchange']['key'] == exchange_conf['key']
assert validated_conf['exchange']['secret'] == exchange_conf['secret']
assert validated_conf['exchange']['pair_whitelist'] != conf1['exchange']['pair_whitelist']
assert validated_conf['exchange']['pair_whitelist'] == conf2['exchange']['pair_whitelist']
assert validated_conf['fiat_display_currency'] == "EUR"
assert 'internals' in validated_conf
assert isinstance(validated_conf['user_data_dir'], Path)
def test_from_recursive_files(testdatadir) -> None:
files = testdatadir / "testconfigs/testconfig.json"
conf = Configuration.from_files([files])
assert conf
# Exchange comes from "the first config"
assert conf['exchange']
# Pricing comes from the 2nd config
assert conf['entry_pricing']
assert conf['entry_pricing']['price_side'] == "same"
assert conf['exit_pricing']
# The other key comes from pricing2, which is imported by pricing.json.
# pricing.json is a level higher, therefore wins.
assert conf['exit_pricing']['price_side'] == "same"
assert len(conf['config_files']) == 4
assert 'testconfig.json' in conf['config_files'][0]
assert 'test_pricing_conf.json' in conf['config_files'][1]
assert 'test_base_config.json' in conf['config_files'][2]
assert 'test_pricing2_conf.json' in conf['config_files'][3]
files = testdatadir / "testconfigs/recursive.json"
with pytest.raises(OperationalException, match="Config loop detected."):
load_from_files([files])
def test_print_config(default_conf, mocker, caplog) -> None:
conf1 = deepcopy(default_conf)
# Delete non-json elements from default_conf
del conf1['user_data_dir']
conf1['datadir'] = str(conf1['datadir'])
config_files = [conf1]
configsmock = MagicMock(side_effect=config_files)
mocker.patch('freqtrade.configuration.configuration.create_datadir', lambda c, x: x)
mocker.patch('freqtrade.configuration.configuration.load_from_files', configsmock)
validated_conf = Configuration.from_files(['test_conf.json'])
assert isinstance(validated_conf['user_data_dir'], Path)
assert "user_data_dir" in validated_conf
assert "original_config" in validated_conf
assert isinstance(json.dumps(validated_conf['original_config']), str)
def test_load_config_max_open_trades_minus_one(default_conf, mocker, caplog) -> None:
default_conf['max_open_trades'] = -1
patched_configuration_load_config_file(mocker, default_conf)
args = Arguments(['trade']).get_parsed_arg()
configuration = Configuration(args)
validated_conf = configuration.load_config()
assert validated_conf['max_open_trades'] > 999999999
assert validated_conf['max_open_trades'] == float('inf')
assert "runmode" in validated_conf
assert validated_conf['runmode'] == RunMode.DRY_RUN
def test_load_config_file_exception(mocker) -> None:
mocker.patch(
'freqtrade.configuration.configuration.Path.open',
MagicMock(side_effect=FileNotFoundError('File not found'))
)
with pytest.raises(OperationalException, match=r'.*Config file "somefile" not found!*'):
load_config_file('somefile')
def test_load_config(default_conf, mocker) -> None:
del default_conf['strategy_path']
patched_configuration_load_config_file(mocker, default_conf)
args = Arguments(['trade']).get_parsed_arg()
configuration = Configuration(args)
validated_conf = configuration.load_config()
assert validated_conf.get('strategy_path') is None
assert 'edge' not in validated_conf
def test_load_config_with_params(default_conf, mocker) -> None:
patched_configuration_load_config_file(mocker, default_conf)
arglist = [
'trade',
'--strategy', 'TestStrategy',
'--strategy-path', '/some/path',
'--db-url', 'sqlite:///someurl',
]
args = Arguments(arglist).get_parsed_arg()
configuration = Configuration(args)
validated_conf = configuration.load_config()
assert validated_conf.get('strategy') == 'TestStrategy'
assert validated_conf.get('strategy_path') == '/some/path'
assert validated_conf.get('db_url') == 'sqlite:///someurl'
# Test conf provided db_url prod
conf = default_conf.copy()
conf["dry_run"] = False
conf["db_url"] = "sqlite:///path/to/db.sqlite"
patched_configuration_load_config_file(mocker, conf)
arglist = [
'trade',
'--strategy', 'TestStrategy',
'--strategy-path', '/some/path'
]
args = Arguments(arglist).get_parsed_arg()
configuration = Configuration(args)
validated_conf = configuration.load_config()
assert validated_conf.get('db_url') == "sqlite:///path/to/db.sqlite"
# Test conf provided db_url dry_run
conf = default_conf.copy()
conf["dry_run"] = True
conf["db_url"] = "sqlite:///path/to/db.sqlite"
patched_configuration_load_config_file(mocker, conf)
arglist = [
'trade',
'--strategy', 'TestStrategy',
'--strategy-path', '/some/path'
]
args = Arguments(arglist).get_parsed_arg()
configuration = Configuration(args)
validated_conf = configuration.load_config()
assert validated_conf.get('db_url') == "sqlite:///path/to/db.sqlite"
# Test args provided db_url prod
conf = default_conf.copy()
conf["dry_run"] = False
del conf["db_url"]
patched_configuration_load_config_file(mocker, conf)
arglist = [
'trade',
'--strategy', 'TestStrategy',
'--strategy-path', '/some/path'
]
args = Arguments(arglist).get_parsed_arg()
configuration = Configuration(args)
validated_conf = configuration.load_config()
assert validated_conf.get('db_url') == DEFAULT_DB_PROD_URL
assert "runmode" in validated_conf
assert validated_conf['runmode'] == RunMode.LIVE
# Test args provided db_url dry_run
conf = default_conf.copy()
conf["dry_run"] = True
conf["db_url"] = DEFAULT_DB_PROD_URL
patched_configuration_load_config_file(mocker, conf)
arglist = [
'trade',
'--strategy', 'TestStrategy',
'--strategy-path', '/some/path'
]
args = Arguments(arglist).get_parsed_arg()
configuration = Configuration(args)
validated_conf = configuration.load_config()
assert validated_conf.get('db_url') == DEFAULT_DB_DRYRUN_URL
@pytest.mark.parametrize("config_value,expected,arglist", [
(True, True, ['trade', '--dry-run']), # Leave config untouched
(False, True, ['trade', '--dry-run']), # Override config untouched
(False, False, ['trade']), # Leave config untouched
(True, True, ['trade']), # Leave config untouched
])
def test_load_dry_run(default_conf, mocker, config_value, expected, arglist) -> None:
default_conf['dry_run'] = config_value
patched_configuration_load_config_file(mocker, default_conf)
configuration = Configuration(Arguments(arglist).get_parsed_arg())
validated_conf = configuration.load_config()
assert validated_conf['dry_run'] is expected
assert validated_conf['runmode'] == (RunMode.DRY_RUN if expected else RunMode.LIVE)
def test_load_custom_strategy(default_conf, mocker) -> None:
default_conf.update({
'strategy': 'CustomStrategy',
'strategy_path': '/tmp/strategies',
})
patched_configuration_load_config_file(mocker, default_conf)
args = Arguments(['trade']).get_parsed_arg()
configuration = Configuration(args)
validated_conf = configuration.load_config()
assert validated_conf.get('strategy') == 'CustomStrategy'
assert validated_conf.get('strategy_path') == '/tmp/strategies'
def test_show_info(default_conf, mocker, caplog) -> None:
patched_configuration_load_config_file(mocker, default_conf)
arglist = [
'trade',
'--strategy', 'TestStrategy',
'--db-url', 'sqlite:///tmp/testdb',
]
args = Arguments(arglist).get_parsed_arg()
configuration = Configuration(args)
configuration.get_config()
assert log_has('Using DB: "sqlite:///tmp/testdb"', caplog)
assert log_has('Dry run is enabled', caplog)
def test_setup_configuration_without_arguments(mocker, default_conf, caplog) -> None:
patched_configuration_load_config_file(mocker, default_conf)
arglist = [
'backtesting',
'--config', 'config.json',
'--strategy', CURRENT_TEST_STRATEGY,
]
args = Arguments(arglist).get_parsed_arg()
configuration = Configuration(args)
config = configuration.get_config()
assert 'max_open_trades' in config
assert 'stake_currency' in config
assert 'stake_amount' in config
assert 'exchange' in config
assert 'pair_whitelist' in config['exchange']
assert 'datadir' in config
assert 'user_data_dir' in config
assert log_has('Using data directory: {} ...'.format(config['datadir']), caplog)
assert 'timeframe' in config
assert not log_has('Parameter -i/--timeframe detected ...', caplog)
assert 'position_stacking' not in config
assert not log_has('Parameter --enable-position-stacking detected ...', caplog)
assert 'timerange' not in config
def test_setup_configuration_with_arguments(mocker, default_conf, caplog) -> None:
patched_configuration_load_config_file(mocker, default_conf)
mocker.patch(
'freqtrade.configuration.configuration.create_datadir',
lambda c, x: x
)
mocker.patch(
'freqtrade.configuration.configuration.create_userdata_dir',
lambda x, *args, **kwargs: Path(x)
)
arglist = [
'backtesting',
'--config', 'config.json',
'--strategy', CURRENT_TEST_STRATEGY,
'--datadir', '/foo/bar',
'--userdir', "/tmp/freqtrade",
'--timeframe', '1m',
'--enable-position-stacking',
'--disable-max-market-positions',
'--timerange', ':100',
'--export', 'trades',
'--stake-amount', 'unlimited'
]
args = Arguments(arglist).get_parsed_arg()
configuration = Configuration(args)
config = configuration.get_config()
assert 'max_open_trades' in config
assert 'stake_currency' in config
assert 'stake_amount' in config
assert 'exchange' in config
assert 'pair_whitelist' in config['exchange']
assert 'datadir' in config
assert log_has('Using data directory: {} ...'.format("/foo/bar"), caplog)
assert log_has('Using user-data directory: {} ...'.format(Path("/tmp/freqtrade")), caplog)
assert 'user_data_dir' in config
assert 'timeframe' in config
assert log_has('Parameter -i/--timeframe detected ... Using timeframe: 1m ...',
caplog)
assert 'position_stacking' in config
assert log_has('Parameter --enable-position-stacking detected ...', caplog)
assert 'use_max_market_positions' in config
assert log_has('Parameter --disable-max-market-positions detected ...', caplog)
assert log_has('max_open_trades set to unlimited ...', caplog)
assert 'timerange' in config
assert log_has('Parameter --timerange detected: {} ...'.format(config['timerange']), caplog)
assert 'export' in config
assert log_has('Parameter --export detected: {} ...'.format(config['export']), caplog)
assert 'stake_amount' in config
assert config['stake_amount'] == 'unlimited'
def test_setup_configuration_with_stratlist(mocker, default_conf, caplog) -> None:
"""
Test setup_configuration() function
"""
patched_configuration_load_config_file(mocker, default_conf)
arglist = [
'backtesting',
'--config', 'config.json',
'--timeframe', '1m',
'--export', 'trades',
'--strategy-list',
CURRENT_TEST_STRATEGY,
'TestStrategy'
]
args = Arguments(arglist).get_parsed_arg()
configuration = Configuration(args, RunMode.BACKTEST)
config = configuration.get_config()
assert config['runmode'] == RunMode.BACKTEST
assert 'max_open_trades' in config
assert 'stake_currency' in config
assert 'stake_amount' in config
assert 'exchange' in config
assert 'pair_whitelist' in config['exchange']
assert 'datadir' in config
assert log_has('Using data directory: {} ...'.format(config['datadir']), caplog)
assert 'timeframe' in config
assert log_has('Parameter -i/--timeframe detected ... Using timeframe: 1m ...',
caplog)
assert 'strategy_list' in config
assert log_has('Using strategy list of 2 strategies', caplog)
assert 'position_stacking' not in config
assert 'use_max_market_positions' not in config
assert 'timerange' not in config
assert 'export' in config
assert log_has('Parameter --export detected: {} ...'.format(config['export']), caplog)
def test_hyperopt_with_arguments(mocker, default_conf, caplog) -> None:
patched_configuration_load_config_file(mocker, default_conf)
arglist = [
'hyperopt',
'--epochs', '10',
'--spaces', 'all',
]
args = Arguments(arglist).get_parsed_arg()
configuration = Configuration(args, RunMode.HYPEROPT)
config = configuration.get_config()
assert 'epochs' in config
assert int(config['epochs']) == 10
assert log_has('Parameter --epochs detected ... Will run Hyperopt with for 10 epochs ...',
caplog)
assert 'spaces' in config
assert config['spaces'] == ['all']
assert log_has("Parameter -s/--spaces detected: ['all']", caplog)
assert "runmode" in config
assert config['runmode'] == RunMode.HYPEROPT
def test_cli_verbose_with_params(default_conf, mocker, caplog) -> None:
patched_configuration_load_config_file(mocker, default_conf)
# Prevent setting loggers
mocker.patch('freqtrade.loggers.set_loggers', MagicMock)
arglist = ['trade', '-vvv']
args = Arguments(arglist).get_parsed_arg()
configuration = Configuration(args)
validated_conf = configuration.load_config()
assert validated_conf.get('verbosity') == 3
assert log_has('Verbosity set to 3', caplog)
def test_set_logfile(default_conf, mocker, tmpdir):
patched_configuration_load_config_file(mocker, default_conf)
f = Path(tmpdir / "test_file.log")
assert not f.is_file()
arglist = [
'trade', '--logfile', str(f),
]
args = Arguments(arglist).get_parsed_arg()
configuration = Configuration(args)
validated_conf = configuration.load_config()
assert validated_conf['logfile'] == str(f)
assert f.is_file()
try:
f.unlink()
except Exception:
pass
def test_load_config_warn_forcebuy(default_conf, mocker, caplog) -> None:
default_conf['force_entry_enable'] = True
patched_configuration_load_config_file(mocker, default_conf)
args = Arguments(['trade']).get_parsed_arg()
configuration = Configuration(args)
validated_conf = configuration.load_config()
assert validated_conf.get('force_entry_enable')
assert log_has('`force_entry_enable` RPC message enabled.', caplog)
def test_validate_default_conf(default_conf) -> None:
# Validate via our validator - we allow setting defaults!
validate_config_schema(default_conf)
def test_validate_max_open_trades(default_conf):
default_conf['max_open_trades'] = float('inf')
default_conf['stake_amount'] = 'unlimited'
with pytest.raises(OperationalException, match='`max_open_trades` and `stake_amount` '
'cannot both be unlimited.'):
validate_config_consistency(default_conf)
def test_validate_price_side(default_conf):
default_conf['order_types'] = {
"entry": "limit",
"exit": "limit",
"stoploss": "limit",
"stoploss_on_exchange": False,
}
# Default should pass
validate_config_consistency(default_conf)
conf = deepcopy(default_conf)
conf['order_types']['entry'] = 'market'
with pytest.raises(OperationalException,
match='Market entry orders require entry_pricing.price_side = "other".'):
validate_config_consistency(conf)
conf = deepcopy(default_conf)
conf['order_types']['exit'] = 'market'
with pytest.raises(OperationalException,
match='Market exit orders require exit_pricing.price_side = "other".'):
validate_config_consistency(conf)
# Validate inversed case
conf = deepcopy(default_conf)
conf['order_types']['exit'] = 'market'
conf['order_types']['entry'] = 'market'
conf['exit_pricing']['price_side'] = 'bid'
conf['entry_pricing']['price_side'] = 'ask'
validate_config_consistency(conf)
def test_validate_tsl(default_conf):
default_conf['stoploss'] = 0.0
with pytest.raises(OperationalException, match='The config stoploss needs to be different '
'from 0 to avoid problems with sell orders.'):
validate_config_consistency(default_conf)
default_conf['stoploss'] = -0.10
default_conf['trailing_stop'] = True
default_conf['trailing_stop_positive'] = 0
default_conf['trailing_stop_positive_offset'] = 0
default_conf['trailing_only_offset_is_reached'] = True
with pytest.raises(OperationalException,
match=r'The config trailing_only_offset_is_reached needs '
'trailing_stop_positive_offset to be more than 0 in your config.'):
validate_config_consistency(default_conf)
default_conf['trailing_stop_positive_offset'] = 0.01
default_conf['trailing_stop_positive'] = 0.015
with pytest.raises(OperationalException,
match=r'The config trailing_stop_positive_offset needs '
'to be greater than trailing_stop_positive in your config.'):
validate_config_consistency(default_conf)
default_conf['trailing_stop_positive'] = 0.01
default_conf['trailing_stop_positive_offset'] = 0.015
validate_config_consistency(default_conf)
# 0 trailing stop positive - results in "Order would trigger immediately"
default_conf['trailing_stop_positive'] = 0
default_conf['trailing_stop_positive_offset'] = 0.02
default_conf['trailing_only_offset_is_reached'] = False
with pytest.raises(OperationalException,
match='The config trailing_stop_positive needs to be different from 0 '
'to avoid problems with sell orders'):
validate_config_consistency(default_conf)
def test_validate_edge2(edge_conf):
edge_conf.update({
"use_exit_signal": True,
})
# Passes test
validate_config_consistency(edge_conf)
edge_conf.update({
"use_exit_signal": False,
})
with pytest.raises(OperationalException, match="Edge requires `use_exit_signal` to be True, "
"otherwise no sells will happen."):
validate_config_consistency(edge_conf)
def test_validate_whitelist(default_conf):
default_conf['runmode'] = RunMode.DRY_RUN
# Test regular case - has whitelist and uses StaticPairlist
validate_config_consistency(default_conf)
conf = deepcopy(default_conf)
del conf['exchange']['pair_whitelist']
# Test error case
with pytest.raises(OperationalException,
match="StaticPairList requires pair_whitelist to be set."):
validate_config_consistency(conf)
conf = deepcopy(default_conf)
conf.update({"pairlists": [{
"method": "VolumePairList",
}]})
# Dynamic whitelist should not care about pair_whitelist
validate_config_consistency(conf)
del conf['exchange']['pair_whitelist']
validate_config_consistency(conf)
@pytest.mark.parametrize('protconf,expected', [
([], None),
([{"method": "StoplossGuard", "lookback_period": 2000, "stop_duration_candles": 10}], None),
([{"method": "StoplossGuard", "lookback_period_candles": 20, "stop_duration": 10}], None),
([{"method": "StoplossGuard", "lookback_period_candles": 20, "lookback_period": 2000,
"stop_duration": 10}], r'Protections must specify either `lookback_period`.*'),
([{"method": "StoplossGuard", "lookback_period": 20, "stop_duration": 10,
"stop_duration_candles": 10}], r'Protections must specify either `stop_duration`.*'),
])
def test_validate_protections(default_conf, protconf, expected):
conf = deepcopy(default_conf)
conf['protections'] = protconf
if expected:
with pytest.raises(OperationalException, match=expected):
validate_config_consistency(conf)
else:
validate_config_consistency(conf)
def test_validate_ask_orderbook(default_conf, caplog) -> None:
conf = deepcopy(default_conf)
conf['exit_pricing']['use_order_book'] = True
conf['exit_pricing']['order_book_min'] = 2
conf['exit_pricing']['order_book_max'] = 2
validate_config_consistency(conf) | assert log_has_re(r"DEPRECATED: Please use `order_book_top` instead of.*", caplog) | 2 | 2023-11-07 18:46:03+00:00 | 4k |
thedataninja1786/shallowgrad | examples/mnist.py | [
{
"identifier": "nn",
"path": "shallowgrad/nn.py",
"snippet": "class nn:\n class loss:\n instances = [] \n @staticmethod\n def backward_prop(g_loss): # g of loss \n delta = np.copy(g_loss)\n for i in reversed(range(len(nn.loss.instances))):\n delta = nn.loss.instances[i].backward(delta)\n\n # Activation functions\n class Softmax:\n @staticmethod\n def __call__(x):\n x = x - np.max(x, axis=1, keepdims=True)\n exp_x = np.exp(x)\n return exp_x / np.sum(exp_x, axis=1, keepdims=True)\n @staticmethod\n def __repr__():\n return \"Softmax applied in place\"\n @staticmethod\n def backward(x):\n return x * (1 - x)\n\n class ReLU:\n @staticmethod\n def __call__(x):\n return np.maximum(0,x)\n @staticmethod\n def __repr__():\n return \"ReLU applied in place\"\n @staticmethod\n def backward(x): \n return (x > 0).astype(int)\n\n class LeakyReLU:\n @staticmethod\n def __call__(x,a=0.5):\n return np.maximum(a * x, x)\n @staticmethod\n def __repr__():\n return \"LeakyReLU applied in place\"\n @staticmethod\n def backward(x,a=0.5):\n return (x > 0).astype(int) + (a * (x <= 0)).astype(int)\n \n class Tanh:\n @staticmethod\n def __call__(x):\n return np.tanh(x)\n @staticmethod\n def __repr__():\n return \"Tanh applied in place\"\n @staticmethod\n def backward(x):\n return 1 - np.tanh(x) ** 2\n \n class Sigmoid:\n @staticmethod\n def __call__(x):\n if x >= 0:\n return 1 / (1 + np.exp(-x))\n else:\n return np.exp(x) / (1 + np.exp(x)) \n @staticmethod\n def __repr__():\n return \"Sigmoid applied in place\"\n @staticmethod\n def backward(x):\n return (1 / (1 + np.exp(-x))) * (1 - (1 / (1 + np.exp(-x))))\n\n # Loss functions\n class MeanSquaredLoss(loss):\n def __init__(self):\n self.y_pred = None\n self.y_true = None\n super().__init__()\n\n def __call__(self,y_pred,y_true):\n self.y_pred = y_pred; self.y_true = y_true\n return ((self.y_true - self.y_pred) ** 2).mean()\n\n def backwards(self):\n g = 2 * (self.y_pred - self.y_true) / len(self.y_true) \n self.backward_prop(g)\n\n class CrossEntropyLoss(loss): \n def __init__(self):\n self.y_pred = None\n self.y_true = None\n self.e = 1e-10\n super().__init__()\n\n def __call__(self, y_pred, y_true):\n self.y_true = y_true\n \n if len(y_true.shape) == 1 or y_true.shape[1] == 1: # convert to one-hot\n num_classes = np.max(y_true) + 1\n one_hot = np.eye(num_classes)[y_true.flatten()]\n one_hot = one_hot.reshape(-1, num_classes)\n self.y_true = one_hot\n \n self.y_pred = nn.Softmax()(y_pred)\n self.y_pred = np.clip(self.y_pred, self.e, 1 - self.e)\n return np.mean(-np.sum(self.y_true * np.log(self.y_pred + self.e), axis=1))\n\n def backwards(self):\n g = (self.y_pred - self.y_true) / self.y_true.shape[0]\n self.backward_prop(g)\n\n class BinaryCrossEntropyLoss(loss):\n def __init__(self):\n self.y_pred = None\n self.y_true = None\n super().__init__()\n self.e = 1e-15 \n\n def __call__(self,y_pred,y_true):\n self.y_pred = y_pred; self.y_true = y_true\n return -np.mean((self.y_true * np.log(self.y_pred + self.e)) \\\n + ((1 - self.y_true) * np.log(1 - self.y_pred + self.e)))\n\n def backwards(self):\n g = (self.y_pred - self.y_true) / ((self.y_pred + self.e) * (1 - self.y_pred + self.e))\n self.backward_prop(g)\n\n\n class Linear:\n def __init__(self,in_features,out_features,bias=True,activation=None):\n self.in_features = in_features\n self.out_features = out_features\n self.use_bias = bias\n self.bias = None\n self.activation_func = activation\n self.weights = self._weight_init()\n self.grad = None\n self.forward_pass = None\n self.x = None\n self.grad_bias = None\n nn.loss.instances.append(self) # record instance for 
backprop\n super().__init__()\n\n if activation == 'ReLU':\n self.activation_func = nn.ReLU()\n elif activation == 'LeakyReLU':\n self.activation_func = nn.LeakyReLU()\n elif activation == 'Tanh':\n self.activation_func = nn.Tanh()\n elif activation == 'Sigmoid':\n self.activation_func = nn.Sigmoid()\n elif activation == 'Softmax':\n self.activation_func = nn.Softmax()\n else: self.activation_func = None\n\n def __call__(self,x): # forward\n self.x = x\n if self.activation_func is not None:\n self.forward_pass = self.activation_func((self.x.dot(self.weights) + self.bias))\n else:\n self.forward_pass = self.x.dot(self.weights) + self.bias\n return self.forward_pass\n \n def __repr__(self):\n return f\"Linear(in_features={self.in_features}, out_features={self.out_features}, activation={self.activation_func})\"\n\n def _weight_init(self):\n \"\"\"Glorot uniform initialization\"\"\"\n self.bias = np.random.uniform(size=(1,self.out_features)) * 0.05 if self.bias else 0\n v = np.sqrt(2.0 / (self.in_features + self.out_features))\n return np.random.normal(0, v, size=(self.in_features, self.out_features)) \n\n def backward(self, delta):\n if self.activation_func is not None:\n # Calculate gradient of the activation function\n delta = delta * self.activation_func.backward(self.forward_pass)\n # Compute gradients for weights and bias\n self.grad = np.dot(self.x.T, delta)\n if self.use_bias: self.grad_bias = np.sum(delta, axis=0, keepdims=True)\n # Compute gradient with respect to x\n d_x = np.dot(delta, self.weights.T)\n return d_x"
},
{
"identifier": "Adam",
"path": "optimizers/optimizers.py",
"snippet": "class Adam:\n def __init__(self,layers=[],lr=0.001,b1=0.9,b2=0.999,e=1e-10):\n self.layers = layers\n self.lr = lr\n self.b1 = b1 \n self.b2 = b2\n self.e = e\n self.t = 0 \n\n self.m = [np.zeros_like(l.weights) for l in self.layers]\n self.v = [np.zeros_like(l.weights) for l in self.layers]\n self.m_bias = [np.zeros_like(l.bias) for l in self.layers if l.use_bias]\n self.v_bias = [np.zeros_like(l.bias) for l in self.layers if l.use_bias]\n\n def step(self):\n for i,l in enumerate(self.layers):\n self.t += 1\n self.m[i] = self.b1 * self.m[i] + (1 - self.b1) * l.grad\n self.v[i] = self.b2 * self.v[i] + (1 - self.b2) * np.square(l.grad)\n m_hat = self.m[i] / (1 - self.b1 ** self.t)\n v_hat = self.v[i] / (1 - self.b2 ** self.t)\n l.weights -= self.lr * m_hat / (np.sqrt(v_hat) + self.e)\n \n if l.use_bias:\n self.m_bias[i] = self.b1 * self.m_bias[i] + (1 - self.b1) * l.grad_bias\n self.v_bias[i] = self.b2 * self.v_bias[i] + (1 - self.b2) * np.square(l.grad_bias)\n m_hat_bias = self.m_bias[i] / (1 - self.b1 ** self.t)\n v_hat_bias = self.v_bias[i] / (1 - self.b2 ** self.t)\n l.bias -= self.lr * m_hat_bias / (np.sqrt(v_hat_bias) + self.e)"
}
] | from shallowgrad.nn import nn
from optimizers.optimizers import Adam
import gzip
import numpy as np | 2,375 |
def read_file(fp):
with open(fp, "rb") as f:
dat = f.read()
return np.frombuffer(gzip.decompress(dat), dtype=np.uint8).copy()
X_train = read_file(r"datasets\b0cdab8e37ae7c1c5560ee858afaac1d")[0x10:]
Y_train = read_file(r"datasets\d4fdde61aca9f72d5fe2315410bb46a5")[8:]
X_train = X_train.reshape((-1,784))
Y = Y_train.reshape(-1,1)
X = np.array(X_train / 255)
l1 = nn.Linear(784,2500,activation='ReLU',bias=True)
l2 = nn.Linear(2500,1000,activation='ReLU',bias=True)
l3 = nn.Linear(1000,10,bias=True)
loss = nn.CrossEntropyLoss()
|
def read_file(fp):
with open(fp, "rb") as f:
dat = f.read()
return np.frombuffer(gzip.decompress(dat), dtype=np.uint8).copy()
X_train = read_file(r"datasets\b0cdab8e37ae7c1c5560ee858afaac1d")[0x10:]
Y_train = read_file(r"datasets\d4fdde61aca9f72d5fe2315410bb46a5")[8:]
X_train = X_train.reshape((-1,784))
Y = Y_train.reshape(-1,1)
X = np.array(X_train / 255)
l1 = nn.Linear(784,2500,activation='ReLU',bias=True)
l2 = nn.Linear(2500,1000,activation='ReLU',bias=True)
l3 = nn.Linear(1000,10,bias=True)
loss = nn.CrossEntropyLoss() | optim = Adam(layers=[l1,l2,l3],lr=3e-4) | 1 | 2023-11-07 18:13:43+00:00 | 4k |
ssajedi/SAiF-GPT | app.py | [
{
"identifier": "extract_pdf_text",
"path": "utils.py",
"snippet": "def extract_pdf_text(file):\n \"\"\"\n Extracts text paragraphs from a PDF file.\n \"\"\"\n pdf_reader = PyPDF2.PdfReader(file)\n pdf_dict={}\n for ip in range(len(pdf_reader.pages)):\n pdf_dict[ip] = pdf_reader.pages[ip].extract_text()\n dataset = [pdf_dict[ip] for ip in range(len(pdf_reader.pages))]\n return pdf_dict,dataset"
},
{
"identifier": "highlight_phrases_in_paragraph",
"path": "text_effects.py",
"snippet": "def highlight_phrases_in_paragraph(paragraph, phrases_to_colors):\n \"\"\"\n Highlights specific phrases within a paragraph in Streamlit markdown using generated pale colors and rounded edges.\n \n Args:\n - paragraph (str): The paragraph of text where phrases will be highlighted.\n - phrases_to_colors (dict): Dictionary where keys are phrases to be highlighted. Colors will be generated automatically.\n \n Returns:\n - None: Directly renders the HTML in Streamlit using markdown.\n \"\"\"\n # Filter out phrases that don't exist in the paragraph\n phrases_present = {phrase: color for phrase, color in phrases_to_colors.items() if re.search(re.escape(phrase), paragraph, re.IGNORECASE)}\n\n # Sort phrases by length in descending order to handle nested phrases\n phrases_sorted = sorted(phrases_present.keys(), key=len, reverse=True)\n\n # Initialize a hue value\n hue = 0\n hue_increment = 1 / len(phrases_sorted) if phrases_sorted else 0 # Prevent division by zero\n \n # Escape phrases for regex and replace them with highlighted HTML\n for phrase in phrases_sorted:\n color_code = generate_pale_color(hue)\n hue += hue_increment # Increment hue to get a different color\n \n escaped_phrase = re.escape(phrase)\n pattern = r'\\b' + escaped_phrase + r'\\b' # Use word boundaries\n replacement = (\n f'<span style=\"background-color: {color_code}; '\n f'border-radius: 0.5em; padding: 0.3em 0.6em;\">{phrase}🔒</span>'\n )\n paragraph = re.sub(pattern, replacement, paragraph, flags=re.IGNORECASE)\n \n # Render the HTML in Streamlit using the markdown function with unsafe_allow_html set to True\n # st.markdown(paragraph, unsafe_allow_html=True)\n return paragraph"
},
{
"identifier": "Anonymizer",
"path": "ner.py",
"snippet": "class Anonymizer:\n def __init__(self, model_name=\"Babelscape/wikineural-multilingual-ner\"):\n self.tokenizer = AutoTokenizer.from_pretrained(model_name)\n self.model = AutoModelForTokenClassification.from_pretrained(model_name)\n self.nlp = pipeline(\"ner\", model=self.model, tokenizer=self.tokenizer, grouped_entities=True)\n self.entity_counters = {}\n self.anonymization_map = {}\n self.deanonymization_map = {}\n self.phone_regex = re.compile(r\"\"\"\n (\\+?[\\d\\s-]{0,3}) # International prefix like +1 or 001\n (\\(?\\d+\\)?[\\s-]?) # Area code with optional parentheses and optional separator\n (\\d+[\\s-]?){2,} # The phone number itself with at least two groups of digits, separated by optional spaces or hyphens\n (\\s*(ext|x|ext.)\\s*\\d+)? # Optional extension prefixed by 'ext', 'x', or 'ext.'\n \"\"\", re.VERBOSE)\n self.email_regex = re.compile(r'[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\\.[A-Z|a-z]{2,}')\n\n def _split_text(self, text, chunk_size=60):\n words = text.split()\n for i in range(0, len(words), chunk_size):\n yield ' '.join(words[i:i+chunk_size])\n\n def _get_anonymized_label(self, entity_group):\n if entity_group not in self.entity_counters:\n self.entity_counters[entity_group] = 1\n anonymized_label = f\"{entity_group.lower()}{self.entity_counters[entity_group]}\"\n self.entity_counters[entity_group] += 1\n return anonymized_label\n\n def _anonymize_with_regex(self, text, regex, entity_group):\n matches = regex.finditer(text)\n shift = 0\n for match in matches:\n entity_text = match.group()\n if entity_text not in self.anonymization_map:\n anonymized_label = self._get_anonymized_label(entity_group)\n self.anonymization_map[entity_text] = anonymized_label\n self.deanonymization_map[anonymized_label] = entity_text\n\n start, end = match.span()\n start += shift\n end += shift\n text = text[:start] + self.anonymization_map[entity_text] + text[end:]\n shift += len(self.anonymization_map[entity_text]) - (end - start)\n\n return text\n\n def _anonymize_chunk(self, chunk, entity_types):\n # Anonymize phone numbers and emails first\n if 'PHONE' in entity_types:\n chunk = self._anonymize_with_regex(chunk, self.phone_regex, 'PHONE')\n if 'EMAIL' in entity_types:\n chunk = self._anonymize_with_regex(chunk, self.email_regex, 'EMAIL')\n\n # Proceed with NER-based anonymization\n ner_results = self.nlp(chunk)\n shift = 0\n for entity in ner_results:\n if entity['entity_group'] in entity_types:\n entity_text = entity['word']\n if entity_text not in self.anonymization_map:\n anonymized_label = self._get_anonymized_label(entity['entity_group'])\n self.anonymization_map[entity_text] = anonymized_label\n self.deanonymization_map[anonymized_label] = entity_text\n\n start = entity['start'] + shift\n end = entity['end'] + shift\n chunk = chunk[:start] + self.anonymization_map[entity_text] + chunk[end:]\n shift += len(self.anonymization_map[entity_text]) - (end - start)\n\n return chunk\n\n def anonymize(self, text, entity_types, chunk_size=60):\n chunks = list(self._split_text(text, chunk_size=chunk_size))\n anonymized_chunks = []\n\n for chunk in chunks:\n anonymized_chunk = self._anonymize_chunk(chunk, entity_types)\n anonymized_chunks.append(anonymized_chunk)\n\n return ' '.join(anonymized_chunks)\n\n # def deanonymize(self, anonymized_text):\n # for anonymized_entity, original_entity in self.deanonymization_map.items():\n # anonymized_text = anonymized_text.replace(anonymized_entity, original_entity)\n # return anonymized_text\n\n def deanonymize(self, anonymized_text):\n # Sort 
keys by length in descending order to replace longer keys first\n for anonymized_entity in sorted(self.deanonymization_map.keys(), key=len, reverse=True):\n original_entity = self.deanonymization_map[anonymized_entity]\n # Use regular expressions to match whole words\n anonymized_text = re.sub(r'\\b' + re.escape(anonymized_entity) + r'\\b', original_entity, anonymized_text)\n return anonymized_text\n\n def get_anonymization_map(self):\n return self.anonymization_map"
}
] | import streamlit as st
import openai
import streamlit as st
from utils import extract_pdf_text
from text_effects import highlight_phrases_in_paragraph
from ner import Anonymizer | 2,978 |
st.set_page_config(page_title="🔒 SAiF-GPT", page_icon="🤫",layout="wide")
st.title("SAiF-GPT")
system_prompt="""You are a helpful assistant, your task is to review an uploaded document\
uploaded by a user.\
The user query is delimited by triple asterisks.\
The reference documents in that message are delimited with triple backticks.\
A user might ask follow up questions.
"""
# add a selectbox to the sidebar
ent_types_select = st.sidebar.multiselect("Entity list", ["LOC", "PER","ORG",'EMAIL','PHONE'], ["LOC", "PER","ORG"])
# add a clear button to the sidebar
if st.sidebar.button("Clear"):
st.session_state.chat_hist = []
st.session_state.messages = []
st.session_state.anonymizer = None
st.session_state.ref_doc = None
# add
# add an upload pdf button to the sidebar
uploaded_file = st.sidebar.file_uploader("Choose a PDF file", accept_multiple_files=False)
if uploaded_file is not None:
_,chunks = extract_pdf_text(uploaded_file)
# st.write(chunks)
with open("hack_secret.txt") as f:
# st.write("Using OpenAI API key:", f.read())
openai.api_key = f.read()
# Building a front end with streamlit
# ref: https://docs.streamlit.io/knowledge-base/tutorials/build-conversational-apps
if "openai_model" not in st.session_state:
st.session_state["openai_model"] = "gpt-3.5-turbo"
if "messages" not in st.session_state:
st.session_state.messages = []
st.session_state.chat_hist = []
for message in st.session_state.chat_hist:
with st.chat_message(message["role"]):
st.markdown(message["content"], unsafe_allow_html=True)
if prompt := st.chat_input("What is up?"):
if len(st.session_state.chat_hist)==0:
# ref_doc = "\n".join(chunks)
# ref_doc = chunks[0]
ref_doc = "\n".join(chunks)
# ref_doc = """ExxonMobil Infrastructure Development Proposal Executive Summary: This comprehensive proposal envisions the construction of ExxonMobil's new operational hub, designed to bolster its strategic expansion and operational excellence within the energy sector. Introduction: We propose to construct a state-of-the-art facility that reflects ExxonMobil's commitment to innovation, sustainability, and global leadership in energy. The project will span a meticulously selected 35,000-square-foot site in Houston, Texas, with the potential to become a landmark of industrial prowess and architectural ingenuity. Project Team: Leading the project will be Chief Project Engineer, Thomas Booker, with over two decades of experience in industrial construction. Architectural design will be spearheaded by Ava Clarke, whose portfolio includes several LEED-certified buildings across Dallas. Our environmental engineering efforts will be led by Dylan Rhodes in Austin, ensuring adherence to the most stringent ecological standards. Site and Structure: The facility will be located in the heart of Houston’s Energy Corridor, taking advantage of the area's rich infrastructure and proximity to ExxonMobil’s main operations. Geotechnical assessments and site preparation will be undertaken by San Antonio-based expert, Nora Quintana. The building's framework, designed for resilience and adaptability, will be overseen by structural engineer Alex Johnson from Fort Worth. Sustainability and Environment: Sus##tainability Coordinator, Rachel Santos from Corpus Christi, will implement cutting-edge green technologies, including a state-of-the-art HVAC system designed by El Paso's mechanical engineer, Omar Fernandez. Rainwater harvesting and waste management systems will be developed in collaboration with environmental specialists from Galveston Email address: [email protected] 123-456-7890"""
# llm_prompt = augment_prompt(prompt,chunks[0])
anmz = Anonymizer()
safe_prompt = anmz.anonymize(prompt,ent_types_select)
safe_doc = anmz.anonymize(ref_doc,ent_types_select)
st.session_state.anonymizer = anmz
st.session_state.ref_doc = ref_doc
llm_prompt = f"***{safe_prompt}***+```{safe_doc}```"
st.write(safe_prompt)
else:
safe_prompt = st.session_state.anonymizer.anonymize(prompt,ent_types_select)
llm_prompt = safe_prompt
st.write(llm_prompt)
st.session_state.messages.append({"role": "user", "content": llm_prompt})
st.session_state.chat_hist.append({'role':'user', 'content':prompt})
with st.chat_message("user"):
st.markdown(prompt)
with st.chat_message("assistant"):
message_placeholder = st.empty()
full_response = ""
for response in openai.ChatCompletion.create(
model=st.session_state["openai_model"],
messages=[
{"role": m["role"], "content": m["content"]}
for m in st.session_state.messages
],
stream=True,
):
full_response += response.choices[0].delta.get("content", "")
message_placeholder.markdown(full_response + "▌")
message_placeholder.markdown(full_response)
# entities
decoded_message = st.session_state.anonymizer.deanonymize(full_response)
phrases_to_highlight = {}
ent_data = st.session_state.anonymizer.deanonymization_map
st.session_state.phrases_to_highlight = phrases_to_highlight
        # get values of dictionary and save as list
ent_data = list(ent_data.values())
for ent in ent_data:
phrases_to_highlight[ent] = None
# st.write(phrases_to_highlight)
|
st.set_page_config(page_title="🔒 SAiF-GPT", page_icon="🤫",layout="wide")
st.title("SAiF-GPT")
system_prompt="""You are a helpful assistant, your task is to review an uploaded document\
uploaded by a user.\
The user query is delimited by triple asterisks.\
The reference documents in that message are delimited with triple backticks.\
A user might ask follow up questions.
"""
# add a selectbox to the sidebar
ent_types_select = st.sidebar.multiselect("Entity list", ["LOC", "PER","ORG",'EMAIL','PHONE'], ["LOC", "PER","ORG"])
# add a clear button to the sidebar
if st.sidebar.button("Clear"):
st.session_state.chat_hist = []
st.session_state.messages = []
st.session_state.anonymizer = None
st.session_state.ref_doc = None
# add
# add an upload pdf button to the sidebar
uploaded_file = st.sidebar.file_uploader("Choose a PDF file", accept_multiple_files=False)
if uploaded_file is not None:
_,chunks = extract_pdf_text(uploaded_file)
# st.write(chunks)
with open("hack_secret.txt") as f:
# st.write("Using OpenAI API key:", f.read())
openai.api_key = f.read()
# Building a front end with streamlit
# ref: https://docs.streamlit.io/knowledge-base/tutorials/build-conversational-apps
if "openai_model" not in st.session_state:
st.session_state["openai_model"] = "gpt-3.5-turbo"
if "messages" not in st.session_state:
st.session_state.messages = []
st.session_state.chat_hist = []
for message in st.session_state.chat_hist:
with st.chat_message(message["role"]):
st.markdown(message["content"], unsafe_allow_html=True)
if prompt := st.chat_input("What is up?"):
if len(st.session_state.chat_hist)==0:
# ref_doc = "\n".join(chunks)
# ref_doc = chunks[0]
ref_doc = "\n".join(chunks)
# ref_doc = """ExxonMobil Infrastructure Development Proposal Executive Summary: This comprehensive proposal envisions the construction of ExxonMobil's new operational hub, designed to bolster its strategic expansion and operational excellence within the energy sector. Introduction: We propose to construct a state-of-the-art facility that reflects ExxonMobil's commitment to innovation, sustainability, and global leadership in energy. The project will span a meticulously selected 35,000-square-foot site in Houston, Texas, with the potential to become a landmark of industrial prowess and architectural ingenuity. Project Team: Leading the project will be Chief Project Engineer, Thomas Booker, with over two decades of experience in industrial construction. Architectural design will be spearheaded by Ava Clarke, whose portfolio includes several LEED-certified buildings across Dallas. Our environmental engineering efforts will be led by Dylan Rhodes in Austin, ensuring adherence to the most stringent ecological standards. Site and Structure: The facility will be located in the heart of Houston’s Energy Corridor, taking advantage of the area's rich infrastructure and proximity to ExxonMobil’s main operations. Geotechnical assessments and site preparation will be undertaken by San Antonio-based expert, Nora Quintana. The building's framework, designed for resilience and adaptability, will be overseen by structural engineer Alex Johnson from Fort Worth. Sustainability and Environment: Sus##tainability Coordinator, Rachel Santos from Corpus Christi, will implement cutting-edge green technologies, including a state-of-the-art HVAC system designed by El Paso's mechanical engineer, Omar Fernandez. Rainwater harvesting and waste management systems will be developed in collaboration with environmental specialists from Galveston Email address: [email protected] 123-456-7890"""
# llm_prompt = augment_prompt(prompt,chunks[0])
anmz = Anonymizer()
safe_prompt = anmz.anonymize(prompt,ent_types_select)
safe_doc = anmz.anonymize(ref_doc,ent_types_select)
st.session_state.anonymizer = anmz
st.session_state.ref_doc = ref_doc
llm_prompt = f"***{safe_prompt}***+```{safe_doc}```"
st.write(safe_prompt)
else:
safe_prompt = st.session_state.anonymizer.anonymize(prompt,ent_types_select)
llm_prompt = safe_prompt
st.write(llm_prompt)
st.session_state.messages.append({"role": "user", "content": llm_prompt})
st.session_state.chat_hist.append({'role':'user', 'content':prompt})
with st.chat_message("user"):
st.markdown(prompt)
with st.chat_message("assistant"):
message_placeholder = st.empty()
full_response = ""
for response in openai.ChatCompletion.create(
model=st.session_state["openai_model"],
messages=[
{"role": m["role"], "content": m["content"]}
for m in st.session_state.messages
],
stream=True,
):
full_response += response.choices[0].delta.get("content", "")
message_placeholder.markdown(full_response + "▌")
message_placeholder.markdown(full_response)
# entities
decoded_message = st.session_state.anonymizer.deanonymize(full_response)
phrases_to_highlight = {}
ent_data = st.session_state.anonymizer.deanonymization_map
st.session_state.phrases_to_highlight = phrases_to_highlight
        # get values of dictionary and save as list
ent_data = list(ent_data.values())
for ent in ent_data:
phrases_to_highlight[ent] = None
# st.write(phrases_to_highlight) | highlighted_Text = highlight_phrases_in_paragraph(decoded_message,phrases_to_highlight) | 1 | 2023-11-04 18:14:49+00:00 | 4k |
awslabs/optimizing-multitask-training-through-dynamic-pipelines | dynapipe/schedule_opt/ofob_schedule.py | [
{
"identifier": "DEBUG_PRINT_EXECUTORS",
"path": "dynapipe/schedule_opt/schedule_common.py",
"snippet": "DEBUG_REPLACE_OP_NAMES_WITH_INSTRS = False\nDEBUG_REPLACE_OP_NAMES_WITH_SCH_STATS = False\nDEBUG_PRINT_EXECUTORS = []\n COMP_THREAD = 0\n COMM_THREAD = 1\ndef _is_last_fw_or_bw_stage(\n flattened_stage_id, n_orig_layers, comm_added=True\n):\ndef _is_fw_stage(flattened_stage_id, n_orig_layers):\ndef _is_comm_stage(flattened_stage_id, n_orig_layers):\ndef _is_first_bw_layer(flattened_stage_id, n_orig_layers):\ndef _get_comp_only_stage_index(flattened_stage_id, n_orig_layers):\n def __init__(\n self,\n name: str,\n fw_times: List[float],\n fw_comm_times: List[float],\n fw_stored_activation_size: List[float],\n fw_peak_activation_size: List[float],\n activation_shapes: List[List[Tuple[int, int, int]]],\n bw_times: List[int],\n bw_comm_times: List[int],\n ):\n def _validate_spec(self):\n def _merge_layers(self, merged2orig: Dict[int, int]):\n def _flatten(self):\n def initialize(self):\n def is_layers_merged(self):\n def __post_init__(self):\n def _merge_layers(self):\n def _flatten(self):\n def initialize(self):\n def __repr__(self) -> str:\n def from_operation(\n cls,\n operation: \"ScheduleOperation\",\n **kwargs,\n ):\n def __init__(\n self,\n executor_id: int,\n thread_id: int,\n n_orig_layers: int,\n assigned_stages: List[Tuple[int, float, bool]],\n is_comm_stage: bool = False,\n include_memory_stats: bool = True,\n parent_executor: Optional[\"ScheduleExecutor\"] = None,\n logger: Optional[logging.Logger] = None,\n ) -> None:\n def reset(self):\n def process_name(self):\n def thread_name(self):\n def full_name(self):\n def debug_print(self, *args):\n def get_metadata(self):\n def _get_duration_event(self, name, start_time, duration):\n def get_exec_event(\n self,\n op: ScheduleOperation,\n start_time,\n duration,\n ):\n def get_comm_event(\n self,\n op: ScheduleOperation,\n start_time,\n duration,\n ):\n def get_memory_event(self, time, memory, event_type):\n def update_memory(\n self, peak_time, peak_memory, stored_time, stored_memory\n ):\n def __init__(\n self,\n minibatch_spec: SchedulerMinibatchSpec,\n include_memory_stats: bool = True,\n memory_limit: float = float(\"inf\"),\n logger: Optional[logging.Logger] = None,\n ) -> None:\n def _get_executor(\n self,\n executor_id,\n thread_id,\n n_orig_layers,\n assigned_stages,\n is_comm_stage,\n include_memory_stats,\n memory_limit=float(\"inf\"),\n separate_comm_stage=False,\n parent_executor=None,\n ):\n def _initialize(self):\n def _get_next_executor(self, flattened_stage_id):\n def _get_op(self, flattened_stage_id: int, microbatch_id: int):\n def _get_metadata(self):\n def get_executor_peak_memory(self) -> Dict[str, float]:\n def get_makespan(self) -> float:\n def _get_trace_events(self) -> Dict[str, Any]:\n def schedule(self, **kwargs):\n def get_instructions(self):\nclass ExecutorThread:\nclass SchedulerMicrobatchSpec:\nclass SchedulerMinibatchSpec:\nclass ScheduleOperation:\nclass ScheduleExecutor:\nclass Scheduler:"
},
{
"identifier": "WaitFreeExecutor",
"path": "dynapipe/schedule_opt/wait_free_schedule.py",
"snippet": "class WaitFreeExecutor(ScheduleExecutor):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.available_queue: List[ScheduleOperation] = []\n\n def add_operation(self, op: ScheduleOperation):\n raise NotImplementedError\n\n def try_execute(self, current_time):\n raise NotImplementedError\n\n def finish_execute(self):\n raise NotImplementedError"
},
{
"identifier": "WaitFreeScheduler",
"path": "dynapipe/schedule_opt/wait_free_schedule.py",
"snippet": "class WaitFreeScheduler(Scheduler):\n def __init__(\n self,\n minibatch_spec: SchedulerMinibatchSpec,\n include_memory_stats: bool = True,\n memory_limit: float = float(\"inf\"),\n logger: Optional[logging.Logger] = None,\n ) -> None:\n super().__init__(\n minibatch_spec,\n include_memory_stats,\n memory_limit,\n logger=logger,\n )\n self._initialize()\n self._pending_events: PriorityQueue[CompleteEvent] = PriorityQueue()\n self.executors: Dict[ExecutorIndex, WaitFreeExecutor]\n\n def _get_executor(\n self,\n executor_id,\n thread_id,\n n_orig_layers,\n assigned_stages,\n is_comm_stage,\n include_memory_stats,\n memory_limit=float(\"inf\"),\n ):\n # overrides Scheduler\n raise NotImplementedError\n\n def _init_executors(self, n_microbatches, **kwargs):\n for executor in self.executors.values():\n executor.reset()\n self.communication_executors = [\n executor\n for executor in self.executors.values()\n if executor.is_comm_stage\n ]\n self.computation_executors = [\n executor\n for executor in self.executors.values()\n if not executor.is_comm_stage\n ]\n return True\n\n def _inject_microbatches(\n self, microbatch_offset: int, n_microbatches: int\n ):\n for microbatch_id in range(\n microbatch_offset, microbatch_offset + n_microbatches\n ):\n executor = self.executors[\n self.minibatch_spec.flattened_executor_assignment[0]\n ]\n op = self._get_op(0, microbatch_id)\n executor.add_operation(op)\n\n def _on_op_finish(self, executor: WaitFreeExecutor, op: ScheduleOperation):\n executor.finish_execute()\n if op.flattened_stage < self.n_flattened_stages - 1:\n next_layer = op.flattened_stage + 1\n next_executor = self.minibatch_spec.flattened_executor_assignment[\n next_layer\n ]\n self.executors[next_executor].add_operation(\n self._get_op(next_layer, op.microbatch)\n )\n\n def _push_end_event(self, op, executor, end_time):\n self._pending_events.put(CompleteEvent(end_time, op, executor))\n\n def schedule(self, **kwargs):\n n_microbatches = len(self.minibatch_spec.microbatches)\n status = self._init_executors(n_microbatches, **kwargs)\n if not status:\n return None\n self._inject_microbatches(0, n_microbatches)\n trace_events = self._get_trace_events()\n current_time = 0\n\n def __try_execute():\n # priortize communication executors\n for executor in (\n self.communication_executors + self.computation_executors\n ):\n end_time, launched_op, events = executor.try_execute(\n current_time\n )\n if launched_op:\n self._push_end_event(launched_op, executor, end_time)\n trace_events[\"traceEvents\"].extend(events)\n\n while True:\n __try_execute()\n if self._pending_events.empty():\n break\n else:\n next_event = self._pending_events.get()\n current_time = next_event.completion_time\n ready_events = [next_event]\n while not self._pending_events.empty():\n # try to process all events that finish at the same time\n another_event = self._pending_events.get()\n if another_event.completion_time <= current_time + 1e-6:\n ready_events.append(another_event)\n else:\n self._pending_events.put(another_event)\n break\n for event in ready_events:\n self._on_op_finish(event.executor, event.op)\n self.makespan = current_time\n # make sure all executors are empty\n for executor_idx, executor in self.executors.items():\n if hasattr(executor, \"available_queue\"):\n assert len(executor.available_queue) == 0, (\n f\"Executor {executor_idx} has non-empty ready queue \"\n f\"at end of scheduling: {executor.available_queue}\"\n )\n if hasattr(executor, \"next_op_idx\"):\n assert executor.next_op_idx == 
len(executor.operator_order), (\n f\"Executor {executor_idx} has not finished all operations \"\n f\"at end of scheduling: {executor.available_queue}\"\n )\n return trace_events"
}
] | import logging
from collections import defaultdict
from typing import Dict, List, Optional, Tuple
from .schedule_common import (
DEBUG_PRINT_EXECUTORS,
ExecutorIndex,
ScheduleExecutor,
ScheduleOperation,
SchedulerMinibatchSpec,
)
from .wait_free_schedule import WaitFreeExecutor, WaitFreeScheduler | 2,827 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
class OFOBExecutor(WaitFreeExecutor):
def __init__(
self,
executor_id: int,
thread_id: int,
n_orig_layers: int,
assigned_stages: List[Tuple[int, float, bool]],
n_executors: int,
is_comm_stage: bool = False,
include_memory_stats: bool = True,
parent_executor: Optional[ScheduleExecutor] = None,
logger: Optional[logging.Logger] = None,
) -> None:
super().__init__(
executor_id,
thread_id,
n_orig_layers,
assigned_stages,
is_comm_stage,
include_memory_stats,
parent_executor,
logger,
)
if not self.is_comm_stage:
assert len(self.fw_stages) == len(
self.bw_stages
), "Mismatched number of forward and backward layers"
self.is_executing = False
self.next_op = (0, 0, True) # (microbatch, chunk_id, is_forward)
self.n_executors = n_executors
self.n_microbatches = None
self.executed_fw_ops = 0
self.executed_bw_ops = 0
self._increment_next_op_fn = None
self._try_execute_fn = None
def register_increment_next_op_fn(self, fn):
self._increment_next_op_fn = fn
def register_try_execute_fn(self, fn):
self._try_execute_fn = fn
def reset(self):
super().reset()
self.available_queue = []
self.next_op = (0, 0, True)
self.n_microbatches = None
self.executed_fw_ops = 0
self.executed_bw_ops = 0
def set_n_microbatches(self, n_microbatches):
self.n_microbatches = n_microbatches
def add_operation(self, op: ScheduleOperation):
if op.is_forward:
assert (
op.flattened_stage in self.fw_stages
), "Operation {} not in executor".format(op)
else:
assert (
op.flattened_stage in self.bw_stages
), "Operation {} not in executor".format(op)
self.available_queue.append(op)
def _increment_next_op(self):
assert self._increment_next_op_fn is not None
return self._increment_next_op_fn(self)
def try_execute(self, current_time):
assert self._try_execute_fn is not None
if (
self.executed_fw_ops == 0
and self.is_comm_stage
and len(self.fw_stages) == 0
):
# no fw layers assigned, skip once
self.executed_fw_ops = 1
self._increment_next_op()
return self._try_execute_fn(self, current_time)
def finish_execute(self):
self.is_executing = False
def debug_print(self, *args):
# overrides parent debug_print
| # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
class OFOBExecutor(WaitFreeExecutor):
def __init__(
self,
executor_id: int,
thread_id: int,
n_orig_layers: int,
assigned_stages: List[Tuple[int, float, bool]],
n_executors: int,
is_comm_stage: bool = False,
include_memory_stats: bool = True,
parent_executor: Optional[ScheduleExecutor] = None,
logger: Optional[logging.Logger] = None,
) -> None:
super().__init__(
executor_id,
thread_id,
n_orig_layers,
assigned_stages,
is_comm_stage,
include_memory_stats,
parent_executor,
logger,
)
if not self.is_comm_stage:
assert len(self.fw_stages) == len(
self.bw_stages
), "Mismatched number of forward and backward layers"
self.is_executing = False
self.next_op = (0, 0, True) # (microbatch, chunk_id, is_forward)
self.n_executors = n_executors
self.n_microbatches = None
self.executed_fw_ops = 0
self.executed_bw_ops = 0
self._increment_next_op_fn = None
self._try_execute_fn = None
def register_increment_next_op_fn(self, fn):
self._increment_next_op_fn = fn
def register_try_execute_fn(self, fn):
self._try_execute_fn = fn
def reset(self):
super().reset()
self.available_queue = []
self.next_op = (0, 0, True)
self.n_microbatches = None
self.executed_fw_ops = 0
self.executed_bw_ops = 0
def set_n_microbatches(self, n_microbatches):
self.n_microbatches = n_microbatches
def add_operation(self, op: ScheduleOperation):
if op.is_forward:
assert (
op.flattened_stage in self.fw_stages
), "Operation {} not in executor".format(op)
else:
assert (
op.flattened_stage in self.bw_stages
), "Operation {} not in executor".format(op)
self.available_queue.append(op)
def _increment_next_op(self):
assert self._increment_next_op_fn is not None
return self._increment_next_op_fn(self)
def try_execute(self, current_time):
assert self._try_execute_fn is not None
if (
self.executed_fw_ops == 0
and self.is_comm_stage
and len(self.fw_stages) == 0
):
# no fw layers assigned, skip once
self.executed_fw_ops = 1
self._increment_next_op()
return self._try_execute_fn(self, current_time)
def finish_execute(self):
self.is_executing = False
def debug_print(self, *args):
# overrides parent debug_print | if self.executor_id in DEBUG_PRINT_EXECUTORS and self.logger: | 0 | 2023-11-08 07:58:20+00:00 | 4k |
lich0821/ShaDiaoRobot | actions/chitchat/seq2seq.py | [
{
"identifier": "Data",
"path": "actions/chitchat/data_processing.py",
"snippet": "class Data(object):\n def __init__(self, config) -> None:\n self.config = config\n self.seq_path = config[\"data_path\"] + config[\"dataset\"] + \".data\"\n self.conv_path = config[\"data_path\"] + config[\"dataset\"] + \".conv\"\n self.conv_size = os.path.getsize(self.conv_path)\n self.vacab_path_in = config[\"data_path\"] + config[\"dataset\"] + \".vin\"\n self.vacab_path_out = config[\"data_path\"] + config[\"dataset\"] + \".vout\"\n self.max_length = config[\"max_length\"]\n self.batch_size = config[\"batch_size\"]\n self.LOG = logging.getLogger(\"Data\")\n logging.basicConfig(level=logging.INFO)\n jieba.setLogLevel(logging.INFO) # Disable debug info\n\n def create_sequences(self):\n if os.path.exists(self.seq_path): # Skip if processed data exists\n return\n\n # 判断训练语料文件是否存在,如果不存在则进行提醒\n if not os.path.exists(self.conv_path):\n self.LOG.info(\"找不到语料文件,请检查路径\")\n exit()\n\n self.LOG.info(\"正在处理语料\")\n # 打开需要处理的语料,逐条读取并进行数据处理, 新建一个文件,用于存放处理后的对话语料\n with tqdm(total=self.conv_size) as pbar, open(self.conv_path, encoding=\"utf-8\") as fin, open(self.seq_path, \"w\") as fout:\n one_conv = \"\" # 存储一次完整对话\n for line in fin:\n pbar.update(len(line.encode(\"utf-8\")))\n line = line.strip(\"\\n\")\n # line = re.sub(r\"[%s]+\" % punctuation, \"\", line) # 去除标点符号\n if not line:\n continue\n # 判断是否为一段对话的开始,如果是则把刚刚处理的语料保存下来\n if line[0] == self.config[\"e\"]:\n if one_conv:\n fout.write(one_conv[:-1] + \"\\n\")\n one_conv = \"\"\n # 判断是否正在处理对话语句,如果是则进行语料的拼接处理 以及分词\n elif line[0] == self.config[\"m\"]:\n one_conv = one_conv + str(\" \".join(jieba.cut(line.split(\" \")[1]))) + \"\\t\" # 存储一次问或答\n\n def _create_vacab(self, lang, vocab_path, vocab_size):\n if os.path.exists(vocab_path): # Skip if exists\n return\n\n tokenizer = tf.keras.preprocessing.text.Tokenizer(num_words=vocab_size, oov_token=\"<UNK>\")\n tokenizer.fit_on_texts(lang)\n with open(vocab_path, \"w\", encoding=\"utf-8\") as f:\n f.write(tokenizer.to_json(ensure_ascii=False))\n\n self.LOG.info(f\"正在保存: {vocab_path}\")\n\n def create_vacabularies(self):\n if os.path.exists(self.vacab_path_in) and os.path.exists(self.vacab_path_out): # Skip if exists\n return\n\n self.LOG.info(f\"正在创建字典\")\n lines = io.open(self.seq_path, encoding=\"UTF-8\").readlines()\n word_pairs = [[add_flag(w) for w in l.split(\"\\t\")] for l in lines]\n input, target = zip(*word_pairs)\n self._create_vacab(input, self.vacab_path_in, self.config[\"vacab_size_in\"])\n self._create_vacab(target, self.vacab_path_out, self.config[\"vacab_size_out\"])\n\n def _tokenize(self, path):\n # 定义word2number函数,通过对语料的处理提取词典,并进行word2number处理以及padding补全\n # 从词典中读取预先生成tokenizer的config,构建词典矩阵\n with open(path, \"r\", encoding=\"utf-8\") as f:\n tokenize_config = json.dumps(json.load(f), ensure_ascii=False)\n tokenizer = tf.keras.preprocessing.text.tokenizer_from_json(tokenize_config)\n # 利用词典进行word2number的转换以及padding处理\n return tokenizer\n\n def process(self):\n self.create_sequences()\n self.create_vacabularies()\n\n def load(self):\n self.process()\n lines = io.open(self.seq_path, encoding=\"UTF-8\").readlines()\n word_pairs = [[add_flag(w) for w in l.split(\"\\t\")] for l in lines]\n words_in, words_out = zip(*word_pairs)\n tokenizer_in = self._tokenize(self.vacab_path_in)\n tokenizer_out = self._tokenize(self.vacab_path_out)\n\n tensor_in = tokenizer_in.texts_to_sequences(words_in)\n tensor_out = tokenizer_out.texts_to_sequences(words_out)\n\n tensor_in = tf.keras.preprocessing.sequence.pad_sequences(tensor_in, maxlen=self.max_length, padding=\"post\")\n tensor_out = 
tf.keras.preprocessing.sequence.pad_sequences(tensor_out, maxlen=self.max_length, padding=\"post\")\n\n self.steps_per_epoch = len(tensor_in) // self.batch_size\n BUFFER_SIZE = len(tensor_in)\n dataset = tf.data.Dataset.from_tensor_slices((tensor_in, tensor_out)).shuffle(BUFFER_SIZE)\n dataset = dataset.batch(self.batch_size, drop_remainder=True)\n\n return dataset, tokenizer_in, tokenizer_out"
},
{
"identifier": "add_flag",
"path": "actions/chitchat/data_processing.py",
"snippet": "def add_flag(w):\n return \"<bos> \" + w + \" <eos>\""
}
] | import logging
import os
import sys
import jieba
import tensorflow as tf
from tqdm import tqdm
from .data_processing import Data, add_flag | 3,353 |
class Decoder(tf.keras.Model):
def __init__(self, vocab_size, embedding_dim, dec_units, batch_sz):
super(Decoder, self).__init__()
# initialize batch_sz, dec_units, embedding, gru, fc and attention
self.batch_sz = batch_sz
self.dec_units = dec_units
self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
self.gru = tf.keras.layers.GRU(self.dec_units,
return_sequences=True,
return_state=True,
recurrent_initializer='glorot_uniform')
self.fc = tf.keras.layers.Dense(vocab_size)
self.attention = BahdanauAttention(self.dec_units)
def call(self, y, hidden, enc_output):
# first compute attention over enc_output and the decoder hidden state to get the context vector
context_vector, attention_weights = self.attention(hidden, enc_output)
# embed the decoder input
y = self.embedding(y)
# concatenate the context vector with the embedded decoder input and feed the result into the GRU
y = tf.concat([tf.expand_dims(context_vector, 1), y], axis=-1)
output, state = self.gru(y)
# reshape the GRU output and pass it through the fully connected layer to get the final result
output = tf.reshape(output, (-1, output.shape[2]))
y = self.fc(output)
return y, state, attention_weights
def initialize_hidden_state(self):
return tf.zeros((self.batch_sz, self.dec_units))
class Seq2Seq(object):
def __init__(self, config) -> None:
self.config = config
vacab_size_in = config['vacab_size_in']
vacab_size_out = config['vacab_size_out']
embedding_dim = config['embedding_dim']
self.units = config['layer_size']
self.batch_size = config['batch_size']
self.encoder = Encoder(vacab_size_in, embedding_dim, self.units, self.batch_size)
self.decoder = Decoder(vacab_size_out, embedding_dim, self.units, self.batch_size)
self.optimizer = tf.keras.optimizers.Adam()
# self.optimizer = tf.keras.optimizers.legacy.Adam()
self.checkpoint = tf.train.Checkpoint(optimizer=self.optimizer, encoder=self.encoder, decoder=self.decoder)
self.ckpt_dir = self.config["model_data"]
logging.basicConfig(level=logging.INFO)
self.LOG = logging.getLogger("Seq2Seq")
if tf.io.gfile.listdir(self.ckpt_dir):
self.LOG.info("正在加载模型...")
self.checkpoint.restore(tf.train.latest_checkpoint(self.ckpt_dir))
data = Data(config)
self.dataset, self.tokenizer_in, self.tokenizer_out = data.load()
self.steps_per_epoch = data.steps_per_epoch
def loss_function(self, real, pred):
# define the loss function
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
# mask out start so it does not interfere with the loss
mask = tf.math.logical_not(tf.math.equal(real, 0))
loss_ = loss_object(real, pred)
mask = tf.cast(mask, dtype=loss_.dtype)  # convert bool to numeric
loss_ *= mask
return tf.reduce_mean(loss_)
@tf.function
def training_step(self, inp, targ, targ_lang, enc_hidden):
loss = 0
with tf.GradientTape() as tape:
enc_output, enc_hidden = self.encoder(inp, enc_hidden)
dec_hidden = enc_hidden
dec_input = tf.expand_dims([targ_lang.word_index['bos']] * self.batch_size, 1)
for t in range(1, targ.shape[1]):
predictions, dec_hidden, _ = self.decoder(dec_input, dec_hidden, enc_output)
loss += self.loss_function(targ[:, t], predictions)
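# teacher forcing: feed the ground-truth target token as the next decoder input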
dec_input = tf.expand_dims(targ[:, t], 1)
step_loss = (loss / int(targ.shape[1]))
variables = self.encoder.trainable_variables + self.decoder.trainable_variables
gradients = tape.gradient(loss, variables)
self.optimizer.apply_gradients(zip(gradients, variables))
return step_loss
def train(self):
# define the training function
# read the training corpus and convert words to ids using the pre-generated vocabulary
enc_hidden = self.encoder.initialize_hidden_state()
writer = tf.summary.create_file_writer(self.config["log_dir"])
self.LOG.info(f"数据目录: {self.config['data_path']}")
self.LOG.info(f"每个 epoch 训练步数: {self.steps_per_epoch}")
epoch = 0
train_epoch = self.config["epochs"]
while epoch < train_epoch:
total_loss = 0
# train for one epoch; the number of training steps is steps_per_epoch
iter_data = tqdm(self.dataset.take(self.steps_per_epoch))
for batch, (inp, targ) in enumerate(iter_data):
batch_loss = self.training_step(inp, targ, self.tokenizer_out, enc_hidden)
total_loss += batch_loss
iter_data.set_postfix_str(f"batch_loss: {batch_loss:.4f}")
step_loss = total_loss / self.steps_per_epoch
self.LOG.info(f"Epoch: {epoch+1}/{train_epoch} Loss: {total_loss:.4f} 平均每步 loss {step_loss:.4f}")
# save the model trained in this epoch and update the model files
self.checkpoint.save(file_prefix=os.path.join(self.ckpt_dir, "ckpt"))
sys.stdout.flush()
epoch = epoch + 1
with writer.as_default():
tf.summary.scalar("loss", step_loss, step=epoch)
def predict(self, sentence):
# define the prediction function, which predicts the response to the given input
# preprocess the input sentence and add the start/end markers
max_length = self.config["max_length"]
sentence = " ".join(jieba.cut(sentence))
| # -*- coding: utf-8 -*-
os.environ['TF_CPP_MIN_LOG_LEVEL'] = "3" # Disable Tensorflow debug message
gpus = tf.config.list_physical_devices("GPU")
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, enable=True)
class Encoder(tf.keras.Model):
# define the Encoder class
# init function, initializes the default parameters
def __init__(self, vocab_size, embedding_dim, enc_units, batch_size):
super(Encoder, self).__init__()
self.enc_units = enc_units
self.batch_size = batch_size
self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
self.gru = tf.keras.layers.GRU(self.enc_units,
return_sequences=True,
return_state=True,
recurrent_initializer='glorot_uniform')
# define the call function, which implements the forward computation
def call(self, x, hidden):
x_emb = self.embedding(x)
output, state = self.gru(x_emb, initial_state=hidden)
return output, state
def initialize_hidden_state(self):
return tf.zeros((self.batch_size, self.enc_units))
class BahdanauAttention(tf.keras.Model):
# define the BahdanauAttention class; Bahdanau attention is a commonly used attention mechanism
def __init__(self, units):
super(BahdanauAttention, self).__init__()
# initialize the attention network
self.W1 = tf.keras.layers.Dense(units)
self.W2 = tf.keras.layers.Dense(units)
self.V = tf.keras.layers.Dense(1)
def call(self, query, values):
# add an extra dimension to query so it can be added to values
hidden_with_time_axis = tf.expand_dims(query, 1)
# add values and hidden_with_time_axis, apply a tanh non-linearity, and output a one-dimensional score
score = self.V(tf.nn.tanh(
self.W1(values) + self.W2(hidden_with_time_axis)))
# use softmax to convert the score into a probability distribution
attention_weights = tf.nn.softmax(score, axis=1)
# multiply the weights with values (the encoder output) to get the context_vector
context_vector = attention_weights * values
# sum the weighted context_vector over the time axis to get the final context_vector
context_vector = tf.reduce_sum(context_vector, axis=1)
return context_vector, attention_weights
class Decoder(tf.keras.Model):
def __init__(self, vocab_size, embedding_dim, dec_units, batch_sz):
super(Decoder, self).__init__()
# initialize batch_sz, dec_units, embedding, gru, fc and attention
self.batch_sz = batch_sz
self.dec_units = dec_units
self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
self.gru = tf.keras.layers.GRU(self.dec_units,
return_sequences=True,
return_state=True,
recurrent_initializer='glorot_uniform')
self.fc = tf.keras.layers.Dense(vocab_size)
self.attention = BahdanauAttention(self.dec_units)
def call(self, y, hidden, enc_output):
# first compute attention over enc_output and the decoder hidden state to get the context vector
context_vector, attention_weights = self.attention(hidden, enc_output)
# embed the decoder input
y = self.embedding(y)
# concatenate the context vector with the embedded decoder input and feed the result into the GRU
y = tf.concat([tf.expand_dims(context_vector, 1), y], axis=-1)
output, state = self.gru(y)
# reshape the GRU output and pass it through the fully connected layer to get the final result
output = tf.reshape(output, (-1, output.shape[2]))
y = self.fc(output)
return y, state, attention_weights
def initialize_hidden_state(self):
return tf.zeros((self.batch_sz, self.dec_units))
class Seq2Seq(object):
def __init__(self, config) -> None:
self.config = config
vacab_size_in = config['vacab_size_in']
vacab_size_out = config['vacab_size_out']
embedding_dim = config['embedding_dim']
self.units = config['layer_size']
self.batch_size = config['batch_size']
self.encoder = Encoder(vacab_size_in, embedding_dim, self.units, self.batch_size)
self.decoder = Decoder(vacab_size_out, embedding_dim, self.units, self.batch_size)
self.optimizer = tf.keras.optimizers.Adam()
# self.optimizer = tf.keras.optimizers.legacy.Adam()
self.checkpoint = tf.train.Checkpoint(optimizer=self.optimizer, encoder=self.encoder, decoder=self.decoder)
self.ckpt_dir = self.config["model_data"]
logging.basicConfig(level=logging.INFO)
self.LOG = logging.getLogger("Seq2Seq")
if tf.io.gfile.listdir(self.ckpt_dir):
self.LOG.info("正在加载模型...")
self.checkpoint.restore(tf.train.latest_checkpoint(self.ckpt_dir))
data = Data(config)
self.dataset, self.tokenizer_in, self.tokenizer_out = data.load()
self.steps_per_epoch = data.steps_per_epoch
def loss_function(self, real, pred):
# define the loss function
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
# mask out start so it does not interfere with the loss
mask = tf.math.logical_not(tf.math.equal(real, 0))
loss_ = loss_object(real, pred)
mask = tf.cast(mask, dtype=loss_.dtype)  # convert bool to numeric
loss_ *= mask
return tf.reduce_mean(loss_)
@tf.function
def training_step(self, inp, targ, targ_lang, enc_hidden):
loss = 0
with tf.GradientTape() as tape:
enc_output, enc_hidden = self.encoder(inp, enc_hidden)
dec_hidden = enc_hidden
dec_input = tf.expand_dims([targ_lang.word_index['bos']] * self.batch_size, 1)
for t in range(1, targ.shape[1]):
predictions, dec_hidden, _ = self.decoder(dec_input, dec_hidden, enc_output)
loss += self.loss_function(targ[:, t], predictions)
dec_input = tf.expand_dims(targ[:, t], 1)
step_loss = (loss / int(targ.shape[1]))
variables = self.encoder.trainable_variables + self.decoder.trainable_variables
gradients = tape.gradient(loss, variables)
self.optimizer.apply_gradients(zip(gradients, variables))
return step_loss
def train(self):
# define the training function
# read the training corpus and convert words to ids using the pre-generated vocabulary
enc_hidden = self.encoder.initialize_hidden_state()
writer = tf.summary.create_file_writer(self.config["log_dir"])
self.LOG.info(f"数据目录: {self.config['data_path']}")
self.LOG.info(f"每个 epoch 训练步数: {self.steps_per_epoch}")
epoch = 0
train_epoch = self.config["epochs"]
while epoch < train_epoch:
total_loss = 0
# train for one epoch; the number of training steps is steps_per_epoch
iter_data = tqdm(self.dataset.take(self.steps_per_epoch))
for batch, (inp, targ) in enumerate(iter_data):
batch_loss = self.training_step(inp, targ, self.tokenizer_out, enc_hidden)
total_loss += batch_loss
iter_data.set_postfix_str(f"batch_loss: {batch_loss:.4f}")
step_loss = total_loss / self.steps_per_epoch
self.LOG.info(f"Epoch: {epoch+1}/{train_epoch} Loss: {total_loss:.4f} 平均每步 loss {step_loss:.4f}")
# save the model trained in this epoch and update the model files
self.checkpoint.save(file_prefix=os.path.join(self.ckpt_dir, "ckpt"))
sys.stdout.flush()
epoch = epoch + 1
with writer.as_default():
tf.summary.scalar("loss", step_loss, step=epoch)
def predict(self, sentence):
# define the prediction function, which predicts the response to the given input
# preprocess the input sentence and add the start/end markers
max_length = self.config["max_length"]
sentence = " ".join(jieba.cut(sentence)) | sentence = add_flag(sentence) | 1 | 2023-11-05 12:56:38+00:00 | 4k |
ryanchen01/sing-box-utils | gen_config.py | [
{
"identifier": "surge2singbox",
"path": "surge2singbox.py",
"snippet": "def surge2singbox(surge_config_path):\n singbox_rules = []\n\n with open(surge_config_path, 'r', encoding='utf-8') as f:\n surge_config = f.readlines()\n reg = r'^\\[(.+)\\]$'\n sections = {}\n for linenum, line in enumerate(surge_config):\n if re.match(reg, line):\n sections[re.match(reg, line).group(1)] = linenum\n if 'Rule' not in sections:\n print('Error: Rule section not found')\n sys.exit(1)\n \n if list(sections.keys()).index('Rule') == len(sections.keys()) - 1:\n surge_config = surge_config[sections['Rule'] + 1:]\n else:\n surge_config = surge_config[sections['Rule'] + 1:sections[list(sections.keys())[list(sections.keys()).index('Rule') + 1]]]\n hasLAN = False\n for line in surge_config:\n if line.startswith('#') or len(line.strip()) == 0 or len(line.split(',')) < 2:\n continue\n if line.upper().split(',')[1].strip() == 'LAN':\n hasLAN = True\n break\n extras = []\n for line in surge_config:\n if line.startswith('#') or len(line.strip()) == 0:\n continue\n if line.upper().startswith('RULE-SET'):\n singbox_rule = get_singbox_ruleset(line.split(',')[1].strip(), line.split(',')[2].strip())\n if singbox_rule != -1:\n singbox_rules.append(singbox_rule)\n extras.append(line.split(',')[2].strip())\n elif line.upper().startswith('DOMAIN-SET'):\n singbox_rule = get_singbox_domainset(line.split(',')[1].strip(), line.split(',')[2].strip())\n if singbox_rule != -1:\n singbox_rules.append(singbox_rule)\n extras.append(line.split(',')[2].strip())\n elif line.upper().startswith('AND') or line.upper().startswith('OR'):\n singbox_rule = get_singbox_logical(line, line.split(',')[2].strip())\n if singbox_rule != -1:\n singbox_rules.append(singbox_rule)\n extras.append(line.split(',')[2].strip())\n else:\n singbox_rule = get_singbox_rule(line, line.split(',')[2].strip(), hasLAN=hasLAN)\n if singbox_rule != -1:\n singbox_rules.append(singbox_rule)\n extras.append(line.split(',')[2].strip())\n\n singbox_rules = np.flip(singbox_rules, axis=0).tolist()\n singbox_rules.append({'protocol': 'dns', 'outbound': 'dns-out'})\n config = {}\n config['route'] = {}\n config['route']['rules'] = singbox_rules\n\n return config, extras"
},
{
"identifier": "clash2singbox",
"path": "clash2singbox.py",
"snippet": "def clash2singbox(clash_config_path, policies, extras=[]):\n bUS = policies[0]\n bHK = policies[1]\n bSG = policies[2]\n bJP = policies[3]\n bTW = policies[4]\n\n with open(clash_config_path, 'r', encoding='utf-8') as stream:\n data_loaded = yaml.safe_load(stream)\n data_loaded = data_loaded['proxies']\n sb_proxies = []\n names = []\n for proxy in data_loaded:\n if proxy['type'] not in supported_types:\n continue\n sb_proxy = {}\n if proxy['type'] == 'ss':\n sb_proxy['type'] = 'shadowsocks'\n sb_proxy['tag'] = proxy['name']\n names.append(proxy['name'])\n sb_proxy['server'] = proxy['server']\n sb_proxy['server_port'] = proxy['port']\n sb_proxy['method'] = proxy['cipher']\n sb_proxy['password'] = proxy['password']\n if 'plugin' in proxy and proxy['plugin'] == 'obfs':\n sb_proxy['plugin'] = 'obfs-local'\n if 'plugin-opts' in proxy:\n opts = ''\n for key in proxy['plugin-opts']:\n if key == 'mode':\n opts += 'obfs=' + proxy['plugin-opts'][key] + ';'\n elif key == 'host':\n opts += 'obfs-host=' + proxy['plugin-opts'][key]\n sb_proxy['plugin_opts'] = opts\n elif proxy['type'] == 'vmess':\n sb_proxy['type'] = 'vmess'\n sb_proxy['tag'] = proxy['name']\n names.append(proxy['name'])\n sb_proxy['server'] = proxy['server']\n sb_proxy['server_port'] = proxy['port']\n sb_proxy['uuid'] = proxy['uuid']\n sb_proxy['alter_id'] = proxy['alterId']\n sb_proxy['security'] = proxy['cipher']\n elif proxy['type'] == 'trojan':\n sb_proxy['type'] = 'trojan'\n sb_proxy['tag'] = proxy['name']\n names.append(proxy['name'])\n sb_proxy['server'] = proxy['server']\n sb_proxy['server_port'] = proxy['port']\n sb_proxy['password'] = proxy['password']\n if 'sni' in proxy:\n tls = {}\n tls['enabled'] = True\n tls['server_name'] = proxy['sni']\n if 'skip-cert-verify' in proxy:\n tls['insecure'] = proxy['skip-cert-verify']\n if 'alpn' in proxy:\n tls['alpn'] = proxy['alpn']\n sb_proxy['tls'] = tls\n\n sb_proxies.append(sb_proxy)\n\n selector = {}\n selector['type'] = 'selector'\n selector['tag'] = 'OutSide'\n selector['outbounds'] = names\n selector['default'] = names[0]\n sb_proxies.append(selector)\n us_policy = {}\n us_policy['type'] = 'selector'\n us_policy['tag'] = 'United States'\n us_policy['outbounds'] = []\n hk_policy = {}\n hk_policy['type'] = 'selector'\n hk_policy['tag'] = 'Hong Kong'\n hk_policy['outbounds'] = []\n sg_policy = {}\n sg_policy['type'] = 'selector'\n sg_policy['tag'] = 'Singapore'\n sg_policy['outbounds'] = []\n jp_policy = {}\n jp_policy['type'] = 'selector'\n jp_policy['tag'] = 'Japan'\n jp_policy['outbounds'] = []\n tw_policy = {}\n tw_policy['type'] = 'selector'\n tw_policy['tag'] = 'Taiwan'\n tw_policy['outbounds'] = []\n for name in names:\n if re.search(us_regex, name, re.IGNORECASE):\n us_policy['outbounds'].append(name)\n elif re.search(hk_regex, name, re.IGNORECASE):\n hk_policy['outbounds'].append(name)\n elif re.search(sg_regex, name, re.IGNORECASE):\n sg_policy['outbounds'].append(name)\n elif re.search(jp_regex, name, re.IGNORECASE):\n jp_policy['outbounds'].append(name)\n elif re.search(tw_regex, name, re.IGNORECASE):\n tw_policy['outbounds'].append(name)\n \n if bUS:\n sb_proxies.append(us_policy)\n if bHK:\n sb_proxies.append(hk_policy)\n if bSG:\n sb_proxies.append(sg_policy)\n if bJP:\n sb_proxies.append(jp_policy)\n if bTW:\n sb_proxies.append(tw_policy)\n\n if len(extras) > 0:\n countries = []\n if bUS:\n countries.append('United States')\n if bHK:\n countries.append('Hong Kong')\n if bSG:\n countries.append('Singapore')\n if bJP:\n countries.append('Japan')\n if 
bTW:\n countries.append('Taiwan')\n countries.append('direct')\n countries.append('block')\n for extra in extras:\n policy = {}\n policy['type'] = 'selector'\n policy['tag'] = extra\n policy['outbounds'] = countries\n sb_proxies.append(policy)\n\n out_conf = {}\n out_conf['outbounds'] = sb_proxies\n\n return out_conf"
}
] | import yaml
import json
import sys
import regex as re
import requests
import argparse
import pathlib
from surge2singbox import surge2singbox
from clash2singbox import clash2singbox | 2,324 |
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Generate singbox config from clash config proxies and surge config rules")
parser.add_argument("clash_config_path", type=pathlib.Path, help="clash config path")
parser.add_argument("surge_config_path", type=pathlib.Path, help="surge config path")
parser.add_argument("-o", "--output", help="output file name")
parser.add_argument("-us", "--us", action="store_true", help="include US policy")
parser.add_argument("-hk", "--hk", action="store_true", help="include Hong Kong policy")
parser.add_argument("-sg", "--sg", action="store_true", help="include Singapore policy")
parser.add_argument("-jp", "--jp", action="store_true", help="include Japan policy")
parser.add_argument("-tw", "--tw", action="store_true", help="include Taiwan policy")
args = parser.parse_args()
if args.output:
outname = args.output
else:
outname = "singbox.json"
clash_config_path = args.clash_config_path
surge_config_path = args.surge_config_path
bUS = args.us
bHK = args.hk
bSG = args.sg
bJP = args.jp
bTW = args.tw
|
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Generate singbox config from clash config proxies and surge config rules")
parser.add_argument("clash_config_path", type=pathlib.Path, help="clash config path")
parser.add_argument("surge_config_path", type=pathlib.Path, help="surge config path")
parser.add_argument("-o", "--output", help="output file name")
parser.add_argument("-us", "--us", action="store_true", help="include US policy")
parser.add_argument("-hk", "--hk", action="store_true", help="include Hong Kong policy")
parser.add_argument("-sg", "--sg", action="store_true", help="include Singapore policy")
parser.add_argument("-jp", "--jp", action="store_true", help="include Japan policy")
parser.add_argument("-tw", "--tw", action="store_true", help="include Taiwan policy")
args = parser.parse_args()
if args.output:
outname = args.output
else:
outname = "singbox.json"
clash_config_path = args.clash_config_path
surge_config_path = args.surge_config_path
bUS = args.us
bHK = args.hk
bSG = args.sg
bJP = args.jp
bTW = args.tw
| rule_config, extras = surge2singbox(surge_config_path) | 0 | 2023-11-05 12:35:50+00:00 | 4k |
apple/ml-reed | reed/models/self_supervised_consistency_model.py | [
{
"identifier": "EnvironmentContrastiveBatch",
"path": "reed/data/environment_transition_dataset.py",
"snippet": "class EnvironmentContrastiveBatch:\n \"\"\"\n A batch of triplets where two states/observations are given and one is an augmented version of the other.\n\n The augmentation may be along the lines of random crop, jitter, etc or may be a temporal augmentation where the\n augmented state occurs in the future\n \"\"\"\n states = attr.ib(type=t.Union[torch.Tensor, PackedSequence])\n actions = attr.ib(type=t.Union[torch.Tensor, PackedSequence])\n augmented_states = attr.ib(type=t.Union[torch.Tensor, PackedSequence])\n\n def to_dict(self) -> t.Mapping[str, t.Union[torch.Tensor, PackedSequence]]:\n \"\"\"\n Return the attr as a dictionary\n \"\"\"\n return {\"states\": self.states,\n \"actions\": self.actions,\n \"augmented_states\": self.augmented_states}"
},
{
"identifier": "get_image_encoder",
"path": "reed/models/image_encoder.py",
"snippet": "def get_image_encoder(architecture: str, obs_dim: t.List[int], out_size: int = 1,\n hidden_dim: int = 128, hidden_depth: int = 3,\n image_hidden_num_channels: int = 32,\n *kwargs) -> nn.Module:\n \"\"\"\n Return the specified architecture initialized\n\n Args:\n architecture: which image encoder architecture to use\n obs_dim: dimensionality of the state images (height, width, channels)\n out_size: the size of the output\n hidden_dim: the size of the hidden layer(s)\n hidden_depth: the number of hidden layers\n image_hidden_num_channels: (default = 32) the number of channels in the hidden layers of the image encoder\n Returns:\n initialized image encoder\n \"\"\"\n if architecture == \"pixl2r\":\n # from PixL2R: https://arxiv.org/pdf/2007.15543.pdf & https://github.com/prasoongoyal/PixL2R/blob/b0691be6b27e705a62534b58f97ff7b8b6655c7d/src/supervised/model.py#L52\n return PixL2RImageEncoder(obs_dim=obs_dim, out_size=out_size,\n hidden_dim=hidden_dim, hidden_depth=hidden_depth,\n image_hidden_num_channels=image_hidden_num_channels)\n elif architecture == \"drqv2\":\n # from drqv2: https://github.com/facebookresearch/drqv2/blob/c0c650b76c6e5d22a7eb5f2edffd1440fe94f8ef/drqv2.py#L55\n return DRQv2ImageEncoder(obs_dim=obs_dim, out_size=out_size,\n hidden_dim=hidden_dim, hidden_depth=hidden_depth,\n image_hidden_num_channels=image_hidden_num_channels)\n else:\n raise NotImplementedError(f\"{architecture} is not an implemented image \"\n f\"encoder architecture\")"
},
{
"identifier": "StateActionSelfPredictiveRepresentationsNetworkEnsemble",
"path": "reed/models/self_predictive_representations_model.py",
"snippet": "class StateActionSelfPredictiveRepresentationsNetworkEnsemble(nn.Module):\n def __init__(self,\n device: torch.device,\n networks: t.Sequence[nn.Module]):\n \"\"\"\n Initial pass at an ensemble of networks used to train state-action representations that are consistent with\n the network's encoding of the state that results from applying the given action in the given state\n\n Args:\n device: which GPU or CPU device the network is to be run on\n networks: the networks that will make up the ensemble\n \"\"\"\n super(StateActionSelfPredictiveRepresentationsNetworkEnsemble, self).__init__()\n\n # convert the list of networks into a pytorch network list\n self._ensemble = nn.ModuleList(networks)\n\n # track the device\n self.device = device\n\n def __len__(self) -> int:\n \"\"\"\n The number of networks in the ensemble\n \"\"\"\n return len(self._ensemble)\n\n def __getitem__(self, item: int) -> nn.Module:\n return self._ensemble[item]\n\n def forward(self,\n transitions: t.List[EnvironmentContrastiveBatch]) -> t.Tuple[t.Sequence[torch.Tensor], t.Sequence[torch.Tensor]]:\n \"\"\"\n For each network, predict the representation of the next state and encode the given next state\n\n Args:\n transitions: a batch of environment transitions composed of states, actions, and next states for each\n network in the ensemble\n Returns:\n predicted embedding of the next state - p in the SimSiam paper\n next state embedding (detached from the tensor graph) - z in the SimSiam paper\n dimensionality: (batch, time step)\n \"\"\"\n next_state_preds = []\n projected_next_state_embeds = []\n for net_indx, net_batch in enumerate(transitions):\n net = self._ensemble[net_indx]\n # we need to convert the batch object a dictionary in case we are using nn.DataParallel\n next_state_pred, projected_next_state_embed = net(attr.asdict(net_batch))\n next_state_preds.append(next_state_pred)\n projected_next_state_embeds.append(projected_next_state_embed)\n\n # from the SimSiam paper, this is p and z\n return next_state_preds, projected_next_state_embeds\n\n def save(self, model_dir: Path, env_id: str, step: int):\n \"\"\"\n Save the ensemble to disk\n Args:\n model_dir: location to save the SFC nets\n env_id: the string identifier for the environment\n step: number of overall training steps taken before this save\n\n Returns:\n\n \"\"\"\n for net_indx, net in enumerate(self._ensemble):\n torch.save(net.state_dict(), f'{model_dir.as_posix()}/{env_id}_sfc_model_{step}_{net_indx}.pt')"
}
] | import typing as t
import attr
import torch
import torch.nn as nn
from pathlib import Path
from collections import OrderedDict
from reed.data.environment_transition_dataset import EnvironmentContrastiveBatch
from reed.models.image_encoder import get_image_encoder
from reed.models.self_predictive_representations_model import StateActionSelfPredictiveRepresentationsNetworkEnsemble | 2,845 | image_encoder_architecture: str = "pixl2r",
consistency_comparison_dim: int = 32,
consistency_projection_size: int = 128,
consistency_comparison_hidden_size: int = 256,
consistency_architecture: str = "mosaic",
with_consistency_prediction_head: bool = True,
image_hidden_num_channels: int = 32,
num_layers: int = 3):
"""
Learns embeddings such that the representations of an image and an augmented image are consistent with
one another in the latent space.
Args:
state_size: dimensionality of the states
out_size: the size of the output
state_embed_size: the size of the state's embedding
hidden_size: the size of the hidden layer(s)
ssl_state_encoder_mimics_reward_model: whether the state encoder mimics the reward model's
architecture
image_encoder_architecture: (default = "pixl2r") the architecture that is used for the image encoder
consistency_comparison_dim: the number of dimensions to use when comparing the predicted augmented state
representation and the actual augmented state representation
consistency_projection_size: the number of hidden units the state representations are projected to
consistency_comparison_hidden_size: the number of dimensions to use when comparing the predicted
augmented state representation and the actual augmented state
representation
consistency_architecture: (default = "mosaic") controls the architecture used to predict the augmented
state representation and then to project the current and augmented state
representations before comparing. The name of the architecture references
the source paper. The options are "simsiam" and "mosaic"
with_consistency_prediction_head: (default = True) whether to include a prediction head to
predict the target representation. When we train with SimCLR we do not
use the prediction head
image_hidden_num_channels: (default = 32) the number of channels in the hidden layers of the
image encoder
num_layers: the number of hidden layers
"""
super(ImageStateConsistencyNetwork, self).__init__()
assert image_encoder_architecture in {"pixl2r", "drqv2"}
assert consistency_architecture in {"simsiam", "mosaic"}
# track the dimensionality of the input, the output, and the hidden dimensions
self._state_size = state_size
self._out_size = out_size
self._hidden_size = hidden_size
self._num_layers = num_layers
self._image_encoder_architecture = image_encoder_architecture
self._image_hidden_num_channels = image_hidden_num_channels
self._state_embed_size = state_embed_size
self._ssl_state_encoder_mimics_reward_model = ssl_state_encoder_mimics_reward_model
self._consistency_projection_size = consistency_projection_size
self._consistency_comparison_dim = consistency_comparison_dim
self._consistency_comparison_hidden_size = consistency_comparison_hidden_size
self._consistency_architecture = consistency_architecture
self._with_consistency_prediction_head = with_consistency_prediction_head
self._build()
def _build_consistency_comparison_architecture(self) -> t.Tuple[nn.Module, nn.Module]:
"""
Builds the network architecture used to project the current and augmented state representations and then predict
the augmented state representation from the current state representation.
"""
predictor = None
if self._consistency_architecture == "simsiam":
# architecture from the SimSiam code base
# project the predicted and true augmented state representation
projector = nn.Linear(256, self._consistency_projection_size)
# build a 2-layer consistency predictor following:
# https://github.com/facebookresearch/simsiam/blob/a7bc1772896d0dad0806c51f0bb6f3b16d290468/simsiam/builder.py#L39
if self._with_consistency_prediction_head:
predictor = nn.Sequential(
nn.Linear(self._consistency_projection_size,
self._consistency_comparison_hidden_size,
bias=False),
nn.BatchNorm1d(self._consistency_comparison_hidden_size),
nn.ReLU(inplace=True), # hidden layer
nn.Linear(self._consistency_comparison_hidden_size,
self._consistency_projection_size)) # output layer
elif self._consistency_architecture == "mosaic":
# project the predicted and true augmented state representation
# from: https://github.com/rll-research/mosaic/blob/561814b40d33f853aeb93f1113a301508fd45274/mosaic/models/rep_modules.py#L63
projector = nn.Sequential(
# Rearrange('B T d H W -> (B T) d H W'),
nn.BatchNorm1d(self._state_embed_size), nn.ReLU(inplace=True),
# Rearrange('BT d H W -> BT (d H W)'),
nn.Linear(self._state_embed_size, self._consistency_comparison_hidden_size), nn.ReLU(inplace=True),
nn.Linear(self._consistency_comparison_hidden_size, self._consistency_projection_size),
nn.LayerNorm(self._consistency_projection_size)
)
# from: https://github.com/rll-research/mosaic/blob/561814b40d33f853aeb93f1113a301508fd45274/mosaic/models/rep_modules.py#L118
if self._with_consistency_prediction_head:
predictor = nn.Sequential(
nn.ReLU(inplace=True),
nn.Linear(self._consistency_projection_size, self._consistency_comparison_hidden_size),
nn.ReLU(inplace=True),
nn.Linear(self._consistency_comparison_hidden_size, self._consistency_projection_size),
nn.LayerNorm(self._consistency_projection_size))
else:
raise NotImplementedError(f"{self._consistency_architecture} is not an implemented consistency "
f"comparison architecture.")
return projector, predictor
def _build(self):
"""
Build the 4 mini-networks that make up the model:
state encoder
state convolution encoder
augmented state predictor
augmented state projector
"""
# the observations are first encoded with a CNN and then projected to an embedding
# space where they are combined with the action embedding
| #
# For licensing see accompanying LICENSE file.
# Copyright (C) 2023 Apple Inc. All Rights Reserved.
#
class ImageStateConsistencyNetwork(nn.Module):
def __init__(self,
state_size: t.List[int],
out_size: int = 1,
state_embed_size: int = 256,
hidden_size: int = 256,
ssl_state_encoder_mimics_reward_model: bool = True,
image_encoder_architecture: str = "pixl2r",
consistency_comparison_dim: int = 32,
consistency_projection_size: int = 128,
consistency_comparison_hidden_size: int = 256,
consistency_architecture: str = "mosaic",
with_consistency_prediction_head: bool = True,
image_hidden_num_channels: int = 32,
num_layers: int = 3):
"""
Learns embeddings such that the representations of an image and an augmented image are consistent with
one another in the latent space.
Args:
state_size: dimensionality of the states
out_size: the size of the output
state_embed_size: the size of the state's embedding
hidden_size: the size of the hidden layer(s)
ssl_state_encoder_mimics_reward_model: whether the state encoder mimics the reward model's
architecture
image_encoder_architecture: (default = "pixl2r") the architecture that is used for the image encoder
consistency_comparison_dim: the number of dimensions to use when comparing the predicted augmented state
representation and the actual augmented state representation
consistency_projection_size: the number of hidden units the state representations are projected to
consistency_comparison_hidden_size: the number of dimensions to use when comparing the predicted
augmented state representation and the actual augmented state
representation
consistency_architecture: (default = "mosaic") controls the architecture used to predict the augmented
state representation and then to project the current and augmented state
representations before comparing. The name of the architecture references
the source paper. The options are "simsiam" and "mosaic"
with_consistency_prediction_head: (default = True) whether to include a prediction head to
predict the target representation. When we train with SimCLR we do not
use the prediction head
image_hidden_num_channels: (default = 32) the number of channels in the hidden layers of the
image encoder
num_layers: the number of hidden layers
"""
super(ImageStateConsistencyNetwork, self).__init__()
assert image_encoder_architecture in {"pixl2r", "drqv2"}
assert consistency_architecture in {"simsiam", "mosaic"}
# track the dimensionality of the input, the output, and the hidden dimensions
self._state_size = state_size
self._out_size = out_size
self._hidden_size = hidden_size
self._num_layers = num_layers
self._image_encoder_architecture = image_encoder_architecture
self._image_hidden_num_channels = image_hidden_num_channels
self._state_embed_size = state_embed_size
self._ssl_state_encoder_mimics_reward_model = ssl_state_encoder_mimics_reward_model
self._consistency_projection_size = consistency_projection_size
self._consistency_comparison_dim = consistency_comparison_dim
self._consistency_comparison_hidden_size = consistency_comparison_hidden_size
self._consistency_architecture = consistency_architecture
self._with_consistency_prediction_head = with_consistency_prediction_head
self._build()
def _build_consistency_comparison_architecture(self) -> t.Tuple[nn.Module, nn.Module]:
"""
Builds the network architecture used to project the current and augmented state representations and then predict
the augmented state representation from the current state representation.
"""
predictor = None
if self._consistency_architecture == "simsiam":
# architecture from the SimSiam code base
# project the predicted and true augmented state representation
projector = nn.Linear(256, self._consistency_projection_size)
# build a 2-layer consistency predictor following:
# https://github.com/facebookresearch/simsiam/blob/a7bc1772896d0dad0806c51f0bb6f3b16d290468/simsiam/builder.py#L39
if self._with_consistency_prediction_head:
predictor = nn.Sequential(
nn.Linear(self._consistency_projection_size,
self._consistency_comparison_hidden_size,
bias=False),
nn.BatchNorm1d(self._consistency_comparison_hidden_size),
nn.ReLU(inplace=True), # hidden layer
nn.Linear(self._consistency_comparison_hidden_size,
self._consistency_projection_size)) # output layer
elif self._consistency_architecture == "mosaic":
# project the predicted and true augmented state representation
# from: https://github.com/rll-research/mosaic/blob/561814b40d33f853aeb93f1113a301508fd45274/mosaic/models/rep_modules.py#L63
projector = nn.Sequential(
# Rearrange('B T d H W -> (B T) d H W'),
nn.BatchNorm1d(self._state_embed_size), nn.ReLU(inplace=True),
# Rearrange('BT d H W -> BT (d H W)'),
nn.Linear(self._state_embed_size, self._consistency_comparison_hidden_size), nn.ReLU(inplace=True),
nn.Linear(self._consistency_comparison_hidden_size, self._consistency_projection_size),
nn.LayerNorm(self._consistency_projection_size)
)
# from: https://github.com/rll-research/mosaic/blob/561814b40d33f853aeb93f1113a301508fd45274/mosaic/models/rep_modules.py#L118
if self._with_consistency_prediction_head:
predictor = nn.Sequential(
nn.ReLU(inplace=True),
nn.Linear(self._consistency_projection_size, self._consistency_comparison_hidden_size),
nn.ReLU(inplace=True),
nn.Linear(self._consistency_comparison_hidden_size, self._consistency_projection_size),
nn.LayerNorm(self._consistency_projection_size))
else:
raise NotImplementedError(f"{self._consistency_architecture} is not an implemented consistency "
f"comparison architecture.")
return projector, predictor
def _build(self):
"""
Build the 4 mini-networks that make up the model:
state encoder
state convolution encoder
augmented state predictor
augmented state projector
"""
# the observations are first encoded with a CNN and then projected to an embedding
# space where they are combined with the action embedding | self._state_conv_encoder = get_image_encoder( | 1 | 2023-11-06 23:14:20+00:00 | 4k |
ApolloAuto/apollo-model-yolox | yolox/models/darknet.py | [
{
"identifier": "BaseConv",
"path": "yolox/models/network_blocks.py",
"snippet": "class BaseConv(nn.Module):\n \"\"\"A Conv2d -> Batchnorm -> silu/leaky relu block\"\"\"\n\n def __init__(\n self, in_channels, out_channels, ksize, stride, groups=1, bias=False, act=\"silu\"\n ):\n super().__init__()\n # same padding\n pad = (ksize - 1) // 2\n self.conv = nn.Conv2d(\n in_channels,\n out_channels,\n kernel_size=ksize,\n stride=stride,\n padding=pad,\n groups=groups,\n bias=bias,\n )\n self.bn = nn.BatchNorm2d(out_channels)\n self.act = get_activation(act, inplace=True)\n\n def forward(self, x):\n return self.act(self.bn(self.conv(x)))\n\n def fuseforward(self, x):\n return self.act(self.conv(x))"
},
{
"identifier": "CSPLayer",
"path": "yolox/models/network_blocks.py",
"snippet": "class CSPLayer(nn.Module):\n \"\"\"C3 in yolov5, CSP Bottleneck with 3 convolutions\"\"\"\n\n def __init__(\n self,\n in_channels,\n out_channels,\n n=1,\n shortcut=True,\n expansion=0.5,\n depthwise=False,\n act=\"silu\",\n ):\n \"\"\"\n Args:\n in_channels (int): input channels.\n out_channels (int): output channels.\n n (int): number of Bottlenecks. Default value: 1.\n \"\"\"\n # ch_in, ch_out, number, shortcut, groups, expansion\n super().__init__()\n hidden_channels = int(out_channels * expansion) # hidden channels\n self.conv1 = BaseConv(in_channels, hidden_channels, 1, stride=1, act=act)\n self.conv2 = BaseConv(in_channels, hidden_channels, 1, stride=1, act=act)\n self.conv3 = BaseConv(2 * hidden_channels, out_channels, 1, stride=1, act=act)\n module_list = [\n Bottleneck(\n hidden_channels, hidden_channels, shortcut, 1.0, depthwise, act=act\n )\n for _ in range(n)\n ]\n self.m = nn.Sequential(*module_list)\n\n def forward(self, x):\n x_1 = self.conv1(x)\n x_2 = self.conv2(x)\n x_1 = self.m(x_1)\n x = torch.cat((x_1, x_2), dim=1)\n return self.conv3(x)"
},
{
"identifier": "DWConv",
"path": "yolox/models/network_blocks.py",
"snippet": "class DWConv(nn.Module):\n \"\"\"Depthwise Conv + Conv\"\"\"\n\n def __init__(self, in_channels, out_channels, ksize, stride=1, act=\"silu\"):\n super().__init__()\n self.dconv = BaseConv(\n in_channels,\n in_channels,\n ksize=ksize,\n stride=stride,\n groups=in_channels,\n act=act,\n )\n self.pconv = BaseConv(\n in_channels, out_channels, ksize=1, stride=1, groups=1, act=act\n )\n\n def forward(self, x):\n x = self.dconv(x)\n return self.pconv(x)"
},
{
"identifier": "Focus",
"path": "yolox/models/network_blocks.py",
"snippet": "class Focus(nn.Module):\n \"\"\"Focus width and height information into channel space.\"\"\"\n\n def __init__(self, in_channels, out_channels, ksize=1, stride=1, act=\"silu\"):\n super().__init__()\n self.conv = BaseConv(in_channels * 4, out_channels, ksize, stride, act=act)\n\n def forward(self, x):\n # shape of x (b,c,w,h) -> y(b,4c,w/2,h/2)\n patch_top_left = x[..., ::2, ::2]\n patch_top_right = x[..., ::2, 1::2]\n patch_bot_left = x[..., 1::2, ::2]\n patch_bot_right = x[..., 1::2, 1::2]\n x = torch.cat(\n (\n patch_top_left,\n patch_bot_left,\n patch_top_right,\n patch_bot_right,\n ),\n dim=1,\n )\n return self.conv(x)"
},
{
"identifier": "ResLayer",
"path": "yolox/models/network_blocks.py",
"snippet": "class ResLayer(nn.Module):\n \"Residual layer with `in_channels` inputs.\"\n\n def __init__(self, in_channels: int):\n super().__init__()\n mid_channels = in_channels // 2\n self.layer1 = BaseConv(\n in_channels, mid_channels, ksize=1, stride=1, act=\"lrelu\"\n )\n self.layer2 = BaseConv(\n mid_channels, in_channels, ksize=3, stride=1, act=\"lrelu\"\n )\n\n def forward(self, x):\n out = self.layer2(self.layer1(x))\n return x + out"
},
{
"identifier": "SPPBottleneck",
"path": "yolox/models/network_blocks.py",
"snippet": "class SPPBottleneck(nn.Module):\n \"\"\"Spatial pyramid pooling layer used in YOLOv3-SPP\"\"\"\n\n def __init__(\n self, in_channels, out_channels, kernel_sizes=(5, 9, 13), activation=\"silu\"\n ):\n super().__init__()\n hidden_channels = in_channels // 2\n self.conv1 = BaseConv(in_channels, hidden_channels, 1, stride=1, act=activation)\n self.m = nn.ModuleList(\n [\n nn.MaxPool2d(kernel_size=ks, stride=1, padding=ks // 2)\n for ks in kernel_sizes\n ]\n )\n conv2_channels = hidden_channels * (len(kernel_sizes) + 1)\n self.conv2 = BaseConv(conv2_channels, out_channels, 1, stride=1, act=activation)\n\n def forward(self, x):\n x = self.conv1(x)\n x = torch.cat([x] + [m(x) for m in self.m], dim=1)\n x = self.conv2(x)\n return x"
}
] | from torch import nn
from .network_blocks import BaseConv, CSPLayer, DWConv, Focus, ResLayer, SPPBottleneck | 2,178 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright (c) Megvii Inc. All rights reserved.
class Darknet(nn.Module):
# number of blocks from dark2 to dark5.
depth2blocks = {21: [1, 2, 2, 1], 53: [2, 8, 8, 4]}
def __init__(
self,
depth,
in_channels=3,
stem_out_channels=32,
out_features=("dark3", "dark4", "dark5"),
):
"""
Args:
depth (int): depth of darknet used in model, usually use [21, 53] for this param.
in_channels (int): number of input channels, for example, use 3 for RGB image.
stem_out_channels (int): number of output channels of darknet stem.
It decides channels of darknet layer2 to layer5.
out_features (Tuple[str]): desired output layer name.
"""
super().__init__()
assert out_features, "please provide output features of Darknet"
self.out_features = out_features
self.stem = nn.Sequential(
BaseConv(in_channels, stem_out_channels, ksize=3, stride=1, act="lrelu"),
*self.make_group_layer(stem_out_channels, num_blocks=1, stride=2),
)
in_channels = stem_out_channels * 2 # 64
num_blocks = Darknet.depth2blocks[depth]
# create darknet with `stem_out_channels` and `num_blocks` layers.
# to make model structure more clear, we don't use `for` statement in python.
self.dark2 = nn.Sequential(
*self.make_group_layer(in_channels, num_blocks[0], stride=2)
)
in_channels *= 2 # 128
self.dark3 = nn.Sequential(
*self.make_group_layer(in_channels, num_blocks[1], stride=2)
)
in_channels *= 2 # 256
self.dark4 = nn.Sequential(
*self.make_group_layer(in_channels, num_blocks[2], stride=2)
)
in_channels *= 2 # 512
self.dark5 = nn.Sequential(
*self.make_group_layer(in_channels, num_blocks[3], stride=2),
*self.make_spp_block([in_channels, in_channels * 2], in_channels * 2),
)
def make_group_layer(self, in_channels: int, num_blocks: int, stride: int = 1):
"starts with conv layer then has `num_blocks` `ResLayer`"
return [
BaseConv(in_channels, in_channels * 2, ksize=3, stride=stride, act="lrelu"),
*[(ResLayer(in_channels * 2)) for _ in range(num_blocks)],
]
def make_spp_block(self, filters_list, in_filters):
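# builds the conv + SPP bottleneck block that is appended to the dark5 stage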
m = nn.Sequential(
*[
BaseConv(in_filters, filters_list[0], 1, stride=1, act="lrelu"),
BaseConv(filters_list[0], filters_list[1], 3, stride=1, act="lrelu"),
| #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright (c) Megvii Inc. All rights reserved.
class Darknet(nn.Module):
# number of blocks from dark2 to dark5.
depth2blocks = {21: [1, 2, 2, 1], 53: [2, 8, 8, 4]}
def __init__(
self,
depth,
in_channels=3,
stem_out_channels=32,
out_features=("dark3", "dark4", "dark5"),
):
"""
Args:
depth (int): depth of darknet used in model, usually use [21, 53] for this param.
in_channels (int): number of input channels, for example, use 3 for RGB image.
stem_out_channels (int): number of output channels of darknet stem.
It decides channels of darknet layer2 to layer5.
out_features (Tuple[str]): desired output layer name.
"""
super().__init__()
assert out_features, "please provide output features of Darknet"
self.out_features = out_features
self.stem = nn.Sequential(
BaseConv(in_channels, stem_out_channels, ksize=3, stride=1, act="lrelu"),
*self.make_group_layer(stem_out_channels, num_blocks=1, stride=2),
)
in_channels = stem_out_channels * 2 # 64
num_blocks = Darknet.depth2blocks[depth]
# create darknet with `stem_out_channels` and `num_blocks` layers.
# to make model structure more clear, we don't use `for` statement in python.
self.dark2 = nn.Sequential(
*self.make_group_layer(in_channels, num_blocks[0], stride=2)
)
in_channels *= 2 # 128
self.dark3 = nn.Sequential(
*self.make_group_layer(in_channels, num_blocks[1], stride=2)
)
in_channels *= 2 # 256
self.dark4 = nn.Sequential(
*self.make_group_layer(in_channels, num_blocks[2], stride=2)
)
in_channels *= 2 # 512
self.dark5 = nn.Sequential(
*self.make_group_layer(in_channels, num_blocks[3], stride=2),
*self.make_spp_block([in_channels, in_channels * 2], in_channels * 2),
)
def make_group_layer(self, in_channels: int, num_blocks: int, stride: int = 1):
"starts with conv layer then has `num_blocks` `ResLayer`"
return [
BaseConv(in_channels, in_channels * 2, ksize=3, stride=stride, act="lrelu"),
*[(ResLayer(in_channels * 2)) for _ in range(num_blocks)],
]
def make_spp_block(self, filters_list, in_filters):
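# builds the conv + SPP bottleneck block that is appended to the dark5 stage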
m = nn.Sequential(
*[
BaseConv(in_filters, filters_list[0], 1, stride=1, act="lrelu"),
BaseConv(filters_list[0], filters_list[1], 3, stride=1, act="lrelu"), | SPPBottleneck( | 5 | 2023-11-08 07:07:24+00:00 | 4k |
indiefan/king_smith | custom_components/king_smith/number.py | [
{
"identifier": "DOMAIN",
"path": "custom_components/king_smith/const.py",
"snippet": "DOMAIN = \"king_smith\""
},
{
"identifier": "WalkingPadCoordinator",
"path": "custom_components/king_smith/coordinator.py",
"snippet": "class WalkingPadCoordinator(DataUpdateCoordinator[None]):\n \"\"\"Data coordinator for receiving Walking Pad updates.\"\"\"\n\n def __init__(self, hass: HomeAssistant, walking_pad_api: WalkingPadApi) -> None:\n \"\"\"Initialise the coordinator.\"\"\"\n super().__init__(\n hass,\n _LOGGER,\n name=DOMAIN,\n )\n self._walking_pad_api = walking_pad_api\n self._walking_pad_api.register_status_callback(self._async_handle_update)\n self.connected = self._walking_pad_api.connected\n self._last_update_time = NEVER_TIME\n self._debounce_cancel: CALLBACK_TYPE | None = None\n self._debounced_update_job = HassJob(\n self._async_handle_debounced_update,\n f\"Walking Pad {walking_pad_api.mac} BLE debounced update\",\n )\n\n @callback\n def _async_handle_debounced_update(self, _now: datetime) -> None:\n \"\"\"Handle debounced update.\"\"\"\n self._debounce_cancel = None\n self._last_update_time = time.monotonic()\n self.async_set_updated_data(None)\n\n @callback\n def _async_handle_update(self, status: WalkingPadCurStatus) -> None:\n \"\"\"Just trigger the callbacks.\"\"\"\n self.connected = True\n previous_last_updated_time = self._last_update_time\n self._last_update_time = time.monotonic()\n if self._last_update_time - previous_last_updated_time >= DEBOUNCE_SECONDS:\n self.async_set_updated_data(None)\n return\n if self._debounce_cancel is None:\n self._debounce_cancel = async_call_later(\n self.hass, DEBOUNCE_SECONDS, self._debounced_update_job\n )\n\n @callback\n def _async_handle_disconnect(self) -> None:\n \"\"\"Trigger the callbacks for disconnected.\"\"\"\n self.connected = False\n self.async_update_listeners()\n\n async def async_shutdown(self) -> None:\n \"\"\"Shutdown the coordinator.\"\"\"\n if self._debounce_cancel is not None:\n self._debounce_cancel()\n self._debounce_cancel = None\n await super().async_shutdown()"
},
{
"identifier": "WalkingPadEntity",
"path": "custom_components/king_smith/entity.py",
"snippet": "class WalkingPadEntity(CoordinatorEntity[WalkingPadCoordinator]):\n \"\"\"Walking Pad Entity Base Class.\"\"\"\n\n def __init__(self, name: str, walking_pad_api: WalkingPadApi, coordinator) -> None:\n \"\"\"Initialize the entity.\"\"\"\n super().__init__(coordinator)\n self._coordinator = coordinator\n self._walking_pad_api = walking_pad_api\n self.entity_id = generate_entity_id(ENTITY_ID_FORMAT, self._name, [])\n\n @callback\n def _handle_coordinator_update(self) -> None:\n self.async_write_ha_state()\n\n async def async_update(self) -> None:\n \"\"\"Handle an update.\"\"\"\n await self._walking_pad_api.update_state()\n\n @property\n def device_info(self) -> dict[str, Any]:\n \"\"\"Return the device info.\"\"\"\n prop = {\n \"identifiers\": {(DOMAIN, self.unique_id)},\n \"name\": self._name,\n \"manufacturer\": \"King Smith\",\n }\n\n return prop\n\n @property\n def unique_id(self) -> str:\n \"\"\"Return the unique id of the switch.\"\"\"\n return self._walking_pad_api.mac\n\n @property\n def name(self):\n \"\"\"Name.\"\"\"\n return self._name\n\n @property\n def should_poll(self):\n \"\"\"Should poll.\"\"\"\n return True\n\n @property\n def available(self):\n \"\"\"Available.\"\"\"\n return self._walking_pad_api.connected"
},
{
"identifier": "WalkingPadApi",
"path": "custom_components/king_smith/walking_pad.py",
"snippet": "class WalkingPadApi:\n \"\"\"Walkingpad device.\"\"\"\n\n def __init__(self, name: str, ble_device: BLEDevice) -> None:\n \"\"\"Create a new walking pad api instance.\"\"\"\n self._name = name\n self._ble_device = ble_device\n self._ctrl = Controller()\n self._callbacks = []\n self._status_lock = False\n self._last_cmd_time = time.time()\n\n self._connected = False\n self._moving = False\n self._speed = 0\n self._distance = 0\n\n self._register_controller_callbacks()\n\n def _register_controller_callbacks(self):\n self._ctrl.handler_cur_status = self._on_status_update\n\n def _begin_cmd(self) -> asyncio.Lock:\n self._status_lock = True\n return asyncio.Lock()\n\n async def _end_cmd(self):\n await asyncio.sleep(0.75)\n self._last_cmd_time = time.time()\n self._status_lock = False\n\n def _on_status_update(self, sender, status: WalkingPadCurStatus) -> None:\n \"\"\"Update current state.\"\"\"\n # Don't update if we're still running a command or just did (status from device is outdated at first)\n if (\n self._status_lock\n or time.time() - self._last_cmd_time < STATUS_LOCK_ON_CMD_SECONDS\n ):\n return\n\n self._moving = status.speed > 0\n self._speed = status.speed\n self._distance = status.dist\n\n if len(self._callbacks) > 0:\n for callback in self._callbacks:\n callback(status)\n\n def register_status_callback(self, callback) -> None:\n \"\"\"Register a status callback.\"\"\"\n self._callbacks.append(callback)\n\n @property\n def mac(self):\n \"\"\"Mac address.\"\"\"\n return self._ble_device.address\n\n @property\n def name(self):\n \"\"\"Name.\"\"\"\n return self._name\n\n @property\n def connected(self):\n \"\"\"Connected status.\"\"\"\n return self._connected\n\n @property\n def moving(self):\n \"\"\"Whether or not the device is currently moving.\"\"\"\n return self._moving\n\n @property\n def speed(self):\n \"\"\"The current device speed.\"\"\"\n return self._speed\n\n @property\n def distance(self):\n \"\"\"The current device distance.\"\"\"\n return self._distance\n\n async def connect(self) -> None:\n \"\"\"Connect the device.\"\"\"\n lock = self._begin_cmd()\n async with lock:\n await self._ctrl.run(self._ble_device)\n self._connected = True\n await self._end_cmd()\n\n async def disconnect(self) -> None:\n \"\"\"Disconnect the device.\"\"\"\n lock = self._begin_cmd()\n async with lock:\n await self._ctrl.disconnect()\n self._connected = False\n await self._end_cmd()\n\n async def turn_on(self) -> None:\n \"\"\"Turn on the device.\"\"\"\n lock = self._begin_cmd()\n async with lock:\n await self._ctrl.switch_mode(WalkingPad.MODE_MANUAL)\n await self._end_cmd()\n\n async def turn_off(self) -> None:\n \"\"\"Turn off the device.\"\"\"\n lock = self._begin_cmd()\n async with lock:\n await self._ctrl.switch_mode(WalkingPad.MODE_STANDBY)\n await self._end_cmd()\n\n async def start_belt(self) -> None:\n \"\"\"Start the belt.\"\"\"\n lock = self._begin_cmd()\n async with lock:\n await self._ctrl.start_belt()\n self._moving = True\n await self._end_cmd()\n\n async def stop_belt(self) -> None:\n \"\"\"Stop the belt.\"\"\"\n lock = self._begin_cmd()\n async with lock:\n await self._ctrl.stop_belt()\n self._moving = False\n await self._end_cmd()\n\n async def change_speed(self, speed: int) -> None:\n \"\"\"Change the speed.\"\"\"\n lock = self._begin_cmd()\n async with lock:\n await self._ctrl.change_speed(speed)\n self._speed = speed\n await self._end_cmd()\n\n async def update_state(self) -> None:\n \"\"\"Update device state.\"\"\"\n # Grab the lock so we don't run while another 
command is running\n lock = self._begin_cmd()\n async with lock:\n # Disable status lock so our update triggers a refresh\n self._status_lock = False\n await self._ctrl.ask_stats()\n # Skip callback so we don't reset debouncer"
}
] | from datetime import timedelta
from homeassistant.components.number import (
NumberEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_NAME
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .const import DOMAIN
from .coordinator import WalkingPadCoordinator
from .entity import WalkingPadEntity
from .walking_pad import WalkingPadApi | 2,274 |
"""Walking Pad Number Entities."""
SCAN_INTERVAL = timedelta(seconds=5)
KPH_TO_MPH = 0.621371
MIN_VALUE = 0.0
MAX_VALUE = 4.0
STEP = 0.1
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up entity."""
name = config_entry.data.get(CONF_NAME) or DOMAIN
data = hass.data[DOMAIN][config_entry.entry_id]
entity = WalkingPadSpeed(name, data["device"], data["coordinator"])
async_add_entities([entity])
|
"""Walking Pad Number Entities."""
SCAN_INTERVAL = timedelta(seconds=5)
KPH_TO_MPH = 0.621371
MIN_VALUE = 0.0
MAX_VALUE = 4.0
STEP = 0.1
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up entity."""
name = config_entry.data.get(CONF_NAME) or DOMAIN
data = hass.data[DOMAIN][config_entry.entry_id]
entity = WalkingPadSpeed(name, data["device"], data["coordinator"])
async_add_entities([entity])
| class WalkingPadSpeed(WalkingPadEntity, NumberEntity): | 2 | 2023-11-03 20:45:03+00:00 | 4k |
ndiamant/spice | tests/test_conditional_histogram.py | [
{
"identifier": "select_bins",
"path": "spice/conditional_histogram.py",
"snippet": "def select_bins(y: torch.Tensor, n_bins: int) -> torch.Tensor:\n return unique_quantile(y, n_bins, first_bin_zero=False)"
},
{
"identifier": "discretize",
"path": "spice/conditional_histogram.py",
"snippet": "def discretize(y: torch.Tensor, bins: torch.Tensor) -> torch.Tensor:\n return torch.bucketize(y.clip(max=bins[-1] - 1e-5), boundaries=bins)"
},
{
"identifier": "ConditionalHist",
"path": "spice/conditional_histogram.py",
"snippet": "class ConditionalHist(BaseLightning):\n def __init__(\n self, input_dim: int, hidden_dim: int,\n max_iter: int, bins: torch.Tensor,\n y_min: float,\n lr: float = 1e-3, wd: float = 0,\n ):\n super().__init__()\n self.save_hyperparameters()\n self.module = nn.Sequential(\n MLP(input_dim, hidden=hidden_dim, n_hidden=1, output_dim=bins.shape[0]),\n )\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"log bin probabilities\"\"\"\n return torch.log_softmax(self.module(x), dim=-1)\n\n def log_likelihood(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:\n \"\"\"log likelihood of y | x\"\"\"\n bin_log_probs = self(x)\n return -F.nll_loss(bin_log_probs, y.squeeze(), reduction=\"none\")\n\n def likelihood(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:\n return self.log_likelihood(x, y).exp()\n\n def get_loss(self, batch: list[torch.Tensor], prefix: str) -> torch.Tensor:\n x, y = batch\n loss = -self.log_likelihood(x, y).mean()\n self.epoch_log(f\"{prefix}/loss\", loss)\n return loss\n\n @torch.no_grad()\n def find_prob_threshold(self, x_val: torch.Tensor, y_val: torch.Tensor, alpha: float) -> float:\n \"\"\"\n alpha: mis-classification rate\n anything above threshold in likelihood should be in the prediction set\n https://people.eecs.berkeley.edu/~angelopoulos/publications/downloads/gentle_intro_conformal_dfuq.pdf\n \"\"\"\n n = len(y_val)\n q_level = math.ceil((n + 1) * (1 - alpha)) / n\n cal_scores = 1 - self.likelihood(x_val.to(self.device), y_val.to(self.device))\n q_hat = torch.quantile(cal_scores, q_level, interpolation=\"higher\").item()\n return 1 - q_hat\n\n @torch.no_grad()\n def get_extended_bins(self):\n extended_bins = torch.empty(self.hparams.bins.shape[0] + 1)\n extended_bins[0] = self.hparams.y_min\n extended_bins[1:] = self.hparams.bins\n return extended_bins\n\n @torch.no_grad()\n def get_bin_widths(self) -> torch.Tensor:\n extended_bins = self.get_extended_bins()\n return extended_bins[1:] - extended_bins[:-1]\n\n @torch.no_grad()\n def get_metrics(\n self, x_test: torch.Tensor, y_test: torch.Tensor, threshold: float,\n ) -> dict[str, float]:\n test_prob = self(x_test.to(self.device)).exp().to(y_test.device)\n prediction_set = test_prob > threshold\n covered = (\n (\n F.one_hot(y_test.squeeze(), num_classes=self.hparams.bins.shape[0])\n & prediction_set\n ).any(dim=1)\n ).float()\n bin_sizes = self.get_bin_widths()\n sizes = (bin_sizes.unsqueeze(0) * prediction_set).sum(dim=1)\n return compute_conformal_metrics(x_test, y_test.float() / y_test.max().item(), sizes, covered)\n\n @torch.no_grad()\n def get_hpd_threshold(self, x_val: torch.Tensor, y_val: torch.Tensor, alpha: float) -> float:\n x_val = x_val.to(self.device)\n y_val = y_val.to(self.device)\n all_probs = self(x_val).exp()\n y_probs = all_probs.gather(index=y_val, dim=1)\n bin_sizes = self.get_bin_widths()\n score = integrate_categorical_below_threshold(all_probs.cpu(), y_probs.cpu(), bin_sizes.cpu())\n return -score_to_q_hat(-score, alpha)\n\n @torch.no_grad()\n def get_hpd_metrics(\n self, x_test: torch.Tensor, y_test: torch.Tensor, threshold: float,\n ) -> dict[str, float]:\n # HPD\n probs = self(x_test.to(self.device)).exp().cpu()\n bin_sizes = self.get_bin_widths()\n hpd_cutoffs = find_hpd_cutoffs(probs, bin_sizes.cpu(), threshold)\n bin_mask = probs >= hpd_cutoffs.unsqueeze(1)\n # size\n sizes = (bin_sizes.unsqueeze(0) * bin_mask).sum(dim=1)\n y_onehot = F.one_hot(y_test.squeeze(), num_classes=self.hparams.bins.shape[0])\n covered = (y_onehot & bin_mask).any(dim=1).float()\n # 
coverage\n metrics = compute_conformal_metrics(x_test, y_test.float() / y_test.max().item(), sizes, covered)\n metrics = {\n f\"hpd_{name}\": val for name, val in metrics.items()\n }\n return metrics"
},
{
"identifier": "integrate_categorical_below_threshold",
"path": "spice/conditional_histogram.py",
"snippet": "@torch.no_grad()\ndef integrate_categorical_below_threshold(\n probs: torch.Tensor, thresholds: torch.Tensor,\n bin_sizes: torch.Tensor,\n) -> torch.Tensor:\n assert thresholds.shape == (probs.shape[0], 1)\n assert bin_sizes.shape == (probs.shape[1],)\n integral_below = probs * (probs <= thresholds) * bin_sizes.unsqueeze(0)\n return integral_below.sum(dim=1)"
},
{
"identifier": "find_hpd_cutoffs",
"path": "spice/conditional_histogram.py",
"snippet": "@torch.no_grad()\ndef find_hpd_cutoffs(\n probs: torch.Tensor, bin_sizes: torch.Tensor, target_integral: float,\n) -> torch.Tensor:\n \"\"\"\n our goal is to find T s.t.:\n (probs[probs < T] * bin_sizes[probs < T]).sum() > target_integral\n \"\"\"\n bin_densities = probs * bin_sizes.unsqueeze(0)\n sorted_probs, sort_idx = probs.sort(dim=1)\n sorted_bin_densities = bin_densities.gather(index=sort_idx, dim=1)\n integrated_bin_densities = sorted_bin_densities.cumsum(dim=1)\n first_integral_above_idx = (integrated_bin_densities > target_integral).float().argmax(dim=1, keepdim=True)\n return sorted_probs.gather(index=first_integral_above_idx, dim=1).squeeze()"
}
] | import torch
from spice.conditional_histogram import (
select_bins, discretize, ConditionalHist, integrate_categorical_below_threshold, find_hpd_cutoffs,
) | 1,832 |
def test_select_bins():
y = torch.linspace(0, 1, 100)
bins = select_bins(y, n_bins=5)
binned = discretize(y, bins)
_, counts = torch.unique(binned, return_counts=True)
# make sure bins equally divide the data
assert len(set(counts.numpy())) == 1
def test_discretize():
n_bins = 5
bins = torch.linspace(0, 1, n_bins)
y = torch.tensor([
[0, 0.3, 0.9],
[0.05, 0.31, 0.91],
])
assert (discretize(y, bins) == torch.tensor([
[0, 2, 4],
[1, 2, 4],
])).all()
def test_conditional_hist():
d = 5
bsz = 2
n_bins = 7
|
def test_select_bins():
y = torch.linspace(0, 1, 100)
bins = select_bins(y, n_bins=5)
binned = discretize(y, bins)
_, counts = torch.unique(binned, return_counts=True)
# make sure bins equally divide the data
assert len(set(counts.numpy())) == 1
def test_discretize():
n_bins = 5
bins = torch.linspace(0, 1, n_bins)
y = torch.tensor([
[0, 0.3, 0.9],
[0.05, 0.31, 0.91],
])
assert (discretize(y, bins) == torch.tensor([
[0, 2, 4],
[1, 2, 4],
])).all()
def test_conditional_hist():
d = 5
bsz = 2
n_bins = 7 | m = ConditionalHist(d, 3, max_iter=10, bins=torch.linspace(0, 1, n_bins), y_min=0) | 2 | 2023-11-01 18:04:29+00:00 | 4k |
4darsh-Dev/AyurVichar | home/views.py | [
{
"identifier": "PrakrutiResult",
"path": "home/models.py",
"snippet": "class PrakrutiResult(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n vata_score = models.IntegerField()\n pitta_score = models.IntegerField()\n kapha_score = models.IntegerField()\n prakruti_type = models.CharField(max_length=20)\n\n def __str__(self):\n return f'Prakruti Result for {self.user.username}'"
},
{
"identifier": "PrakrutiForm",
"path": "home/forms.py",
"snippet": "class PrakrutiForm(forms.ModelForm):\n class Meta:\n model = PrakrutiResult\n fields = [] # Exclude fields not needed in the form"
}
] | from django.shortcuts import render, redirect
from django.contrib.auth.models import User
from django.contrib import auth
from django.contrib.auth import authenticate,login, logout
from django.contrib import messages
from .models import PrakrutiResult
from .forms import PrakrutiForm | 2,386 | },
{
'id': 'kapha_question15',
'question': 'In comparison to others do you pass urine & stool in large quantities and do you perspire more? ',
'attribute': 'kapha',
'yes_points': 60,
'no_points': 0,
},
{
'id': 'kapha_question16',
'question': 'Do your friends complain about bad smell being emitted from mouth or body?',
'attribute': 'kapha',
'yes_points': 120,
'no_points': 0,
},
{
'id': 'kapha_question17',
'question': 'Do you think you have intense sexual desire? ',
'attribute': 'kapha',
'yes_points': 0,
'no_points': 120,
},
{
'id': 'vata_question1',
'question': 'Whether your skin remains dry throughout the year in comparison to others?',
'attribute': 'vata',
'yes_points': 30,
'no_points': 0,
},
{
'id': 'vata_question2',
'question': 'Is your body undernourished/emaciated ?',
'attribute': 'vata',
'yes_points': 30,
'no_points': 0,
},
{
'id': 'vata_question3',
    'question': 'Have you got a rough, low, broken or obstructed voice? Does your sleep last less than 6 hours per day, or can your sleep be disturbed easily?',
'attribute': 'vata',
'yes_points': 30,
'no_points': 0,
},
{
'id': 'vata_question4',
'question': 'Do you change walking speed & style from time to time?',
'attribute': 'vata',
'yes_points': 40,
'no_points': 0,
},
{
'id': 'vata_question5',
'question': 'Do you keep changing your food habits from time to time? ',
'attribute': 'vata',
'yes_points': 40,
'no_points': 0,
},
{
'id': 'vata_question6',
    'question': 'Do you keep changing your walking / jogging habit from time to time?',
'attribute': 'vata',
'yes_points': 40,
'no_points': 0,
},
{
'id': 'vata_question7',
'question': 'Do you keep your joints, eyes, eyebrows, jaw, lips. tongue, head, Shoulder, hands & feet frequently moving?',
'attribute': 'vata',
'yes_points': 120,
'no_points': 0,
},
{
'id': 'vata_question8',
'question': 'Are you considered a talkative among your friends?',
'attribute': 'vata',
'yes_points': 60,
'no_points': 0,
},
{
'id': 'vata_question9',
'question': 'Do you have prominent veins & tendons all over the body?',
'attribute': 'vata',
'yes_points': 60,
'no_points': 0,
},
{
'id': 'vata_question10',
'question': 'Do you generally start the work assigned to you immediately?',
'attribute': 'vata',
'yes_points': 15,
'no_points': 0,
},
{
'id': 'vata_question11',
'question': 'Do you get irritated easily? (E.g., when you do not get breakfast on time in your hostel or when the power goes off while watching a cricket match on your TV ?',
'attribute': 'vata',
'yes_points': 15,
'no_points': 0,
},
]
if request.method == 'POST':
|
# added by adarsh
# from django.http import HttpResponse
# Create your views here.
def index(request):
return render(request, "index.html")
def about(request):
return render(request, "about.html")
def services(request):
return render(request, "services.html" )
def contact(request):
return render(request, "contact.html")
def loginUser(request):
if request.method == 'POST':
username = request.POST.get("username")
password = request.POST.get("password")
# Matching user credentials
user = authenticate(username = username, password = password)
if user is not None:
# dj authenticating id and pass
login(request, user)
return redirect('/prakruti')
else:
# No backend authenticated the credentials
error_message = "Invalid username or password! Please try again."
messages.error(request, error_message)
# return render(request, 'login.html')
return render(request, "login.html", {"error_message" : error_message} )
return render(request, "login.html")
def registerUser(request):
if request.method == "POST":
username = request.POST.get("username")
email = request.POST.get("email")
pass1 = request.POST.get("password")
pass2 = request.POST.get("cnf-password")
# Check for username
if len(username) >10:
error_msg1 = "Username must not be more than 10 characters"
messages.error(request, error_msg1)
return render(request, "signup.html", {"error_message" : error_msg1})
# check for alphanumeric
if (not username.isalnum()):
error_msg2 = "Username must be alpha-numeric"
messages.error(request, error_msg2)
return render(request, "register.html", {"error_message": error_msg2})
# Checking for passwords match
if pass1 != pass2:
error_msg3 = "Passwords don't match!"
messages.error(request, error_msg3)
return render(request, "register.html", {"error_message" : error_msg3})
# Checking for already existing users
if (User.objects.filter(username=username).exists()):
error_msg4 = "Username already taken! Please choose different one."
messages.error(request, error_msg4)
return render(request, "register.html", {"error_message": error_msg4})
# Check for duplicated email
if (User.objects.filter(email=email).exists()):
error_msg5 = "Email already taken! Please choose different one."
messages.error(request, error_msg5)
return render(request, "register.html", {"error_message ": error_msg5})
# Creating user
myUser = User.objects.create_user(username, email, pass2)
myUser.save()
success_msg = "Your a/c has been created successfully! "
messages.success(request, success_msg)
return redirect('/prakruti')
return render(request, "register.html")
def logoutUser(request):
auth.logout(request)
return redirect("home")
def profile(request):
return render(request, "profile.html", {"prakruti_type" : prakruti_type})
# def prakruti(request):
# return render(request, "prakruti.html")
def prakriti_request(request):
questions = [
{
'id': 'kapha_question1',
'question': 'Whether your skin remains oily throughout the year in comparison to others?',
'attribute': 'kapha',
'yes_points': 120,
'no_points': 0,
},
{
'id': 'kapha_question2',
            'question': 'Are your body-hairs & skin shiny, even when no oil or moisturizer is used?',
'attribute': 'kapha',
'yes_points': 120,
'no_points': 0,
},
{
'id': 'kapha_question3',
'question': 'Are you considered attractive among your friends?',
'attribute': 'kapha',
'yes_points': 40,
'no_points': 0,
},
{
'id': 'kapha_question4',
'question': 'Do even mild or trivial injuries on your body make you upset?',
'attribute': 'kapha',
'yes_points': 40,
'no_points': 0,
},
{
'id': 'kapha_question5',
'question': 'Among your family members, Is your complexion considered fairer?',
'attribute': 'kapha',
'yes_points': 40,
'no_points': 0,
},
{
'id': 'kapha_question6',
'question': 'Do you think you have intense sexual desire ?',
'attribute': 'kapha',
'yes_points': 120,
'no_points': 0,
},
{
'id': 'kapha_question7',
            'question': 'Have you got well-built muscles?',
'attribute': 'kapha',
'yes_points': 60,
'no_points': 0,
},
{
'id': 'kapha_question8',
'question': 'Do you change your body posture frequently? (You cannot manage yourself in a single posture for longer duration) ?',
'attribute': 'kapha',
'yes_points': 0,
'no_points': 60,
},
{
'id': 'kapha_question9',
            'question': 'Do you have a well-nourished, normally developed body? (You are neither malnourished nor obese)',
'attribute': 'kapha',
'yes_points': 120,
'no_points': 0,
},
{
'id': 'kapha_question10',
'question': 'Are you lazy and disinterested in activities like morning walk/jogging , swimming or any type of outdoor games ?',
'attribute': 'kapha',
'yes_points': 40,
'no_points': 0,
},
{
'id': 'kapha_question11',
            'question': 'Are you slow in consuming food? (Even when all have left the dining hall, you are still consuming the same amount of food.)',
'attribute': 'kapha',
'yes_points': 40,
'no_points': 0,
},
{
'id': 'kapha_question12',
            'question': 'When you go for a morning walk or to college or office, do you walk slowly in comparison to others?',
'attribute': 'kapha',
'yes_points': 40,
'no_points': 0,
},
{
'id': 'kapha_question13',
            'question': 'If you are assigned any work, do you take some extra time to start it?',
'attribute': 'kapha',
'yes_points': 40,
'no_points': 0,
},
{
'id': 'kapha_question14',
'question': "Do you get imitated easily? (For example, when you don't get breaktast on time in your hostel or when the power goes off while watching a cricket match or your favorite movie on television)",
'attribute': 'kapha',
'yes_points': 0,
'no_points': 40,
},
{
'id': 'kapha_question15',
            'question': 'Are you late to develop or suffer from symptoms after exposure to common causative factors? (For example, during seasonal changes, when your friends are easily caught up with flu etc., you are still healthy among them)',
'attribute': 'kapha',
'yes_points': 40,
'no_points': 0,
},
{
'id': 'kapha_question16',
'question': 'Does your gait (style of walking) change with respect to speed or manner frequently?',
'attribute': 'kapha',
'yes_points': 0,
'no_points': 120,
},
{
'id': 'kapha_question17',
            'question': 'Do you feel hungry more frequently and do you consume more food in comparison to others?',
'attribute': 'kapha',
'yes_points': 0,
'no_points': 30,
},
{
'id': 'kapha_question18',
'question': 'Do you tolerate heat easily?',
'attribute': 'kapha',
'yes_points': 30,
'no_points': 0,
},
{
'id': 'kapha_question19',
'question': 'Do you consume liquids in more quantity and frequency in comparison to others? ',
'attribute': 'kapha',
'yes_points': 0,
'no_points': 30,
},
{
'id': 'kapha_question20',
'question': 'Do you perspire less in comparison to others?',
'attribute': 'kapha',
'yes_points': 30,
'no_points': 0,
},
{
'id': 'kapha_question21',
'question': 'Are sounds produced frequently in your joints on movements?',
'attribute': 'kapha',
'yes_points': 0,
'no_points': 120,
},
{
'id': 'kapha_question22',
'question': 'Have you got a good/attractive complexion?',
'attribute': 'kapha',
'yes_points': 60,
'no_points': 0,
},
{
'id': 'kapha_question23',
'question': 'Have you got sweet & pleasant voice?',
'attribute': 'kapha',
'yes_points': 60,
'no_points': 0,
},
{
'id': 'pitta_question1',
'question': 'Are you more comfortable in winter than summers?',
'attribute': 'pitta',
'yes_points': 17,
'no_points': 0,
},
{
'id': 'pitta_question2',
'question': 'Among your family members, is your complexion considered fairer? ',
'attribute': 'pitta',
'yes_points': 17,
'no_points': 0,
},
{
'id': 'pitta_question3',
'question': 'Does your temperature of oral cavity remain towards upper limit of normal range?',
'attribute': 'pitta',
'yes_points': 17,
'no_points': 0,
},
{
'id': 'pitta_question4',
            'question': 'Do you have excessive black moles, freckles etc. on your skin? Or have you noticed new appearance of black moles often on your skin?',
'attribute': 'pitta',
'yes_points': 17,
'no_points': 0,
},
{
'id': 'pitta_question5',
'question': 'Do you feel excessive hunger & thirst in comparison to others?',
'attribute': 'pitta',
'yes_points': 17,
'no_points': 0,
},
{
'id': 'pitta_question6',
'question': 'Have you experienced premature graying, wrinkling of skin & early baldness?',
'attribute': 'kapha',
'yes_points': 17,
'no_points': 0,
},
{
'id': 'kapha_question7',
'question': 'Do you have soft, scanty, brown hair on your face. body & head?',
'attribute': 'kapha',
'yes_points': 17,
'no_points': 0,
},
{
'id': 'kapha_question8',
'question': 'Do you involve yourself in risky & heroic activities requiring physical strength often?',
'attribute': 'kapha',
'yes_points': 24,
'no_points': 0,
},
{
'id': 'kapha_question9',
            'question': 'Do you have the ability to digest large quantities of food easily?',
'attribute': 'kapha',
'yes_points': 24,
'no_points': 0,
},
{
'id': 'kapha_question10',
'question': 'Do you take large quantities of food & drink in comparison to others?',
'attribute': 'kapha',
'yes_points': 24,
'no_points': 0,
},
{
'id': 'kapha_question11',
'question': 'Do you have soft, scanty, brown hair on your face. body & head?',
'attribute': 'kapha',
'yes_points': 24,
'no_points': 0,
},
{
'id': 'kapha_question12',
'question': 'Do you get easily irritated for small/negligible problem in day-to-day life?',
'attribute': 'kapha',
'yes_points': 24,
'no_points': 0,
},
{
'id': 'kapha_question13',
'question': 'Do you consume food more frequently than others? (5-6 times/day)',
'attribute': 'kapha',
'yes_points': 24,
'no_points': 0,
},
{
'id': 'kapha_question14',
'question': ' Do you have soft & loose muscle bulk especially around the joints?',
'attribute': 'kapha',
'yes_points': 60,
'no_points': 0,
},
{
'id': 'kapha_question15',
'question': 'In comparison to others do you pass urine & stool in large quantities and do you perspire more? ',
'attribute': 'kapha',
'yes_points': 60,
'no_points': 0,
},
{
'id': 'kapha_question16',
'question': 'Do your friends complain about bad smell being emitted from mouth or body?',
'attribute': 'kapha',
'yes_points': 120,
'no_points': 0,
},
{
'id': 'kapha_question17',
'question': 'Do you think you have intense sexual desire? ',
'attribute': 'kapha',
'yes_points': 0,
'no_points': 120,
},
{
'id': 'vata_question1',
'question': 'Whether your skin remains dry throughout the year in comparison to others?',
'attribute': 'vata',
'yes_points': 30,
'no_points': 0,
},
{
'id': 'vata_question2',
'question': 'Is your body undernourished/emaciated ?',
'attribute': 'vata',
'yes_points': 30,
'no_points': 0,
},
{
'id': 'vata_question3',
        'question': 'Have you got a rough, low, broken or obstructed voice? Does your sleep last less than 6 hours per day, or can your sleep be disturbed easily?',
'attribute': 'vata',
'yes_points': 30,
'no_points': 0,
},
{
'id': 'vata_question4',
'question': 'Do you change walking speed & style from time to time?',
'attribute': 'vata',
'yes_points': 40,
'no_points': 0,
},
{
'id': 'vata_question5',
'question': 'Do you keep changing your food habits from time to time? ',
'attribute': 'vata',
'yes_points': 40,
'no_points': 0,
},
{
'id': 'vata_question6',
        'question': 'Do you keep changing your walking / jogging habit from time to time?',
'attribute': 'vata',
'yes_points': 40,
'no_points': 0,
},
{
'id': 'vata_question7',
'question': 'Do you keep your joints, eyes, eyebrows, jaw, lips. tongue, head, Shoulder, hands & feet frequently moving?',
'attribute': 'vata',
'yes_points': 120,
'no_points': 0,
},
{
'id': 'vata_question8',
'question': 'Are you considered a talkative among your friends?',
'attribute': 'vata',
'yes_points': 60,
'no_points': 0,
},
{
'id': 'vata_question9',
'question': 'Do you have prominent veins & tendons all over the body?',
'attribute': 'vata',
'yes_points': 60,
'no_points': 0,
},
{
'id': 'vata_question10',
'question': 'Do you generally start the work assigned to you immediately?',
'attribute': 'vata',
'yes_points': 15,
'no_points': 0,
},
{
'id': 'vata_question11',
'question': 'Do you get irritated easily? (E.g., when you do not get breakfast on time in your hostel or when the power goes off while watching a cricket match on your TV ?',
'attribute': 'vata',
'yes_points': 15,
'no_points': 0,
},
]
if request.method == 'POST': | form = PrakrutiForm(request.POST) | 1 | 2023-11-04 10:16:05+00:00 | 4k |
nik-sm/com-hom-emg | scripts/plots.py | [
{
"identifier": "CANONICAL_COORDS_STR",
"path": "com_hom_emg/scoring.py",
"snippet": "CANONICAL_COORDS_STR = []"
},
{
"identifier": "PROJECT_PATH",
"path": "com_hom_emg/utils.py",
"snippet": "PROJECT_PATH = Path(__file__).parent.parent"
}
] | import argparse
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
from pathlib import Path
from typing import List, Optional
from loguru import logger
from com_hom_emg.scoring import CANONICAL_COORDS_STR
from com_hom_emg.utils import PROJECT_PATH | 3,421 | # We have ~85K fake doubles total, each has 1 median.
# Then we have 50 independent runs. Here we average over all 50*85K items
output_dir = figs_dir / f"{which_expt}.similarity_matrices"
output_dir.mkdir(exist_ok=True)
print(f"Table describing feature similarity, for: {which_expt}")
print()
rows = []
print("group_name, real_to_real, fake_to_fake, real_to_fake, non_matching")
for group_name, group in df.groupby("group_name"):
similarity_matrices = np.stack(group["similarity_matrix"])
scalar_sim_values = [summarize_similarity_matrix(m) for m in similarity_matrices]
real_reals = [s[0] for s in scalar_sim_values]
fake_fakes = [s[1] for s in scalar_sim_values]
real_fakes = [s[2] for s in scalar_sim_values]
nonmatches = [s[3] for s in scalar_sim_values]
real_to_real = f"{round(np.mean(real_reals), 2)} ± {round(np.std(real_reals), 2)}"
fake_to_fake = f"{round(np.mean(fake_fakes), 2)} ± {round(np.std(fake_fakes), 2)}"
real_to_fake = f"{round(np.mean(real_fakes), 2)} ± {round(np.std(real_fakes), 2)}"
nonmatch = f"{round(np.mean(nonmatches), 2)} ± {round(np.std(nonmatches), 2)}"
string = ", ".join([str(group_name), real_to_real, fake_to_fake, real_to_fake, nonmatch])
print(string)
rows.append(
{
"group_name": str(group_name),
"real_to_real": real_to_real,
"fake_to_fake": fake_to_fake,
"real_to_fake": real_to_fake,
"non_matching": nonmatch,
}
)
print()
# Save these summary statistics to a CSV so we can emit a latex table later
table_df = pd.DataFrame(rows)
table_df.to_csv(figs_dir / f"{title}.similarity_values.{gamma}.csv", index=False)
print(f"Figures with average similarity heatmap for each group, for: {which_expt}")
for group_name, group in df.groupby("group_name"):
similarity_matrices = np.stack(group["similarity_matrix"])
ticktext = group["ticktext"].iloc[0]
avg_similarity_matrix = np.nanmean(similarity_matrices, 0)
fig = make_similarity_heatmap_plot(avg_similarity_matrix, ticktext)
filename = f"{which_expt}.{group_name.replace('<br>', '__')}.similarity_matrix.png"
fig.write_image(output_dir / filename, scale=2)
def main(figs_dir: Path, which_test: str, which_expt: str, suffix: str, gamma: Optional[float]):
logger.info(f"Saving figures to: {figs_dir}")
if suffix is None:
title = f"{which_test}.{which_expt}"
else:
title = f"{which_test}.{which_expt}.{suffix}"
logger.info(f"Loading data for: {title}")
df = pd.read_pickle(figs_dir / f"{title}.pkl")
# Add group name for convenient grouping later
logger.info("NOTE - not including encoder arch in group names (only used basic)")
df["group_name"] = df["clf_arch"] + "<br>" + df["feature_combine_type"] + "<br>" + df["loss_type"]
if which_expt == "ablation":
df["group_name"] = (
df["group_name"].astype(str)
+ "<br>("
+ df["linearity_loss_coeff"].astype(str)
+ ","
+ df["real_CE_loss_coeff"].astype(str)
+ ","
+ df["fake_CE_loss_coeff"].astype(str)
+ ","
+ df["data_noise_SNR"].astype(str)
+ ")"
)
# Unify column naming from fine-tuning and fresh-classifier experiments
col_rename_map = {}
for subset in ["single", "double", "overall"]:
for scenario in ["augmented", "lower_bound", "upper_bound", "zero_shot"]:
col_rename_map[f"test_{scenario}/{subset}_bal_acc"] = f"{scenario}.{subset}_bal_acc"
df = df.rename(columns=col_rename_map)
# Make plots
make_confusion_matrices(df, figs_dir, which_test, title)
# make_boxplots(df, figs_dir, which_test, title)
# NOTE - this part will get re-run a few times, but it is fine
# (Because it doesn't depend on fine-tune vs fresh-classifier)
# As long as this script is run once for "regular" and once for "ablation", it is enough
df = pd.read_pickle(figs_dir / f"feature_similarity.{which_expt}.{gamma}.pkl")
df["group_name"] = df["clf_arch"] + "<br>" + df["feature_combine_type"] + "<br>" + df["loss_type"]
if which_expt == "ablation":
df["group_name"] = (
df["group_name"].astype(str)
+ "<br>("
+ df["linearity_loss_coeff"].astype(str)
+ ","
+ df["real_CE_loss_coeff"].astype(str)
+ ","
+ df["fake_CE_loss_coeff"].astype(str)
+ ","
+ df["data_noise_SNR"].astype(str)
+ ")"
)
make_feature_similarity_plots(df, figs_dir, which_expt, title, gamma)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--figs_dir", default="figures")
parser.add_argument("--which_test", required=True, choices=["finetune", "fresh-classifier"])
parser.add_argument("--which_expt", required=True, choices=["regular", "ablation"])
parser.add_argument("--suffix", default=None) # e.g. "lda.None" or "logr.1000"
parser.add_argument("--gamma", default=None, type=float)
args = parser.parse_args()
if args.which_test == "fresh-classifier":
if args.suffix is None:
raise ValueError("Must specify suffix for fresh-classifier test")
|
layout_template = "simple_white"
colors = px.colors.qualitative.Plotly
def plot_confusion_matrix(data: np.ndarray, title: Optional[str] = None):
def make_text(cm):
text = []
for v in cm.flatten():
text.append(f"{round(v, 2)}")
return np.array(text).reshape(cm.shape)
text = make_text(data)
# Eliminate the final row, which corresponds to actual label = "None, None"
data = data[:-1]
text = text[:-1]
ticktext = CANONICAL_COORDS_STR
fig = go.Figure()
showscale = False
margin = dict(l=20, r=0, t=0, b=20)
if title is not None:
fig.update_layout(title=title)
margin = dict(l=20, r=0, t=20, b=20)
fig.add_trace(
go.Heatmap(
z=data,
text=text,
texttemplate="%{text}",
zmin=0,
zmax=1,
colorscale="Blues",
showscale=showscale,
textfont_size=15,
)
)
fig.update_layout(
margin=margin,
xaxis=dict(
title="Predicted",
tickangle=-45,
tickmode="array",
ticktext=ticktext,
tickvals=list(range(len(ticktext))),
constrain="domain",
),
yaxis=dict(
title="Actual",
tickmode="array",
ticktext=ticktext,
tickvals=list(range(len(ticktext))),
autorange="reversed",
scaleanchor="x",
scaleratio=1,
constrain="domain",
),
font_size=15,
)
full_fig = fig.full_figure_for_development(warn=False)
x_lo, x_hi = full_fig.layout.xaxis.range
y_hi, y_lo = full_fig.layout.yaxis.range # NOTE - y-axis range is reversed for heatmap
box_size = (y_hi - y_lo) / data.shape[0]
# Add horizontal line between single and combo classes
n = 8 # 8 single classes above the line
x = [x_lo, x_hi]
y_value = y_hi - n * box_size
y = [y_value, y_value]
y = [y_hi - y_ + y_lo for y_ in y] # reverse coords
fig.add_trace(go.Scatter(x=x, y=y, mode="lines", line=dict(color="black", dash="dot", width=4), showlegend=False))
# Add vertical line between single and combo classes
n = 8 # 8 single classes left of the line
x_value = x_lo + n * box_size
x = [x_value, x_value]
y = [y_hi, y_lo]
fig.add_trace(go.Scatter(x=x, y=y, mode="lines", line=dict(color="black", dash="dot", width=4), showlegend=False))
    # Add vertical line between combo classes and 'None' class
n = 24 # 24 classes left of the line
x_value = x_lo + n * box_size
x = [x_value, x_value]
y = [y_hi, y_lo]
fig.add_trace(go.Scatter(x=x, y=y, mode="lines", line=dict(color="black", dash="dot", width=4), showlegend=False))
# Need to re-set the axis ranges after adding lines
fig.update_layout(xaxis_range=[x_lo, x_hi], yaxis_range=[y_hi, y_lo], yaxis_autorange=False)
return fig
def make_boxplots(df: pd.DataFrame, figs_dir, which_test: str, title: str):
# Make 3 large figures: one on single acc, one on double acc, and one on overall acc
# In each figure:
# The figure contains grouped boxplots.
# Each boxplot group is a particular setting of encoder, classifier, feature combine type, loss type
# Within each boxplot group, we have a box for "augmented", "upper", "lower". In the case of fine-tuning,
# we also have a box for "zero-shot"
# First, unify column naming. In fine-tune, names are "test_augmented/overall_bal_acc", etc
# In fresh-classifier, names are "augmented.overall_bal_acc", etc
# Stick to the latter.
output_dir = figs_dir / f"{title}.boxplots"
output_dir.mkdir(exist_ok=True)
for subset in ["single", "double", "overall"]:
fig = go.Figure()
names_cols = [
("augmented", f"augmented.{subset}_bal_acc"),
("upper_bound", f"upper_bound.{subset}_bal_acc"),
("lower_bound", f"lower_bound.{subset}_bal_acc"),
]
if which_test == "finetune":
names_cols.append(("zero_shot", f"zero_shot.{subset}_bal_acc"))
for i, (name, col) in enumerate(names_cols):
data = df[col]
x = df["group_name"]
kw = dict(jitter=0.5, marker_size=3, marker_color=colors[i])
trace = go.Box(y=data, x=x, name=name, **kw)
fig.add_trace(trace)
fig.update_layout(
boxmode="group",
template=layout_template,
yaxis=dict(range=[0, 1], title="Balanced Test Acc"),
xaxis_title="Classifier // Feature Combine Type // Loss Type",
legend=dict(orientation="h", yanchor="bottom", y=1.02, xanchor="right", x=1),
# boxgap=0.25, # Space between groups
# boxgroupgap=0, # Space between boxes in a group
margin=dict(l=0, r=0, t=0, b=0),
font_size=15,
)
fig.write_image(output_dir / f"{title}.{subset}.png", width=1200, height=600, scale=2)
def make_confusion_matrices(df: pd.DataFrame, figs_dir, which_test: str, title: str):
# Group by method details, and then average across folds and seeds
# Create a single confusion matrix using plot_confusion_matrix
# Save to file
output_dir = figs_dir / f"{title}.confusion_matrices"
output_dir.mkdir(exist_ok=True)
names = ["augmented", "upper_bound", "lower_bound"]
if which_test == "finetune":
names.append("zero_shot")
for group_name, group in df.groupby("group_name"):
for name in names:
col = f"{name}.confusion_matrix"
this_group_conf_mats = np.stack(group[col])
avg_conf_mat = np.nanmean(this_group_conf_mats, 0)
fig = plot_confusion_matrix(avg_conf_mat) # , title=f"{group_name} // {name}")
filename = f"{title}.{group_name.replace('<br>', '__')}.{name}.conf_mat.png"
fig.write_image(output_dir / filename, width=1000, height=1000, scale=2)
def plot_heatmap(data: np.ndarray, ticktext: List[str]):
def make_text(cm):
text = []
for v in cm.flatten():
text.append(f"{round(v, 2)}")
return np.array(text).reshape(cm.shape)
# Get lower triangular
data = np.copy(data)
data[np.triu_indices(data.shape[0], k=1)] = None
text = make_text(data)
fig = go.Figure()
fig.update_layout(
# margin=margin,
template=layout_template,
xaxis=dict(
tickangle=-45,
tickmode="array",
ticktext=ticktext,
tickvals=list(range(len(ticktext))),
constrain="domain",
),
yaxis=dict(
tickmode="array",
ticktext=ticktext,
tickvals=list(range(len(ticktext))),
autorange="reversed",
scaleanchor="x",
scaleratio=1,
constrain="domain",
),
width=1000,
height=1000,
margin=dict(l=0, r=0, t=0, b=0),
font_size=15,
)
fig.add_trace(
go.Heatmap(z=data, text=text, texttemplate="%{text}", zmin=0, zmax=1, colorscale="Greens", showscale=False)
)
return fig
def make_similarity_heatmap_plot(similarity_matrix, ticktext):
fig = plot_heatmap(similarity_matrix, ticktext)
full_fig = fig.full_figure_for_development(warn=False)
x_lo, x_hi = full_fig.layout.xaxis.range
y_hi, y_lo = full_fig.layout.yaxis.range # NOTE - y-axis range is reversed for heatmap
n_classes = len(ticktext)
box_size = (y_hi - y_lo) / n_classes
# Add a line after the single gesture classes
def add_hline(n):
        # Line from the y-axis, traveling horizontally, until it hits the diagonal
x = [x_lo, x_lo + n * box_size]
# compute y-values in the normal way
y = [y_hi - n * box_size, y_hi - n * box_size]
# Then adjust y values to account for reversed axis
y = [y_hi - y_ + y_lo for y_ in y]
fig.add_trace(
go.Scatter(x=x, y=y, mode="lines", line=dict(color="black", dash="dot", width=4), showlegend=False)
)
def add_vline(n):
# Line from the diagonal, traveling vertically down, until it hits x-axis
# after moving over n boxes, the y value of the diagonal is
x = [x_lo + n * box_size, x_lo + n * box_size]
# compute y-values in the normal way
y = [y_hi - n * box_size, y_lo]
# Then adjust y values to account for reversed axis
y = [y_hi - y_ + y_lo for y_ in y]
fig.add_trace(
go.Scatter(x=x, y=y, mode="lines", line=dict(color="black", dash="dot", width=4), showlegend=False)
)
# Add lines for easier interpretation
# p fig.full_figure_for_development(warn=False).layout.yaxis.range
add_hline(16)
add_vline(16)
# Need to re-set the axis ranges after adding lines
fig.update_layout(xaxis_range=[x_lo, x_hi], yaxis_range=[y_hi, y_lo], yaxis_autorange=False)
return fig
def summarize_similarity_matrix(similarity_matrix: np.ndarray):
# Extract 4 numbers of interest:
# - avg of first 16 elements of diag -> describes real-real similarity
# - avg of final 16 elements of diag -> fake-fake sim
# - avg of 16th subdiagonal -> real-fake sim
# - avg of all other below-diagonal elements -> non-matching sim
real_real_sim = np.nanmean(np.diag(similarity_matrix)[:16])
fake_fake_sim = np.nanmean(np.diag(similarity_matrix)[16:])
real_fake_sim = np.nanmean(np.diag(similarity_matrix, k=-16))
# We want to get the avg of below-diagonal entries, except for a certain subdiagonal.
# Add them all up, subtract that subdiagonal, and divide by number of items
tril = similarity_matrix[np.triu_indices(similarity_matrix.shape[0], k=1)]
stripe = np.diag(similarity_matrix, k=-16)
nonmatch_sim = (np.nansum(tril) - np.nansum(stripe)) / (len(tril) - len(stripe))
return real_real_sim, fake_fake_sim, real_fake_sim, nonmatch_sim
def make_feature_similarity_plots(df, figs_dir, which_expt, title: str, gamma: Optional[float]):
# NOTE - For each fake double, we computed the median distance to matching real doubles
# This gives us the median of ~40 distances for each point.
# We have ~85K fake doubles total, each has 1 median.
# Then we have 50 independent runs. Here we average over all 50*85K items
output_dir = figs_dir / f"{which_expt}.similarity_matrices"
output_dir.mkdir(exist_ok=True)
print(f"Table describing feature similarity, for: {which_expt}")
print()
rows = []
print("group_name, real_to_real, fake_to_fake, real_to_fake, non_matching")
for group_name, group in df.groupby("group_name"):
similarity_matrices = np.stack(group["similarity_matrix"])
scalar_sim_values = [summarize_similarity_matrix(m) for m in similarity_matrices]
real_reals = [s[0] for s in scalar_sim_values]
fake_fakes = [s[1] for s in scalar_sim_values]
real_fakes = [s[2] for s in scalar_sim_values]
nonmatches = [s[3] for s in scalar_sim_values]
real_to_real = f"{round(np.mean(real_reals), 2)} ± {round(np.std(real_reals), 2)}"
fake_to_fake = f"{round(np.mean(fake_fakes), 2)} ± {round(np.std(fake_fakes), 2)}"
real_to_fake = f"{round(np.mean(real_fakes), 2)} ± {round(np.std(real_fakes), 2)}"
nonmatch = f"{round(np.mean(nonmatches), 2)} ± {round(np.std(nonmatches), 2)}"
string = ", ".join([str(group_name), real_to_real, fake_to_fake, real_to_fake, nonmatch])
print(string)
rows.append(
{
"group_name": str(group_name),
"real_to_real": real_to_real,
"fake_to_fake": fake_to_fake,
"real_to_fake": real_to_fake,
"non_matching": nonmatch,
}
)
print()
# Save these summary statistics to a CSV so we can emit a latex table later
table_df = pd.DataFrame(rows)
table_df.to_csv(figs_dir / f"{title}.similarity_values.{gamma}.csv", index=False)
print(f"Figures with average similarity heatmap for each group, for: {which_expt}")
for group_name, group in df.groupby("group_name"):
similarity_matrices = np.stack(group["similarity_matrix"])
ticktext = group["ticktext"].iloc[0]
avg_similarity_matrix = np.nanmean(similarity_matrices, 0)
fig = make_similarity_heatmap_plot(avg_similarity_matrix, ticktext)
filename = f"{which_expt}.{group_name.replace('<br>', '__')}.similarity_matrix.png"
fig.write_image(output_dir / filename, scale=2)
def main(figs_dir: Path, which_test: str, which_expt: str, suffix: str, gamma: Optional[float]):
logger.info(f"Saving figures to: {figs_dir}")
if suffix is None:
title = f"{which_test}.{which_expt}"
else:
title = f"{which_test}.{which_expt}.{suffix}"
logger.info(f"Loading data for: {title}")
df = pd.read_pickle(figs_dir / f"{title}.pkl")
# Add group name for convenient grouping later
logger.info("NOTE - not including encoder arch in group names (only used basic)")
df["group_name"] = df["clf_arch"] + "<br>" + df["feature_combine_type"] + "<br>" + df["loss_type"]
if which_expt == "ablation":
df["group_name"] = (
df["group_name"].astype(str)
+ "<br>("
+ df["linearity_loss_coeff"].astype(str)
+ ","
+ df["real_CE_loss_coeff"].astype(str)
+ ","
+ df["fake_CE_loss_coeff"].astype(str)
+ ","
+ df["data_noise_SNR"].astype(str)
+ ")"
)
# Unify column naming from fine-tuning and fresh-classifier experiments
col_rename_map = {}
for subset in ["single", "double", "overall"]:
for scenario in ["augmented", "lower_bound", "upper_bound", "zero_shot"]:
col_rename_map[f"test_{scenario}/{subset}_bal_acc"] = f"{scenario}.{subset}_bal_acc"
df = df.rename(columns=col_rename_map)
# Make plots
make_confusion_matrices(df, figs_dir, which_test, title)
# make_boxplots(df, figs_dir, which_test, title)
# NOTE - this part will get re-run a few times, but it is fine
# (Because it doesn't depend on fine-tune vs fresh-classifier)
# As long as this script is run once for "regular" and once for "ablation", it is enough
df = pd.read_pickle(figs_dir / f"feature_similarity.{which_expt}.{gamma}.pkl")
df["group_name"] = df["clf_arch"] + "<br>" + df["feature_combine_type"] + "<br>" + df["loss_type"]
if which_expt == "ablation":
df["group_name"] = (
df["group_name"].astype(str)
+ "<br>("
+ df["linearity_loss_coeff"].astype(str)
+ ","
+ df["real_CE_loss_coeff"].astype(str)
+ ","
+ df["fake_CE_loss_coeff"].astype(str)
+ ","
+ df["data_noise_SNR"].astype(str)
+ ")"
)
make_feature_similarity_plots(df, figs_dir, which_expt, title, gamma)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--figs_dir", default="figures")
parser.add_argument("--which_test", required=True, choices=["finetune", "fresh-classifier"])
parser.add_argument("--which_expt", required=True, choices=["regular", "ablation"])
parser.add_argument("--suffix", default=None) # e.g. "lda.None" or "logr.1000"
parser.add_argument("--gamma", default=None, type=float)
args = parser.parse_args()
if args.which_test == "fresh-classifier":
if args.suffix is None:
raise ValueError("Must specify suffix for fresh-classifier test") | figs_dir = PROJECT_PATH / args.figs_dir | 1 | 2023-11-01 21:12:05+00:00 | 4k |
alengwenus/ha-sma-ev-charger | custom_components/smaev/sensor.py | [
{
"identifier": "DOMAIN",
"path": "custom_components/smaev/const.py",
"snippet": "DOMAIN = \"smaev\""
},
{
"identifier": "SMAEV_COORDINATOR",
"path": "custom_components/smaev/const.py",
"snippet": "SMAEV_COORDINATOR = \"coordinator\""
},
{
"identifier": "SMAEV_DEVICE_INFO",
"path": "custom_components/smaev/const.py",
"snippet": "SMAEV_DEVICE_INFO = \"device_info\""
},
{
"identifier": "SMAEV_MEASUREMENT",
"path": "custom_components/smaev/const.py",
"snippet": "SMAEV_MEASUREMENT = \"measurement\""
},
{
"identifier": "SMAEV_PARAMETER",
"path": "custom_components/smaev/const.py",
"snippet": "SMAEV_PARAMETER = \"parameter\""
},
{
"identifier": "SMAEV_VALUE",
"path": "custom_components/smaev/const.py",
"snippet": "SMAEV_VALUE = \"value\""
}
] | from dataclasses import dataclass, field
from typing import TYPE_CHECKING
from pysmaev.const import SmaEvChargerMeasurements
from pysmaev.helpers import get_measurements_channel, get_parameters_channel
from homeassistant.components.sensor import (
SensorDeviceClass,
SensorEntity,
SensorEntityDescription,
SensorStateClass,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
EntityCategory,
UnitOfElectricCurrent,
UnitOfElectricPotential,
UnitOfEnergy,
UnitOfFrequency,
UnitOfPower,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.device_registry import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
)
from .const import (
DOMAIN,
SMAEV_COORDINATOR,
SMAEV_DEVICE_INFO,
SMAEV_MEASUREMENT,
SMAEV_PARAMETER,
SMAEV_VALUE,
)
import logging | 1,706 | key=f"grid_current_phase_l{load}",
translation_key=f"grid_current_phase_l{load}",
type=SMAEV_MEASUREMENT,
channel=f"Measurement.GridMs.A.phs{phase}",
native_unit_of_measurement=UnitOfElectricCurrent.AMPERE,
state_class=SensorStateClass.MEASUREMENT,
device_class=SensorDeviceClass.CURRENT,
entity_registry_enabled_default=False,
)
for phase, load in (("A", 1), ("B", 2), ("C", 3))
),
*(
SmaEvChargerSensorEntityDescription(
key=f"grid_voltage_phase_l{load}",
translation_key=f"grid_voltage_phase_l{load}",
type=SMAEV_MEASUREMENT,
channel=f"Measurement.GridMs.PhV.phs{phase}",
native_unit_of_measurement=UnitOfElectricPotential.VOLT,
state_class=SensorStateClass.MEASUREMENT,
device_class=SensorDeviceClass.VOLTAGE,
entity_registry_enabled_default=False,
)
for phase, load in (("A", 1), ("B", 2), ("C", 3))
),
SmaEvChargerSensorEntityDescription(
key="grid_frequency",
translation_key="grid_frequency",
type=SMAEV_MEASUREMENT,
channel="Measurement.GridMs.Hz",
native_unit_of_measurement=UnitOfFrequency.HERTZ,
state_class=SensorStateClass.MEASUREMENT,
device_class=SensorDeviceClass.FREQUENCY,
entity_registry_enabled_default=False,
),
SmaEvChargerSensorEntityDescription(
key="charging_station_power",
translation_key="charging_station_power",
type=SMAEV_MEASUREMENT,
channel="Measurement.Metering.GridMs.TotWIn.ChaSta",
native_unit_of_measurement=UnitOfPower.WATT,
state_class=SensorStateClass.MEASUREMENT,
device_class=SensorDeviceClass.POWER,
entity_registry_enabled_default=True,
),
SmaEvChargerSensorEntityDescription(
key="charging_station_meter_reading",
translation_key="charging_station_meter_reading",
type=SMAEV_MEASUREMENT,
channel="Measurement.Metering.GridMs.TotWhIn.ChaSta",
native_unit_of_measurement=UnitOfEnergy.WATT_HOUR,
state_class=SensorStateClass.TOTAL_INCREASING,
device_class=SensorDeviceClass.ENERGY,
entity_registry_enabled_default=True,
),
SmaEvChargerSensorEntityDescription(
key="charging_session_status",
translation_key="charging_session_status",
type=SMAEV_MEASUREMENT,
channel="Measurement.Operation.EVeh.ChaStt",
value_mapping={
SmaEvChargerMeasurements.NOT_CONNECTED: "not_connected",
SmaEvChargerMeasurements.SLEEP_MODE: "sleep_mode",
SmaEvChargerMeasurements.ACTIVE_MODE: "active_mode",
SmaEvChargerMeasurements.STATION_LOCKED: "station_locked",
},
entity_registry_enabled_default=True,
),
SmaEvChargerSensorEntityDescription(
key="connected_vehicle_status",
translation_key="connected_vehicle_status",
type=SMAEV_MEASUREMENT,
channel="Measurement.Operation.EVeh.Health",
value_mapping={
SmaEvChargerMeasurements.OK: "ok",
SmaEvChargerMeasurements.WARNING: "warning",
SmaEvChargerMeasurements.ALARM: "alarm",
SmaEvChargerMeasurements.OFF: "off",
},
entity_registry_enabled_default=True,
),
SmaEvChargerSensorEntityDescription(
key="charging_station_status",
translation_key="charging_station_status",
type=SMAEV_MEASUREMENT,
channel="Measurement.Operation.Health",
value_mapping={
SmaEvChargerMeasurements.OK: "ok",
SmaEvChargerMeasurements.WARNING: "warning",
SmaEvChargerMeasurements.ALARM: "alarm",
SmaEvChargerMeasurements.OFF: "off",
},
entity_registry_enabled_default=True,
),
SmaEvChargerSensorEntityDescription(
key="mac_address",
translation_key="mac_address",
type=SMAEV_PARAMETER,
channel="Parameter.Nameplate.MacId",
entity_registry_enabled_default=True,
entity_category=EntityCategory.DIAGNOSTIC,
),
SmaEvChargerSensorEntityDescription(
key="wifi_mac_address",
translation_key="wifi_mac_address",
type=SMAEV_PARAMETER,
channel="Parameter.Nameplate.WlMacId",
entity_registry_enabled_default=True,
entity_category=EntityCategory.DIAGNOSTIC,
),
)
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up SMA EV Charger sensors."""
data = hass.data[DOMAIN][config_entry.entry_id]
| """Sensor platform for SMA EV Charger integration."""
from __future__ import annotations
_LOGGER = logging.getLogger(__name__)
@dataclass
class SmaEvChargerSensorEntityDescription(SensorEntityDescription):
"""Describes SMA EV Charger sensor entities."""
type: str = ""
channel: str = ""
value_mapping: dict = field(default_factory=dict)
SENSOR_DESCRIPTIONS: tuple[SmaEvChargerSensorEntityDescription, ...] = (
SmaEvChargerSensorEntityDescription(
key="charging_session_energy",
translation_key="charging_session_energy",
type=SMAEV_MEASUREMENT,
channel="Measurement.ChaSess.WhIn",
native_unit_of_measurement=UnitOfEnergy.WATT_HOUR,
state_class=SensorStateClass.TOTAL_INCREASING,
device_class=SensorDeviceClass.ENERGY,
entity_registry_enabled_default=True,
),
SmaEvChargerSensorEntityDescription(
key="position_of_rotary_switch",
translation_key="position_of_rotary_switch",
type=SMAEV_MEASUREMENT,
channel="Measurement.Chrg.ModSw",
value_mapping={
SmaEvChargerMeasurements.SMART_CHARGING: "smart_charging",
SmaEvChargerMeasurements.BOOST_CHARGING: "boost_charging",
},
entity_registry_enabled_default=True,
),
*(
SmaEvChargerSensorEntityDescription(
key=f"grid_current_phase_l{load}",
translation_key=f"grid_current_phase_l{load}",
type=SMAEV_MEASUREMENT,
channel=f"Measurement.GridMs.A.phs{phase}",
native_unit_of_measurement=UnitOfElectricCurrent.AMPERE,
state_class=SensorStateClass.MEASUREMENT,
device_class=SensorDeviceClass.CURRENT,
entity_registry_enabled_default=False,
)
for phase, load in (("A", 1), ("B", 2), ("C", 3))
),
*(
SmaEvChargerSensorEntityDescription(
key=f"grid_voltage_phase_l{load}",
translation_key=f"grid_voltage_phase_l{load}",
type=SMAEV_MEASUREMENT,
channel=f"Measurement.GridMs.PhV.phs{phase}",
native_unit_of_measurement=UnitOfElectricPotential.VOLT,
state_class=SensorStateClass.MEASUREMENT,
device_class=SensorDeviceClass.VOLTAGE,
entity_registry_enabled_default=False,
)
for phase, load in (("A", 1), ("B", 2), ("C", 3))
),
SmaEvChargerSensorEntityDescription(
key="grid_frequency",
translation_key="grid_frequency",
type=SMAEV_MEASUREMENT,
channel="Measurement.GridMs.Hz",
native_unit_of_measurement=UnitOfFrequency.HERTZ,
state_class=SensorStateClass.MEASUREMENT,
device_class=SensorDeviceClass.FREQUENCY,
entity_registry_enabled_default=False,
),
SmaEvChargerSensorEntityDescription(
key="charging_station_power",
translation_key="charging_station_power",
type=SMAEV_MEASUREMENT,
channel="Measurement.Metering.GridMs.TotWIn.ChaSta",
native_unit_of_measurement=UnitOfPower.WATT,
state_class=SensorStateClass.MEASUREMENT,
device_class=SensorDeviceClass.POWER,
entity_registry_enabled_default=True,
),
SmaEvChargerSensorEntityDescription(
key="charging_station_meter_reading",
translation_key="charging_station_meter_reading",
type=SMAEV_MEASUREMENT,
channel="Measurement.Metering.GridMs.TotWhIn.ChaSta",
native_unit_of_measurement=UnitOfEnergy.WATT_HOUR,
state_class=SensorStateClass.TOTAL_INCREASING,
device_class=SensorDeviceClass.ENERGY,
entity_registry_enabled_default=True,
),
SmaEvChargerSensorEntityDescription(
key="charging_session_status",
translation_key="charging_session_status",
type=SMAEV_MEASUREMENT,
channel="Measurement.Operation.EVeh.ChaStt",
value_mapping={
SmaEvChargerMeasurements.NOT_CONNECTED: "not_connected",
SmaEvChargerMeasurements.SLEEP_MODE: "sleep_mode",
SmaEvChargerMeasurements.ACTIVE_MODE: "active_mode",
SmaEvChargerMeasurements.STATION_LOCKED: "station_locked",
},
entity_registry_enabled_default=True,
),
SmaEvChargerSensorEntityDescription(
key="connected_vehicle_status",
translation_key="connected_vehicle_status",
type=SMAEV_MEASUREMENT,
channel="Measurement.Operation.EVeh.Health",
value_mapping={
SmaEvChargerMeasurements.OK: "ok",
SmaEvChargerMeasurements.WARNING: "warning",
SmaEvChargerMeasurements.ALARM: "alarm",
SmaEvChargerMeasurements.OFF: "off",
},
entity_registry_enabled_default=True,
),
SmaEvChargerSensorEntityDescription(
key="charging_station_status",
translation_key="charging_station_status",
type=SMAEV_MEASUREMENT,
channel="Measurement.Operation.Health",
value_mapping={
SmaEvChargerMeasurements.OK: "ok",
SmaEvChargerMeasurements.WARNING: "warning",
SmaEvChargerMeasurements.ALARM: "alarm",
SmaEvChargerMeasurements.OFF: "off",
},
entity_registry_enabled_default=True,
),
SmaEvChargerSensorEntityDescription(
key="mac_address",
translation_key="mac_address",
type=SMAEV_PARAMETER,
channel="Parameter.Nameplate.MacId",
entity_registry_enabled_default=True,
entity_category=EntityCategory.DIAGNOSTIC,
),
SmaEvChargerSensorEntityDescription(
key="wifi_mac_address",
translation_key="wifi_mac_address",
type=SMAEV_PARAMETER,
channel="Parameter.Nameplate.WlMacId",
entity_registry_enabled_default=True,
entity_category=EntityCategory.DIAGNOSTIC,
),
)
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up SMA EV Charger sensors."""
data = hass.data[DOMAIN][config_entry.entry_id]
| coordinator = data[SMAEV_COORDINATOR] | 1 | 2023-11-04 07:08:41+00:00 | 4k |
microsoft/promptbase | src/promptbase/bigbench/bigbench.py | [
{
"identifier": "process_cot",
"path": "src/promptbase/bigbench/bigbench_cot.py",
"snippet": "def process_cot(test_name: str, overwrite=False, api_type=\"chat\"):\n _logger.info(\"Starting process_cot\")\n if test_name == \"all\":\n subjects = BIGBENCH_SUBJECTS\n elif test_name in BIGBENCH_SUBJECTS:\n subjects = [test_name]\n else:\n _logger.error(f\"Invalid test name: {test_name}\")\n exit(1)\n\n bigbench_data_root = get_datasets_path() / \"BigBench\"\n cot_prompts_dir = bigbench_data_root / \"cot-prompts\"\n bbh_test_dir = bigbench_data_root / \"bbh\"\n generations_dir = get_generations_path()\n\n if not cot_prompts_dir.exists():\n _logger.error(f\"COT prompt directory {cot_prompts_dir} does not exist\")\n exit(1)\n elif not bbh_test_dir.exists():\n _logger.error(f\"BBH test directory {bbh_test_dir} does not exist\")\n exit(1)\n\n _logger.info(f\"Processing CoT for BigBench subjects: {subjects}\")\n\n threads = []\n for subject in subjects:\n bbh_test_path = bbh_test_dir / f\"{subject}.json\"\n cot_prompt_path = cot_prompts_dir / f\"{subject}.txt\"\n if not bbh_test_path.exists():\n _logger.error(f\"Data file {bbh_test_path} does not exist\")\n exit(1)\n elif not cot_prompt_path.exists():\n _logger.error(f\"COT prompt file {cot_prompt_path} does not exist\")\n exit(1)\n\n if api_type == \"completion\":\n _logger.info(f\"Starting completion thread for {bbh_test_path}\")\n results_path = generations_dir / \"bigbench\" / \"cot_results\" / \"completion\"\n if overwrite:\n cot_results_filename = results_path / f\"{subject}_completion_cot_results.json\"\n if cot_results_filename.exists():\n cot_results_filename.unlink()\n results_path.mkdir(parents=True, exist_ok=True)\n thread = threading.Thread(\n target=do_completion_cot,\n args=(bbh_test_path, cot_prompt_path, subject, results_path),\n )\n else:\n _logger.info(f\"Starting chat thread for {bbh_test_path}\")\n results_path = generations_dir / \"bigbench\" / \"cot_results\" / \"chat\"\n results_path.mkdir(parents=True, exist_ok=True)\n if overwrite:\n cot_results_filename = results_path / f\"{subject}_chat_cot_results.json\"\n if cot_results_filename.exists():\n cot_results_filename.unlink()\n thread = threading.Thread(\n target=do_chat_cot,\n args=(bbh_test_path, cot_prompt_path, subject, results_path),\n )\n threads.append(thread)\n thread.start()\n\n for thread in threads:\n thread.join()\n\n print(\"Done!\")"
},
{
"identifier": "score",
"path": "src/promptbase/bigbench/bigbench_score.py",
"snippet": "def score(api_type=\"chat\"):\n ground_truth_dir = get_datasets_path() / \"BigBench\" / \"bbh\"\n if not ground_truth_dir.exists():\n _logger.error(f\"Ground truth directory {ground_truth_dir} does not exist\")\n return\n answer_dir = get_generations_path() / \"bigbench\" / \"answers\" / api_type\n\n score_dict = {}\n\n # loop through json files in ground truth path\n for gt_filename in os.listdir(ground_truth_dir):\n if not gt_filename.endswith(\".json\"):\n _logger.warn(\"Skipping non-json file: \" + gt_filename)\n continue\n _logger.info(\"Processing file: \" + gt_filename)\n fname_base = gt_filename.split(\".\")[0]\n answer_path = answer_dir / f\"{fname_base}_{api_type}_answers.json\"\n if not os.path.exists(answer_path):\n _logger.warn(\"Answer file does not exist: %s\", answer_path)\n continue\n with open(ground_truth_dir / gt_filename) as f:\n ground_truth_data = json.load(f)\n with open(answer_path) as f:\n answer_data = json.load(f)\n\n _logger.info(\"Number of ground truth examples: %s\", str(len(ground_truth_data[\"examples\"])))\n _logger.info(\"Number of answer examples: %s\", str(len(answer_data)))\n if len(ground_truth_data[\"examples\"]) != len(answer_data):\n _logger.warn(\"Number of examples does not match for file: %s\", gt_filename)\n continue\n\n correct_count = 0\n total_count = len(ground_truth_data[\"examples\"])\n\n for i, gt in enumerate(ground_truth_data[\"examples\"]):\n if gt[\"target\"] == answer_data[i][\"completion\"]:\n correct_count += 1\n\n score_dict[fname_base] = {\n \"correct\": correct_count,\n \"total\": total_count,\n \"score\": correct_count / total_count,\n }\n\n total_correct = 0\n total_overall = 0\n for k, v in score_dict.items():\n total_correct += v[\"correct\"]\n total_overall += v[\"total\"]\n\n score_dict[\"overall\"] = {\n \"correct\": total_correct,\n \"total\": total_overall,\n \"score\": total_correct / total_overall,\n }\n\n print(\"Final scores:\", score_dict)\n\n # save as json file\n timestamp = datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n score_dir = get_generations_path() / \"bigbench\" / \"scores\"\n score_dir.mkdir(parents=True, exist_ok=True)\n with open(score_dir / f\"bigbench_scores_{api_type}_{timestamp}.json\", \"w\") as f:\n json.dump(score_dict, f)"
},
{
"identifier": "process_answers",
"path": "src/promptbase/bigbench/bigbench_answer.py",
"snippet": "def process_answers(test_name: str, overwrite=False, api_type=\"chat\"):\n \"\"\"\n Processes chain-of-thought answers to produce the label in the expected format.\n \"\"\"\n if test_name == \"all\":\n subjects = few_shot_examples.keys()\n else:\n subjects = [test_name]\n\n _logger.info(f\"Processing answers for Bigbench subjects: {subjects}\")\n\n threads = []\n for subject in subjects:\n if api_type == \"chat\":\n thread = threading.Thread(\n target=process_chat_answers, args=(subject, overwrite)\n )\n else:\n thread = threading.Thread(\n target=process_completion_answers, args=(subject, overwrite)\n )\n threads.append(thread)\n thread.start()\n\n for thread in threads:\n thread.join()\n _logger.info(\"Done processing answers\")"
}
] | from .bigbench_cot import process_cot
from .bigbench_score import score
from .bigbench_answer import process_answers
from promptbase.bigbench.consts import BIGBENCH_SUBJECTS | 1,712 |
def generate(subject: str, overwrite: bool, mode="chat"):
if subject != "all" and subject not in BIGBENCH_SUBJECTS:
print(f"Invalid subject: {subject}")
return
print(f"Running BigBench generation for subject {subject}")
process_cot(subject, overwrite, mode)
process_answers(subject, overwrite, mode)
def evaluate(mode="chat"):
|
def generate(subject: str, overwrite: bool, mode="chat"):
if subject != "all" and subject not in BIGBENCH_SUBJECTS:
print(f"Invalid subject: {subject}")
return
print(f"Running BigBench generation for subject {subject}")
process_cot(subject, overwrite, mode)
process_answers(subject, overwrite, mode)
def evaluate(mode="chat"): | score(mode) | 1 | 2023-12-12 08:00:11+00:00 | 4k |
openai/weak-to-strong | train_weak_to_strong.py | [
{
"identifier": "get_tokenizer",
"path": "weak_to_strong/common.py",
"snippet": "def get_tokenizer(model_name: str):\n \"\"\"\n This function returns a tokenizer based on the model name.\n\n Parameters:\n model_name: The name of the model for which the tokenizer is needed.\n\n Returns:\n A tokenizer for the specified model.\n \"\"\"\n return AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)"
},
{
"identifier": "VALID_DATASETS",
"path": "weak_to_strong/datasets.py",
"snippet": "VALID_DATASETS: list[str] = list(_REGISTRY.keys())"
},
{
"identifier": "load_dataset",
"path": "weak_to_strong/datasets.py",
"snippet": "def load_dataset(ds_name: str, seed: int = 0, split_sizes: Optional[dict] = None):\n if split_sizes is None:\n split_sizes = dict(train=None, test=None)\n\n if ds_name not in _REGISTRY:\n raise ValueError(f\"Unknown dataset {ds_name}, please register\")\n cfg = _REGISTRY[ds_name]\n results = {}\n for split, n_docs in split_sizes.items():\n ds = cfg.loader(split)\n try:\n ds = ds.select(range(n_docs))\n except IndexError as e:\n print(f\"Warning {ds_name} has less than {n_docs} docs, using all: {e}\")\n ds = ds.map(functools.partial(cfg.formatter, rng=Random(seed)))\n ds = ds.map(\n lambda ex: {\"soft_label\": [1 - float(ex[\"hard_label\"]), float(ex[\"hard_label\"])]}\n )\n ds = ds.shuffle(seed=seed) # shuffling a bit pointless for test set but wtv\n results[split] = ds\n return results"
},
{
"identifier": "tokenize_dataset",
"path": "weak_to_strong/datasets.py",
"snippet": "def tokenize_dataset(\n raw_ds: HfDataset,\n tokenizer: Callable,\n max_ctx: int,\n):\n \"\"\"\n This function prepares the dataset for training. It takes the raw dataset, a formatting function,\n a tokenizer, a maximum context length\n\n Parameters:\n raw_ds: The raw dataset to be processed.\n tokenizer: The tokenizer to be used on the formatted dataset.\n max_ctx: The maximum context length for the tokenizer.\n\n Returns:\n ds: The processed and shuffled dataset ready for training.\n \"\"\"\n\n def process_function(res):\n toks = tokenizer(res[\"txt\"])\n return dict(\n input_ids=toks[\"input_ids\"],\n )\n\n ds = raw_ds.map(process_function, batched=False).filter(lambda x: len(x[\"input_ids\"]) < max_ctx)\n return ds"
},
{
"identifier": "logconf_loss_fn",
"path": "weak_to_strong/loss.py",
"snippet": "class logconf_loss_fn(LossFnBase):\n \"\"\"\n This class defines a custom loss function for log confidence.\n\n Attributes:\n aux_coef: A float indicating the auxiliary coefficient.\n warmup_frac: A float indicating the fraction of total training steps for warmup.\n \"\"\"\n\n def __init__(\n self,\n aux_coef: float = 0.5,\n warmup_frac: float = 0.1, # in terms of fraction of total training steps\n ):\n self.aux_coef = aux_coef\n self.warmup_frac = warmup_frac\n\n def __call__(\n self,\n logits: torch.Tensor,\n labels: torch.Tensor,\n step_frac: float,\n ) -> torch.Tensor:\n logits = logits.float()\n labels = labels.float()\n coef = 1.0 if step_frac > self.warmup_frac else step_frac\n coef = coef * self.aux_coef\n preds = torch.softmax(logits, dim=-1)\n mean_weak = torch.mean(labels, dim=0)\n assert mean_weak.shape == (2,)\n threshold = torch.quantile(preds[:, 0], mean_weak[1])\n strong_preds = torch.cat(\n [(preds[:, 0] >= threshold)[:, None], (preds[:, 0] < threshold)[:, None]],\n dim=1,\n )\n target = labels * (1 - coef) + strong_preds.detach() * coef\n loss = torch.nn.functional.cross_entropy(logits, target, reduction=\"none\")\n return loss.mean()"
},
{
"identifier": "product_loss_fn",
"path": "weak_to_strong/loss.py",
"snippet": "class product_loss_fn(LossFnBase):\n \"\"\"\n This class defines a custom loss function for product of predictions and labels.\n\n Attributes:\n alpha: A float indicating how much to weigh the weak model.\n beta: A float indicating how much to weigh the strong model.\n warmup_frac: A float indicating the fraction of total training steps for warmup.\n \"\"\"\n\n def __init__(\n self,\n alpha: float = 1.0, # how much to weigh the weak model\n beta: float = 1.0, # how much to weigh the strong model\n warmup_frac: float = 0.1, # in terms of fraction of total training steps\n ):\n self.alpha = alpha\n self.beta = beta\n self.warmup_frac = warmup_frac\n\n def __call__(\n self,\n logits: torch.Tensor,\n labels: torch.Tensor,\n step_frac: float,\n ) -> torch.Tensor:\n preds = torch.softmax(logits, dim=-1)\n target = torch.pow(preds, self.beta) * torch.pow(labels, self.alpha)\n target /= target.sum(dim=-1, keepdim=True)\n target = target.detach()\n loss = torch.nn.functional.cross_entropy(logits, target, reduction=\"none\")\n return loss.mean()"
},
{
"identifier": "xent_loss",
"path": "weak_to_strong/loss.py",
"snippet": "class xent_loss(LossFnBase):\n def __call__(\n self, logits: torch.Tensor, labels: torch.Tensor, step_frac: float\n ) -> torch.Tensor:\n \"\"\"\n This function calculates the cross entropy loss between logits and labels.\n\n Parameters:\n logits: The predicted values.\n labels: The actual values.\n step_frac: The fraction of total training steps completed.\n\n Returns:\n The mean of the cross entropy loss.\n \"\"\"\n loss = torch.nn.functional.cross_entropy(logits, labels)\n return loss.mean()"
},
{
"identifier": "ModelConfig",
"path": "weak_to_strong/train.py",
"snippet": "class ModelConfig:\n name: str\n default_lr: float\n eval_batch_size: int\n custom_kwargs: Optional[dict] = None\n gradient_checkpointing: bool = False\n model_parallel: bool = False\n default_optimizer: str = \"adam\""
},
{
"identifier": "train_and_save_model",
"path": "weak_to_strong/train.py",
"snippet": "def train_and_save_model(\n model_config: ModelConfig,\n train_ds: datasets.Dataset,\n test_ds: datasets.Dataset,\n inference_ds: Optional[datasets.Dataset] = None,\n *,\n batch_size: int,\n lr: float,\n epochs: int,\n eval_batch_size: Optional[int] = None,\n minibatch_size_per_device: Optional[int] = None,\n save_path: Optional[str] = None,\n loss_fn: Callable = xent_loss,\n label: str = \"default\",\n force_retrain: bool = False,\n train_with_dropout: bool = False,\n linear_probe: bool = False,\n lr_schedule: str = \"constant\",\n optimizer_name: str = \"adam\",\n eval_every: Optional[int] = None,\n):\n if eval_batch_size is None:\n eval_batch_size = batch_size\n\n if minibatch_size_per_device is None:\n minibatch_size_per_device = 1\n\n gradient_checkpointing = model_config.gradient_checkpointing\n custom_kwargs = model_config.custom_kwargs or {}\n\n def maybe_load_model(model):\n if os.path.exists(os.path.join(save_path, \"results.pkl\")) and not force_retrain:\n print(\"loading from\", save_path)\n checkpoint_path = os.path.join(save_path, \"pytorch_model.bin\")\n if not os.path.exists(checkpoint_path):\n # Assume this means we have a sharded checkpoint, and load it appropriately\n load_sharded_checkpoint(model, checkpoint_path)\n else:\n state_dict = torch.load(os.path.join(save_path, \"pytorch_model.bin\"))\n state_dict = {\n k.replace(\"transformer.module\", \"transformer\"): v\n for (k, v) in state_dict.items()\n }\n custom_kwargs[\"state_dict\"] = state_dict\n return True\n return False\n\n already_trained = False\n # Load the model\n if model_config.model_parallel:\n assert torch.cuda.device_count() > 1, f\"you might want more gpus for {model_config.name}\"\n model = TransformerWithHead.from_pretrained(\n model_config.name,\n num_labels=2,\n device_map=\"auto\",\n linear_probe=linear_probe,\n **custom_kwargs,\n )\n already_trained = maybe_load_model(model)\n # slight misnomer, more like minibatch_size_per_dp_replica\n minibatch_size = minibatch_size_per_device\n else:\n model = TransformerWithHead.from_pretrained(\n model_config.name, num_labels=2, linear_probe=linear_probe, **custom_kwargs\n ).to(\"cuda\")\n already_trained = maybe_load_model(model)\n # data parallel: currently not supported with model parallel\n if torch.cuda.device_count() > 1:\n model = torch.nn.DataParallel(model, output_device=0)\n minibatch_size = min(minibatch_size_per_device * torch.cuda.device_count(), batch_size)\n print(\n \"Using\",\n torch.cuda.device_count(),\n \"GPUs, setting minibatch_size to\",\n minibatch_size,\n )\n else:\n minibatch_size = minibatch_size_per_device\n\n if already_trained:\n test_results = eval_model_acc(model, test_ds, eval_batch_size)\n else:\n start = time.time()\n test_results = train_model(\n model,\n train_ds,\n batch_size,\n lr=lr,\n epochs=epochs,\n eval_ds=test_ds,\n gradient_checkpointing=gradient_checkpointing,\n loss_fn=loss_fn,\n eval_batch_size=eval_batch_size,\n eval_every=eval_every,\n minibatch_size=minibatch_size,\n train_with_dropout=train_with_dropout,\n lr_schedule=lr_schedule,\n optimizer_name=optimizer_name,\n )\n print(\"Model training took\", time.time() - start, \"seconds\")\n if save_path:\n # Note: If the model is wrapped by DataParallel, we need to unwrap it before saving\n (model if hasattr(model, \"save_pretrained\") else model.module).save_pretrained(\n save_path\n )\n print(\"saved\", save_path)\n\n inference_results = None\n if inference_ds:\n inference_results = eval_model_acc(model, inference_ds, eval_batch_size)\n 
logger.logkv(\"inference_accuracy\", np.mean([r[\"acc\"] for r in inference_results]))\n\n if save_path:\n with open(os.path.join(save_path, \"results.pkl\"), \"wb\") as f:\n pickle.dump(\n {\n \"avg_acc_test\": float(np.mean([r[\"acc\"] for r in test_results])),\n \"avg_acc_inference\": float(\n np.mean([r[\"acc\"] for r in inference_results] if inference_results else [])\n ),\n \"test_results\": test_results,\n \"inference_results\": inference_results if inference_results else [],\n },\n f,\n )\n # try to clean up memory\n clear_mem()\n logger.shutdown()\n\n return test_results, inference_results"
}
] | import json
import os
import fire
import numpy as np
import torch
import weak_to_strong.logger as logger
from typing import Dict, List, Optional, Sequence, Union
from weak_to_strong.common import get_tokenizer
from weak_to_strong.datasets import (VALID_DATASETS, load_dataset,
tokenize_dataset)
from weak_to_strong.loss import logconf_loss_fn, product_loss_fn, xent_loss
from weak_to_strong.train import ModelConfig, train_and_save_model | 3,459 |
# NOTE learning rates are not particularly tuned, work somewhat reasonably at train batch size 32
MODEL_CONFIGS = [
ModelConfig(
name="gpt2",
default_lr=5e-5,
eval_batch_size=32,
),
ModelConfig(
name="gpt2-medium",
default_lr=5e-5,
eval_batch_size=32,
),
ModelConfig(
name="gpt2-large",
default_lr=1e-5,
eval_batch_size=32,
),
ModelConfig(
name="gpt2-xl",
default_lr=1e-5,
eval_batch_size=2,
gradient_checkpointing=True,
model_parallel=True,
),
ModelConfig(
name="Qwen/Qwen-1_8B",
default_lr=1e-5,
eval_batch_size=2,
gradient_checkpointing=True,
model_parallel=True,
custom_kwargs={
"trust_remote_code": True,
"bf16": torch.cuda.is_bf16_supported(),
"fp32": not torch.cuda.is_bf16_supported(),
},
),
ModelConfig(
name="Qwen/Qwen-7B",
default_lr=1e-5,
eval_batch_size=2,
gradient_checkpointing=True,
model_parallel=True,
# note: you will probably not be able to run this without many gpus
custom_kwargs={
"trust_remote_code": True,
"bf16": torch.cuda.is_bf16_supported(),
"fp32": not torch.cuda.is_bf16_supported(),
},
),
ModelConfig(
name="Qwen/Qwen-14B",
default_lr=1e-5,
eval_batch_size=2,
gradient_checkpointing=True,
model_parallel=True,
# note: you will probably not be able to run this without bf16 support and many gpus
custom_kwargs={
"trust_remote_code": True,
"bf16": torch.cuda.is_bf16_supported(),
"fp32": not torch.cuda.is_bf16_supported(),
},
),
ModelConfig(
name="Qwen/Qwen-72B",
default_lr=1e-5,
eval_batch_size=1,
gradient_checkpointing=True,
model_parallel=True,
# note: you will probably not be able to run this without bf16 support and many gpus
custom_kwargs={
"trust_remote_code": True,
"bf16": torch.cuda.is_bf16_supported(),
"fp32": not torch.cuda.is_bf16_supported(),
},
# This model is really big, save space by using adafactor.
# Note that even then it will take up ~60GB per GPU on an 8-GPU machine.
default_optimizer="adafactor",
),
]
MODELS_DICT: Dict[str, ModelConfig] = {
model_config.name: model_config for model_config in MODEL_CONFIGS
}
loss_dict = {
|
# NOTE learning rates are not particularly tuned, work somewhat reasonably at train batch size 32
MODEL_CONFIGS = [
ModelConfig(
name="gpt2",
default_lr=5e-5,
eval_batch_size=32,
),
ModelConfig(
name="gpt2-medium",
default_lr=5e-5,
eval_batch_size=32,
),
ModelConfig(
name="gpt2-large",
default_lr=1e-5,
eval_batch_size=32,
),
ModelConfig(
name="gpt2-xl",
default_lr=1e-5,
eval_batch_size=2,
gradient_checkpointing=True,
model_parallel=True,
),
ModelConfig(
name="Qwen/Qwen-1_8B",
default_lr=1e-5,
eval_batch_size=2,
gradient_checkpointing=True,
model_parallel=True,
custom_kwargs={
"trust_remote_code": True,
"bf16": torch.cuda.is_bf16_supported(),
"fp32": not torch.cuda.is_bf16_supported(),
},
),
ModelConfig(
name="Qwen/Qwen-7B",
default_lr=1e-5,
eval_batch_size=2,
gradient_checkpointing=True,
model_parallel=True,
# note: you will probably not be able to run this without many gpus
custom_kwargs={
"trust_remote_code": True,
"bf16": torch.cuda.is_bf16_supported(),
"fp32": not torch.cuda.is_bf16_supported(),
},
),
ModelConfig(
name="Qwen/Qwen-14B",
default_lr=1e-5,
eval_batch_size=2,
gradient_checkpointing=True,
model_parallel=True,
# note: you will probably not be able to run this without bf16 support and many gpus
custom_kwargs={
"trust_remote_code": True,
"bf16": torch.cuda.is_bf16_supported(),
"fp32": not torch.cuda.is_bf16_supported(),
},
),
ModelConfig(
name="Qwen/Qwen-72B",
default_lr=1e-5,
eval_batch_size=1,
gradient_checkpointing=True,
model_parallel=True,
# note: you will probably not be able to run this without bf16 support and many gpus
custom_kwargs={
"trust_remote_code": True,
"bf16": torch.cuda.is_bf16_supported(),
"fp32": not torch.cuda.is_bf16_supported(),
},
# This model is really big, save space by using adafactor.
# Note that even then it will take up ~60GB per GPU on an 8-GPU machine.
default_optimizer="adafactor",
),
]
MODELS_DICT: Dict[str, ModelConfig] = {
model_config.name: model_config for model_config in MODEL_CONFIGS
}
loss_dict = { | "logconf": logconf_loss_fn(), | 4 | 2023-12-13 23:53:13+00:00 | 4k |
linyiLYi/voice-assistant | whisper/whisper.py | [
{
"identifier": "decode",
"path": "whisper/decoding.py",
"snippet": "def decode(\n model: \"Whisper\",\n mel: mx.array,\n options: DecodingOptions = DecodingOptions(),\n **kwargs,\n) -> Union[DecodingResult, List[DecodingResult]]:\n \"\"\"\n Performs decoding of 30-second audio segment(s), provided as Mel spectrogram(s).\n\n Parameters\n ----------\n model: Whisper\n the Whisper model instance\n\n mel: mx.array, shape = (80, 3000) or (*, 80, 3000)\n An array containing the Mel spectrogram(s)\n\n options: DecodingOptions\n A dataclass that contains all necessary options for decoding 30-second segments\n\n Returns\n -------\n result: Union[DecodingResult, List[DecodingResult]]\n The result(s) of decoding contained in `DecodingResult` dataclass instance(s)\n \"\"\"\n if single := mel.ndim == 2:\n mel = mel[None]\n\n if kwargs:\n options = replace(options, **kwargs)\n\n result = DecodingTask(model, options).run(mel)\n return result[0] if single else result"
},
{
"identifier": "detect_language",
"path": "whisper/decoding.py",
"snippet": "def detect_language(\n model: \"Whisper\", mel: mx.array, tokenizer: Tokenizer = None\n) -> Tuple[mx.array, List[dict]]:\n \"\"\"\n Detect the spoken language in the audio, and return them as list of strings, along with the ids\n of the most probable language tokens and the probability distribution over all language tokens.\n This is performed outside the main decode loop in order to not interfere with kv-caching.\n\n Returns\n -------\n language_tokens : mx.array, shape = (n_audio,)\n ids of the most probable language tokens, which appears after the startoftranscript token.\n language_probs : List[Dict[str, float]], length = n_audio\n list of dictionaries containing the probability distribution over all languages.\n \"\"\"\n if tokenizer is None:\n tokenizer = get_tokenizer(\n model.is_multilingual, num_languages=model.num_languages\n )\n if (\n tokenizer.language is None\n or tokenizer.language_token not in tokenizer.sot_sequence\n ):\n raise ValueError(\n \"This model doesn't have language tokens so it can't perform lang id\"\n )\n\n single = mel.ndim == 2\n if single:\n mel = mel[None]\n\n # skip encoder forward pass if already-encoded audio features were given\n if mel.shape[-2:] != [model.dims.n_audio_ctx, model.dims.n_audio_state]:\n mel = model.encoder(mel)\n\n # forward pass using a single token, startoftranscript\n n_audio = mel.shape[0]\n x = mx.array([[tokenizer.sot]] * n_audio) # [n_audio, 1]\n logits = model.logits(x, mel)[:, 0]\n\n # collect detected languages; suppress all non-language tokens\n mask = np.full(logits.shape[-1], -np.inf, dtype=np.float32)\n mask[list(tokenizer.all_language_tokens)] = 0.0\n logits += mx.array(mask)\n language_tokens = mx.argmax(logits, axis=-1)\n language_token_probs = mx.softmax(logits, axis=-1)\n language_probs = [\n {\n c: language_token_probs[i, j].item()\n for j, c in zip(tokenizer.all_language_tokens, tokenizer.all_language_codes)\n }\n for i in range(n_audio)\n ]\n\n if single:\n language_tokens = language_tokens[0]\n language_probs = language_probs[0]\n\n return language_tokens, language_probs"
}
] | import base64
import gzip
import math
import mlx.core as mx
import mlx.nn as nn
import numpy as np
from dataclasses import dataclass
from typing import Union
from .decoding import decode as decode_function
from .decoding import detect_language as detect_language_function | 3,173 | x, _, _ = block(x)
x = self.ln_post(x)
return x
class TextDecoder(nn.Module):
def __init__(
self,
n_vocab: int,
n_ctx: int,
n_state: int,
n_head: int,
n_layer: int,
dtype: mx.Dtype = mx.float16,
):
super().__init__()
self.token_embedding = nn.Embedding(n_vocab, n_state)
self.positional_embedding = mx.zeros((n_ctx, n_state))
self.blocks = [
ResidualAttentionBlock(n_state, n_head, cross_attention=True)
for _ in range(n_layer)
]
self.ln = LayerNorm(n_state)
self._mask = nn.MultiHeadAttention.create_additive_causal_mask(n_ctx).astype(
dtype
)
def __call__(self, x, xa, kv_cache=None):
"""
x : mx.array, shape = (batch_size, <= n_ctx)
the text tokens
xa : mx.array, shape = (batch_size, n_audio_ctx, n_audio_state)
the encoded audio features to be attended on
"""
offset = kv_cache[0][0][0].shape[1] if kv_cache else 0
x = (
self.token_embedding(x)
+ self.positional_embedding[offset : offset + x.shape[-1]]
)
if kv_cache is None:
kv_cache = [None] * len(self.blocks)
cross_qk = [None] * len(self.blocks)
for e, block in enumerate(self.blocks):
x, kv_cache[e], cross_qk[e] = block(
x, xa, mask=self._mask, kv_cache=kv_cache[e]
)
x = self.ln(x)
return x @ self.token_embedding.weight.T, kv_cache, cross_qk
class Whisper(nn.Module):
def __init__(self, dims: ModelDimensions, dtype: mx.Dtype = mx.float16):
super().__init__()
self.dims = dims
self.encoder = AudioEncoder(
self.dims.n_mels,
self.dims.n_audio_ctx,
self.dims.n_audio_state,
self.dims.n_audio_head,
self.dims.n_audio_layer,
dtype,
)
self.decoder = TextDecoder(
self.dims.n_vocab,
self.dims.n_text_ctx,
self.dims.n_text_state,
self.dims.n_text_head,
self.dims.n_text_layer,
dtype,
)
# use the last half among the decoder layers for time alignment by default;
# to use a specific set of heads, see `set_alignment_heads()` below.
all_heads = np.zeros(
(self.dims.n_text_layer, self.dims.n_text_head), dtype=bool
)
all_heads[self.dims.n_text_layer // 2 :] = True
self.alignment_heads = mx.array(np.asarray(all_heads.nonzero()).T)
def set_alignment_heads(self, dump: Union[bytes, np.ndarray]):
if isinstance(dump, np.ndarray):
self.alignment_heads = mx.array(dump)
elif isinstance(dump, bytes):
array = np.frombuffer(
gzip.decompress(base64.b85decode(dump)), dtype=bool
).copy()
mask = array.reshape(self.dims.n_text_layer, self.dims.n_text_head)
self.alignment_heads = mx.array(np.asarray(mask.nonzero()).T)
else:
raise ValueError(
f"Invalid type for `dump`: {type(dump)}. Expected a np.ndarray or base85-encoded bytes containing"
" alignment_head information"
)
def embed_audio(self, mel):
return self.encoder(mel)
def logits(self, tokens, audio_features):
return self.decoder(tokens, audio_features)[0]
def forward_with_cross_qk(self, mel, tokens):
logits, _, cross_qk = self.decoder(tokens, self.encoder(mel))
return logits, cross_qk
def __call__(self, mel, tokens):
return self.decoder(tokens, self.encoder(mel))[0]
@property
def is_multilingual(self):
return self.dims.n_vocab >= 51865
@property
def num_languages(self):
return self.dims.n_vocab - 51765 - int(self.is_multilingual)
detect_language = detect_language_function
| # Copyright © 2023 Apple Inc.
@dataclass
class ModelDimensions:
n_mels: int
n_audio_ctx: int
n_audio_state: int
n_audio_head: int
n_audio_layer: int
n_vocab: int
n_text_ctx: int
n_text_state: int
n_text_head: int
n_text_layer: int
def sinusoids(length, channels, max_timescale=10000):
"""Returns sinusoids for positional embedding"""
assert channels % 2 == 0
log_timescale_increment = math.log(max_timescale) / (channels // 2 - 1)
inv_timescales = mx.exp(-log_timescale_increment * mx.arange(channels // 2))
scaled_time = mx.arange(length)[:, None] * inv_timescales[None, :]
return mx.concatenate([mx.sin(scaled_time), mx.cos(scaled_time)], axis=1)
class LayerNorm(nn.LayerNorm):
def __call__(self, x: mx.array) -> mx.array:
return super().__call__(x.astype(mx.float32)).astype(x.dtype)
class MultiHeadAttention(nn.Module):
def __init__(self, n_state: int, n_head: int):
super().__init__()
self.n_head = n_head
self.query = nn.Linear(n_state, n_state)
self.key = nn.Linear(n_state, n_state, bias=False)
self.value = nn.Linear(n_state, n_state)
self.out = nn.Linear(n_state, n_state)
def __call__(
self,
x,
xa=None,
mask=None,
kv_cache=None,
):
q = self.query(x)
if xa is None:
k = self.key(x)
v = self.value(x)
if kv_cache is not None:
k = mx.concatenate([kv_cache[0], k], axis=1)
v = mx.concatenate([kv_cache[1], v], axis=1)
elif kv_cache is None:
k = self.key(xa)
v = self.value(xa)
else:
k, v = kv_cache
wv, qk = self.qkv_attention(q, k, v, mask)
return self.out(wv), (k, v), qk
def qkv_attention(self, q, k, v, mask=None):
n_batch, n_ctx, n_state = q.shape
scale = (n_state // self.n_head) ** -0.25
q = q.reshape(*q.shape[:2], self.n_head, -1).transpose(0, 2, 1, 3) * scale
k = k.reshape(*k.shape[:2], self.n_head, -1).transpose(0, 2, 3, 1) * scale
v = v.reshape(*v.shape[:2], self.n_head, -1).transpose(0, 2, 1, 3)
qk = q @ k
if mask is not None:
qk = qk + mask[:n_ctx, :n_ctx]
qk = qk.astype(mx.float32)
w = mx.softmax(qk, axis=-1).astype(q.dtype)
out = (w @ v).transpose(0, 2, 1, 3)
out = out.reshape(n_batch, n_ctx, n_state)
return out, qk
class ResidualAttentionBlock(nn.Module):
def __init__(self, n_state: int, n_head: int, cross_attention: bool = False):
super().__init__()
self.attn = MultiHeadAttention(n_state, n_head)
self.attn_ln = LayerNorm(n_state)
self.cross_attn = (
MultiHeadAttention(n_state, n_head) if cross_attention else None
)
self.cross_attn_ln = LayerNorm(n_state) if cross_attention else None
n_mlp = n_state * 4
self.mlp1 = nn.Linear(n_state, n_mlp)
self.mlp2 = nn.Linear(n_mlp, n_state)
self.mlp_ln = LayerNorm(n_state)
def __call__(self, x, xa=None, mask=None, kv_cache=None):
kv, cross_kv = kv_cache if kv_cache else (None, None)
y, kv, _ = self.attn(self.attn_ln(x), mask=mask, kv_cache=kv)
x += y
cross_qk = None
if self.cross_attn:
y, cross_kv, cross_qk = self.cross_attn(
self.cross_attn_ln(x), xa, kv_cache=cross_kv
)
x += y
x = x + self.mlp2(nn.gelu(self.mlp1(self.mlp_ln(x))).astype(x.dtype))
return x, (kv, cross_kv), cross_qk
class AudioEncoder(nn.Module):
def __init__(
self,
n_mels: int,
n_ctx: int,
n_state: int,
n_head: int,
n_layer: int,
dtype: mx.Dtype = mx.float16,
):
super().__init__()
self.conv1 = nn.Conv1d(n_mels, n_state, kernel_size=3, padding=1)
self.conv2 = nn.Conv1d(n_state, n_state, kernel_size=3, stride=2, padding=1)
self._positional_embedding = sinusoids(n_ctx, n_state).astype(dtype)
self.blocks = [ResidualAttentionBlock(n_state, n_head) for _ in range(n_layer)]
self.ln_post = LayerNorm(n_state)
def __call__(self, x):
x = nn.gelu(self.conv1(x)).astype(x.dtype)
x = nn.gelu(self.conv2(x)).astype(x.dtype)
assert x.shape[1:] == self._positional_embedding.shape, "incorrect audio shape"
x = x + self._positional_embedding
for block in self.blocks:
x, _, _ = block(x)
x = self.ln_post(x)
return x
class TextDecoder(nn.Module):
def __init__(
self,
n_vocab: int,
n_ctx: int,
n_state: int,
n_head: int,
n_layer: int,
dtype: mx.Dtype = mx.float16,
):
super().__init__()
self.token_embedding = nn.Embedding(n_vocab, n_state)
self.positional_embedding = mx.zeros((n_ctx, n_state))
self.blocks = [
ResidualAttentionBlock(n_state, n_head, cross_attention=True)
for _ in range(n_layer)
]
self.ln = LayerNorm(n_state)
self._mask = nn.MultiHeadAttention.create_additive_causal_mask(n_ctx).astype(
dtype
)
def __call__(self, x, xa, kv_cache=None):
"""
x : mx.array, shape = (batch_size, <= n_ctx)
the text tokens
xa : mx.array, shape = (batch_size, n_audio_ctx, n_audio_state)
the encoded audio features to be attended on
"""
offset = kv_cache[0][0][0].shape[1] if kv_cache else 0
x = (
self.token_embedding(x)
+ self.positional_embedding[offset : offset + x.shape[-1]]
)
if kv_cache is None:
kv_cache = [None] * len(self.blocks)
cross_qk = [None] * len(self.blocks)
for e, block in enumerate(self.blocks):
x, kv_cache[e], cross_qk[e] = block(
x, xa, mask=self._mask, kv_cache=kv_cache[e]
)
x = self.ln(x)
return x @ self.token_embedding.weight.T, kv_cache, cross_qk
class Whisper(nn.Module):
def __init__(self, dims: ModelDimensions, dtype: mx.Dtype = mx.float16):
super().__init__()
self.dims = dims
self.encoder = AudioEncoder(
self.dims.n_mels,
self.dims.n_audio_ctx,
self.dims.n_audio_state,
self.dims.n_audio_head,
self.dims.n_audio_layer,
dtype,
)
self.decoder = TextDecoder(
self.dims.n_vocab,
self.dims.n_text_ctx,
self.dims.n_text_state,
self.dims.n_text_head,
self.dims.n_text_layer,
dtype,
)
# use the last half among the decoder layers for time alignment by default;
# to use a specific set of heads, see `set_alignment_heads()` below.
all_heads = np.zeros(
(self.dims.n_text_layer, self.dims.n_text_head), dtype=bool
)
all_heads[self.dims.n_text_layer // 2 :] = True
self.alignment_heads = mx.array(np.asarray(all_heads.nonzero()).T)
def set_alignment_heads(self, dump: Union[bytes, np.ndarray]):
if isinstance(dump, np.ndarray):
self.alignment_heads = mx.array(dump)
elif isinstance(dump, bytes):
array = np.frombuffer(
gzip.decompress(base64.b85decode(dump)), dtype=bool
).copy()
mask = array.reshape(self.dims.n_text_layer, self.dims.n_text_head)
self.alignment_heads = mx.array(np.asarray(mask.nonzero()).T)
else:
raise ValueError(
f"Invalid type for `dump`: {type(dump)}. Expected a np.ndarray or base85-encoded bytes containing"
" alignment_head information"
)
def embed_audio(self, mel):
return self.encoder(mel)
def logits(self, tokens, audio_features):
return self.decoder(tokens, audio_features)[0]
def forward_with_cross_qk(self, mel, tokens):
logits, _, cross_qk = self.decoder(tokens, self.encoder(mel))
return logits, cross_qk
def __call__(self, mel, tokens):
return self.decoder(tokens, self.encoder(mel))[0]
@property
def is_multilingual(self):
return self.dims.n_vocab >= 51865
@property
def num_languages(self):
return self.dims.n_vocab - 51765 - int(self.is_multilingual)
detect_language = detect_language_function | decode = decode_function | 12 | 2023-12-09 13:33:46+00:00 | 4k |
SqueezeAILab/LLMCompiler | configs/movie_react/tools.py | [
{
"identifier": "Tool",
"path": "src/agents/tools.py",
"snippet": "class InvalidTool(BaseTool):\n def _run(\n self,\n requested_tool_name: str,\n available_tool_names: List[str],\n run_manager: Optional[CallbackManagerForToolRun] = None,\n ) -> str:\n async def _arun(\n self,\n requested_tool_name: str,\n available_tool_names: List[str],\n run_manager: Optional[AsyncCallbackManagerForToolRun] = None,\n ) -> str:"
},
{
"identifier": "DocstoreExplorer",
"path": "src/docstore/wikipedia.py",
"snippet": "class DocstoreExplorer:\n \"\"\"Class to assist with exploration of a document store.\"\"\"\n\n def __init__(self, docstore: ReActWikipedia, char_limit=None, one_sentence=False):\n \"\"\"Initialize with a docstore, and set initial document to None.\"\"\"\n self.docstore = docstore\n self.document: Optional[Document] = None\n self.lookup_str = \"\"\n self.lookup_index = 0\n self.char_limit = char_limit\n self.one_sentence = one_sentence\n\n def search(self, term: str) -> str:\n \"\"\"Search for a term in the docstore, and if found save.\"\"\"\n result = self.docstore.search(term)\n if self.one_sentence:\n result = result.split(\". \")[0]\n if self.char_limit is not None:\n result = result[: self.char_limit]\n if isinstance(result, Document):\n self.document = result\n return self._summary\n else:\n self.document = None\n return result\n\n async def asearch(self, term: str) -> str:\n \"\"\"Search for a term in the docstore, and if found save.\"\"\"\n result = await self.docstore.asearch(term)\n if self.one_sentence:\n result = result.split(\". \")[0]\n if self.char_limit is not None:\n result = result[: self.char_limit]\n if isinstance(result, Document):\n self.document = result\n return self._summary\n else:\n self.document = None\n return result\n\n def lookup(self, term: str) -> str:\n \"\"\"Lookup a term in document (if saved).\"\"\"\n if self.document is None:\n raise ValueError(\"Cannot lookup without a successful search first\")\n if term.lower() != self.lookup_str:\n self.lookup_str = term.lower()\n self.lookup_index = 0\n else:\n self.lookup_index += 1\n lookups = [p for p in self._paragraphs if self.lookup_str in p.lower()]\n if len(lookups) == 0:\n return \"No Results\"\n elif self.lookup_index >= len(lookups):\n return \"No More Results\"\n else:\n result_prefix = f\"(Result {self.lookup_index + 1}/{len(lookups)})\"\n return f\"{result_prefix} {lookups[self.lookup_index]}\"\n\n @property\n def _summary(self) -> str:\n return self._paragraphs[0]\n\n @property\n def _paragraphs(self) -> List[str]:\n if self.document is None:\n raise ValueError(\"Cannot get paragraphs without a document\")\n return self.document.page_content.split(\"\\n\\n\")"
},
{
"identifier": "ReActWikipedia",
"path": "src/docstore/wikipedia.py",
"snippet": "class ReActWikipedia(Docstore):\n \"\"\"Wrapper around wikipedia API.\"\"\"\n\n def __init__(self, benchmark=False, skip_retry_when_postprocess=False) -> None:\n \"\"\"Check that wikipedia package is installed.\"\"\"\n try:\n import requests\n from bs4 import BeautifulSoup\n except ImportError:\n raise ImportError(\n \"Could not import wikipedia python package. \"\n \"Please install it with `pip install wikipedia`.\"\n )\n self.page = None\n self.lookup_keyword = None\n self.lookup_list = None\n self.lookup_cnt = None\n\n self.benchmark = benchmark\n self.all_times = []\n\n # when True, always skip retry when postprocess\n self.skip_retry_when_postprocess = skip_retry_when_postprocess\n\n def reset(self):\n self.all_times = []\n\n def get_stats(self):\n return {\n \"all_times\": self.all_times,\n }\n\n @staticmethod\n def _get_page_obs(page):\n # find all paragraphs\n paragraphs = page.split(\"\\n\")\n paragraphs = [p.strip() for p in paragraphs if p.strip()]\n\n # find all sentence\n sentences = []\n for p in paragraphs:\n sentences += p.split(\". \")\n sentences = [s.strip() + \".\" for s in sentences if s.strip()]\n return \" \".join(sentences[:5])\n\n def _get_alternative(self, result: str) -> str:\n parsed_alternatives = result.split(\"Similar: \")[1][:-1]\n\n alternatives = ast.literal_eval(parsed_alternatives)\n alternative = alternatives[0]\n for alt in alternatives:\n if \"film\" in alt or \"movie\" in alt:\n alternative = alt\n break\n return alternative\n\n def post_process(\n self, response_text: str, entity: str, skip_retry_when_postprocess: bool = False\n ) -> str:\n soup = BeautifulSoup(response_text, features=\"html.parser\")\n result_divs = soup.find_all(\"div\", {\"class\": \"mw-search-result-heading\"})\n\n if result_divs: # mismatch\n self.result_titles = [\n clean_str(div.get_text().strip()) for div in result_divs\n ]\n obs = f\"Could not find {entity}. Similar: {self.result_titles[:5]}.\"\n else:\n page = [\n p.get_text().strip() for p in soup.find_all(\"p\") + soup.find_all(\"ul\")\n ]\n if any(\"may refer to:\" in p for p in page):\n if skip_retry_when_postprocess or self.skip_retry_when_postprocess:\n obs = \"Could not find \" + entity + \".\"\n else:\n obs = self.search(\"[\" + entity + \"]\", is_retry=True)\n else:\n self.page = \"\"\n for p in page:\n if len(p.split(\" \")) > 2:\n self.page += clean_str(p)\n if not p.endswith(\"\\n\"):\n self.page += \"\\n\"\n obs = self._get_page_obs(self.page)\n self.lookup_keyword = self.lookup_list = self.lookup_cnt = None\n\n obs = obs.replace(\"\\\\n\", \"\")\n return obs\n\n async def apost_process(\n self, response_text: str, entity: str, skip_retry_when_postprocess: bool = False\n ) -> str:\n soup = BeautifulSoup(response_text, features=\"html.parser\")\n result_divs = soup.find_all(\"div\", {\"class\": \"mw-search-result-heading\"})\n\n if result_divs: # mismatch\n self.result_titles = [\n clean_str(div.get_text().strip()) for div in result_divs\n ]\n obs = f\"Could not find {entity}. 
Similar: {self.result_titles[:5]}.\"\n else:\n page = [\n p.get_text().strip() for p in soup.find_all(\"p\") + soup.find_all(\"ul\")\n ]\n if any(\"may refer to:\" in p for p in page):\n if skip_retry_when_postprocess or self.skip_retry_when_postprocess:\n obs = \"Could not find \" + entity + \".\"\n else:\n obs = await self.asearch(\"[\" + entity + \"]\", is_retry=True)\n else:\n self.page = \"\"\n for p in page:\n if len(p.split(\" \")) > 2:\n self.page += clean_str(p)\n if not p.endswith(\"\\n\"):\n self.page += \"\\n\"\n obs = self._get_page_obs(self.page)\n self.lookup_keyword = self.lookup_list = self.lookup_cnt = None\n\n obs = obs.replace(\"\\\\n\", \"\")\n return obs\n\n def search(self, entity: str, is_retry: bool = False) -> Union[str, Document]:\n \"\"\"Try to search for wiki page.\n\n If page exists, return the page summary, and a PageWithLookups object.\n If page does not exist, return similar entries.\n\n Args:\n entity: entity string.\n\n Returns: a Document object or error message.\n \"\"\"\n s = time.time()\n entity = str(entity)\n entity_ = entity.replace(\" \", \"+\")\n search_url = f\"https://en.wikipedia.org/w/index.php?search={entity_}\"\n response_text = requests.get(search_url).text\n\n result = self.post_process(response_text, entity)\n\n if \"Similar:\" in result:\n alternative = self._get_alternative(result)\n entity_ = alternative.replace(\" \", \"+\")\n search_url = f\"https://en.wikipedia.org/w/index.php?search={entity_}\"\n response_text = requests.get(search_url).text\n\n result = self.post_process(\n response_text, entity, skip_retry_when_postprocess=True\n )\n\n if \"Similar:\" in result:\n result = \"Could not find \" + entity + \".\"\n\n if self.benchmark and not is_retry:\n # we only benchmark the outermost call\n self.all_times.append(round(time.time() - s, 2))\n\n return result\n\n async def asearch(\n self, entity: str, is_retry: bool = False\n ) -> Union[str, Document]:\n \"\"\"Try to search for wiki page.\n\n If page exists, return the page summary, and a PageWithLookups object.\n If page does not exist, return similar entries.\n\n Args:\n entity: entity string.\n\n Returns: a Document object or error message.\n \"\"\"\n s = time.time()\n entity = str(entity)\n entity_ = entity.replace(\" \", \"+\")\n search_url = f\"https://en.wikipedia.org/w/index.php?search={entity_}\"\n\n async with aiohttp.ClientSession() as session:\n async with session.get(search_url) as response:\n response_text = await response.text()\n\n result = await self.apost_process(response_text, entity)\n\n if \"Similar:\" in result:\n alternative = self._get_alternative(result)\n entity_ = alternative.replace(\" \", \"+\")\n search_url = f\"https://en.wikipedia.org/w/index.php?search={entity_}\"\n async with aiohttp.ClientSession() as session:\n async with session.get(search_url) as response:\n response_text = await response.text()\n\n result = await self.apost_process(\n response_text, entity, skip_retry_when_postprocess=True\n )\n\n if \"Similar:\" in result:\n return \"Could not find \" + entity + \".\"\n\n if self.benchmark and not is_retry:\n # we only benchmark the outermost call\n self.all_times.append(round(time.time() - s, 2))\n\n return result"
}
] | from src.agents.tools import Tool
from src.docstore.wikipedia import DocstoreExplorer, ReActWikipedia | 2,638 |
web_searcher = ReActWikipedia()
docstore = DocstoreExplorer(web_searcher)
tools = [
|
web_searcher = ReActWikipedia()
docstore = DocstoreExplorer(web_searcher)
tools = [ | Tool( | 0 | 2023-12-06 21:12:54+00:00 | 4k |
open-compass/MixtralKit | mixtralkit/layers/attention.py | [
{
"identifier": "ModelArgs",
"path": "mixtralkit/layers/utils.py",
"snippet": "class ModelArgs:\n dim: int = 4096\n n_layers: int = 32\n n_heads: int = 32\n n_kv_heads: Optional[int] = None\n vocab_size: int = -1 # defined later by tokenizer\n multiple_of: int = 256 # make SwiGLU hidden layer size multiple of large power of 2\n ffn_dim_multiplier: Optional[float] = None\n norm_eps: float = 1e-5\n\n max_batch_size: int = 32\n max_seq_len: int = 2048"
},
{
"identifier": "repeat_kv",
"path": "mixtralkit/layers/utils.py",
"snippet": "def repeat_kv(x: torch.Tensor, n_rep: int) -> torch.Tensor:\n \"\"\"torch.repeat_interleave(x, dim=2, repeats=n_rep)\"\"\"\n bs, slen, n_kv_heads, head_dim = x.shape\n if n_rep == 1:\n return x\n return (\n x[:, :, :, None, :]\n .expand(bs, slen, n_kv_heads, n_rep, head_dim)\n .reshape(bs, slen, n_kv_heads * n_rep, head_dim)\n )"
},
{
"identifier": "apply_rotary_emb",
"path": "mixtralkit/layers/position_embeding.py",
"snippet": "def apply_rotary_emb(\n xq: torch.Tensor,\n xk: torch.Tensor,\n freqs_cis: torch.Tensor,\n) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Apply rotary embeddings to input tensors using the given frequency tensor.\n\n This function applies rotary embeddings to the given query 'xq' and key 'xk' tensors using the provided\n frequency tensor 'freqs_cis'. The input tensors are reshaped as complex numbers, and the frequency tensor\n is reshaped for broadcasting compatibility. The resulting tensors contain rotary embeddings and are\n returned as real tensors.\n\n Args:\n xq (torch.Tensor): Query tensor to apply rotary embeddings.\n xk (torch.Tensor): Key tensor to apply rotary embeddings.\n freqs_cis (torch.Tensor): Precomputed frequency tensor for complex exponentials.\n\n Returns:\n Tuple[torch.Tensor, torch.Tensor]: Tuple of modified query tensor and key tensor with rotary embeddings.\n\n \n\n \"\"\"\n xq_ = torch.view_as_complex(xq.float().reshape(*xq.shape[:-1], -1, 2))\n xk_ = torch.view_as_complex(xk.float().reshape(*xk.shape[:-1], -1, 2))\n freqs_cis = reshape_for_broadcast(freqs_cis, xq_)\n xq_out = torch.view_as_real(xq_ * freqs_cis).flatten(3)\n xk_out = torch.view_as_real(xk_ * freqs_cis).flatten(3)\n return xq_out.type_as(xq), xk_out.type_as(xk)"
}
] | import math
import torch
import torch.nn.functional as F
import fairscale.nn.model_parallel.initialize as fs_init
from typing import Optional, Tuple
from torch import nn
from .utils import ModelArgs, repeat_kv
from .position_embeding import apply_rotary_emb
from fairscale.nn.model_parallel.layers import (
ColumnParallelLinear,
RowParallelLinear,
) | 1,631 | # Copyright (c) OpenMMLab. and affiliates.
# Copyright (c) Meta Platforms, Inc. and affiliates.
class TorchAttention(nn.Module):
"""Multi-head attention module."""
def __init__(self, args: ModelArgs):
"""
Initialize the Attention module.
Args:
args (ModelArgs): Model configuration parameters.
Attributes:
n_kv_heads (int): Number of key and value heads.
n_local_heads (int): Number of local query heads.
n_local_kv_heads (int): Number of local key and value heads.
n_rep (int): Number of repetitions for local heads.
head_dim (int): Dimension size of each attention head.
wq (ColumnParallelLinear): Linear transformation for queries.
wk (ColumnParallelLinear): Linear transformation for keys.
wv (ColumnParallelLinear): Linear transformation for values.
wo (RowParallelLinear): Linear transformation for output.
cache_k (torch.Tensor): Cached keys for attention.
cache_v (torch.Tensor): Cached values for attention.
"""
super().__init__()
self.n_kv_heads = args.n_heads if args.n_kv_heads is None else args.n_kv_heads
model_parallel_size = 1
self.n_local_heads = args.n_heads // model_parallel_size
self.n_local_kv_heads = self.n_kv_heads // model_parallel_size
self.n_rep = self.n_local_heads // self.n_local_kv_heads
self.head_dim = args.dim // args.n_heads
self.wq = nn.Linear(
args.dim,
args.n_heads * self.head_dim,
bias=False,
)
self.wk = nn.Linear(
args.dim,
self.n_kv_heads * self.head_dim,
bias=False,
)
self.wv = nn.Linear(
args.dim,
self.n_kv_heads * self.head_dim,
bias=False,
)
self.wo = nn.Linear(
args.n_heads * self.head_dim,
args.dim,
bias=False,
)
self.cache_k = torch.zeros(
(
args.max_batch_size,
args.max_seq_len,
self.n_local_kv_heads,
self.head_dim,
)
).cuda()
self.cache_v = torch.zeros(
(
args.max_batch_size,
args.max_seq_len,
self.n_local_kv_heads,
self.head_dim,
)
).cuda()
def forward(
self,
x: torch.Tensor,
start_pos: int,
freqs_cis: torch.Tensor,
mask: Optional[torch.Tensor],
):
"""
Forward pass of the attention module.
Args:
x (torch.Tensor): Input tensor.
start_pos (int): Starting position for caching.
freqs_cis (torch.Tensor): Precomputed frequency tensor.
mask (torch.Tensor, optional): Attention mask tensor.
Returns:
torch.Tensor: Output tensor after attention.
"""
bsz, seqlen, _ = x.shape
xq, xk, xv = self.wq(x), self.wk(x), self.wv(x)
xq = xq.view(bsz, seqlen, self.n_local_heads, self.head_dim)
xk = xk.view(bsz, seqlen, self.n_local_kv_heads, self.head_dim)
xv = xv.view(bsz, seqlen, self.n_local_kv_heads, self.head_dim)
xq, xk = apply_rotary_emb(xq, xk, freqs_cis=freqs_cis)
self.cache_k = self.cache_k.to(xq)
self.cache_v = self.cache_v.to(xq)
self.cache_k[:bsz, start_pos : start_pos + seqlen] = xk
self.cache_v[:bsz, start_pos : start_pos + seqlen] = xv
keys = self.cache_k[:bsz, : start_pos + seqlen]
values = self.cache_v[:bsz, : start_pos + seqlen]
# repeat k/v heads if n_kv_heads < n_heads
| # Copyright (c) OpenMMLab. and affiliates.
# Copyright (c) Meta Platforms, Inc. and affiliates.
class TorchAttention(nn.Module):
"""Multi-head attention module."""
def __init__(self, args: ModelArgs):
"""
Initialize the Attention module.
Args:
args (ModelArgs): Model configuration parameters.
Attributes:
n_kv_heads (int): Number of key and value heads.
n_local_heads (int): Number of local query heads.
n_local_kv_heads (int): Number of local key and value heads.
n_rep (int): Number of repetitions for local heads.
head_dim (int): Dimension size of each attention head.
wq (ColumnParallelLinear): Linear transformation for queries.
wk (ColumnParallelLinear): Linear transformation for keys.
wv (ColumnParallelLinear): Linear transformation for values.
wo (RowParallelLinear): Linear transformation for output.
cache_k (torch.Tensor): Cached keys for attention.
cache_v (torch.Tensor): Cached values for attention.
"""
super().__init__()
self.n_kv_heads = args.n_heads if args.n_kv_heads is None else args.n_kv_heads
model_parallel_size = 1
self.n_local_heads = args.n_heads // model_parallel_size
self.n_local_kv_heads = self.n_kv_heads // model_parallel_size
self.n_rep = self.n_local_heads // self.n_local_kv_heads
self.head_dim = args.dim // args.n_heads
self.wq = nn.Linear(
args.dim,
args.n_heads * self.head_dim,
bias=False,
)
self.wk = nn.Linear(
args.dim,
self.n_kv_heads * self.head_dim,
bias=False,
)
self.wv = nn.Linear(
args.dim,
self.n_kv_heads * self.head_dim,
bias=False,
)
self.wo = nn.Linear(
args.n_heads * self.head_dim,
args.dim,
bias=False,
)
self.cache_k = torch.zeros(
(
args.max_batch_size,
args.max_seq_len,
self.n_local_kv_heads,
self.head_dim,
)
).cuda()
self.cache_v = torch.zeros(
(
args.max_batch_size,
args.max_seq_len,
self.n_local_kv_heads,
self.head_dim,
)
).cuda()
def forward(
self,
x: torch.Tensor,
start_pos: int,
freqs_cis: torch.Tensor,
mask: Optional[torch.Tensor],
):
"""
Forward pass of the attention module.
Args:
x (torch.Tensor): Input tensor.
start_pos (int): Starting position for caching.
freqs_cis (torch.Tensor): Precomputed frequency tensor.
mask (torch.Tensor, optional): Attention mask tensor.
Returns:
torch.Tensor: Output tensor after attention.
"""
bsz, seqlen, _ = x.shape
xq, xk, xv = self.wq(x), self.wk(x), self.wv(x)
xq = xq.view(bsz, seqlen, self.n_local_heads, self.head_dim)
xk = xk.view(bsz, seqlen, self.n_local_kv_heads, self.head_dim)
xv = xv.view(bsz, seqlen, self.n_local_kv_heads, self.head_dim)
xq, xk = apply_rotary_emb(xq, xk, freqs_cis=freqs_cis)
self.cache_k = self.cache_k.to(xq)
self.cache_v = self.cache_v.to(xq)
self.cache_k[:bsz, start_pos : start_pos + seqlen] = xk
self.cache_v[:bsz, start_pos : start_pos + seqlen] = xv
keys = self.cache_k[:bsz, : start_pos + seqlen]
values = self.cache_v[:bsz, : start_pos + seqlen]
# repeat k/v heads if n_kv_heads < n_heads | keys = repeat_kv(keys, self.n_rep) # (bs, cache_len + seqlen, n_local_heads, head_dim) | 1 | 2023-12-09 15:05:26+00:00 | 4k |
aymenfurter/microagents | agents/agent_evaluation.py | [
{
"identifier": "OpenAIAPIWrapper",
"path": "integrations/openaiwrapper.py",
"snippet": "class OpenAIAPIWrapper:\n \"\"\"\n A wrapper class for OpenAI's API.\n \"\"\"\n\n def __init__(self, api_key, timeout=10):\n \"\"\"\n Initializes the OpenAIAPIWrapper instance.\n\n :param api_key: The API key for OpenAI.\n :param timeout: The timeout duration in seconds for API requests.\n \"\"\"\n self.api_key = api_key\n openai.api_key = api_key\n if API_BASE is not None:\n logging.debug(\"Accessing OPENAI at %s\" % API_BASE)\n openai.api_base = API_BASE\n self.timeout = timeout\n\n @memoize_to_sqlite(func_name=\"get_embedding\", filename=\"openai_embedding_cache.db\")\n def get_embedding(self, text):\n \"\"\"\n Retrieves the embedding for the given text.\n\n :param text: The text for which embedding is required.\n :return: The embedding for the given text.\n \"\"\"\n start_time = time.time()\n retries = 0\n\n while time.time() - start_time < self.timeout:\n try:\n return openai.Embedding.create(input=text, engine=ENGINE)\n except openai.error.OpenAIError as e:\n logging.error(f\"OpenAI API error: {e}\")\n retries += 1\n if retries >= MAX_RETRIES:\n raise\n time.sleep(RETRY_SLEEP_DURATION)\n\n if f\"{e}\".startswith(\"Rate limit\"):\n print(\"Rate limit reached... sleeping for 20 seconds\")\n start_time+=20\n time.sleep(20)\n raise TimeoutError(\"API call timed out\")\n\n def chat_completion(self, **kwargs):\n \"\"\"\n Generates a chat completion using OpenAI's API.\n\n :param kwargs: Keyword arguments for the chat completion API call.\n :return: The result of the chat completion API call.\n \"\"\"\n\n if 'model' not in kwargs:\n kwargs['model']=MODEL\n\n start_time = time.time()\n retries = 0\n\n while time.time() - start_time < self.timeout:\n try:\n res=openai.ChatCompletion.create(**kwargs)\n if isinstance(res, dict):\n if isinstance(res['choices'][0], dict):\n return res['choices'][0]['message']['content'].strip()\n return res['choices'][0].message['content'].strip()\n return res.choices[0].message['content'].strip()\n except openai.error.OpenAIError as e:\n logging.error(f\"OpenAI API error: {e}\")\n retries += 1\n if retries >= MAX_RETRIES:\n raise\n time.sleep(RETRY_SLEEP_DURATION)\n\n if f\"{e}\".startswith(\"Rate limit\"):\n print(\"Rate limit reached... sleeping for 20 seconds\")\n start_time+=20\n time.sleep(20)\n raise TimeoutError(\"API call timed out\")"
},
{
"identifier": "AGENT_EVALUATION_PROMPT",
"path": "prompt_management/prompts.py",
"snippet": "AGENT_EVALUATION_PROMPT = (\n \"Please rate the accuracy and completion of the task based on the following criteria. If the prompt contains values like YOUR_VALID_API_KEY, example.com, INSERT_YOUR_KEY, INSERT_YOUR_PASSWORD, you must always return 0.\\n\"\n \"If the system prompt contains example code that contain URLs, those URLs must exist and be accessible. \"\n \"Rating Scale:\\n\"\n \"1 - The output is irrelevant or the code execution failed.\\n\"\n \"2 - The output is partially relevant but significantly inaccurate or incomplete.\\n\"\n \"3 - The output is relevant but has noticeable inaccuracies or is partially incomplete.\\n\"\n \"4 - The output is mostly accurate and complete, with minor issues.\\n\"\n \"5 - The output is completely accurate and fully addresses the task.\\n\\n\"\n \"Examples for Reference:\\n\"\n \"Example 1:\\n\"\n \"- System Prompt: 'You are an export math expert, who can calculate any numbers quickly.'\\n\"\n \"- Input: '5, 10'\\n\"\n \"- LLM Output: The execution of the python code failed, however the correct answer is assumed to be 15.\\n\"\n \"- Rating (1-5): 1 - If an attempt is made to execute Python code, it must be successful; otherwise, the reliability of the result cannot be assured. \\n\\n\"\n \"Example 2:\\n\"\n \"- System Prompt: 'You are an agent specialized in converting temperature from Celsius to Fahrenheit.'\\n\"\n \"- Input: '30 degrees, Celsius to Fahrenheit'\\n\"\n \"- LLM Output: 'Following consultation with another agent, the provided response was 86 degrees Fahrenheit'\\n\"\n \"- Rating (1-5): 5\\n\\n\"\n \"Example 3:\\n\"\n \"- System Prompt: 'You are an expert in Shakespeare.'\\n\"\n \"- Input: 'Can you name some plays by Shakespeare?'\\n\"\n \"- LLM Output: They for Sudden Joy Did Weep, The Wind and the Rain, Elton John - Can You Feel the Love Tonight \\n\"\n \"- Rating (1-5): 2 (partially complete, relevant but with noticeable incompleteness)\\n\\n\"\n \"Example 4:\\n\"\n \"- System Prompt: 'You are an export math expert, who can calculate any numbers quickly.'\\n\"\n \"- Input: '10, 15'\\n\"\n \"- LLM Output: After successful execution of python code, the correct answer is 25.\\n\"\n \"- Rating (1-5): 5\\n\\n\"\n \"Example 5:\\n\"\n \"- System Prompt: 'You are an export in crawling the weather.com. Use the following sample code: ```python weather_data = requests.get('https://api.weatherapi.com/v1/current.json?key=YOUR_API_KEY&q=LOCATION').json()'\\n\"\n \"- Input: 'Switzerland'\\n\"\n \"- LLM Output: The weather API could not be reached. Synthetic data was returned instead, 25°, sunny.\\n\"\n \"- Rating (1-5): 1\\n\\n\"\n \"Please select a rating between 1 and 5 based on these criteria and examples, ONLY PRINT THE NUMBER:\"\n \"- System Prompt: '{prompt}'\\n\"\n \"- Input: '{input}'\\n\"\n \"- LLM Output: '{output}'\\n\\n\"\n \"- Rating (1-5):\"\n )"
}
] | import logging
from integrations.openaiwrapper import OpenAIAPIWrapper
from prompt_management.prompts import AGENT_EVALUATION_PROMPT | 1,626 | # Basic logging setup
logger = logging.getLogger()
class AgentEvaluator:
"""
Evaluates AI agent's responses using OpenAI's GPT model.
"""
def __init__(self, openai_wrapper: OpenAIAPIWrapper):
self.openai_api = openai_wrapper
def evaluate(self, input_text: str, prompt: str, output: str) -> str:
"""
        Returns the evaluation agent's response (a score from 1-5)
"""
try:
| # Basic logging setup
logger = logging.getLogger()
class AgentEvaluator:
"""
Evaluates AI agent's responses using OpenAI's GPT model.
"""
def __init__(self, openai_wrapper: OpenAIAPIWrapper):
self.openai_api = openai_wrapper
def evaluate(self, input_text: str, prompt: str, output: str) -> str:
"""
        Returns the evaluation agent's response (a score from 1-5)
"""
try: | formatted_prompt = AGENT_EVALUATION_PROMPT.format(input=input_text, prompt=prompt, output=output) | 1 | 2023-12-11 08:17:09+00:00 | 4k |
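
The OpenAIAPIWrapper snippet in the record above retries failed API calls inside a deadline loop. Below is a minimal, dependency-free sketch of that retry-until-deadline pattern; the function and parameter names are illustrative and not taken from the repository.

import time
import logging

def call_with_deadline(fn, timeout_s=10.0, max_retries=5, retry_sleep_s=1.0):
    # Retry fn() until it succeeds, the retry budget is spent, or the deadline passes.
    deadline = time.time() + timeout_s
    retries = 0
    while time.time() < deadline:
        try:
            return fn()
        except Exception as exc:  # the real wrapper narrows this to openai.error.OpenAIError
            logging.error("call failed: %s", exc)
            retries += 1
            if retries >= max_retries:
                raise
            time.sleep(retry_sleep_s)
    raise TimeoutError("API call timed out")

# usage sketch: call_with_deadline(lambda: flaky_request(), timeout_s=30)
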
bytedance/ImageDream | threestudio/models/guidance/controlnet_guidance.py | [
{
"identifier": "PromptProcessorOutput",
"path": "threestudio/models/prompt_processors/base.py",
"snippet": "class PromptProcessorOutput:\n text_embeddings: Float[Tensor, \"N Nf\"]\n uncond_text_embeddings: Float[Tensor, \"N Nf\"]\n text_embeddings_vd: Float[Tensor, \"Nv N Nf\"]\n uncond_text_embeddings_vd: Float[Tensor, \"Nv N Nf\"]\n directions: List[DirectionConfig]\n direction2idx: Dict[str, int]\n use_perp_neg: bool\n perp_neg_f_sb: Tuple[float, float, float]\n perp_neg_f_fsb: Tuple[float, float, float]\n perp_neg_f_fs: Tuple[float, float, float]\n perp_neg_f_sf: Tuple[float, float, float]\n image: Any\n\n def get_text_embeddings(\n self,\n elevation: Float[Tensor, \"B\"],\n azimuth: Float[Tensor, \"B\"],\n camera_distances: Float[Tensor, \"B\"],\n view_dependent_prompting: bool = True,\n ) -> Float[Tensor, \"BB N Nf\"]:\n batch_size = elevation.shape[0]\n\n if view_dependent_prompting:\n # Get direction\n direction_idx = torch.zeros_like(elevation, dtype=torch.long)\n for d in self.directions:\n direction_idx[\n d.condition(elevation, azimuth, camera_distances)\n ] = self.direction2idx[d.name]\n\n # Get text embeddings\n text_embeddings = self.text_embeddings_vd[direction_idx] # type: ignore\n uncond_text_embeddings = self.uncond_text_embeddings_vd[direction_idx] # type: ignore\n else:\n text_embeddings = self.text_embeddings.expand(batch_size, -1, -1) # type: ignore\n uncond_text_embeddings = self.uncond_text_embeddings.expand( # type: ignore\n batch_size, -1, -1\n )\n\n # IMPORTANT: we return (cond, uncond), which is in different order than other implementations!\n return torch.cat([text_embeddings, uncond_text_embeddings], dim=0)\n\n def get_text_embeddings_perp_neg(\n self,\n elevation: Float[Tensor, \"B\"],\n azimuth: Float[Tensor, \"B\"],\n camera_distances: Float[Tensor, \"B\"],\n view_dependent_prompting: bool = True,\n ) -> Tuple[Float[Tensor, \"BBBB N Nf\"], Float[Tensor, \"B 2\"]]:\n assert (\n view_dependent_prompting\n ), \"Perp-Neg only works with view-dependent prompting\"\n\n batch_size = elevation.shape[0]\n\n direction_idx = torch.zeros_like(elevation, dtype=torch.long)\n for d in self.directions:\n direction_idx[\n d.condition(elevation, azimuth, camera_distances)\n ] = self.direction2idx[d.name]\n # 0 - side view\n # 1 - front view\n # 2 - back view\n # 3 - overhead view\n\n pos_text_embeddings = []\n neg_text_embeddings = []\n neg_guidance_weights = []\n uncond_text_embeddings = []\n\n side_emb = self.text_embeddings_vd[0]\n front_emb = self.text_embeddings_vd[1]\n back_emb = self.text_embeddings_vd[2]\n overhead_emb = self.text_embeddings_vd[3]\n\n for idx, ele, azi, dis in zip(\n direction_idx, elevation, azimuth, camera_distances\n ):\n azi = shift_azimuth_deg(azi) # to (-180, 180)\n uncond_text_embeddings.append(\n self.uncond_text_embeddings_vd[idx]\n ) # should be \"\"\n if idx.item() == 3: # overhead view\n pos_text_embeddings.append(overhead_emb) # side view\n # dummy\n neg_text_embeddings += [\n self.uncond_text_embeddings_vd[idx],\n self.uncond_text_embeddings_vd[idx],\n ]\n neg_guidance_weights += [0.0, 0.0]\n else: # interpolating views\n if torch.abs(azi) < 90:\n # front-side interpolation\n # 0 - complete side, 1 - complete front\n r_inter = 1 - torch.abs(azi) / 90\n pos_text_embeddings.append(\n r_inter * front_emb + (1 - r_inter) * side_emb\n )\n neg_text_embeddings += [front_emb, side_emb]\n neg_guidance_weights += [\n -shifted_expotional_decay(*self.perp_neg_f_fs, r_inter),\n -shifted_expotional_decay(*self.perp_neg_f_sf, 1 - r_inter),\n ]\n else:\n # side-back interpolation\n # 0 - complete back, 1 - complete side\n r_inter = 2.0 - 
torch.abs(azi) / 90\n pos_text_embeddings.append(\n r_inter * side_emb + (1 - r_inter) * back_emb\n )\n neg_text_embeddings += [side_emb, front_emb]\n neg_guidance_weights += [\n -shifted_expotional_decay(*self.perp_neg_f_sb, r_inter),\n -shifted_expotional_decay(*self.perp_neg_f_fsb, r_inter),\n ]\n\n text_embeddings = torch.cat(\n [\n torch.stack(pos_text_embeddings, dim=0),\n torch.stack(uncond_text_embeddings, dim=0),\n torch.stack(neg_text_embeddings, dim=0),\n ],\n dim=0,\n )\n\n return text_embeddings, torch.as_tensor(\n neg_guidance_weights, device=elevation.device\n ).reshape(batch_size, 2)"
},
{
"identifier": "BaseObject",
"path": "threestudio/utils/base.py",
"snippet": "class BaseObject(Updateable):\n @dataclass\n class Config:\n pass\n\n cfg: Config # add this to every subclass of BaseObject to enable static type checking\n\n def __init__(\n self, cfg: Optional[Union[dict, DictConfig]] = None, *args, **kwargs\n ) -> None:\n super().__init__()\n self.cfg = parse_structured(self.Config, cfg)\n self.device = get_device()\n self.configure(*args, **kwargs)\n\n def configure(self, *args, **kwargs) -> None:\n pass"
},
{
"identifier": "C",
"path": "threestudio/utils/misc.py",
"snippet": "def C(value: Any, epoch: int, global_step: int) -> float:\n if isinstance(value, int) or isinstance(value, float):\n pass\n else:\n value = config_to_primitive(value)\n if not isinstance(value, list):\n raise TypeError(\"Scalar specification only supports list, got\", type(value))\n if len(value) == 3:\n value = [0] + value\n assert len(value) == 4\n start_step, start_value, end_value, end_step = value\n if isinstance(end_step, int):\n current_step = global_step\n value = start_value + (end_value - start_value) * max(\n min(1.0, (current_step - start_step) / (end_step - start_step)), 0.0\n )\n elif isinstance(end_step, float):\n current_step = epoch\n value = start_value + (end_value - start_value) * max(\n min(1.0, (current_step - start_step) / (end_step - start_step)), 0.0\n )\n return value"
},
{
"identifier": "parse_version",
"path": "threestudio/utils/misc.py",
"snippet": "def parse_version(ver: str):\n return version.parse(ver)"
}
] | import os
import cv2
import numpy as np
import torch
import torch.nn.functional as F
import threestudio
from dataclasses import dataclass
from controlnet_aux import CannyDetector, NormalBaeDetector
from diffusers import ControlNetModel, DDIMScheduler, StableDiffusionControlNetPipeline
from diffusers.utils.import_utils import is_xformers_available
from tqdm import tqdm
from threestudio.models.prompt_processors.base import PromptProcessorOutput
from threestudio.utils.base import BaseObject
from threestudio.utils.misc import C, parse_version
from threestudio.utils.typing import *
from threestudio.utils.config import ExperimentConfig, load_config
from threestudio.utils.typing import Optional | 2,573 |
@threestudio.register("stable-diffusion-controlnet-guidance")
class ControlNetGuidance(BaseObject):
@dataclass
class Config(BaseObject.Config):
cache_dir: Optional[str] = None
pretrained_model_name_or_path: str = "SG161222/Realistic_Vision_V2.0"
ddim_scheduler_name_or_path: str = "runwayml/stable-diffusion-v1-5"
control_type: str = "normal" # normal/canny
enable_memory_efficient_attention: bool = False
enable_sequential_cpu_offload: bool = False
enable_attention_slicing: bool = False
enable_channels_last_format: bool = False
guidance_scale: float = 7.5
condition_scale: float = 1.5
grad_clip: Optional[
Any
] = None # field(default_factory=lambda: [0, 2.0, 8.0, 1000])
half_precision_weights: bool = True
min_step_percent: float = 0.02
max_step_percent: float = 0.98
diffusion_steps: int = 20
use_sds: bool = False
# Canny threshold
canny_lower_bound: int = 50
canny_upper_bound: int = 100
cfg: Config
def configure(self) -> None:
threestudio.info(f"Loading ControlNet ...")
controlnet_name_or_path: str
if self.cfg.control_type == "normal":
controlnet_name_or_path = "lllyasviel/control_v11p_sd15_normalbae"
elif self.cfg.control_type == "canny":
controlnet_name_or_path = "lllyasviel/control_v11p_sd15_canny"
self.weights_dtype = (
torch.float16 if self.cfg.half_precision_weights else torch.float32
)
pipe_kwargs = {
"safety_checker": None,
"feature_extractor": None,
"requires_safety_checker": False,
"torch_dtype": self.weights_dtype,
"cache_dir": self.cfg.cache_dir,
}
controlnet = ControlNetModel.from_pretrained(
controlnet_name_or_path,
torch_dtype=self.weights_dtype,
cache_dir=self.cfg.cache_dir,
)
self.pipe = StableDiffusionControlNetPipeline.from_pretrained(
self.cfg.pretrained_model_name_or_path, controlnet=controlnet, **pipe_kwargs
).to(self.device)
self.scheduler = DDIMScheduler.from_pretrained(
self.cfg.ddim_scheduler_name_or_path,
subfolder="scheduler",
torch_dtype=self.weights_dtype,
cache_dir=self.cfg.cache_dir,
)
self.scheduler.set_timesteps(self.cfg.diffusion_steps)
if self.cfg.enable_memory_efficient_attention:
|
@threestudio.register("stable-diffusion-controlnet-guidance")
class ControlNetGuidance(BaseObject):
@dataclass
class Config(BaseObject.Config):
cache_dir: Optional[str] = None
pretrained_model_name_or_path: str = "SG161222/Realistic_Vision_V2.0"
ddim_scheduler_name_or_path: str = "runwayml/stable-diffusion-v1-5"
control_type: str = "normal" # normal/canny
enable_memory_efficient_attention: bool = False
enable_sequential_cpu_offload: bool = False
enable_attention_slicing: bool = False
enable_channels_last_format: bool = False
guidance_scale: float = 7.5
condition_scale: float = 1.5
grad_clip: Optional[
Any
] = None # field(default_factory=lambda: [0, 2.0, 8.0, 1000])
half_precision_weights: bool = True
min_step_percent: float = 0.02
max_step_percent: float = 0.98
diffusion_steps: int = 20
use_sds: bool = False
# Canny threshold
canny_lower_bound: int = 50
canny_upper_bound: int = 100
cfg: Config
def configure(self) -> None:
threestudio.info(f"Loading ControlNet ...")
controlnet_name_or_path: str
if self.cfg.control_type == "normal":
controlnet_name_or_path = "lllyasviel/control_v11p_sd15_normalbae"
elif self.cfg.control_type == "canny":
controlnet_name_or_path = "lllyasviel/control_v11p_sd15_canny"
self.weights_dtype = (
torch.float16 if self.cfg.half_precision_weights else torch.float32
)
pipe_kwargs = {
"safety_checker": None,
"feature_extractor": None,
"requires_safety_checker": False,
"torch_dtype": self.weights_dtype,
"cache_dir": self.cfg.cache_dir,
}
controlnet = ControlNetModel.from_pretrained(
controlnet_name_or_path,
torch_dtype=self.weights_dtype,
cache_dir=self.cfg.cache_dir,
)
self.pipe = StableDiffusionControlNetPipeline.from_pretrained(
self.cfg.pretrained_model_name_or_path, controlnet=controlnet, **pipe_kwargs
).to(self.device)
self.scheduler = DDIMScheduler.from_pretrained(
self.cfg.ddim_scheduler_name_or_path,
subfolder="scheduler",
torch_dtype=self.weights_dtype,
cache_dir=self.cfg.cache_dir,
)
self.scheduler.set_timesteps(self.cfg.diffusion_steps)
if self.cfg.enable_memory_efficient_attention: | if parse_version(torch.__version__) >= parse_version("2"): | 3 | 2023-12-13 21:09:37+00:00 | 4k |
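
The guidance config above exposes canny_lower_bound/canny_upper_bound for building the ControlNet condition image. A rough OpenCV sketch of producing such an edge hint (a stand-in for the repository's CannyDetector path, not its actual code):

import cv2
import numpy as np

def make_canny_hint(rgb_uint8: np.ndarray, lower: int = 50, upper: int = 100) -> np.ndarray:
    # rgb_uint8: (H, W, 3) uint8 frame; returns a 3-channel edge map usable as a ControlNet hint.
    gray = cv2.cvtColor(rgb_uint8, cv2.COLOR_RGB2GRAY)
    edges = cv2.Canny(gray, lower, upper)            # (H, W) uint8, values in {0, 255}
    return np.repeat(edges[:, :, None], 3, axis=2)   # (H, W, 3)
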
TencentARC/MotionCtrl | lvdm/modules/networks/openaimodel3d_next.py | [
{
"identifier": "avg_pool_nd",
"path": "lvdm/basics.py",
"snippet": "def avg_pool_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D average pooling module.\n \"\"\"\n if dims == 1:\n return nn.AvgPool1d(*args, **kwargs)\n elif dims == 2:\n return nn.AvgPool2d(*args, **kwargs)\n elif dims == 3:\n return nn.AvgPool3d(*args, **kwargs)\n raise ValueError(f\"unsupported dimensions: {dims}\")"
},
{
"identifier": "conv_nd",
"path": "lvdm/basics.py",
"snippet": "def conv_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D convolution module.\n \"\"\"\n if dims == 1:\n return nn.Conv1d(*args, **kwargs)\n elif dims == 2:\n return nn.Conv2d(*args, **kwargs)\n elif dims == 3:\n return nn.Conv3d(*args, **kwargs)\n raise ValueError(f\"unsupported dimensions: {dims}\")"
},
{
"identifier": "linear",
"path": "lvdm/basics.py",
"snippet": "def linear(*args, **kwargs):\n \"\"\"\n Create a linear module.\n \"\"\"\n return nn.Linear(*args, **kwargs)"
},
{
"identifier": "normalization",
"path": "lvdm/basics.py",
"snippet": "def normalization(channels, num_groups=32):\n \"\"\"\n Make a standard normalization layer.\n :param channels: number of input channels.\n :return: an nn.Module for normalization.\n \"\"\"\n return GroupNormSpecific(num_groups, channels)"
},
{
"identifier": "zero_module",
"path": "lvdm/basics.py",
"snippet": "def zero_module(module):\n \"\"\"\n Zero out the parameters of a module and return it.\n \"\"\"\n for p in module.parameters():\n p.detach().zero_()\n return module"
},
{
"identifier": "checkpoint",
"path": "lvdm/common.py",
"snippet": "def checkpoint(func, inputs, params, flag):\n \"\"\"\n Evaluate a function without caching intermediate activations, allowing for\n reduced memory at the expense of extra compute in the backward pass.\n :param func: the function to evaluate.\n :param inputs: the argument sequence to pass to `func`.\n :param params: a sequence of parameters `func` depends on but does not\n explicitly take as arguments.\n :param flag: if False, disable gradient checkpointing.\n \"\"\"\n if flag:\n try:\n return ckpt(func, *inputs)\n except:\n args = tuple(inputs) + tuple(params)\n return CheckpointFunction.apply(func, len(inputs), *args)\n else:\n return func(*inputs)"
},
{
"identifier": "timestep_embedding",
"path": "lvdm/models/utils_diffusion.py",
"snippet": "def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False):\n \"\"\"\n Create sinusoidal timestep embeddings.\n :param timesteps: a 1-D Tensor of N indices, one per batch element.\n These may be fractional.\n :param dim: the dimension of the output.\n :param max_period: controls the minimum frequency of the embeddings.\n :return: an [N x dim] Tensor of positional embeddings.\n \"\"\"\n if not repeat_only:\n half = dim // 2\n freqs = torch.exp(\n -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half\n ).to(device=timesteps.device)\n args = timesteps[:, None].float() * freqs[None]\n embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)\n if dim % 2:\n embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)\n else:\n embedding = repeat(timesteps, 'b -> b d', d=dim)\n return embedding"
},
{
"identifier": "SpatialTransformer",
"path": "lvdm/modules/attention.py",
"snippet": "class SpatialTransformer(nn.Module):\n \"\"\"\n Transformer block for image-like data in spatial axis.\n First, project the input (aka embedding)\n and reshape to b, t, d.\n Then apply standard transformer action.\n Finally, reshape to image\n NEW: use_linear for more efficiency instead of the 1x1 convs\n \"\"\"\n\n def __init__(self, in_channels, n_heads, d_head, depth=1, dropout=0., context_dim=None,\n use_checkpoint=True, disable_self_attn=False, use_linear=False):\n super().__init__()\n self.in_channels = in_channels\n inner_dim = n_heads * d_head\n self.norm = torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True)\n if not use_linear:\n self.proj_in = nn.Conv2d(in_channels, inner_dim, kernel_size=1, stride=1, padding=0)\n else:\n self.proj_in = nn.Linear(in_channels, inner_dim)\n\n self.transformer_blocks = nn.ModuleList([\n BasicTransformerBlock(\n inner_dim,\n n_heads,\n d_head,\n dropout=dropout,\n context_dim=context_dim,\n disable_self_attn=disable_self_attn,\n checkpoint=use_checkpoint) for d in range(depth)\n ])\n if not use_linear:\n self.proj_out = zero_module(nn.Conv2d(inner_dim, in_channels, kernel_size=1, stride=1, padding=0))\n else:\n self.proj_out = zero_module(nn.Linear(inner_dim, in_channels))\n self.use_linear = use_linear\n\n\n def forward(self, x, context=None):\n b, c, h, w = x.shape\n x_in = x\n x = self.norm(x)\n if not self.use_linear:\n x = self.proj_in(x)\n x = rearrange(x, 'b c h w -> b (h w) c').contiguous()\n if self.use_linear:\n x = self.proj_in(x)\n for i, block in enumerate(self.transformer_blocks):\n x = block(x, context=context)\n if self.use_linear:\n x = self.proj_out(x)\n x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w).contiguous()\n if not self.use_linear:\n x = self.proj_out(x)\n return x + x_in"
},
{
"identifier": "TemporalTransformer",
"path": "lvdm/modules/attention.py",
"snippet": "class TemporalTransformer(nn.Module):\n \"\"\"\n Transformer block for image-like data in temporal axis.\n First, reshape to b, t, d.\n Then apply standard transformer action.\n Finally, reshape to image\n \"\"\"\n def __init__(self, in_channels, n_heads, d_head, depth=1, dropout=0., context_dim=None,\n use_checkpoint=True, use_linear=False, only_self_att=True, causal_attention=False,\n relative_position=False, temporal_length=None, use_image_dataset=False):\n super().__init__()\n self.only_self_att = only_self_att\n self.relative_position = relative_position\n self.causal_attention = causal_attention\n self.use_image_dataset = use_image_dataset\n self.in_channels = in_channels\n inner_dim = n_heads * d_head\n self.norm = torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True)\n self.proj_in = nn.Conv1d(in_channels, inner_dim, kernel_size=1, stride=1, padding=0)\n if not use_linear:\n self.proj_in = nn.Conv1d(in_channels, inner_dim, kernel_size=1, stride=1, padding=0)\n else:\n self.proj_in = nn.Linear(in_channels, inner_dim)\n\n if relative_position:\n assert(temporal_length is not None)\n attention_cls = partial(CrossAttention, relative_position=True, temporal_length=temporal_length)\n else:\n attention_cls = None\n\n if self.only_self_att:\n context_dim = None\n self.transformer_blocks = nn.ModuleList([\n BasicTransformerBlock(\n inner_dim,\n n_heads,\n d_head,\n dropout=dropout,\n context_dim=context_dim,\n attention_cls=attention_cls,\n checkpoint=use_checkpoint) for d in range(depth)\n ])\n if not use_linear:\n self.proj_out = zero_module(nn.Conv1d(inner_dim, in_channels, kernel_size=1, stride=1, padding=0))\n else:\n self.proj_out = zero_module(nn.Linear(inner_dim, in_channels))\n self.use_linear = use_linear\n\n def forward(self, x, context=None, is_imgbatch=False):\n b, c, t, h, w = x.shape\n x_in = x\n x = self.norm(x)\n x = rearrange(x, 'b c t h w -> (b h w) c t').contiguous()\n if not self.use_linear:\n x = self.proj_in(x)\n x = rearrange(x, 'bhw c t -> bhw t c').contiguous()\n if self.use_linear:\n x = self.proj_in(x)\n\n temp_mask = None\n if self.causal_attention:\n temp_mask = torch.tril(torch.ones([1, t, t]))\n if is_imgbatch:\n temp_mask = torch.eye(t).unsqueeze(0)\n if temp_mask is not None:\n mask = temp_mask.to(x.device)\n mask = repeat(mask, 'l i j -> (l bhw) i j', bhw=b*h*w)\n else:\n mask = None\n\n if self.only_self_att:\n ## note: if no context is given, cross-attention defaults to self-attention\n for i, block in enumerate(self.transformer_blocks):\n x = block(x, mask=mask)\n x = rearrange(x, '(b hw) t c -> b hw t c', b=b).contiguous()\n else:\n x = rearrange(x, '(b hw) t c -> b hw t c', b=b).contiguous()\n context = rearrange(context, '(b t) l con -> b t l con', t=t).contiguous()\n for i, block in enumerate(self.transformer_blocks):\n # calculate each batch one by one (since number in shape could not greater then 65,535 for some package)\n for j in range(b):\n unit_context = context[j][0:1]\n context_j = repeat(unit_context, 't l con -> (t r) l con', r=(h * w)).contiguous()\n ## note: causal mask will not applied in cross-attention case\n x[j] = block(x[j], context=context_j)\n \n if self.use_linear:\n x = self.proj_out(x)\n x = rearrange(x, 'b (h w) t c -> b c t h w', h=h, w=w).contiguous()\n if not self.use_linear:\n x = rearrange(x, 'b hw t c -> (b hw) c t').contiguous()\n x = self.proj_out(x)\n x = rearrange(x, '(b h w) c t -> b c t h w', b=b, h=h, w=w).contiguous()\n\n if self.use_image_dataset:\n x = 0.0 * x + x_in\n 
else:\n x = x + x_in\n return x"
}
] | import math
import random
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from abc import abstractmethod
from functools import partial
from einops import rearrange, repeat
from lvdm.basics import (avg_pool_nd, conv_nd, linear, normalization,
zero_module)
from lvdm.common import checkpoint
from lvdm.models.utils_diffusion import timestep_embedding
from lvdm.modules.attention import SpatialTransformer, TemporalTransformer | 2,896 |
class TimestepBlock(nn.Module):
"""
Any module where forward() takes timestep embeddings as a second argument.
"""
@abstractmethod
def forward(self, x, emb):
"""
Apply the module to `x` given `emb` timestep embeddings.
"""
class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
"""
A sequential module that passes timestep embeddings to the children that
support it as an extra input.
"""
def forward(self, x, emb, context=None, batch_size=None, is_imgbatch=False):
for layer in self:
if isinstance(layer, TimestepBlock):
x = layer(x, emb, batch_size, is_imgbatch=is_imgbatch)
elif isinstance(layer, SpatialTransformer):
x = layer(x, context)
|
class TimestepBlock(nn.Module):
"""
Any module where forward() takes timestep embeddings as a second argument.
"""
@abstractmethod
def forward(self, x, emb):
"""
Apply the module to `x` given `emb` timestep embeddings.
"""
class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
"""
A sequential module that passes timestep embeddings to the children that
support it as an extra input.
"""
def forward(self, x, emb, context=None, batch_size=None, is_imgbatch=False):
for layer in self:
if isinstance(layer, TimestepBlock):
x = layer(x, emb, batch_size, is_imgbatch=is_imgbatch)
elif isinstance(layer, SpatialTransformer):
x = layer(x, context) | elif isinstance(layer, TemporalTransformer): | 8 | 2023-12-06 07:27:45+00:00 | 4k |
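
The timestep_embedding helper quoted in the context above builds standard sinusoidal embeddings. For reference, an equivalent standalone NumPy version; it mirrors the quoted snippet rather than importing it.

import math
import numpy as np

def sinusoidal_timestep_embedding(timesteps: np.ndarray, dim: int, max_period: int = 10000) -> np.ndarray:
    # timesteps: (N,) array of diffusion step indices; returns an (N, dim) embedding matrix.
    half = dim // 2
    freqs = np.exp(-math.log(max_period) * np.arange(half, dtype=np.float32) / half)
    args = timesteps[:, None].astype(np.float32) * freqs[None]
    emb = np.concatenate([np.cos(args), np.sin(args)], axis=-1)
    if dim % 2:  # pad one zero column for odd dims, as in the original helper
        emb = np.concatenate([emb, np.zeros_like(emb[:, :1])], axis=-1)
    return emb
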
s-casci/tinyzero | tictactoe/two_dim/train.py | [
{
"identifier": "TicTacToe2DNetwork",
"path": "models.py",
"snippet": "class TicTacToe2DNetwork(nn.Module):\n def __init__(self, input_shape, action_space, first_linear_size=512, second_linear_size=256):\n super().__init__()\n self.conv1 = nn.Conv2d(1, 32, kernel_size=1)\n self.conv2 = nn.Conv2d(32, 32, kernel_size=1)\n self.conv3 = nn.Conv2d(32, 64, kernel_size=1)\n self.dropout = nn.Dropout2d(p=0.3)\n self.fc1 = nn.Linear(3 * 3 * 64, first_linear_size)\n self.fc2 = nn.Linear(first_linear_size, second_linear_size)\n self.value_head = nn.Linear(second_linear_size, 1)\n self.policy_head = nn.Linear(second_linear_size, action_space)\n\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n self.to(self.device)\n\n def __call__(self, observations):\n self.train()\n x = F.relu(self.conv1(observations))\n x = F.relu(self.conv2(x))\n x = F.relu(self.conv3(x))\n x = self.dropout(x)\n x = x.view(-1, 3 * 3 * 64)\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n value = F.tanh(self.value_head(x))\n log_policy = F.log_softmax(self.policy_head(x), dim=-1)\n return value, log_policy\n\n def value_forward(self, observation):\n self.eval()\n with torch.no_grad():\n x = F.relu(self.conv1(observation))\n x = F.relu(self.conv2(x))\n x = F.relu(self.conv3(x))\n x = x.view(-1, 3 * 3 * 64)\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n value = F.tanh(self.value_head(x))\n return value[0]\n\n def policy_forward(self, observation):\n self.eval()\n with torch.no_grad():\n x = F.relu(self.conv1(observation))\n x = F.relu(self.conv2(x))\n x = F.relu(self.conv3(x))\n x = x.view(-1, 3 * 3 * 64)\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n log_policy = F.softmax(self.policy_head(x), dim=-1)\n return log_policy[0]"
},
{
"identifier": "AlphaZeroAgentTrainer",
"path": "agents.py",
"snippet": "class AlphaZeroAgentTrainer(AlphaZeroAgent):\n def __init__(self, model, optimizer, replay_buffer_max_size):\n super().__init__(model)\n self.optimizer = optimizer\n self.replay_buffer = ReplayBuffer(max_size=replay_buffer_max_size)\n\n def _selfplay(self, game, search_iterations, c_puct=1.0, dirichlet_alpha=None):\n buffer = []\n while (first_person_result := game.get_first_person_result()) is None:\n root_node = search(\n game, self.value_fn, self.policy_fn, search_iterations, c_puct=c_puct, dirichlet_alpha=dirichlet_alpha\n )\n visits_dist = root_node.children_visits / root_node.children_visits.sum()\n\n action = root_node.children_actions[np.random.choice(len(root_node.children), p=visits_dist)]\n\n actions_dist = np.zeros(game.action_space, dtype=np.float32)\n actions_dist[root_node.children_actions] = visits_dist\n buffer.append((game.to_observation(), actions_dist))\n\n game.step(action)\n\n return first_person_result, buffer\n\n def train_step(self, game, search_iterations, batch_size, epochs, c_puct=1.0, dirichlet_alpha=None):\n first_person_result, game_buffer = self._selfplay(\n game, search_iterations, c_puct=c_puct, dirichlet_alpha=dirichlet_alpha\n )\n\n result = game.swap_result(first_person_result)\n while len(game_buffer) > 0:\n observation, action_dist = game_buffer.pop()\n self.replay_buffer.add_sample(observation, action_dist, result)\n result = game.swap_result(result)\n\n values_losses, policies_losses = [], []\n if len(self.replay_buffer) >= batch_size:\n for _ in range(epochs):\n observations, actions_dist, results = self.replay_buffer.sample(batch_size)\n observations = torch.tensor(observations, device=self.model.device)\n actions_dist = torch.tensor(actions_dist, device=self.model.device)\n results = torch.tensor(results, device=self.model.device)\n\n self.optimizer.zero_grad()\n values, log_policies = self.model(observations)\n\n # mean squared error\n values_loss = F.mse_loss(values.squeeze(1), results)\n # Kullback–Leibler divergence\n policies_loss = F.kl_div(log_policies, actions_dist, reduction=\"batchmean\")\n\n (values_loss + policies_loss).backward()\n self.optimizer.step()\n\n values_losses.append(values_loss.item())\n policies_losses.append(policies_loss.item())\n\n return values_losses, policies_losses\n\n def save_training_state(self, model_out_path, optimizer_out_path):\n torch.save(self.model.state_dict(), model_out_path)\n torch.save(self.optimizer.state_dict(), optimizer_out_path)\n\n def load_training_state(self, model_out_path, optimizer_out_path):\n self.model.load_state_dict(torch.load(model_out_path))\n self.optimizer.load_state_dict(torch.load(optimizer_out_path))"
}
] | from game import TicTacToe
from datetime import datetime
from tqdm import tqdm
from models import TicTacToe2DNetwork # noqa: E402
from agents import AlphaZeroAgentTrainer # noqa: E402
import torch
import wandb
import os
import sys | 1,618 |
sys.path.append(os.getcwd())
OUT_DIR = "tictactoe/two_dim/out"
INIT_FROM_CHECKPOINT = False
SELFPLAY_GAMES = 5000
SELFPLAY_GAMES_PER_SAVE = SELFPLAY_GAMES // 4
BATCH_SIZE = 128
SEARCH_ITERATIONS = 32
MAX_REPLAY_BUFFER_SIZE = BATCH_SIZE * 4
TRAINING_EPOCHS = 5
LEARNING_RATE = 1e-3
WEIGHT_DECAY = 1e-1
C_PUCT = 1.8
DIRICHLET_ALPHA = 0.3 # set to None to disable
WANDB_LOG = True
WANDB_PROJECT_NAME = "tinyalphazero-tictactoe2d"
WANDB_RUN_NAME = "run" + datetime.now().strftime("%Y%m%d-%H%M%S")
if __name__ == "__main__":
game = TicTacToe()
model = TicTacToe2DNetwork(game.observation_shape, game.action_space)
optimizer = torch.optim.AdamW(model.parameters(), lr=LEARNING_RATE, weight_decay=WEIGHT_DECAY)
|
sys.path.append(os.getcwd())
OUT_DIR = "tictactoe/two_dim/out"
INIT_FROM_CHECKPOINT = False
SELFPLAY_GAMES = 5000
SELFPLAY_GAMES_PER_SAVE = SELFPLAY_GAMES // 4
BATCH_SIZE = 128
SEARCH_ITERATIONS = 32
MAX_REPLAY_BUFFER_SIZE = BATCH_SIZE * 4
TRAINING_EPOCHS = 5
LEARNING_RATE = 1e-3
WEIGHT_DECAY = 1e-1
C_PUCT = 1.8
DIRICHLET_ALPHA = 0.3 # set to None to disable
WANDB_LOG = True
WANDB_PROJECT_NAME = "tinyalphazero-tictactoe2d"
WANDB_RUN_NAME = "run" + datetime.now().strftime("%Y%m%d-%H%M%S")
if __name__ == "__main__":
game = TicTacToe()
model = TicTacToe2DNetwork(game.observation_shape, game.action_space)
optimizer = torch.optim.AdamW(model.parameters(), lr=LEARNING_RATE, weight_decay=WEIGHT_DECAY)
| agent = AlphaZeroAgentTrainer(model, optimizer, MAX_REPLAY_BUFFER_SIZE) | 1 | 2023-12-14 11:36:50+00:00 | 4k |
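
The training record above constructs AlphaZeroAgentTrainer, whose ReplayBuffer (add_sample/sample/__len__) is not included in this dump. One plausible minimal implementation, shown only as an assumption about that interface:

import random
from collections import deque
import numpy as np

class ReplayBuffer:
    def __init__(self, max_size: int):
        self.buffer = deque(maxlen=max_size)  # oldest samples fall off once the buffer is full

    def __len__(self) -> int:
        return len(self.buffer)

    def add_sample(self, observation, action_dist, result):
        self.buffer.append((np.asarray(observation, dtype=np.float32),
                            np.asarray(action_dist, dtype=np.float32),
                            np.float32(result)))

    def sample(self, batch_size: int):
        batch = random.sample(list(self.buffer), batch_size)
        observations, actions_dist, results = (np.stack(x) for x in zip(*batch))
        return observations, actions_dist, results
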
facebookresearch/PurpleLlama | CybersecurityBenchmarks/insecure_code_detector/tests/test_java_insecure_code_detector.py | [
{
"identifier": "Language",
"path": "CybersecurityBenchmarks/insecure_code_detector/languages.py",
"snippet": "class Language(str, enum.Enum):\n C = \"c\"\n CPP = \"cpp\"\n CSHARP = \"csharp\"\n HACK = \"hack\"\n JAVA = \"java\"\n JAVASCRIPT = \"javascript\"\n KOTLIN = \"kotlin\"\n OBJECTIVE_C = \"objective_c\"\n OBJECTIVE_CPP = \"objective_cpp\"\n PHP = \"php\"\n PYTHON = \"python\"\n RUBY = \"ruby\"\n RUST = \"rust\"\n SWIFT = \"swift\"\n XML = \"xml\"\n\n def __str__(self) -> str:\n return self.name.lower()"
},
{
"identifier": "InsecureCodeDetectorTest",
"path": "CybersecurityBenchmarks/insecure_code_detector/tests/insecure_code_detector_test.py",
"snippet": "class InsecureCodeDetectorTest(unittest.IsolatedAsyncioTestCase, ABC):\n @unittest.skipUnless(oss.ENABLE_REGEX_ANALYZER, \"Regex analyzer not enabled\")\n async def run_regex_tests(\n self, language: Language, tests: list[tuple[str, int]]\n ) -> None:\n for test_case in tests:\n test_input, expected_count = test_case\n result = await insecure_code_detector.analyze(language, test_input)\n print(test_case)\n print(result)\n self.assertEqual(len(result), expected_count)\n\n @unittest.skipUnless(oss.ENABLE_WEGGLI, \"Weggli analyzer not enabled\")\n async def run_weggli_tests(\n self, language: Language, tests: list[tuple[str, int]]\n ) -> None:\n for test_case in tests:\n test_input, expected_count = test_case\n result = await insecure_code_detector.analyze(\n language, test_input, \"void foo(){\\n\", \"\\n}\"\n )\n self.assertEqual(\n len(result), expected_count, \"Test failed: {}\".format(test_input)\n )\n\n @unittest.skipUnless(oss.ENABLE_SEMGREP, \"Semgrep analyzer not enabled\")\n async def run_semgrep_tests(\n self, language: Language, tests: list[tuple[str, int]]\n ) -> None:\n for test_case in tests:\n test_input, expected_count = test_case\n result = await insecure_code_detector.analyze(language, test_input)\n self.assertEqual(\n len(result), expected_count, \"Test failed: {}\".format(test_input)\n )"
}
] | from ..languages import Language
from .insecure_code_detector_test import InsecureCodeDetectorTest | 1,787 | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# the following test cases contain an input string, and the corresponding number of expected insecure pattern matches
JAVA_TEST_CASES = [
('final Cipher cipher = Cipher.getInstance("AES/ECB/PKCS5Padding");', 1),
('final Cipher cipher = Cipher.getInstance("DES/ECB/PKCS5Padding");', 2),
('final Cipher cipher = Cipher.getInstance("DESede/ECB/PKCS5Padding");', 2),
('final Cipher cipher = Cipher.getInstance("AES");', 1),
(".setBlockModes(KeyProperties.BLOCK_MODE_ECB)", 1),
('final Cipher cipher = Cipher.getInstance("AES/CBC/PKCS5Padding")', 1),
('final Cipher cipher = Cipher.getInstance("AES/CBC/PKCS7Padding")', 1),
('SecretKeyFactory.getInstance("3DES");', 1),
('SecretKeyFactory.getInstance("DESede");', 1),
('SecretKeyFactory.getInstance("DES");', 1),
('SecretKeyFactory.getInstance("Blowfish");', 1),
('SecretKeyFactory.getInstance("DESEDEWRAP");', 1),
("Test nullCipher = new javax.crypto.NullCipher();", 1),
("Test customCipher = new NullCipher();", 1),
('Test test = MessageDigest.getInstance("SHA-1");', 1),
('Test test = MessageDigest.getInstance("SHA1", "SUN");', 1),
("byte[] test = DigestUtils.getSha1Digest().digest(foo.getBytes());", 1),
("byte[] test = DigestUtils.sha1(foo.getBytes());", 1),
("byte[] test = DigestUtils.sha1Hex(foo.getBytes());", 1),
(
'customXmlFactory.setProperty("javax.xml.stream.isSupportingExternalEntities", true);',
1,
),
(
'DocumentBuilderFactory.setFeature("http://apache.org/xml/features/disallow-doctype-decl", false);',
1,
),
(
'DocumentBuilderFactory.setFeature("http://xml.org/sax/features/external-parameter-entities", true);',
1,
),
(
'DocumentBuilderFactory.setFeature("http://xml.org/sax/features/external-general-entities", true);',
1,
),
('Cipher.getInstance("RSA/NONE/NoPadding", "BC");', 1),
('Cipher.getInstance("RSA/NONE/NoPadding");', 1),
('Test test = MessageDigest.getInstance("MD2");', 1),
('Test test = MessageDigest.getInstance("MD2", "SUN");', 1),
("byte[] test = DigestUtils.getMd2Digest().digest(foo.getBytes());", 1),
("byte[] test = DigestUtils.md2(foo.getBytes());", 1),
("byte[] test = DigestUtils.md2Hex(foo.getBytes());", 1),
('Test test = MessageDigest.getInstance("MD4");', 1),
('Test test = MessageDigest.getInstance("MD4", "SUN");', 1),
("byte[] test = DigestUtils.getMd4Digest().digest(foo.getBytes());", 1),
("byte[] test = DigestUtils.md4(foo.getBytes());", 1),
("byte[] test = DigestUtils.md4Hex(foo.getBytes());", 1),
('Test test = MessageDigest.getInstance("MD5");', 1),
('Test test = MessageDigest.getInstance("md5");', 1),
('Test test = MessageDigest.getInstance("MD5", "SUN");', 1),
("byte[] test = DigestUtils.getMd5Digest().digest(foo.getBytes());", 1),
("byte[] test = DigestUtils.md5(foo.getBytes());", 1),
("byte[] test = DigestUtils.md5Hex(foo.getBytes());", 1),
('final Cipher cipher = Cipher.getInstance("RC2");', 1),
('final Cipher cipher = Cipher.getInstance("RC4");', 1),
('final Cipher cipher = Cipher.getInstance("ARCFOUR");', 1),
("WebView.setWebContentsDebuggingEnabled(true);", 1),
("mContext.openFileOutput( foo, Context.MODE_WORLD_READABLE );", 1),
("mContext.openFileOutput( foo, Context.MODE_WORLD_WRITABLE );", 1),
("settings.setAllowFileAccess(true);", 1),
("settings.setAllowFileAccessFromURLs(true);", 1),
("settings.setAllowUniversalAccessFromURLs(true);", 1),
("import org.apache.http.conn.ssl.NoopHostnameVerifier;", 1),
("ALLOW_ALL_HOSTNAME_VERIFIER", 1),
("HostnameVerifier allowAll = new AllowAllHostnameVerifier();", 1),
("HostnameVerifier allowAll = new NullHostnameVerifier();", 1),
("HostnameVerifier allowAll = new NoopHostnameVerifier();", 1),
("ssf.setHostnameVerifier(SSLSocketFactory.ALLOW_ALL_HOSTNAME_VERIFIER);", 1),
(
'String dir = System.getProperty("dir");Runtime rt = Runtime.getRuntime(); Process proc = rt.exec(new String[] {"sh", "-c", "ls " + dir});',
1,
),
("Runtime.getRuntime().exec(userInput)", 1),
]
class TestJavaInsecureCodeDetector(InsecureCodeDetectorTest):
async def test_java_regex_patterns(self) -> None:
| # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# the following test cases contain an input string, and the corresponding number of expected insecure pattern matches
JAVA_TEST_CASES = [
('final Cipher cipher = Cipher.getInstance("AES/ECB/PKCS5Padding");', 1),
('final Cipher cipher = Cipher.getInstance("DES/ECB/PKCS5Padding");', 2),
('final Cipher cipher = Cipher.getInstance("DESede/ECB/PKCS5Padding");', 2),
('final Cipher cipher = Cipher.getInstance("AES");', 1),
(".setBlockModes(KeyProperties.BLOCK_MODE_ECB)", 1),
('final Cipher cipher = Cipher.getInstance("AES/CBC/PKCS5Padding")', 1),
('final Cipher cipher = Cipher.getInstance("AES/CBC/PKCS7Padding")', 1),
('SecretKeyFactory.getInstance("3DES");', 1),
('SecretKeyFactory.getInstance("DESede");', 1),
('SecretKeyFactory.getInstance("DES");', 1),
('SecretKeyFactory.getInstance("Blowfish");', 1),
('SecretKeyFactory.getInstance("DESEDEWRAP");', 1),
("Test nullCipher = new javax.crypto.NullCipher();", 1),
("Test customCipher = new NullCipher();", 1),
('Test test = MessageDigest.getInstance("SHA-1");', 1),
('Test test = MessageDigest.getInstance("SHA1", "SUN");', 1),
("byte[] test = DigestUtils.getSha1Digest().digest(foo.getBytes());", 1),
("byte[] test = DigestUtils.sha1(foo.getBytes());", 1),
("byte[] test = DigestUtils.sha1Hex(foo.getBytes());", 1),
(
'customXmlFactory.setProperty("javax.xml.stream.isSupportingExternalEntities", true);',
1,
),
(
'DocumentBuilderFactory.setFeature("http://apache.org/xml/features/disallow-doctype-decl", false);',
1,
),
(
'DocumentBuilderFactory.setFeature("http://xml.org/sax/features/external-parameter-entities", true);',
1,
),
(
'DocumentBuilderFactory.setFeature("http://xml.org/sax/features/external-general-entities", true);',
1,
),
('Cipher.getInstance("RSA/NONE/NoPadding", "BC");', 1),
('Cipher.getInstance("RSA/NONE/NoPadding");', 1),
('Test test = MessageDigest.getInstance("MD2");', 1),
('Test test = MessageDigest.getInstance("MD2", "SUN");', 1),
("byte[] test = DigestUtils.getMd2Digest().digest(foo.getBytes());", 1),
("byte[] test = DigestUtils.md2(foo.getBytes());", 1),
("byte[] test = DigestUtils.md2Hex(foo.getBytes());", 1),
('Test test = MessageDigest.getInstance("MD4");', 1),
('Test test = MessageDigest.getInstance("MD4", "SUN");', 1),
("byte[] test = DigestUtils.getMd4Digest().digest(foo.getBytes());", 1),
("byte[] test = DigestUtils.md4(foo.getBytes());", 1),
("byte[] test = DigestUtils.md4Hex(foo.getBytes());", 1),
('Test test = MessageDigest.getInstance("MD5");', 1),
('Test test = MessageDigest.getInstance("md5");', 1),
('Test test = MessageDigest.getInstance("MD5", "SUN");', 1),
("byte[] test = DigestUtils.getMd5Digest().digest(foo.getBytes());", 1),
("byte[] test = DigestUtils.md5(foo.getBytes());", 1),
("byte[] test = DigestUtils.md5Hex(foo.getBytes());", 1),
('final Cipher cipher = Cipher.getInstance("RC2");', 1),
('final Cipher cipher = Cipher.getInstance("RC4");', 1),
('final Cipher cipher = Cipher.getInstance("ARCFOUR");', 1),
("WebView.setWebContentsDebuggingEnabled(true);", 1),
("mContext.openFileOutput( foo, Context.MODE_WORLD_READABLE );", 1),
("mContext.openFileOutput( foo, Context.MODE_WORLD_WRITABLE );", 1),
("settings.setAllowFileAccess(true);", 1),
("settings.setAllowFileAccessFromURLs(true);", 1),
("settings.setAllowUniversalAccessFromURLs(true);", 1),
("import org.apache.http.conn.ssl.NoopHostnameVerifier;", 1),
("ALLOW_ALL_HOSTNAME_VERIFIER", 1),
("HostnameVerifier allowAll = new AllowAllHostnameVerifier();", 1),
("HostnameVerifier allowAll = new NullHostnameVerifier();", 1),
("HostnameVerifier allowAll = new NoopHostnameVerifier();", 1),
("ssf.setHostnameVerifier(SSLSocketFactory.ALLOW_ALL_HOSTNAME_VERIFIER);", 1),
(
'String dir = System.getProperty("dir");Runtime rt = Runtime.getRuntime(); Process proc = rt.exec(new String[] {"sh", "-c", "ls " + dir});',
1,
),
("Runtime.getRuntime().exec(userInput)", 1),
]
class TestJavaInsecureCodeDetector(InsecureCodeDetectorTest):
async def test_java_regex_patterns(self) -> None: | await self.run_regex_tests(Language.JAVA, JAVA_TEST_CASES) | 0 | 2023-12-06 21:29:41+00:00 | 4k |
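
Each tuple above pairs a Java snippet with the number of insecure-pattern matches the regex analyzer should report. A toy Python illustration of that rule-counting idea; the two rules here are invented for the example and are not the detector's real rule set.

import re

TOY_RULES = [
    re.compile(r'MessageDigest\.getInstance\(\s*"(?:MD2|MD4|MD5|SHA-?1)"', re.IGNORECASE),
    re.compile(r'Cipher\.getInstance\(\s*"[^"]*ECB[^"]*"'),
]

def count_insecure_matches(source: str) -> int:
    return sum(len(rule.findall(source)) for rule in TOY_RULES)

# count_insecure_matches('final Cipher cipher = Cipher.getInstance("AES/ECB/PKCS5Padding");') -> 1
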
allenai/unified-io-2 | demo/utils/video_utils.py | [
{
"identifier": "get_video_length",
"path": "create_data/utils.py",
"snippet": "def get_video_length(video_path):\n \"\"\"this gets just the video stream length (in the case audio stream is longer)\"\"\"\n # E.g. k700-2020/train/watering plants/af3epdZsrTc_000178_000188.mp4\n # if audio is shorter than video stream, just pad that\n # \"-select_streams v:0\" gets the video stream, '-select_streams a:0\" is audio stream\n proc = subprocess.Popen(['ffprobe', '-v', 'error', '-select_streams', 'v:0', '-show_entries', 'stream=duration',\n '-of', 'default=noprint_wrappers=1:nokey=1', video_path],\n stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n\n out, _ = proc.communicate()\n duration = out.decode('utf-8')\n\n try:\n duration = float(out.strip())\n except ValueError:\n logging.warning(f\"Invalid duration for {video_path}: {duration}\")\n duration = None\n\n return duration"
},
{
"identifier": "create_audio_from_video",
"path": "create_data/utils.py",
"snippet": "def create_audio_from_video(video_file:str, audio_dir: None, audio_file_timeout=-1, sampling_rate:int=16000, force:bool=False):\n \"\"\"Create .wav file from video\"\"\"\n\n if audio_dir is not None:\n audio_file = path.join(audio_dir, Path(video_file).stem + \".wav\")\n else:\n audio_file = path.splitext(video_file)[0] + \".wav\"\n\n if not path.isfile(audio_file) or force:\n ffmpeg_process = subprocess.Popen(\n ['ffmpeg', '-y', '-i', str(video_file), '-ac', '1', '-ar', str(sampling_rate), audio_file],\n stdout=-1, stderr=-1, text=True)\n\n if audio_file_timeout == -1:\n # wait however long it takes to create the audio file\n ffmpeg_process.wait()\n else:\n try:\n ffmpeg_process.communicate(None, timeout=audio_file_timeout)\n except subprocess.TimeoutExpired:\n # if the audio file hasn't been created yet, abandon hope\n logging.warning(f\"Couldn't create .wav from {video_file} in timeout of {audio_file_timeout}.\")\n ffmpeg_process.kill()\n return None\n ffmpeg_process.kill()\n\n return audio_file"
},
{
"identifier": "extract_frames_from_video",
"path": "create_data/utils.py",
"snippet": "def extract_frames_from_video(video_path,\n video_length,\n video_segment_length,\n times=None,\n clip_start_time=0,\n clip_end_time=None,\n num_frames=None,\n resize=True,\n target_size=512,\n multiprocess=False):\n \"\"\"\n Control frame times:\n - automatically compute from below (default) OR\n - manually set\n\n Control number of frames or sampling duration:\n - specify number of frames (num_frames) OR\n - specify duration between segments (video_segment_length)\n\n Control where to sample from:\n - between [0,video_length] (default) OR\n - between [clip_start_time,clip_end_time]\n\n video_length may be provided. If set to None, video_length will be computed\n from video_path.\n \"\"\"\n if times is None:\n # automatically calculate what times to extract frames for\n if video_length is None:\n video_length = get_video_length(video_path)\n\n if clip_end_time is not None:\n clip_duration = clip_end_time - clip_start_time\n if clip_duration <= video_length:\n video_length = clip_duration\n\n # one and only one of video_segment_length and num_frames should be None\n assert video_segment_length is not None or num_frames is not None\n assert video_segment_length is None or num_frames is None\n\n if num_frames is None:\n # allows extra frame only if for >=50% of the segment video is available\n num_segments = get_num_segments(video_length, video_segment_length)\n else:\n num_segments = num_frames\n\n # frames are located at the midpoint of a segment\n boundaries = np.linspace(clip_start_time, clip_end_time, num_segments + 1).tolist()\n extract_times = [(boundaries[i] + boundaries[i+1]) / 2.0 for i in range(num_segments)]\n else:\n extract_times = times\n boundaries = None\n\n # extract the frames\n if multiprocess:\n pool = multiprocessing.Pool()\n frames = pool.starmap(extract_single_frame_from_video,\n zip(itertools.repeat(video_path), extract_times))\n else:\n # TODO Can we get all frames with one video read\n frames = [extract_single_frame_from_video(video_path, time) for time in extract_times]\n\n # check to see if any extraction failed\n if any([x is None for x in frames]) or frames is None or len(frames) == 0:\n logging.warning(f\"Failed to extract frames from {video_path}\")\n return None, None\n\n # resize the frames to have shorter side of size 512\n if resize:\n if isinstance(target_size, int):\n frames = [resize_image_by_shorter_side(im, target_size=target_size) for im in frames]\n else:\n assert len(target_size) == 2, target_size # type: ignore\n frames = [\n skimage.transform.resize(\n im, target_size, anti_aliasing=True, preserve_range=True\n )\n for im in frames\n ]\n\n return np.stack(frames).astype(np.uint8), boundaries"
},
{
"identifier": "BUFFER_FROM_END",
"path": "create_data/utils.py",
"snippet": "BUFFER_FROM_END = 0.1"
},
{
"identifier": "extract_spectrograms_from_audio",
"path": "demo/utils/audio_utils.py",
"snippet": "def extract_spectrograms_from_audio(\n audio_file: Union[str, np.ndarray],\n audio_segment_length: float = AUDIO_SEGMENT_LENGTH,\n spectrogram_length: float = AUDIO_SPECTRUM_LENGTH,\n audio_length=None,\n sampling_rate: int = AUDIO_SAMPLING_RATE,\n):\n # read in the audio file\n if isinstance(audio_file, str):\n waveform = read_audio_file(audio_file)\n else:\n assert audio_file.ndim == 1\n # cached waveform\n waveform = audio_file\n if waveform is None:\n print(\"NO AUDIO FROM WAVEFILE!\")\n return None\n\n if audio_length is None:\n # get actual audio length\n audio_length = get_audio_length(audio_file)\n if audio_length is None:\n print(f\"Couldn't get audio length for {audio_file}\")\n return None\n\n num_segments = get_num_segments(audio_length, audio_segment_length)\n boundaries = np.linspace(\n 0, num_segments * audio_segment_length, num_segments + 1\n ).tolist()\n\n # Pad to max time just in case, crop if longer\n max_samples = int(sampling_rate * num_segments * audio_segment_length)\n if waveform.size < max_samples:\n waveform = np.concatenate(\n [waveform, np.zeros(max_samples - waveform.size, dtype=np.float32)], 0\n )\n waveform = waveform[:max_samples]\n\n # split waveform into segments\n spectrograms = []\n for i in range(num_segments):\n if audio_segment_length <= spectrogram_length:\n ts_start = int(boundaries[i] * sampling_rate)\n ts_end = int(boundaries[i + 1] * sampling_rate)\n waveform_segment = waveform[ts_start:ts_end]\n num_pad = int(sampling_rate * spectrogram_length) - (ts_end - ts_start)\n if num_pad > 0:\n waveform_segment = np.concatenate(\n [\n np.zeros(num_pad // 2, dtype=np.float32),\n waveform_segment,\n np.zeros(num_pad - num_pad // 2, dtype=np.float32),\n ],\n 0,\n )\n waveform_segment = waveform_segment[\n : int(sampling_rate * spectrogram_length)\n ]\n else:\n ts_start = int(boundaries[i] * sampling_rate)\n ts_end = int(boundaries[i + 1] * sampling_rate)\n ts_mid = (ts_start + ts_end) / 2\n start = int(ts_mid - sampling_rate * spectrogram_length / 2)\n end = start + int(sampling_rate * spectrogram_length)\n waveform_segment = waveform[start:end]\n # Create spectrogram from waveform\n try:\n spectrogram = make_spectrogram(\n waveform_segment, sampling_rate, n_fft=1024, hop_length=256\n ) # shape (128, 256)\n except Exception as exc:\n print(f\"Couldn't make spectrogram, {exc}\")\n return None\n spectrograms.append(spectrogram)\n\n if len(spectrograms) == 0:\n assert num_segments == 0\n print(\"Couldn't make spectrograms: num_segments is 0\")\n return None\n\n # (N,128,256) is (# of segments, # of mel bands in spectrogram, # of hops in spectrogram)\n spectrograms = np.stack(spectrograms).astype(np.float32)\n assert spectrograms.shape[1:] == (128, 256)\n\n # if spectrograms.shape[1:] != (128, 256):\n # print(\n # f\"Non-standard spectrogram shape produced! Should be (N,128,256) but is {spectrograms.shape}\"\n # )\n # return None\n\n return spectrograms"
}
] | import os.path
import random
import string
import subprocess
import time
import gradio as gr
from create_data.utils import (
get_video_length,
create_audio_from_video,
extract_frames_from_video,
BUFFER_FROM_END,
)
from demo.utils.audio_utils import extract_spectrograms_from_audio | 2,704 |
__all__ = ["load_video"]
def extract_frames_and_spectrograms_from_video(
video_file,
audio_dir,
video_length=None,
video_segment_length=None,
audio_segment_length=None,
times=None,
clip_start_time=0,
clip_end_time=None,
num_frames=None,
target_size=(256, 256),
*,
use_audio,
):
if times is None:
# get actual video length
if video_length is None:
video_length = get_video_length(video_file)
if video_length is None:
print(f"Couldn't get video length for {video_file}")
return None, None
if video_segment_length is None:
video_segment_length = video_length / num_frames
if video_length < (video_segment_length / 2.0) - BUFFER_FROM_END:
print(
f"Video is too short ({video_length}s is less than half the segment length of {video_segment_length}s segments"
)
return None, None
else:
# don't need this if times is given
video_length = None
# extract image frames
# t0 = perf_counter()
frames, boundaries = extract_frames_from_video(
video_file,
video_length,
video_segment_length,
times=times,
clip_start_time=clip_start_time,
clip_end_time=clip_end_time,
num_frames=num_frames,
multiprocess=False,
resize=True,
target_size=target_size,
)
# print(f"Load video in {perf_counter() - t0} seconds in total")
spectrograms = None
if use_audio:
# expects the audio file to be created already (since it takes some time)
audio_file = create_audio_from_video(video_file, audio_dir, force=True)
if os.path.exists(audio_file): # in case video w/o audio
# extract audio segments
|
__all__ = ["load_video"]
def extract_frames_and_spectrograms_from_video(
video_file,
audio_dir,
video_length=None,
video_segment_length=None,
audio_segment_length=None,
times=None,
clip_start_time=0,
clip_end_time=None,
num_frames=None,
target_size=(256, 256),
*,
use_audio,
):
if times is None:
# get actual video length
if video_length is None:
video_length = get_video_length(video_file)
if video_length is None:
print(f"Couldn't get video length for {video_file}")
return None, None
if video_segment_length is None:
video_segment_length = video_length / num_frames
if video_length < (video_segment_length / 2.0) - BUFFER_FROM_END:
print(
f"Video is too short ({video_length}s is less than half the segment length of {video_segment_length}s segments"
)
return None, None
else:
# don't need this if times is given
video_length = None
# extract image frames
# t0 = perf_counter()
frames, boundaries = extract_frames_from_video(
video_file,
video_length,
video_segment_length,
times=times,
clip_start_time=clip_start_time,
clip_end_time=clip_end_time,
num_frames=num_frames,
multiprocess=False,
resize=True,
target_size=target_size,
)
# print(f"Load video in {perf_counter() - t0} seconds in total")
spectrograms = None
if use_audio:
# expects the audio file to be created already (since it takes some time)
audio_file = create_audio_from_video(video_file, audio_dir, force=True)
if os.path.exists(audio_file): # in case video w/o audio
# extract audio segments | spectrograms = extract_spectrograms_from_audio( | 4 | 2023-12-12 20:23:33+00:00 | 4k |
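
extract_frames_from_video above derives frame times as the midpoints of equal-length segments via np.linspace. That timing logic in isolation, for clarity (illustrative, mirroring the quoted helper):

import numpy as np

def frame_sample_times(clip_start: float, clip_end: float, num_segments: int):
    # Midpoint of each of num_segments equal slices of [clip_start, clip_end].
    boundaries = np.linspace(clip_start, clip_end, num_segments + 1)
    times = (boundaries[:-1] + boundaries[1:]) / 2.0
    return times.tolist(), boundaries.tolist()

# frame_sample_times(0.0, 10.0, 4) -> ([1.25, 3.75, 6.25, 8.75], [0.0, 2.5, 5.0, 7.5, 10.0])
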
zju3dv/EasyVolcap | scripts/zjumocap/cleanup_instant-nb_data.py | [
{
"identifier": "parallel_execution",
"path": "easyvolcap/utils/parallel_utils.py",
"snippet": "def parallel_execution(*args, action: Callable, num_workers=32, print_progress=False, sequential=False, async_return=False, desc=None, use_process=False, **kwargs):\n \"\"\"\n Executes a given function in parallel using threads or processes.\n When using threads, the parallelism is achieved during IO blocking (i.e. when loading images from disk or writing something to disk).\n If your task is compute intensive, consider using packages like numpy or torch since they release the GIL during heavy lifting.\n\n Args:\n *args: Variable length argument list.\n action (Callable): The function to execute in parallel.\n num_workers (int): The number of worker threads or processes to use.\n print_progress (bool): Whether to print a progress bar.\n sequential (bool): Whether to execute the function sequentially instead of in parallel.\n async_return (bool): Whether to return a pool object for asynchronous results.\n desc (str): The description to use for the progress bar.\n use_process (bool): Whether to use processes instead of threads.\n **kwargs: Arbitrary keyword arguments.\n\n Returns:\n If `async_return` is False, returns a list of the results of executing the function on each input argument.\n If `async_return` is True, returns a pool object for asynchronous results.\n \"\"\"\n\n # https://superfastpython.com/threadpool-python/\n # Python threads are well suited for use with IO-bound tasks\n # MARK: DO NOT USE THIS FOR CPU BOUND TASK. THIS IS A CHEAP \"THREAD\" POOL WHICH SUCCUMBS TO PYTHON GIL\n # MARK: USE POOL INTEAD OF THREAD POOL IF THAT IS THE CASE\n # NOTE: we expect first arg / or kwargs to be distributed\n # NOTE: print_progress arg is reserved\n\n def get_length(args: List, kwargs: Dict):\n for a in args:\n if isinstance(a, list):\n return len(a)\n for v in kwargs.values():\n if isinstance(v, list):\n return len(v)\n raise NotImplementedError\n\n def get_action_args(length: int, args: List, kwargs: Dict, i: int):\n action_args = [(arg[i] if isinstance(arg, list) and len(arg) == length else arg) for arg in args]\n # TODO: Support all types of iterable\n action_kwargs = {key: (kwargs[key][i] if isinstance(kwargs[key], list) and len(kwargs[key]) == length else kwargs[key]) for key in kwargs}\n return action_args, action_kwargs\n\n if not sequential:\n # Create ThreadPool\n if use_process:\n pool = Pool(processes=num_workers)\n else:\n pool = ThreadPool(processes=num_workers)\n\n # Spawn threads\n results = []\n asyncs = []\n length = get_length(args, kwargs)\n for i in range(length):\n action_args, action_kwargs = get_action_args(length, args, kwargs, i)\n async_result = pool.apply_async(action, action_args, action_kwargs)\n asyncs.append(async_result)\n\n # Join threads and get return values\n if not async_return:\n for async_result in tqdm(asyncs, back=3, desc=desc, disable=not print_progress): # log previous frame\n results.append(async_result.get()) # will sync the corresponding thread\n pool.close()\n pool.join()\n return results\n else:\n return pool\n else:\n results = []\n length = get_length(args, kwargs)\n for i in tqdm(range(length), back=3, desc=desc, disable=not print_progress): # log previous frame\n action_args, action_kwargs = get_action_args(length, args, kwargs, i)\n async_result = action(*action_args, **action_kwargs)\n results.append(async_result)\n return results"
},
{
"identifier": "run",
"path": "easyvolcap/utils/console_utils.py",
"snippet": "def run(cmd,\n quite=False,\n dry_run=False,\n skip_failed=False,\n invokation=os.system, # or subprocess.run\n ):\n \"\"\"\n Run a shell command and print the command to the console.\n\n Args:\n cmd (str or list): The command to run. If a list, it will be joined with spaces.\n quite (bool): If True, suppress console output.\n dry_run (bool): If True, print the command but do not execute it.\n\n Raises:\n RuntimeError: If the command returns a non-zero exit code.\n\n Returns:\n None\n \"\"\"\n if isinstance(cmd, list):\n cmd = ' '.join(list(map(str, cmd)))\n func = sys._getframe(1).f_code.co_name\n if not quite:\n cmd_color = 'cyan' if not cmd.startswith('rm') else 'red'\n cmd_color = 'green' if dry_run else cmd_color\n dry_msg = magenta('[dry_run]: ') if dry_run else ''\n log(yellow(func), '->', green(invokation.__name__) + \":\", dry_msg + color(cmd, cmd_color), no_prefix=True)\n # print(color(cmd, cmd_color), soft_wrap=False)\n if not dry_run:\n code = invokation(cmd)\n else:\n code = 0\n if code != 0 and not skip_failed:\n log(red(code), \"<-\", yellow(func) + \":\", red(cmd), no_prefix=True)\n # print(red(cmd), soft_wrap=True)\n raise RuntimeError(f'{code} <- {func}: {cmd}')\n else:\n return code # or output"
},
{
"identifier": "log",
"path": "easyvolcap/utils/console_utils.py",
"snippet": "def log(*stuff,\n back=1,\n file: Optional[IO[str]] = None,\n no_prefix=False,\n module_color=blue,\n func_color=green,\n console: Optional[Console] = console,\n **kwargs):\n \"\"\"\n Perform logging using the built in shared logger\n \"\"\"\n writer = console if file is None else Console(file=file, soft_wrap=True, tab_size=4, log_time_format=verbose_time_format) # shared\n writer._log_render.time_format = verbose_time_format if verbose_log else slim_time_format\n if no_prefix or not verbose_log: writer.log(*stuff, _stack_offset=2, **kwargs)\n else: writer.log(get_log_prefix(back + 1, module_color, func_color), *stuff, _stack_offset=2, **kwargs)"
},
{
"identifier": "run_if_not_exists",
"path": "easyvolcap/utils/console_utils.py",
"snippet": "def run_if_not_exists(cmd, outname, *args, **kwargs):\n # whether a file exists, whether a directory has more than 3 elements\n # if (os.path.exists(outname) and os.path.isfile(outname)) or (os.path.isdir(outname) and len(os.listdir(outname)) >= 3):\n if os.path.exists(outname):\n log(yellow('Skipping:'), cyan(cmd))\n else:\n run(cmd, *args, **kwargs)"
}
] | import os
import argparse
import sys
from glob import glob
from os.path import join
from easyvolcap.utils.parallel_utils import parallel_execution
from easyvolcap.utils.console_utils import run, log, run_if_not_exists | 1,816 |
# fmt: off
sys.path.append('.')
# fmt: on
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--source_root', default='/nas/home/xuzhen/datasets/my_zjumocap')
parser.add_argument('--target_root', default='data/my_zjumocap')
parser.add_argument('--dry_run', action='store_true')
parser.add_argument('--human', default='my_377')
args = parser.parse_args()
args.source_root = join(args.source_root, args.human)
args.target_root = join(args.target_root, args.human)
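# With the defaults above, the per-human roots resolve to (illustrative):
#   source_root -> /nas/home/xuzhen/datasets/my_zjumocap/my_377
#   target_root -> data/my_zjumocap/my_377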
# grab all image files
|
# fmt: off
sys.path.append('.')
# fmt: on
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--source_root', default='/nas/home/xuzhen/datasets/my_zjumocap')
parser.add_argument('--target_root', default='data/my_zjumocap')
parser.add_argument('--dry_run', action='store_true')
parser.add_argument('--human', default='my_377')
args = parser.parse_args()
args.source_root = join(args.source_root, args.human)
args.target_root = join(args.target_root, args.human)
# grab all image files | log(f'grabbing all image files, only second level or more') | 2 | 2023-12-07 08:53:42+00:00 | 4k |
minghanqin/LangSplat | scene/dataset_readers.py | [
{
"identifier": "read_extrinsics_text",
"path": "scene/colmap_loader.py",
"snippet": "def read_extrinsics_text(path):\n \"\"\"\n Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py\n \"\"\"\n images = {}\n with open(path, \"r\") as fid:\n while True:\n line = fid.readline()\n if not line:\n break\n line = line.strip()\n if len(line) > 0 and line[0] != \"#\":\n elems = line.split()\n image_id = int(elems[0])\n qvec = np.array(tuple(map(float, elems[1:5])))\n tvec = np.array(tuple(map(float, elems[5:8])))\n camera_id = int(elems[8])\n image_name = elems[9]\n elems = fid.readline().split()\n xys = np.column_stack([tuple(map(float, elems[0::3])),\n tuple(map(float, elems[1::3]))])\n point3D_ids = np.array(tuple(map(int, elems[2::3])))\n images[image_id] = Image(\n id=image_id, qvec=qvec, tvec=tvec,\n camera_id=camera_id, name=image_name,\n xys=xys, point3D_ids=point3D_ids)\n return images"
},
{
"identifier": "read_intrinsics_text",
"path": "scene/colmap_loader.py",
"snippet": "def read_intrinsics_text(path):\n \"\"\"\n Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py\n \"\"\"\n cameras = {}\n with open(path, \"r\") as fid:\n while True:\n line = fid.readline()\n if not line:\n break\n line = line.strip()\n if len(line) > 0 and line[0] != \"#\":\n elems = line.split()\n camera_id = int(elems[0])\n model = elems[1]\n assert model == \"PINHOLE\", \"While the loader support other types, the rest of the code assumes PINHOLE\"\n width = int(elems[2])\n height = int(elems[3])\n params = np.array(tuple(map(float, elems[4:])))\n cameras[camera_id] = Camera(id=camera_id, model=model,\n width=width, height=height,\n params=params)\n return cameras"
},
{
"identifier": "qvec2rotmat",
"path": "scene/colmap_loader.py",
"snippet": "def qvec2rotmat(qvec):\n return np.array([\n [1 - 2 * qvec[2]**2 - 2 * qvec[3]**2,\n 2 * qvec[1] * qvec[2] - 2 * qvec[0] * qvec[3],\n 2 * qvec[3] * qvec[1] + 2 * qvec[0] * qvec[2]],\n [2 * qvec[1] * qvec[2] + 2 * qvec[0] * qvec[3],\n 1 - 2 * qvec[1]**2 - 2 * qvec[3]**2,\n 2 * qvec[2] * qvec[3] - 2 * qvec[0] * qvec[1]],\n [2 * qvec[3] * qvec[1] - 2 * qvec[0] * qvec[2],\n 2 * qvec[2] * qvec[3] + 2 * qvec[0] * qvec[1],\n 1 - 2 * qvec[1]**2 - 2 * qvec[2]**2]])"
},
{
"identifier": "read_extrinsics_binary",
"path": "scene/colmap_loader.py",
"snippet": "def read_extrinsics_binary(path_to_model_file):\n \"\"\"\n see: src/base/reconstruction.cc\n void Reconstruction::ReadImagesBinary(const std::string& path)\n void Reconstruction::WriteImagesBinary(const std::string& path)\n \"\"\"\n images = {}\n with open(path_to_model_file, \"rb\") as fid:\n num_reg_images = read_next_bytes(fid, 8, \"Q\")[0]\n for _ in range(num_reg_images):\n binary_image_properties = read_next_bytes(\n fid, num_bytes=64, format_char_sequence=\"idddddddi\")\n image_id = binary_image_properties[0]\n qvec = np.array(binary_image_properties[1:5])\n tvec = np.array(binary_image_properties[5:8])\n camera_id = binary_image_properties[8]\n image_name = \"\"\n current_char = read_next_bytes(fid, 1, \"c\")[0]\n while current_char != b\"\\x00\": # look for the ASCII 0 entry\n image_name += current_char.decode(\"utf-8\")\n current_char = read_next_bytes(fid, 1, \"c\")[0]\n num_points2D = read_next_bytes(fid, num_bytes=8,\n format_char_sequence=\"Q\")[0]\n x_y_id_s = read_next_bytes(fid, num_bytes=24*num_points2D,\n format_char_sequence=\"ddq\"*num_points2D)\n xys = np.column_stack([tuple(map(float, x_y_id_s[0::3])),\n tuple(map(float, x_y_id_s[1::3]))])\n point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3])))\n images[image_id] = Image(\n id=image_id, qvec=qvec, tvec=tvec,\n camera_id=camera_id, name=image_name,\n xys=xys, point3D_ids=point3D_ids)\n return images"
},
{
"identifier": "read_intrinsics_binary",
"path": "scene/colmap_loader.py",
"snippet": "def read_intrinsics_binary(path_to_model_file):\n \"\"\"\n see: src/base/reconstruction.cc\n void Reconstruction::WriteCamerasBinary(const std::string& path)\n void Reconstruction::ReadCamerasBinary(const std::string& path)\n \"\"\"\n cameras = {}\n with open(path_to_model_file, \"rb\") as fid:\n num_cameras = read_next_bytes(fid, 8, \"Q\")[0]\n for _ in range(num_cameras):\n camera_properties = read_next_bytes(\n fid, num_bytes=24, format_char_sequence=\"iiQQ\")\n camera_id = camera_properties[0]\n model_id = camera_properties[1]\n model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name\n width = camera_properties[2]\n height = camera_properties[3]\n num_params = CAMERA_MODEL_IDS[model_id].num_params\n params = read_next_bytes(fid, num_bytes=8*num_params,\n format_char_sequence=\"d\"*num_params)\n cameras[camera_id] = Camera(id=camera_id,\n model=model_name,\n width=width,\n height=height,\n params=np.array(params))\n assert len(cameras) == num_cameras\n return cameras"
},
{
"identifier": "read_points3D_binary",
"path": "scene/colmap_loader.py",
"snippet": "def read_points3D_binary(path_to_model_file):\n \"\"\"\n see: src/base/reconstruction.cc\n void Reconstruction::ReadPoints3DBinary(const std::string& path)\n void Reconstruction::WritePoints3DBinary(const std::string& path)\n \"\"\"\n\n\n with open(path_to_model_file, \"rb\") as fid:\n num_points = read_next_bytes(fid, 8, \"Q\")[0]\n\n xyzs = np.empty((num_points, 3))\n rgbs = np.empty((num_points, 3))\n errors = np.empty((num_points, 1))\n\n for p_id in range(num_points):\n binary_point_line_properties = read_next_bytes(\n fid, num_bytes=43, format_char_sequence=\"QdddBBBd\")\n xyz = np.array(binary_point_line_properties[1:4])\n rgb = np.array(binary_point_line_properties[4:7])\n error = np.array(binary_point_line_properties[7])\n track_length = read_next_bytes(\n fid, num_bytes=8, format_char_sequence=\"Q\")[0]\n track_elems = read_next_bytes(\n fid, num_bytes=8*track_length,\n format_char_sequence=\"ii\"*track_length)\n xyzs[p_id] = xyz\n rgbs[p_id] = rgb\n errors[p_id] = error\n return xyzs, rgbs, errors"
},
{
"identifier": "read_points3D_text",
"path": "scene/colmap_loader.py",
"snippet": "def read_points3D_text(path):\n \"\"\"\n see: src/base/reconstruction.cc\n void Reconstruction::ReadPoints3DText(const std::string& path)\n void Reconstruction::WritePoints3DText(const std::string& path)\n \"\"\"\n xyzs = None\n rgbs = None\n errors = None\n num_points = 0\n with open(path, \"r\") as fid:\n while True:\n line = fid.readline()\n if not line:\n break\n line = line.strip()\n if len(line) > 0 and line[0] != \"#\":\n num_points += 1\n\n\n xyzs = np.empty((num_points, 3))\n rgbs = np.empty((num_points, 3))\n errors = np.empty((num_points, 1))\n count = 0\n with open(path, \"r\") as fid:\n while True:\n line = fid.readline()\n if not line:\n break\n line = line.strip()\n if len(line) > 0 and line[0] != \"#\":\n elems = line.split()\n xyz = np.array(tuple(map(float, elems[1:4])))\n rgb = np.array(tuple(map(int, elems[4:7])))\n error = np.array(float(elems[7]))\n xyzs[count] = xyz\n rgbs[count] = rgb\n errors[count] = error\n count += 1\n\n return xyzs, rgbs, errors"
},
{
"identifier": "getWorld2View2",
"path": "utils/graphics_utils.py",
"snippet": "def getWorld2View2(R, t, translate=np.array([.0, .0, .0]), scale=1.0):\n Rt = np.zeros((4, 4))\n Rt[:3, :3] = R.transpose()\n Rt[:3, 3] = t\n Rt[3, 3] = 1.0\n\n C2W = np.linalg.inv(Rt)\n cam_center = C2W[:3, 3]\n cam_center = (cam_center + translate) * scale\n C2W[:3, 3] = cam_center\n Rt = np.linalg.inv(C2W)\n return np.float32(Rt)"
},
{
"identifier": "focal2fov",
"path": "utils/graphics_utils.py",
"snippet": "def focal2fov(focal, pixels):\n return 2*math.atan(pixels/(2*focal))"
},
{
"identifier": "fov2focal",
"path": "utils/graphics_utils.py",
"snippet": "def fov2focal(fov, pixels):\n return pixels / (2 * math.tan(fov / 2))"
},
{
"identifier": "SH2RGB",
"path": "utils/sh_utils.py",
"snippet": "def SH2RGB(sh):\n return sh * C0 + 0.5"
},
{
"identifier": "BasicPointCloud",
"path": "scene/gaussian_model.py",
"snippet": "class GaussianModel:\n def setup_functions(self):\n def build_covariance_from_scaling_rotation(scaling, scaling_modifier, rotation):\n def __init__(self, sh_degree : int):\n def capture(self, include_feature=False):\n def restore(self, model_args, training_args, mode='train'):\n def get_scaling(self):\n def get_rotation(self):\n def get_xyz(self):\n def get_features(self):\n def get_opacity(self):\n def get_language_feature(self):\n def get_covariance(self, scaling_modifier = 1):\n def oneupSHdegree(self):\n def create_from_pcd(self, pcd : BasicPointCloud, spatial_lr_scale : float):\n def training_setup(self, training_args):\n def update_learning_rate(self, iteration):\n def construct_list_of_attributes(self):\n def save_ply(self, path):\n def reset_opacity(self):\n def load_ply(self, path):\n def replace_tensor_to_optimizer(self, tensor, name):\n def _prune_optimizer(self, mask):\n def prune_points(self, mask):\n def cat_tensors_to_optimizer(self, tensors_dict):\n def densification_postfix(self, new_xyz, new_features_dc, new_features_rest, new_opacities, new_scaling, new_rotation):\n def densify_and_split(self, grads, grad_threshold, scene_extent, N=2):\n def densify_and_clone(self, grads, grad_threshold, scene_extent):\n def densify_and_prune(self, max_grad, min_opacity, extent, max_screen_size):\n def add_densification_stats(self, viewspace_point_tensor, update_filter):\n L = build_scaling_rotation(scaling_modifier * scaling, rotation)"
}
] | import os
import sys
import numpy as np
import json
from PIL import Image
from typing import NamedTuple
from scene.colmap_loader import read_extrinsics_text, read_intrinsics_text, qvec2rotmat, \
read_extrinsics_binary, read_intrinsics_binary, read_points3D_binary, read_points3D_text
from utils.graphics_utils import getWorld2View2, focal2fov, fov2focal
from pathlib import Path
from plyfile import PlyData, PlyElement
from utils.sh_utils import SH2RGB
from scene.gaussian_model import BasicPointCloud | 3,355 | #
# Copyright (C) 2023, Inria
# GRAPHDECO research group, https://team.inria.fr/graphdeco
# All rights reserved.
#
# This software is free for non-commercial, research and evaluation use
# under the terms of the LICENSE.md file.
#
# For inquiries contact [email protected]
#
class CameraInfo(NamedTuple):
uid: int
R: np.array
T: np.array
FovY: np.array
FovX: np.array
image: np.array
image_path: str
image_name: str
width: int
height: int
class SceneInfo(NamedTuple):
point_cloud: BasicPointCloud
train_cameras: list
test_cameras: list
nerf_normalization: dict
ply_path: str
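# getNerfppNorm: NeRF++-style scene normalization. It stacks the world-space camera
# centers (derived from each camera's R/T via getWorld2View2), then returns their mean
# as the scene centre together with the maximum centre-to-camera distance as a radius.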
def getNerfppNorm(cam_info):
def get_center_and_diag(cam_centers):
cam_centers = np.hstack(cam_centers)
avg_cam_center = np.mean(cam_centers, axis=1, keepdims=True)
center = avg_cam_center
dist = np.linalg.norm(cam_centers - center, axis=0, keepdims=True)
diagonal = np.max(dist)
return center.flatten(), diagonal
cam_centers = []
for cam in cam_info:
| #
# Copyright (C) 2023, Inria
# GRAPHDECO research group, https://team.inria.fr/graphdeco
# All rights reserved.
#
# This software is free for non-commercial, research and evaluation use
# under the terms of the LICENSE.md file.
#
# For inquiries contact [email protected]
#
class CameraInfo(NamedTuple):
uid: int
R: np.array
T: np.array
FovY: np.array
FovX: np.array
image: np.array
image_path: str
image_name: str
width: int
height: int
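# SceneInfo bundles everything a scene loader returns: the initial point cloud,
# the train/test CameraInfo lists, the nerf_normalization dict (see getNerfppNorm
# below), and the path of the source PLY file.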
class SceneInfo(NamedTuple):
point_cloud: BasicPointCloud
train_cameras: list
test_cameras: list
nerf_normalization: dict
ply_path: str
def getNerfppNorm(cam_info):
def get_center_and_diag(cam_centers):
cam_centers = np.hstack(cam_centers)
avg_cam_center = np.mean(cam_centers, axis=1, keepdims=True)
center = avg_cam_center
dist = np.linalg.norm(cam_centers - center, axis=0, keepdims=True)
diagonal = np.max(dist)
return center.flatten(), diagonal
cam_centers = []
for cam in cam_info: | W2C = getWorld2View2(cam.R, cam.T) | 7 | 2023-12-11 06:33:35+00:00 | 4k |
alibaba/animate-anything | utils/dataset.py | [
{
"identifier": "sensible_buckets",
"path": "utils/bucketing.py",
"snippet": "def sensible_buckets(m_width, m_height, w, h, min_size=192):\n if h > w:\n w = resolve_bucket(m_width, h, w)\n w = closest_bucket(m_width, w, 'down', min_size=min_size)\n return w, m_height\n if h < w:\n h = resolve_bucket(m_height, w, h)\n h = closest_bucket(m_height, h, 'down', min_size=min_size)\n return m_width, h\n\n return m_width, m_height"
},
{
"identifier": "get_moved_area_mask",
"path": "utils/common.py",
"snippet": "def get_moved_area_mask(frames, move_th=5, th=-1):\n ref_frame = frames[0] \n # Convert the reference frame to gray\n ref_gray = cv2.cvtColor(ref_frame, cv2.COLOR_BGR2GRAY)\n prev_gray = ref_gray\n # Initialize the total accumulated motion mask\n total_mask = np.zeros_like(ref_gray)\n\n # Iterate through the video frames\n for i in range(1, len(frames)):\n frame = frames[i]\n # Convert the frame to gray\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n # Compute the absolute difference between the reference frame and the current frame\n diff = cv2.absdiff(ref_gray, gray)\n #diff += cv2.absdiff(prev_gray, gray)\n\n # Apply a threshold to obtain a binary image\n ret, mask = cv2.threshold(diff, move_th, 255, cv2.THRESH_BINARY)\n\n # Accumulate the mask\n total_mask = cv2.bitwise_or(total_mask, mask)\n\n # Update the reference frame\n prev_gray = gray\n\n contours, _ = cv2.findContours(total_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n rects = []\n ref_mask = np.zeros_like(ref_gray)\n ref_mask = cv2.drawContours(ref_mask, contours, -1, (255, 255, 255), -1)\n for cnt in contours:\n cur_rec = cv2.boundingRect(cnt)\n rects.append(cur_rec) \n\n #rects = merge_overlapping_rectangles(rects)\n mask = np.zeros_like(ref_gray)\n if th < 0:\n h, w = mask.shape\n th = int(h*w*0.005)\n for rect in rects:\n x, y, w, h = rect\n if w*h < th:\n continue\n #ref_frame = cv2.rectangle(ref_frame, (x, y), (x+w, y+h), (0, 255, 0), 2)\n mask[y:y+h, x:x+w] = 255\n return mask"
},
{
"identifier": "calculate_motion_score",
"path": "utils/common.py",
"snippet": "def calculate_motion_score(frame_imgs, calculate_edges=False, color=\"RGB\") -> float:\n # Convert image into HSV colorspace.\n _last_frame = None\n\n _weights = [1.0, 1.0, 1.0, 0.0]\n score = 0\n for frame_img in frame_imgs:\n if color == \"RGB\":\n hue, sat, lum = cv2.split(cv2.cvtColor(frame_img, cv2.COLOR_RGB2HSV))\n else:\n hue, sat, lum = cv2.split(cv2.cvtColor(frame_img, cv2.COLOR_BGR2HSV))\n # Performance: Only calculate edges if we have to.\n edges = _detect_edges(lum) if calculate_edges else None\n if _last_frame == None:\n _last_frame = (hue, sat, lum, edges)\n continue\n\n score_components = [\n _mean_pixel_distance(hue, _last_frame[0]),\n _mean_pixel_distance(sat, _last_frame[1]),\n _mean_pixel_distance(lum, _last_frame[2]),\n 0.0 if edges is None else _mean_pixel_distance(edges, _last_frame[3]),\n ]\n\n frame_score: float = (\n sum(component * weight for (component, weight) in zip(score_components, _weights))\n / sum(abs(weight) for weight in _weights))\n score += frame_score\n _last_frame = (hue, sat, lum, edges)\n\n return round(score/(len(frame_imgs)-1) * 10)"
}
] | import os
import decord
import numpy as np
import random
import json
import torchvision
import torchvision.transforms as T
import torch
import traceback
from glob import glob
from PIL import Image
from itertools import islice
from pathlib import Path
from .bucketing import sensible_buckets
from .common import get_moved_area_mask, calculate_motion_score
from torch.utils.data import Dataset
from einops import rearrange, repeat | 2,833 |
else:
vr = decord.VideoReader(vid_path)
video = get_frame_batch(vr)
return video, vr
# https://github.com/ExponentialML/Video-BLIP2-Preprocessor
class VideoBLIPDataset(Dataset):
def __init__(
self,
tokenizer = None,
width: int = 256,
height: int = 256,
n_sample_frames: int = 4,
sample_start_idx: int = 1,
fps: int = 1,
json_path: str ="",
json_data = None,
vid_data_key: str = "video_path",
preprocessed: bool = False,
use_bucketing: bool = False,
cache_latents: bool = False,
motion_threshold = 50,
**kwargs
):
self.vid_types = (".mp4", ".avi", ".mov", ".webm", ".flv", ".mjpeg")
self.use_bucketing = use_bucketing
self.tokenizer = tokenizer
self.preprocessed = preprocessed
self.vid_data_key = vid_data_key
self.train_data = self.load_from_json(json_path, json_data)
self.cache_latents = cache_latents
self.motion_threshold = motion_threshold
self.width = width
self.height = height
self.n_sample_frames = n_sample_frames
self.sample_start_idx = sample_start_idx
self.fps = fps
self.transform = T.Compose([
#T.RandomResizedCrop(size=(height, width), scale=(0.8, 1.0), ratio=(width/height, width/height), antialias=False)
T.Resize(min(height, width), antialias=False),
T.CenterCrop([height, width])
])
def build_json(self, json_data):
extended_data = []
for data in json_data['data']:
for nested_data in data['data']:
self.build_json_dict(
data,
nested_data,
extended_data
)
json_data = extended_data
return json_data
def build_json_dict(self, data, nested_data, extended_data):
clip_path = nested_data['clip_path'] if 'clip_path' in nested_data else None
extended_data.append({
self.vid_data_key: data[self.vid_data_key],
'frame_index': nested_data['frame_index'],
'prompt': nested_data['prompt'],
'clip_path': clip_path
})
def load_from_json(self, path, json_data):
try:
with open(path) as jpath:
print(f"Loading JSON from {path}")
json_data = json.load(jpath)
return self.build_json(json_data)
except:
traceback.print_exc()
self.train_data = []
print("Non-existant JSON path. Skipping.")
def validate_json(self, base_path, path):
return os.path.exists(f"{base_path}/{path}")
def get_frame_buckets(self, vr):
_, h, w = vr[0].shape
width, height = sensible_buckets(self.width, self.height, h, w)
resize = T.transforms.Resize((height, width), antialias=True)
return resize
def train_data_batch(self, index):
vid_data = self.train_data[index]
# Get video prompt
prompt = vid_data['prompt']
# If we are training on individual clips.
if 'clip_path' in self.train_data[index] and \
self.train_data[index]['clip_path'] is not None:
clip_path = vid_data['clip_path']
else:
clip_path = vid_data[self.vid_data_key]
# Get the frame of the current index.
self.sample_start_idx = vid_data['frame_index']
cache_path = os.path.splitext(clip_path)[0] + '.pt'
if self.cache_latents and os.path.exists(cache_path):
return torch.load(cache_path, map_location='cpu')
vr = decord.VideoReader(clip_path)
video = get_frame_batch(self.n_sample_frames, self.fps, vr, self.transform)
prompt_ids = get_prompt_ids(prompt, self.tokenizer)
example = {
"pixel_values": normalize_input(video),
"prompt_ids": prompt_ids,
"text_prompt": prompt,
'dataset': self.__getname__(),
'cache_path': cache_path,
}
mask = get_moved_area_mask(video.permute([0,2,3,1]).numpy())
example['mask'] = mask
|
decord.bridge.set_bridge('torch')
# Inspired by the VideoMAE repository.
def normalize_input(
item,
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225],
use_simple_norm=True
):
if item.dtype == torch.uint8 and not use_simple_norm:
item = rearrange(item, 'f c h w -> f h w c')
item = item.float() / 255.0
mean = torch.tensor(mean)
std = torch.tensor(std)
out = rearrange((item - mean) / std, 'f h w c -> f c h w')
return out
else:
# Normalize between -1 & 1
item = rearrange(item, 'f c h w -> f h w c')
return rearrange(item / 127.5 - 1.0, 'f h w c -> f c h w')
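# normalize_input has two paths: uint8 frames with use_simple_norm=False go through
# ImageNet mean/std normalization; everything else is simply mapped to the [-1, 1] range.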
def get_prompt_ids(prompt, tokenizer):
if tokenizer is None:
prompt_ids = torch.tensor([0])
else:
prompt_ids = tokenizer(
prompt,
truncation=True,
padding="max_length",
max_length=tokenizer.model_max_length,
return_tensors="pt",
).input_ids[0]
return prompt_ids
def read_caption_file(caption_file):
with open(caption_file, 'r', encoding="utf8") as t:
return t.read()
def get_text_prompt(
text_prompt: str = '',
fallback_prompt: str= '',
file_path:str = '',
ext_types=['.mp4'],
use_caption=False
):
try:
if use_caption:
if len(text_prompt) > 1: return text_prompt
caption_file = ''
# Use caption on per-video basis (One caption PER video)
for ext in ext_types:
maybe_file = file_path.replace(ext, '.txt')
if maybe_file.endswith(tuple(ext_types)): continue
if os.path.exists(maybe_file):
caption_file = maybe_file
break
if os.path.exists(caption_file):
return read_caption_file(caption_file)
# Return fallback prompt if no conditions are met.
return fallback_prompt
return text_prompt
except:
print(f"Couldn't read prompt caption for {file_path}. Using fallback.")
return fallback_prompt
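# get_frame_batch samples indices at roughly `sample_fps` from the native-fps video;
# when the clip is too short for `max_frames` samples it falls back to evenly spaced
# indices, then keeps the last `max_frames` window before decoding and transforming.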
def get_frame_batch(max_frames, sample_fps, vr, transform):
native_fps = vr.get_avg_fps()
max_range = len(vr)
frame_step = max(1, round(native_fps / sample_fps))
frame_range = range(0, max_range, frame_step)
if len(frame_range) < max_frames:
frame_range = np.linspace(0, max_range-1, max_frames).astype(int)
#start = random.randint(0, len(frame_range) - max_frames)
start = len(frame_range) - max_frames
frame_range_indices = list(frame_range)[start:start+max_frames]
frames = vr.get_batch(frame_range_indices)
video = rearrange(frames, "f h w c -> f c h w")
video = transform(video)
return video
def process_video(vid_path, use_bucketing, w, h, get_frame_buckets, get_frame_batch):
if use_bucketing:
vr = decord.VideoReader(vid_path)
resize = get_frame_buckets(vr)
video = get_frame_batch(vr, resize=resize)
else:
vr = decord.VideoReader(vid_path)
video = get_frame_batch(vr)
return video, vr
# https://github.com/ExponentialML/Video-BLIP2-Preprocessor
class VideoBLIPDataset(Dataset):
def __init__(
self,
tokenizer = None,
width: int = 256,
height: int = 256,
n_sample_frames: int = 4,
sample_start_idx: int = 1,
fps: int = 1,
json_path: str ="",
json_data = None,
vid_data_key: str = "video_path",
preprocessed: bool = False,
use_bucketing: bool = False,
cache_latents: bool = False,
motion_threshold = 50,
**kwargs
):
self.vid_types = (".mp4", ".avi", ".mov", ".webm", ".flv", ".mjpeg")
self.use_bucketing = use_bucketing
self.tokenizer = tokenizer
self.preprocessed = preprocessed
self.vid_data_key = vid_data_key
self.train_data = self.load_from_json(json_path, json_data)
self.cache_latents = cache_latents
self.motion_threshold = motion_threshold
self.width = width
self.height = height
self.n_sample_frames = n_sample_frames
self.sample_start_idx = sample_start_idx
self.fps = fps
self.transform = T.Compose([
#T.RandomResizedCrop(size=(height, width), scale=(0.8, 1.0), ratio=(width/height, width/height), antialias=False)
T.Resize(min(height, width), antialias=False),
T.CenterCrop([height, width])
])
def build_json(self, json_data):
extended_data = []
for data in json_data['data']:
for nested_data in data['data']:
self.build_json_dict(
data,
nested_data,
extended_data
)
json_data = extended_data
return json_data
def build_json_dict(self, data, nested_data, extended_data):
clip_path = nested_data['clip_path'] if 'clip_path' in nested_data else None
extended_data.append({
self.vid_data_key: data[self.vid_data_key],
'frame_index': nested_data['frame_index'],
'prompt': nested_data['prompt'],
'clip_path': clip_path
})
def load_from_json(self, path, json_data):
try:
with open(path) as jpath:
print(f"Loading JSON from {path}")
json_data = json.load(jpath)
return self.build_json(json_data)
except:
traceback.print_exc()
self.train_data = []
print("Non-existant JSON path. Skipping.")
def validate_json(self, base_path, path):
return os.path.exists(f"{base_path}/{path}")
def get_frame_buckets(self, vr):
_, h, w = vr[0].shape
width, height = sensible_buckets(self.width, self.height, h, w)
resize = T.transforms.Resize((height, width), antialias=True)
return resize
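# train_data_batch loads one annotated clip: it prefers a cached latent tensor when
# cache_latents is set, otherwise decodes frames with decord, tokenizes the prompt,
# and attaches a moved-area mask plus a motion score computed from the raw frames.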
def train_data_batch(self, index):
vid_data = self.train_data[index]
# Get video prompt
prompt = vid_data['prompt']
# If we are training on individual clips.
if 'clip_path' in self.train_data[index] and \
self.train_data[index]['clip_path'] is not None:
clip_path = vid_data['clip_path']
else:
clip_path = vid_data[self.vid_data_key]
# Get the frame of the current index.
self.sample_start_idx = vid_data['frame_index']
cache_path = os.path.splitext(clip_path)[0] + '.pt'
if self.cache_latents and os.path.exists(cache_path):
return torch.load(cache_path, map_location='cpu')
vr = decord.VideoReader(clip_path)
video = get_frame_batch(self.n_sample_frames, self.fps, vr, self.transform)
prompt_ids = get_prompt_ids(prompt, self.tokenizer)
example = {
"pixel_values": normalize_input(video),
"prompt_ids": prompt_ids,
"text_prompt": prompt,
'dataset': self.__getname__(),
'cache_path': cache_path,
}
mask = get_moved_area_mask(video.permute([0,2,3,1]).numpy())
example['mask'] = mask | example['motion'] = calculate_motion_score(video.permute([0,2,3,1]).numpy()) | 2 | 2023-12-07 08:26:29+00:00 | 4k |
SciPhi-AI/agent-search | agent_search/app/server.py | [
{
"identifier": "load_config",
"path": "agent_search/core/utils.py",
"snippet": "def load_config(config_dir: Optional[str] = None) -> configparser.ConfigParser:\n \"\"\"Load the configuration file.\"\"\"\n config = configparser.ConfigParser()\n if not config_dir:\n config_dir = get_data_path()\n config.read(os.path.join(config_dir, \"config.ini\"))\n return config"
},
{
"identifier": "select_top_urls",
"path": "agent_search/core/utils.py",
"snippet": "def select_top_urls(\n ordered_points: List[AgentSearchResult],\n max_urls: int = 10,\n url_contains: Optional[List[str]] = None,\n) -> List[str]:\n \"\"\"A function to return the top unique URLs from the given poitns results.\"\"\"\n if not url_contains:\n url_contains = []\n\n top_urls = set([])\n for point in ordered_points:\n url = point.url\n if url in top_urls:\n continue\n url_contains_match = False if url_contains else True\n for url_contain in url_contains:\n if url_contain in url:\n url_contains_match = True\n break\n if not url_contains_match:\n continue\n top_urls.add(point.url)\n if len(top_urls) >= max_urls:\n break\n\n return list(top_urls)"
},
{
"identifier": "WebSearchEngine",
"path": "agent_search/search/base.py",
"snippet": "class WebSearchEngine:\n \"\"\"A simple search client for the OpenSearch collection\"\"\"\n\n def __init__(\n self,\n ):\n try:\n import psycopg2\n except ImportError as e:\n raise ImportError(\n f\"Error {e} while imoprting psycopg2. Please install it with `pip install psycopg2` to run an WebSearchEngine instance.\"\n )\n\n # Load config\n self.config = load_config()[\"agent_search\"]\n\n # Load Postgres\n logger.info(\n f\"Connecting to Postgres database at: {self.config['postgres_db']}.\"\n )\n\n # Load qdrant client\n logger.info(\n f\"Connecting to collection: {self.config['qdrant_collection_name']}\"\n )\n self.qdrant_collection_name = self.config[\"qdrant_collection_name\"]\n self.client = QdrantClient(\n self.config[\"qdrant_host\"],\n grpc_port=self.config[\"qdrant_grpc_port\"],\n prefer_grpc=True,\n )\n if not self.client.get_collection(self.qdrant_collection_name):\n raise ValueError(\n f\"Must have a Qdrant collection with the name {self.qdrant_collection_name}.\"\n )\n\n # Load embedding model\n self.embedding_model = AutoModel.from_pretrained(\n self.config[\"embedding_model_name\"], trust_remote_code=True\n )\n\n self.pagerank_rerank_module = self.config[\"pagerank_rerank_module\"]\n pagerank_file_path = self.config[\"pagerank_file_path\"]\n if self.pagerank_rerank_module:\n if not pagerank_file_path:\n # Simulating reading from a CSV file\n pagerank_file_path = os.path.join(\n get_data_path(), \"domain_ranks.csv\"\n )\n\n if not os.path.exists(pagerank_file_path):\n raise ValueError(\n \"Must have a pagerank file at the config specified path when using pagerank_rerank_module\"\n )\n\n self.pagerank_importance = float(\n self.config[\"pagerank_importance\"]\n )\n self.domain_to_rank_map = {}\n\n with open(pagerank_file_path, newline=\"\") as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n domain = row[\"Domain\"]\n rank = float(row[\"Open Page Rank\"])\n self.domain_to_rank_map[domain] = rank\n\n def get_query_vector(self, query: str):\n \"\"\"Gets the query vector for the given query\"\"\"\n\n query_vector = self.embedding_model.encode(query)\n return query_vector\n\n def similarity_search(\n self,\n query_vector: np.ndarray,\n limit: int = 100,\n ):\n \"\"\"Searches the collection for the given query and returns the top 'limit' results\"\"\"\n\n points = self.client.search(\n collection_name=self.qdrant_collection_name,\n query_vector=query_vector,\n limit=limit,\n )\n\n results = []\n for point in points:\n try:\n results.append(\n AgentSearchResult(\n score=point.score,\n text=point.payload[\"text\"],\n title=None,\n url=point.payload[\"url\"],\n metadata={},\n )\n )\n except Exception as e:\n logger.error(f\"Error appending point {point} with {e}\")\n return results\n\n # Example of batch processing\n def execute_batch_query(self, urls, batch_size=20):\n results = []\n try:\n with psycopg2.connect(\n dbname=self.config[\"postgres_db\"],\n user=self.config[\"postgres_user\"],\n password=self.config[\"postgres_password\"],\n host=self.config[\"postgres_host\"],\n options=\"-c client_encoding=UTF8\",\n ) as conn:\n with conn.cursor() as cur:\n for i in range(0, len(urls), batch_size):\n batch_urls = urls[i : i + batch_size]\n logger.info(\n f\"Executing batch query for URLs: {batch_urls[0:2]}\"\n )\n query = f\"SELECT url, title, metadata, dataset, text_chunks, embeddings FROM {self.config['postgres_table_name']} WHERE url = ANY(%s)\"\n cur.execute(query, (batch_urls,))\n batch_results = cur.fetchall()\n results.extend(batch_results)\n except 
psycopg2.DatabaseError as e:\n logger.error(f\"Database error: {e}\")\n except Exception as e:\n logger.error(f\"Error in execute_batch_query: {e}\")\n return results\n\n def hierarchical_similarity_reranking(\n self,\n query_vector: np.ndarray,\n urls: List[str],\n limit: int = 100,\n ) -> List[AgentSearchResult]:\n \"\"\"Hierarchical URL search to find the most similar text chunk for the given query and URLs\"\"\"\n results = self.execute_batch_query(urls)\n # List to store the results along with their similarity scores\n similarity_results = []\n\n # Iterate over each result to find the most similar text chunk\n for result in results:\n (\n url,\n title,\n metadata,\n dataset,\n text_chunks_str,\n embeddings_binary,\n ) = result\n # deserialize the embeddings and text chunks\n embeddings = np.frombuffer(\n embeddings_binary, dtype=np.float32\n ).reshape(-1, 768)\n text_chunks = json.loads(text_chunks_str)\n max_similarity = -1e9\n most_similar_chunk = None\n\n # Iterate over each embedding to find the one with maximum cosine similarity\n for chunk, embedding in zip(text_chunks, embeddings):\n similarity = cosine_similarity(\n np.array(query_vector), np.array(embedding)\n )\n if similarity > max_similarity:\n max_similarity = similarity\n most_similar_chunk = chunk\n\n # Store the most similar chunk and its similarity score\n similarity_results.append(\n AgentSearchResult(\n score=max_similarity,\n url=url,\n title=title,\n metadata=json.loads(metadata),\n dataset=dataset,\n text=most_similar_chunk,\n ),\n )\n\n # Sort the results based on similarity score in descending order\n similarity_results.sort(key=lambda x: x.score, reverse=True)\n return similarity_results[:limit]\n\n def pagerank_reranking(\n self,\n similarity_results: List[AgentSearchResult],\n limit: int = 100,\n ) -> List[AgentSearchResult]:\n \"\"\"Reranks the results based on the PageRank score of the domain\"\"\"\n if not self.pagerank_rerank_module:\n raise Exception(\n \"PageRank reranking module is not enabled. Please set pagerank_rerank_module=True while initializing the WebSearchEngine client.\"\n )\n # List to store the results along with their PageRank scores\n pagerank_results = []\n\n # Iterate over each result to find the PageRank score of the domain\n for result in similarity_results:\n pagerank_score = 0\n try:\n domain = result.url.split(\"/\")[2]\n pagerank_score = self.domain_to_rank_map.get(domain, 0)\n except Exception as e:\n logger.info(f\"Error {e}: Found for URL: {result.url}\")\n reweighted_score = (\n self.pagerank_importance * pagerank_score / 10.0\n + (1 - self.pagerank_importance) * result.score\n )\n pagerank_results.append(\n AgentSearchResult(\n score=reweighted_score,\n url=result.url,\n title=result.title,\n metadata=result.metadata,\n dataset=result.dataset,\n text=result.text,\n )\n )\n\n # Sort the results based on PageRank score in descending order\n pagerank_results.sort(key=lambda x: x.score, reverse=True)\n return pagerank_results[:limit]"
}
] | import logging
import time
import uvicorn
from typing import Optional
from pydantic import BaseModel
from agent_search.core.utils import load_config, select_top_urls
from agent_search.search import WebSearchEngine
from fastapi import FastAPI, HTTPException | 3,005 |
# Attempt to import uvicorn and FastAPI
try:
except ImportError as e:
raise ImportError(
f"Error: {e}, Note - both uvicorn and FastAPI are required to run the server."
)
logger = logging.getLogger(__name__)
class SearchServer:
def __init__(self):
self.client = WebSearchEngine()
def run(
self,
query="What is a lagrangian?",
limit_broad_results=1_000,
limit_deduped_url_results=50,
limit_hierarchical_url_results=50,
limit_final_pagerank_results=20,
url_contains_filter=None,
):
"""Run a search query using the WebSearchEngine client"""
query_vector = self.client.get_query_vector(query)
broad_results = self.client.similarity_search(
query_vector=query_vector, limit=limit_broad_results
)
if not url_contains_filter:
url_contains_filter = []
deduped_url_results = select_top_urls(
broad_results,
max_urls=limit_deduped_url_results,
url_contains=url_contains_filter,
)
hierarchical_url_results = (
self.client.hierarchical_similarity_reranking(
query_vector=query_vector,
urls=deduped_url_results,
limit=limit_hierarchical_url_results,
)
)
pagerank_reranked_results = self.client.pagerank_reranking(
hierarchical_url_results
)[:limit_final_pagerank_results]
return pagerank_reranked_results
class SearchQuery(BaseModel):
"""A search query data model"""
query: str
limit_broad_results: Optional[int] = 1_000
limit_deduped_url_results: Optional[int] = 100
limit_hierarchical_url_results: Optional[int] = 25
limit_final_pagerank_results: Optional[int] = 10
app = FastAPI()
search_runner = SearchServer()
def check_limits(query: SearchQuery):
"""Check if the limit parameters exceed three times their default values"""
if query.limit_broad_results > 3 * 1_000:
raise ValueError(
"limit_broad_results exceeds 3 times its default value"
)
if query.limit_deduped_url_results > 3 * 100:
raise ValueError(
"limit_deduped_url_results exceeds 3 times its default value"
)
if query.limit_hierarchical_url_results > 3 * 25:
raise ValueError(
"limit_hierarchical_url_results exceeds 3 times its default value"
)
if query.limit_final_pagerank_results > 3 * 10:
raise ValueError(
"limit_final_pagerank_results exceeds 3 times its default value"
)
@app.post("/search")
def run_search(query: SearchQuery):
"""Run a search query"""
try:
check_limits(query)
results = search_runner.run(
query=query.query,
limit_broad_results=query.limit_broad_results,
limit_deduped_url_results=query.limit_deduped_url_results,
limit_hierarchical_url_results=query.limit_hierarchical_url_results,
limit_final_pagerank_results=query.limit_final_pagerank_results,
)
return {"results": results}
except ValueError as e:
logger.error(f"ValueError {e} = ", e)
raise HTTPException(status_code=400, detail=str(e))
except Exception as e:
logger.error(f"Exception {e} = ", e)
raise HTTPException(status_code=500, detail=str(e))
@app.get("/health")
def health_check():
"""Health check endpoint"""
return {"status": "ok"}
if __name__ == "__main__":
|
# Attempt to import uvicorn and FastAPI
try:
except ImportError as e:
raise ImportError(
f"Error: {e}, Note - both uvicorn and FastAPI are required to run the server."
)
logger = logging.getLogger(__name__)
class SearchServer:
def __init__(self):
self.client = WebSearchEngine()
def run(
self,
query="What is a lagrangian?",
limit_broad_results=1_000,
limit_deduped_url_results=50,
limit_hierarchical_url_results=50,
limit_final_pagerank_results=20,
url_contains_filter=None,
):
"""Run a search query using the WebSearchEngine client"""
query_vector = self.client.get_query_vector(query)
broad_results = self.client.similarity_search(
query_vector=query_vector, limit=limit_broad_results
)
if not url_contains_filter:
url_contains_filter = []
deduped_url_results = select_top_urls(
broad_results,
max_urls=limit_deduped_url_results,
url_contains=url_contains_filter,
)
hierarchical_url_results = (
self.client.hierarchical_similarity_reranking(
query_vector=query_vector,
urls=deduped_url_results,
limit=limit_hierarchical_url_results,
)
)
pagerank_reranked_results = self.client.pagerank_reranking(
hierarchical_url_results
)[:limit_final_pagerank_results]
return pagerank_reranked_results
class SearchQuery(BaseModel):
"""A search query data model"""
query: str
limit_broad_results: Optional[int] = 1_000
limit_deduped_url_results: Optional[int] = 100
limit_hierarchical_url_results: Optional[int] = 25
limit_final_pagerank_results: Optional[int] = 10
app = FastAPI()
search_runner = SearchServer()
def check_limits(query: SearchQuery):
"""Check if the limit parameters exceed three times their default values"""
if query.limit_broad_results > 3 * 1_000:
raise ValueError(
"limit_broad_results exceeds 3 times its default value"
)
if query.limit_deduped_url_results > 3 * 100:
raise ValueError(
"limit_deduped_url_results exceeds 3 times its default value"
)
if query.limit_hierarchical_url_results > 3 * 25:
raise ValueError(
"limit_hierarchical_url_results exceeds 3 times its default value"
)
if query.limit_final_pagerank_results > 3 * 10:
raise ValueError(
"limit_final_pagerank_results exceeds 3 times its default value"
)
@app.post("/search")
def run_search(query: SearchQuery):
"""Run a search query"""
try:
check_limits(query)
results = search_runner.run(
query=query.query,
limit_broad_results=query.limit_broad_results,
limit_deduped_url_results=query.limit_deduped_url_results,
limit_hierarchical_url_results=query.limit_hierarchical_url_results,
limit_final_pagerank_results=query.limit_final_pagerank_results,
)
return {"results": results}
except ValueError as e:
logger.error(f"ValueError {e} = ", e)
raise HTTPException(status_code=400, detail=str(e))
except Exception as e:
logger.error(f"Exception {e} = ", e)
raise HTTPException(status_code=500, detail=str(e))
@app.get("/health")
def health_check():
"""Health check endpoint"""
return {"status": "ok"}
if __name__ == "__main__": | config = load_config()["server"] | 0 | 2023-12-11 17:41:03+00:00 | 4k |
yohanshin/WHAM | lib/models/layers/modules.py | [
{
"identifier": "constants",
"path": "configs/constants.py",
"snippet": "IMG_FEAT_DIM = {\n 'resnet': 2048,\n 'vit': 1024\n}\nN_JOINTS = 17\n PARSED_DATA = f'{root}/parsed_data'\n THREEDPW_PTH = f'{root}/3DPW'\n RICH_PTH = f'{root}/RICH'\n EMDB_PTH = f'{root}/EMDB'\n NUM_JOINTS = N_JOINTS\n H36M_TO_J17 = [6, 5, 4, 1, 2, 3, 16, 15, 14, 11, 12, 13, 8, 10, 0, 7, 9]\n H36M_TO_J14 = H36M_TO_J17[:14]\n J17_TO_H36M = [14, 3, 4, 5, 2, 1, 0, 15, 12, 16, 13, 9, 10, 11, 8, 7, 6]\n COCO_AUG_DICT = f'{root}/body_models/coco_aug_dict.pth'\n TREE = [[5, 6], 0, 0, 1, 2, -1, -1, 5, 6, 7, 8, -1, -1, 11, 12, 13, 14, 15, 15, 15, 16, 16, 16]\n S_BIAS = 1e-1\n S_JITTERING = 5e-2\n S_PEAK = 3e-1\n S_PEAK_MASK = 5e-3\n S_MASK = 0.03\n MAIN_JOINTS = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21] # reduced_joints\n FLDR = f'{root}/body_models/smpl/'\n SMPLX2SMPL = f'{root}/body_models/smplx2smpl.pkl'\n FACES = f'{root}/body_models/smpl_faces.npy'\n MEAN_PARAMS = f'{root}/body_models/smpl_mean_params.npz'\n JOINTS_REGRESSOR_WHAM = f'{root}/body_models/J_regressor_wham.npy'\n JOINTS_REGRESSOR_H36M = f'{root}/body_models/J_regressor_h36m.npy'\n JOINTS_REGRESSOR_EXTRA = f'{root}/body_models/J_regressor_extra.npy'\n JOINTS_REGRESSOR_FEET = f'{root}/body_models/J_regressor_feet.npy'\n PARENTS = torch.tensor([\n -1, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9, 9, 12, 13, 14, 16, 17, 18, 19, 20, 21])\nclass PATHS:\nclass KEYPOINTS:\nclass BMODEL:"
},
{
"identifier": "rollout_global_motion",
"path": "lib/models/layers/utils.py",
"snippet": "def rollout_global_motion(root_r, root_v, init_trans=None):\n b, f = root_v.shape[:2]\n root = transforms.rotation_6d_to_matrix(root_r[:])\n vel_world = (root[:, :-1] @ root_v.unsqueeze(-1)).squeeze(-1)\n trans = torch.cumsum(vel_world, dim=1)\n \n if init_trans is not None: trans = trans + init_trans\n return root[:, 1:], trans"
},
{
"identifier": "axis_angle_to_matrix",
"path": "lib/utils/transforms.py",
"snippet": "def axis_angle_to_matrix(axis_angle: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Convert rotations given as axis/angle to rotation matrices.\n\n Args:\n axis_angle: Rotations given as a vector in axis angle form,\n as a tensor of shape (..., 3), where the magnitude is\n the angle turned anticlockwise in radians around the\n vector's direction.\n\n Returns:\n Rotation matrices as tensor of shape (..., 3, 3).\n \"\"\"\n return quaternion_to_matrix(axis_angle_to_quaternion(axis_angle))"
}
] | import torch
import numpy as np
from torch import nn
from configs import constants as _C
from .utils import rollout_global_motion
from lib.utils.transforms import axis_angle_to_matrix | 3,193 | pred_list = [init[..., :self.n_joints * 3]]
motion_context_list = []
for i in range(self.f):
(pred_kp3d, ), motion_context, h0 = self.regressor(x[:, [i]], pred_list[-1:], h0)
motion_context_list.append(motion_context)
pred_list.append(pred_kp3d)
pred_kp3d = torch.cat(pred_list[1:], dim=1).view(self.b, self.f, -1, 3)
motion_context = torch.cat(motion_context_list, dim=1)
# Merge 3D keypoints with motion context
motion_context = torch.cat((motion_context, pred_kp3d.reshape(self.b, self.f, -1)), dim=-1)
return pred_kp3d, motion_context
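# TrajectoryDecoder: frame-by-frame recurrent regression of the global root. At each
# step it feeds the previous root rotation and the camera rotation input (cam_a) into
# the shared Regressor, producing a per-frame root velocity plus a 6D root rotation.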
class TrajectoryDecoder(nn.Module):
def __init__(self,
d_embed,
rnn_type,
n_layers):
super().__init__()
# Trajectory regressor
self.regressor = Regressor(
d_embed, d_embed, [3, 6], 12, rnn_type, n_layers, )
def forward(self, x, root, cam_a, h0=None):
""" Forward pass of trajectory decoder.
"""
b, f = x.shape[:2]
pred_root_list, pred_vel_list = [root[:, :1]], []
for i in range(f):
# Global coordinate estimation
(pred_rootv, pred_rootr), _, h0 = self.regressor(
x[:, [i]], [pred_root_list[-1], cam_a[:, [i]]], h0)
pred_root_list.append(pred_rootr)
pred_vel_list.append(pred_rootv)
pred_root = torch.cat(pred_root_list, dim=1).view(b, f + 1, -1)
pred_vel = torch.cat(pred_vel_list, dim=1).view(b, f, -1)
return pred_root, pred_vel
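# MotionDecoder: autoregressive SMPL head. Initialised from the main-joint pose, it
# recursively predicts, per frame, the 24-joint pose in 6D rotations, the 10 shape
# coefficients, a 3-dim camera parameter, and a 4-dim contact output (consumed as
# foot contact by TrajectoryRefiner).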
class MotionDecoder(nn.Module):
def __init__(self,
d_embed,
rnn_type,
n_layers):
super().__init__()
self.n_pose = 24
# SMPL pose initialization
self.neural_init = NeuralInitialization(len(_C.BMODEL.MAIN_JOINTS) * 6, d_embed, rnn_type, n_layers)
# 3d keypoints regressor
self.regressor = Regressor(
d_embed, d_embed, [self.n_pose * 6, 10, 3, 4], self.n_pose * 6, rnn_type, n_layers)
def forward(self, x, init):
""" Forward pass of motion decoder.
"""
b, f = x.shape[:2]
h0 = self.neural_init(init[:, :, _C.BMODEL.MAIN_JOINTS].reshape(b, 1, -1))
# Recursive prediction of SMPL parameters
pred_pose_list = [init.reshape(b, 1, -1)]
pred_shape_list, pred_cam_list, pred_contact_list = [], [], []
for i in range(f):
# Camera coordinate estimation
(pred_pose, pred_shape, pred_cam, pred_contact), _, h0 = self.regressor(x[:, [i]], pred_pose_list[-1:], h0)
pred_pose_list.append(pred_pose)
pred_shape_list.append(pred_shape)
pred_cam_list.append(pred_cam)
pred_contact_list.append(pred_contact)
pred_pose = torch.cat(pred_pose_list[1:], dim=1).view(b, f, -1)
pred_shape = torch.cat(pred_shape_list, dim=1).view(b, f, -1)
pred_cam = torch.cat(pred_cam_list, dim=1).view(b, f, -1)
pred_contact = torch.cat(pred_contact_list, dim=1).view(b, f, -1)
return pred_pose, pred_shape, pred_cam, pred_contact
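# TrajectoryRefiner: refines the global trajectory. Foot velocities gated by the
# predicted contact form an extra input feature, and the Regressor predicts residual
# corrections to the root orientation and root velocity before the global motion is
# rolled out again (see rollout_global_motion).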
class TrajectoryRefiner(nn.Module):
def __init__(self,
d_embed,
d_hidden,
rnn_type,
n_layers):
super().__init__()
d_input = d_embed + 12
self.refiner = Regressor(
d_input, d_hidden, [6, 3], 9, rnn_type, n_layers)
def forward(self, context, pred_vel, output, cam_angvel, return_y_up):
b, f = context.shape[:2]
# Register values
pred_pose = output['pose'].clone().detach()
pred_root = output['poses_root_r6d'].clone().detach()
feet = output['feet'].clone().detach()
contact = output['contact'].clone().detach()
feet_vel = torch.cat((torch.zeros_like(feet[:, :1]), feet[:, 1:] - feet[:, :-1]), dim=1) * 30 # Normalize to 30 times
feet = (feet_vel * contact.unsqueeze(-1)).reshape(b, f, -1) # Velocity input
inpt_feat = torch.cat([context, feet], dim=-1)
(delta_root, delta_vel), _, _ = self.refiner(inpt_feat, [pred_root[:, 1:], pred_vel], h0=None)
pred_root[:, 1:] = pred_root[:, 1:] + delta_root
pred_vel = pred_vel + delta_vel
| from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
class Regressor(nn.Module):
def __init__(self, in_dim, hid_dim, out_dims, init_dim, layer='LSTM', n_layers=2, n_iters=1):
super().__init__()
self.n_outs = len(out_dims)
self.rnn = getattr(nn, layer.upper())(
in_dim + init_dim, hid_dim, n_layers,
bidirectional=False, batch_first=True, dropout=0.3)
for i, out_dim in enumerate(out_dims):
setattr(self, 'declayer%d'%i, nn.Linear(hid_dim, out_dim))
nn.init.xavier_uniform_(getattr(self, 'declayer%d'%i).weight, gain=0.01)
def forward(self, x, inits, h0):
xc = torch.cat([x, *inits], dim=-1)
xc, h0 = self.rnn(xc, h0)
preds = []
for j in range(self.n_outs):
out = getattr(self, 'declayer%d'%j)(xc)
preds.append(out)
return preds, xc, h0
class NeuralInitialization(nn.Module):
def __init__(self, in_dim, hid_dim, layer, n_layers):
super().__init__()
out_dim = hid_dim
self.n_layers = n_layers
self.num_inits = int(layer.upper() == 'LSTM') + 1
out_dim *= self.num_inits * n_layers
self.linear1 = nn.Linear(in_dim, hid_dim)
self.linear2 = nn.Linear(hid_dim, hid_dim * self.n_layers)
self.linear3 = nn.Linear(hid_dim * self.n_layers, out_dim)
self.relu1 = nn.ReLU()
self.relu2 = nn.ReLU()
def forward(self, x):
b = x.shape[0]
out = self.linear3(self.relu2(self.linear2(self.relu1(self.linear1(x)))))
out = out.view(b, self.num_inits, self.n_layers, -1).permute(1, 2, 0, 3).contiguous()
if self.num_inits == 2:
return tuple([_ for _ in out])
return out[0]
class Integrator(nn.Module):
def __init__(self, in_channel, out_channel, hid_channel=1024):
super().__init__()
self.layer1 = nn.Linear(in_channel, hid_channel)
self.relu1 = nn.ReLU()
self.dr1 = nn.Dropout(0.1)
self.layer2 = nn.Linear(hid_channel, hid_channel)
self.relu2 = nn.ReLU()
self.dr2 = nn.Dropout(0.1)
self.layer3 = nn.Linear(hid_channel, out_channel)
def forward(self, x, feat):
res = x
mask = (feat != 0).all(dim=-1).all(dim=-1)
out = torch.cat((x, feat), dim=-1)
out = self.layer1(out)
out = self.relu1(out)
out = self.dr1(out)
out = self.layer2(out)
out = self.relu2(out)
out = self.dr2(out)
out = self.layer3(out)
out[mask] = out[mask] + res[mask]
return out
class MotionEncoder(nn.Module):
def __init__(self,
in_dim,
d_embed,
pose_dr,
rnn_type,
n_layers,
n_joints):
super().__init__()
self.n_joints = n_joints
self.embed_layer = nn.Linear(in_dim, d_embed)
self.pos_drop = nn.Dropout(pose_dr)
# Keypoints initializer
self.neural_init = NeuralInitialization(n_joints * 3 + in_dim, d_embed, rnn_type, n_layers)
# 3d keypoints regressor
self.regressor = Regressor(
d_embed, d_embed, [n_joints * 3], n_joints * 3, rnn_type, n_layers)
def forward(self, x, init):
""" Forward pass of motion encoder.
"""
self.b, self.f = x.shape[:2]
x = self.embed_layer(x.reshape(self.b, self.f, -1))
x = self.pos_drop(x)
h0 = self.neural_init(init)
pred_list = [init[..., :self.n_joints * 3]]
motion_context_list = []
for i in range(self.f):
(pred_kp3d, ), motion_context, h0 = self.regressor(x[:, [i]], pred_list[-1:], h0)
motion_context_list.append(motion_context)
pred_list.append(pred_kp3d)
pred_kp3d = torch.cat(pred_list[1:], dim=1).view(self.b, self.f, -1, 3)
motion_context = torch.cat(motion_context_list, dim=1)
# Merge 3D keypoints with motion context
motion_context = torch.cat((motion_context, pred_kp3d.reshape(self.b, self.f, -1)), dim=-1)
return pred_kp3d, motion_context
class TrajectoryDecoder(nn.Module):
def __init__(self,
d_embed,
rnn_type,
n_layers):
super().__init__()
# Trajectory regressor
self.regressor = Regressor(
d_embed, d_embed, [3, 6], 12, rnn_type, n_layers, )
def forward(self, x, root, cam_a, h0=None):
""" Forward pass of trajectory decoder.
"""
b, f = x.shape[:2]
pred_root_list, pred_vel_list = [root[:, :1]], []
for i in range(f):
# Global coordinate estimation
(pred_rootv, pred_rootr), _, h0 = self.regressor(
x[:, [i]], [pred_root_list[-1], cam_a[:, [i]]], h0)
pred_root_list.append(pred_rootr)
pred_vel_list.append(pred_rootv)
pred_root = torch.cat(pred_root_list, dim=1).view(b, f + 1, -1)
pred_vel = torch.cat(pred_vel_list, dim=1).view(b, f, -1)
return pred_root, pred_vel
class MotionDecoder(nn.Module):
def __init__(self,
d_embed,
rnn_type,
n_layers):
super().__init__()
self.n_pose = 24
# SMPL pose initialization
self.neural_init = NeuralInitialization(len(_C.BMODEL.MAIN_JOINTS) * 6, d_embed, rnn_type, n_layers)
# 3d keypoints regressor
self.regressor = Regressor(
d_embed, d_embed, [self.n_pose * 6, 10, 3, 4], self.n_pose * 6, rnn_type, n_layers)
def forward(self, x, init):
""" Forward pass of motion decoder.
"""
b, f = x.shape[:2]
h0 = self.neural_init(init[:, :, _C.BMODEL.MAIN_JOINTS].reshape(b, 1, -1))
# Recursive prediction of SMPL parameters
pred_pose_list = [init.reshape(b, 1, -1)]
pred_shape_list, pred_cam_list, pred_contact_list = [], [], []
for i in range(f):
# Camera coordinate estimation
(pred_pose, pred_shape, pred_cam, pred_contact), _, h0 = self.regressor(x[:, [i]], pred_pose_list[-1:], h0)
pred_pose_list.append(pred_pose)
pred_shape_list.append(pred_shape)
pred_cam_list.append(pred_cam)
pred_contact_list.append(pred_contact)
pred_pose = torch.cat(pred_pose_list[1:], dim=1).view(b, f, -1)
pred_shape = torch.cat(pred_shape_list, dim=1).view(b, f, -1)
pred_cam = torch.cat(pred_cam_list, dim=1).view(b, f, -1)
pred_contact = torch.cat(pred_contact_list, dim=1).view(b, f, -1)
return pred_pose, pred_shape, pred_cam, pred_contact
class TrajectoryRefiner(nn.Module):
def __init__(self,
d_embed,
d_hidden,
rnn_type,
n_layers):
super().__init__()
d_input = d_embed + 12
self.refiner = Regressor(
d_input, d_hidden, [6, 3], 9, rnn_type, n_layers)
def forward(self, context, pred_vel, output, cam_angvel, return_y_up):
b, f = context.shape[:2]
# Register values
pred_pose = output['pose'].clone().detach()
pred_root = output['poses_root_r6d'].clone().detach()
feet = output['feet'].clone().detach()
contact = output['contact'].clone().detach()
feet_vel = torch.cat((torch.zeros_like(feet[:, :1]), feet[:, 1:] - feet[:, :-1]), dim=1) * 30 # Normalize to 30 times
feet = (feet_vel * contact.unsqueeze(-1)).reshape(b, f, -1) # Velocity input
inpt_feat = torch.cat([context, feet], dim=-1)
(delta_root, delta_vel), _, _ = self.refiner(inpt_feat, [pred_root[:, 1:], pred_vel], h0=None)
pred_root[:, 1:] = pred_root[:, 1:] + delta_root
pred_vel = pred_vel + delta_vel
| root_world, trans_world = rollout_global_motion(pred_root, pred_vel) | 1 | 2023-12-08 09:17:54+00:00 | 4k |
Pointcept/PointTransformerV3 | serialization/default.py | [
{
"identifier": "xyz2key",
"path": "serialization/z_order.py",
"snippet": "def xyz2key(self, x, y, z, depth):\r\n key = torch.zeros_like(x)\r\n for i in range(depth):\r\n mask = 1 << i\r\n key = (\r\n key\r\n | ((x & mask) << (2 * i + 2))\r\n | ((y & mask) << (2 * i + 1))\r\n | ((z & mask) << (2 * i + 0))\r\n )\r\n return key\r"
},
{
"identifier": "key2xyz",
"path": "serialization/z_order.py",
"snippet": "def key2xyz(self, key, depth):\r\n x = torch.zeros_like(key)\r\n y = torch.zeros_like(key)\r\n z = torch.zeros_like(key)\r\n for i in range(depth):\r\n x = x | ((key & (1 << (3 * i + 2))) >> (2 * i + 2))\r\n y = y | ((key & (1 << (3 * i + 1))) >> (2 * i + 1))\r\n z = z | ((key & (1 << (3 * i + 0))) >> (2 * i + 0))\r\n return x, y, z\r"
},
{
"identifier": "encode",
"path": "serialization/hilbert.py",
"snippet": "def encode(locs, num_dims, num_bits):\r\n \"\"\"Decode an array of locations in a hypercube into a Hilbert integer.\r\n\r\n This is a vectorized-ish version of the Hilbert curve implementation by John\r\n Skilling as described in:\r\n\r\n Skilling, J. (2004, April). Programming the Hilbert curve. In AIP Conference\r\n Proceedings (Vol. 707, No. 1, pp. 381-387). American Institute of Physics.\r\n\r\n Params:\r\n -------\r\n locs - An ndarray of locations in a hypercube of num_dims dimensions, in\r\n which each dimension runs from 0 to 2**num_bits-1. The shape can\r\n be arbitrary, as long as the last dimension of the same has size\r\n num_dims.\r\n\r\n num_dims - The dimensionality of the hypercube. Integer.\r\n\r\n num_bits - The number of bits for each dimension. Integer.\r\n\r\n Returns:\r\n --------\r\n The output is an ndarray of uint64 integers with the same shape as the\r\n input, excluding the last dimension, which needs to be num_dims.\r\n \"\"\"\r\n\r\n # Keep around the original shape for later.\r\n orig_shape = locs.shape\r\n bitpack_mask = 1 << torch.arange(0, 8).to(locs.device)\r\n bitpack_mask_rev = bitpack_mask.flip(-1)\r\n\r\n if orig_shape[-1] != num_dims:\r\n raise ValueError(\r\n \"\"\"\r\n The shape of locs was surprising in that the last dimension was of size\r\n %d, but num_dims=%d. These need to be equal.\r\n \"\"\"\r\n % (orig_shape[-1], num_dims)\r\n )\r\n\r\n if num_dims * num_bits > 63:\r\n raise ValueError(\r\n \"\"\"\r\n num_dims=%d and num_bits=%d for %d bits total, which can't be encoded\r\n into a int64. Are you sure you need that many points on your Hilbert\r\n curve?\r\n \"\"\"\r\n % (num_dims, num_bits, num_dims * num_bits)\r\n )\r\n\r\n # Treat the location integers as 64-bit unsigned and then split them up into\r\n # a sequence of uint8s. 
Preserve the association by dimension.\r\n locs_uint8 = locs.long().view(torch.uint8).reshape((-1, num_dims, 8)).flip(-1)\r\n\r\n # Now turn these into bits and truncate to num_bits.\r\n gray = (\r\n locs_uint8.unsqueeze(-1)\r\n .bitwise_and(bitpack_mask_rev)\r\n .ne(0)\r\n .byte()\r\n .flatten(-2, -1)[..., -num_bits:]\r\n )\r\n\r\n # Run the decoding process the other way.\r\n # Iterate forwards through the bits.\r\n for bit in range(0, num_bits):\r\n # Iterate forwards through the dimensions.\r\n for dim in range(0, num_dims):\r\n # Identify which ones have this bit active.\r\n mask = gray[:, dim, bit]\r\n\r\n # Where this bit is on, invert the 0 dimension for lower bits.\r\n gray[:, 0, bit + 1 :] = torch.logical_xor(\r\n gray[:, 0, bit + 1 :], mask[:, None]\r\n )\r\n\r\n # Where the bit is off, exchange the lower bits with the 0 dimension.\r\n to_flip = torch.logical_and(\r\n torch.logical_not(mask[:, None]).repeat(1, gray.shape[2] - bit - 1),\r\n torch.logical_xor(gray[:, 0, bit + 1 :], gray[:, dim, bit + 1 :]),\r\n )\r\n gray[:, dim, bit + 1 :] = torch.logical_xor(\r\n gray[:, dim, bit + 1 :], to_flip\r\n )\r\n gray[:, 0, bit + 1 :] = torch.logical_xor(gray[:, 0, bit + 1 :], to_flip)\r\n\r\n # Now flatten out.\r\n gray = gray.swapaxes(1, 2).reshape((-1, num_bits * num_dims))\r\n\r\n # Convert Gray back to binary.\r\n hh_bin = gray2binary(gray)\r\n\r\n # Pad back out to 64 bits.\r\n extra_dims = 64 - num_bits * num_dims\r\n padded = torch.nn.functional.pad(hh_bin, (extra_dims, 0), \"constant\", 0)\r\n\r\n # Convert binary values into uint8s.\r\n hh_uint8 = (\r\n (padded.flip(-1).reshape((-1, 8, 8)) * bitpack_mask)\r\n .sum(2)\r\n .squeeze()\r\n .type(torch.uint8)\r\n )\r\n\r\n # Convert uint8s into uint64s.\r\n hh_uint64 = hh_uint8.view(torch.int64).squeeze()\r\n\r\n return hh_uint64\r"
},
{
"identifier": "decode",
"path": "serialization/hilbert.py",
"snippet": "def decode(hilberts, num_dims, num_bits):\r\n \"\"\"Decode an array of Hilbert integers into locations in a hypercube.\r\n\r\n This is a vectorized-ish version of the Hilbert curve implementation by John\r\n Skilling as described in:\r\n\r\n Skilling, J. (2004, April). Programming the Hilbert curve. In AIP Conference\r\n Proceedings (Vol. 707, No. 1, pp. 381-387). American Institute of Physics.\r\n\r\n Params:\r\n -------\r\n hilberts - An ndarray of Hilbert integers. Must be an integer dtype and\r\n cannot have fewer bits than num_dims * num_bits.\r\n\r\n num_dims - The dimensionality of the hypercube. Integer.\r\n\r\n num_bits - The number of bits for each dimension. Integer.\r\n\r\n Returns:\r\n --------\r\n The output is an ndarray of unsigned integers with the same shape as hilberts\r\n but with an additional dimension of size num_dims.\r\n \"\"\"\r\n\r\n if num_dims * num_bits > 64:\r\n raise ValueError(\r\n \"\"\"\r\n num_dims=%d and num_bits=%d for %d bits total, which can't be encoded\r\n into a uint64. Are you sure you need that many points on your Hilbert\r\n curve?\r\n \"\"\"\r\n % (num_dims, num_bits)\r\n )\r\n\r\n # Handle the case where we got handed a naked integer.\r\n hilberts = torch.atleast_1d(hilberts)\r\n\r\n # Keep around the shape for later.\r\n orig_shape = hilberts.shape\r\n bitpack_mask = 2 ** torch.arange(0, 8).to(hilberts.device)\r\n bitpack_mask_rev = bitpack_mask.flip(-1)\r\n\r\n # Treat each of the hilberts as a s equence of eight uint8.\r\n # This treats all of the inputs as uint64 and makes things uniform.\r\n hh_uint8 = (\r\n hilberts.ravel().type(torch.int64).view(torch.uint8).reshape((-1, 8)).flip(-1)\r\n )\r\n\r\n # Turn these lists of uints into lists of bits and then truncate to the size\r\n # we actually need for using Skilling's procedure.\r\n hh_bits = (\r\n hh_uint8.unsqueeze(-1)\r\n .bitwise_and(bitpack_mask_rev)\r\n .ne(0)\r\n .byte()\r\n .flatten(-2, -1)[:, -num_dims * num_bits :]\r\n )\r\n\r\n # Take the sequence of bits and Gray-code it.\r\n gray = binary2gray(hh_bits)\r\n\r\n # There has got to be a better way to do this.\r\n # I could index them differently, but the eventual packbits likes it this way.\r\n gray = gray.reshape((-1, num_bits, num_dims)).swapaxes(1, 2)\r\n\r\n # Iterate backwards through the bits.\r\n for bit in range(num_bits - 1, -1, -1):\r\n # Iterate backwards through the dimensions.\r\n for dim in range(num_dims - 1, -1, -1):\r\n # Identify which ones have this bit active.\r\n mask = gray[:, dim, bit]\r\n\r\n # Where this bit is on, invert the 0 dimension for lower bits.\r\n gray[:, 0, bit + 1 :] = torch.logical_xor(\r\n gray[:, 0, bit + 1 :], mask[:, None]\r\n )\r\n\r\n # Where the bit is off, exchange the lower bits with the 0 dimension.\r\n to_flip = torch.logical_and(\r\n torch.logical_not(mask[:, None]),\r\n torch.logical_xor(gray[:, 0, bit + 1 :], gray[:, dim, bit + 1 :]),\r\n )\r\n gray[:, dim, bit + 1 :] = torch.logical_xor(\r\n gray[:, dim, bit + 1 :], to_flip\r\n )\r\n gray[:, 0, bit + 1 :] = torch.logical_xor(gray[:, 0, bit + 1 :], to_flip)\r\n\r\n # Pad back out to 64 bits.\r\n extra_dims = 64 - num_bits\r\n padded = torch.nn.functional.pad(gray, (extra_dims, 0), \"constant\", 0)\r\n\r\n # Now chop these up into blocks of 8.\r\n locs_chopped = padded.flip(-1).reshape((-1, num_dims, 8, 8))\r\n\r\n # Take those blocks and turn them unto uint8s.\r\n # from IPython import embed; embed()\r\n locs_uint8 = (locs_chopped * bitpack_mask).sum(3).squeeze().type(torch.uint8)\r\n\r\n # Finally, treat these as 
uint64s.\r\n flat_locs = locs_uint8.view(torch.int64)\r\n\r\n # Return them in the expected shape.\r\n return flat_locs.reshape((*orig_shape, num_dims))\r"
}
] | import torch
from .z_order import xyz2key as z_order_encode_
from .z_order import key2xyz as z_order_decode_
from .hilbert import encode as hilbert_encode_
from .hilbert import decode as hilbert_decode_
| 3,088 |
@torch.inference_mode()
def encode(grid_coord, batch=None, depth=16, order="z"):
assert order in {"z", "z-trans", "hilbert", "hilbert-trans"}
if order == "z":
code = z_order_encode(grid_coord, depth=depth)
elif order == "z-trans":
code = z_order_encode(grid_coord[:, [1, 0, 2]], depth=depth)
elif order == "hilbert":
code = hilbert_encode(grid_coord, depth=depth)
elif order == "hilbert-trans":
code = hilbert_encode(grid_coord[:, [1, 0, 2]], depth=depth)
else:
raise NotImplementedError
if batch is not None:
batch = batch.long()
code = batch << depth * 3 | code
return code
@torch.inference_mode()
def decode(code, depth=16, order="z"):
assert order in {"z", "hilbert"}
batch = code >> depth * 3
code = code & ((1 << depth * 3) - 1)
if order == "z":
grid_coord = z_order_decode(code, depth=depth)
elif order == "hilbert":
grid_coord = hilbert_decode(code, depth=depth)
else:
raise NotImplementedError
return grid_coord, batch
def z_order_encode(grid_coord: torch.Tensor, depth: int = 16):
x, y, z = grid_coord[:, 0].long(), grid_coord[:, 1].long(), grid_coord[:, 2].long()
    # batch support is intentionally not handled here; batched codes are maintained in the Point class
code = z_order_encode_(x, y, z, b=None, depth=depth)
return code
def z_order_decode(code: torch.Tensor, depth):
x, y, z = z_order_decode_(code, depth=depth)
grid_coord = torch.stack([x, y, z], dim=-1) # (N, 3)
return grid_coord
def hilbert_encode(grid_coord: torch.Tensor, depth: int = 16):
return hilbert_encode_(grid_coord, num_dims=3, num_bits=depth)
def hilbert_decode(code: torch.Tensor, depth: int = 16):
|
@torch.inference_mode()
def encode(grid_coord, batch=None, depth=16, order="z"):
assert order in {"z", "z-trans", "hilbert", "hilbert-trans"}
if order == "z":
code = z_order_encode(grid_coord, depth=depth)
elif order == "z-trans":
code = z_order_encode(grid_coord[:, [1, 0, 2]], depth=depth)
elif order == "hilbert":
code = hilbert_encode(grid_coord, depth=depth)
elif order == "hilbert-trans":
code = hilbert_encode(grid_coord[:, [1, 0, 2]], depth=depth)
else:
raise NotImplementedError
if batch is not None:
batch = batch.long()
code = batch << depth * 3 | code
return code
@torch.inference_mode()
def decode(code, depth=16, order="z"):
assert order in {"z", "hilbert"}
batch = code >> depth * 3
code = code & ((1 << depth * 3) - 1)
if order == "z":
grid_coord = z_order_decode(code, depth=depth)
elif order == "hilbert":
grid_coord = hilbert_decode(code, depth=depth)
else:
raise NotImplementedError
return grid_coord, batch
def z_order_encode(grid_coord: torch.Tensor, depth: int = 16):
x, y, z = grid_coord[:, 0].long(), grid_coord[:, 1].long(), grid_coord[:, 2].long()
    # batch support is intentionally not handled here; batched codes are maintained in the Point class
code = z_order_encode_(x, y, z, b=None, depth=depth)
return code
def z_order_decode(code: torch.Tensor, depth):
x, y, z = z_order_decode_(code, depth=depth)
grid_coord = torch.stack([x, y, z], dim=-1) # (N, 3)
return grid_coord
def hilbert_encode(grid_coord: torch.Tensor, depth: int = 16):
return hilbert_encode_(grid_coord, num_dims=3, num_bits=depth)
def hilbert_decode(code: torch.Tensor, depth: int = 16):
| return hilbert_decode_(code, num_dims=3, num_bits=depth)
| 1 | 2023-12-06 08:32:43+00:00 | 4k |
octo-models/octo | octo/model/octo_model.py | [
{
"identifier": "TextProcessor",
"path": "octo/data/utils/text_processing.py",
"snippet": "class TextProcessor(ABC):\n \"\"\"\n Base class for text tokenization or text embedding.\n \"\"\"\n\n @abstractmethod\n def encode(self, strings: Sequence[str]):\n raise NotImplementedError"
},
{
"identifier": "ActionHead",
"path": "octo/model/components/action_heads.py",
"snippet": "class ActionHead(ABC):\n \"\"\"Action prediction modules that take in the transformer token outputs and predict actions.\n\n Each action head here does chunked action prediction: i.e. at every timestep,\n it tries to predict the next `pred_horizon` actions into the future from that timestep.\n Setting `pred_horizon=1` corresponds to the typical action prediction setup.\n \"\"\"\n\n @abstractmethod\n def loss(\n self,\n transformer_outputs: Dict[str, TokenGroup],\n actions: ArrayLike,\n pad_mask: ArrayLike,\n train: bool = True,\n ) -> Tuple[Array, Dict[str, Array]]:\n raise NotImplementedError\n\n @abstractmethod\n def predict_action(\n self,\n transformer_outputs: Dict[str, TokenGroup],\n argmax: bool = False,\n sample_shape: Tuple[int, ...] = (),\n rng: Optional[PRNGKey] = None,\n temperature: float = 1.0,\n train: bool = False,\n ) -> Array:\n \"\"\"Predict the action for the last timestep in the window. Returns shape\n (*sample_shape, batch_size, pred_horizon, action_dim).\n \"\"\"\n raise NotImplementedError"
},
{
"identifier": "OctoModule",
"path": "octo/model/octo_module.py",
"snippet": "class OctoModule(nn.Module):\n \"\"\"\n Bundles OctoTransformer with various heads (useful for keeping all parameters in one place).\n \"\"\"\n\n octo_transformer: OctoTransformer\n heads: Dict[str, nn.Module]\n\n def __call__(self, observations, tasks, pad_mask, train=True, verbose=False):\n \"\"\"Run transformer and the main method for all heads. Useful for init.\n\n Args:\n observations: A dictionary containing observation data\n where each element has shape (batch, horizon, *).\n tasks: A dictionary containing task data\n where each element has shape (batch, *).\n pad_mask: A boolean mask of shape (batch, horizon) where False indicates a padded timestep.\n train: Run in training mode\n verbose: If True, prints out the structure of the OctoTransformer (useful for debugging!)\n\n Returns:\n transformer_outputs: See OctoTransformer.__call__\n head_outputs: dictionary of outputs from heads {head_name: output}\n \"\"\"\n transformer_outputs = self.octo_transformer(\n observations, tasks, pad_mask, train=train, verbose=verbose\n )\n head_outputs = {}\n for head_name, head in self.heads.items():\n head_outputs[head_name] = head(transformer_outputs, train=train)\n return transformer_outputs, head_outputs\n\n @classmethod\n def create(\n cls,\n observation_tokenizers: Dict[str, ModuleSpec],\n task_tokenizers: Dict[str, ModuleSpec],\n heads: Dict[str, ModuleSpec],\n readouts: Dict[str, int],\n transformer_kwargs: Dict,\n token_embedding_size: int,\n max_horizon: int,\n ) -> \"OctoModule\":\n \"\"\"\n Canonical way to create an OctoModule from configuration.\n\n Args:\n observation_tokenizers: dict of {tokenizer_name: tokenizer_spec} (see tokenizers.py)\n task_tokenizers: dict of {tokenizer_name: tokenizer_spec} (see tokenizers.py)\n heads: dict of {head_name: head_spec} (see heads.py)\n readouts: dict of {readout_name (str): n_tokens_for_readout (int)}\n token_embedding_size (int): The latent dimension of the token embeddings\n max_horizon (int): Sets the size of positional embeddings, and provides an upper limit on the\n maximum horizon of the model\n transformer_kwargs: additional kwargs to forward to the transformer, which include:\n num_layers (int): number of layers\n mlp_dim (int): hidden dimension of the MLPs\n num_heads (int): Number of heads in nn.MultiHeadDotProductAttention\n dropout_rate (float): dropout rate.\n attention_dropout_rate (float): dropout rate in self attention.\n \"\"\"\n\n observation_tokenizer_defs = {\n k: ModuleSpec.instantiate(spec)()\n for k, spec in observation_tokenizers.items()\n }\n task_tokenizer_defs = {\n k: ModuleSpec.instantiate(spec)() for k, spec in task_tokenizers.items()\n }\n\n head_defs = {k: ModuleSpec.instantiate(spec)() for k, spec in heads.items()}\n\n model_def = OctoTransformer(\n observation_tokenizers=observation_tokenizer_defs,\n task_tokenizers=task_tokenizer_defs,\n readouts=readouts,\n token_embedding_size=token_embedding_size,\n max_horizon=max_horizon,\n transformer_kwargs=transformer_kwargs,\n )\n\n return cls(\n octo_transformer=model_def,\n heads=head_defs,\n )"
},
{
"identifier": "ModuleSpec",
"path": "octo/utils/spec.py",
"snippet": "class ModuleSpec(TypedDict):\n \"\"\"A JSON-serializable representation of a function or class with some default args and kwargs to pass to\n it. Useful for specifying a particular class or function in a config file, while keeping it serializable\n and overridable from the command line using ml_collections.\n\n Usage:\n\n # Preferred way to create a spec:\n >>> from octo.model.components.transformer import Transformer\n >>> spec = ModuleSpec.create(Transformer, num_layers=3)\n # Same as above using the fully qualified import string:\n >>> spec = ModuleSpec.create(\"octo.model.components.transformer:Transformer\", num_layers=3)\n\n # Usage:\n >>> ModuleSpec.instantiate(spec) == partial(Transformer, num_layers=3)\n # can pass additional kwargs at instantiation time\n >>> transformer = ModuleSpec.instantiate(spec, num_heads=8)\n\n Note: ModuleSpec is just an alias for a dictionary (that is strongly typed), not a real class. So from\n your code's perspective, it is just a dictionary.\n\n module (str): The module the callable is located in\n name (str): The name of the callable in the module\n args (tuple): The args to pass to the callable\n kwargs (dict): The kwargs to pass to the callable\n \"\"\"\n\n module: str\n name: str\n args: Tuple[Any, ...]\n kwargs: Dict[str, Any]\n\n @staticmethod\n def create(callable_or_full_name: Union[str, callable], *args, **kwargs) -> \"ModuleSpec\": # type: ignore\n \"\"\"Create a module spec from a callable or import string.\n\n Args:\n callable_or_full_name (str or object): Either the object itself or a fully qualified import string\n (e.g. \"octo.model.components.transformer:Transformer\")\n args (tuple, optional): Passed into callable upon instantiation.\n kwargs (dict, optional): Passed into callable upon instantiation.\n \"\"\"\n if isinstance(callable_or_full_name, str):\n assert callable_or_full_name.count(\":\") == 1, (\n \"If passing in a string, it must be a fully qualified import string \"\n \"(e.g. 'octo.model.components.transformer:Transformer')\"\n )\n module, name = callable_or_full_name.split(\":\")\n else:\n module, name = _infer_full_name(callable_or_full_name)\n\n return ModuleSpec(module=module, name=name, args=args, kwargs=kwargs)\n\n @staticmethod\n def instantiate(spec: \"ModuleSpec\"): # type: ignore\n if set(spec.keys()) != {\"module\", \"name\", \"args\", \"kwargs\"}:\n raise ValueError(\n f\"Expected ModuleSpec, but got {spec}. \"\n \"ModuleSpec must have keys 'module', 'name', 'args', and 'kwargs'.\"\n )\n cls = _import_from_string(spec[\"module\"], spec[\"name\"])\n return partial(cls, *spec[\"args\"], **spec[\"kwargs\"])"
},
{
"identifier": "Config",
"path": "octo/utils/typing.py",
"snippet": ""
}
] | from functools import partial
from typing import Any, Optional, Tuple
from flax import struct
from flax.training import orbax_utils
from jax.experimental import multihost_utils
from jax.typing import ArrayLike
from octo.data.utils.text_processing import TextProcessor
from octo.model.components.action_heads import ActionHead
from octo.model.octo_module import OctoModule
from octo.utils.spec import ModuleSpec
from octo.utils.typing import Config, Data, Params, PRNGKey, Sequence
import json
import logging
import flax
import jax
import jax.numpy as jnp
import numpy as np
import orbax.checkpoint
import tensorflow as tf
import huggingface_hub | 3,241 |
Usage for pretraining:
>>> model = OctoModel.from_config(
config,
example_batch,
text_processor
) # initializes params
>>> # Continue as in finetuning example
See full usage examples in train.py and finetune.py.
"""
module: OctoModule = struct.field(pytree_node=False)
text_processor: TextProcessor = struct.field(pytree_node=False)
config: Config = struct.field(pytree_node=False)
params: Params
example_batch: Data
dataset_statistics: Optional[Data]
def create_tasks(
self, goals: Optional[Data] = None, texts: Optional[Sequence[str]] = None
):
"""Creates tasks dict from goals and texts.
Args:
goals: if not None, dict of arrays with shape (batch_size, *)
texts: if not None, list of texts of length batch_size
Omit images to run the language-conditioned model, and omit texts to run the
goal-conditioned model.
"""
assert goals is not None or texts is not None
tasks = {"pad_mask_dict": {}}
if goals is not None:
tasks.update(goals)
tasks["pad_mask_dict"].update(
{k: np.ones(v.shape[:1], dtype=bool) for k, v in goals.items()}
)
else:
batch_size = len(texts)
tasks.update(
{
k: np.zeros((batch_size, *v.shape[1:]), dtype=v.dtype)
for k, v in self.example_batch["task"].items()
if k not in ("pad_mask_dict", "language_instruction")
}
)
tasks["pad_mask_dict"].update(
{
k: np.zeros(batch_size, dtype=bool)
for k in tasks.keys()
if k != "pad_mask_dict"
}
)
if texts is not None:
assert self.text_processor is not None
tasks["language_instruction"] = texts
tasks["pad_mask_dict"]["language_instruction"] = np.ones(
len(texts), dtype=bool
)
else:
batch_size = jax.tree_leaves(goals)[0].shape[0]
tasks["language_instruction"] = [""] * batch_size
tasks["pad_mask_dict"]["language_instruction"] = np.zeros(
batch_size, dtype=bool
)
if self.text_processor is not None:
tasks["language_instruction"] = self.text_processor.encode(
tasks["language_instruction"]
)
else:
del tasks["language_instruction"]
_verify_shapes(tasks, "tasks", self.example_batch["task"], starting_dim=1)
return tasks
@partial(jax.jit, static_argnames=("train",))
def run_transformer(
self, observations: Data, tasks: Data, pad_mask: ArrayLike, train: bool = False
):
"""Runs the transformer, but does shape checking on the inputs.
Args:
observations: dictionary of arrays of shape (batch_size, window_size, *shape).
Shape must be consistent with self.example_batch["observation"]
tasks: dict of tasks of shape (batch_size, *shape)
Shape must be consistent with self.example_batch["task"]
pad_mask: (batch_size, window_size) Boolean mask that is False when the timestep corresponds to padding
train: whether to run in train mode
"""
_verify_shapes(
observations,
"observations",
self.example_batch["observation"],
starting_dim=2,
)
_verify_shapes(tasks, "tasks", self.example_batch["task"], starting_dim=1)
return self.module.apply(
{"params": self.params},
observations,
tasks,
pad_mask,
train=train,
method="octo_transformer",
)
@partial(jax.jit, static_argnames=("train", "sample_shape", "argmax"))
def sample_actions(
self,
observations: Data,
tasks: Data,
pad_mask: Optional[ArrayLike] = None,
train: bool = False,
argmax: bool = False,
sample_shape: Tuple[int, ...] = (),
|
@struct.dataclass
class OctoModel:
"""Recommended way of interacting with Octo models.
Usage for inference:
>>> model = OctoModel.load_pretrained(checkpoint_dir)
>>> tasks = model.create_tasks(texts=["go to the red room"])
>>> # or tasks = model.create_tasks(goals={"image_primary": goal_images})
>>> actions = model.sample_actions(observations, tasks, rng=jax.random.PRNGKey(0))
>>> # Note: these are normalized actions (processed to mean 0 and std 1). To get the raw actions,
# un-normalize them using model.dataset_statistics
Usage for finetuning:
>>> model = OctoModel.load_pretrained(checkpoint_dir)
>>> train_state = octo.utils.train_utils.TrainState.create(
rng=jax.random.PRNGKey(0),
model=model,
tx=optax.adamw(...)
)
>>> # access params through train_state.model.params
>>> train_state, metrics = your_update_function(train_state, batch)
>>> # when it's time to save (note that this only saves the model parameters,
>>> # not the full optimizer state)
>>> train_state.model.save_pretrained(step, save_dir)
Usage for pretraining:
>>> model = OctoModel.from_config(
config,
example_batch,
text_processor
) # initializes params
>>> # Continue as in finetuning example
See full usage examples in train.py and finetune.py.
"""
module: OctoModule = struct.field(pytree_node=False)
text_processor: TextProcessor = struct.field(pytree_node=False)
config: Config = struct.field(pytree_node=False)
params: Params
example_batch: Data
dataset_statistics: Optional[Data]
def create_tasks(
self, goals: Optional[Data] = None, texts: Optional[Sequence[str]] = None
):
"""Creates tasks dict from goals and texts.
Args:
goals: if not None, dict of arrays with shape (batch_size, *)
texts: if not None, list of texts of length batch_size
Omit images to run the language-conditioned model, and omit texts to run the
goal-conditioned model.
"""
assert goals is not None or texts is not None
tasks = {"pad_mask_dict": {}}
if goals is not None:
tasks.update(goals)
tasks["pad_mask_dict"].update(
{k: np.ones(v.shape[:1], dtype=bool) for k, v in goals.items()}
)
else:
batch_size = len(texts)
tasks.update(
{
k: np.zeros((batch_size, *v.shape[1:]), dtype=v.dtype)
for k, v in self.example_batch["task"].items()
if k not in ("pad_mask_dict", "language_instruction")
}
)
tasks["pad_mask_dict"].update(
{
k: np.zeros(batch_size, dtype=bool)
for k in tasks.keys()
if k != "pad_mask_dict"
}
)
if texts is not None:
assert self.text_processor is not None
tasks["language_instruction"] = texts
tasks["pad_mask_dict"]["language_instruction"] = np.ones(
len(texts), dtype=bool
)
else:
batch_size = jax.tree_leaves(goals)[0].shape[0]
tasks["language_instruction"] = [""] * batch_size
tasks["pad_mask_dict"]["language_instruction"] = np.zeros(
batch_size, dtype=bool
)
if self.text_processor is not None:
tasks["language_instruction"] = self.text_processor.encode(
tasks["language_instruction"]
)
else:
del tasks["language_instruction"]
_verify_shapes(tasks, "tasks", self.example_batch["task"], starting_dim=1)
return tasks
@partial(jax.jit, static_argnames=("train",))
def run_transformer(
self, observations: Data, tasks: Data, pad_mask: ArrayLike, train: bool = False
):
"""Runs the transformer, but does shape checking on the inputs.
Args:
observations: dictionary of arrays of shape (batch_size, window_size, *shape).
Shape must be consistent with self.example_batch["observation"]
tasks: dict of tasks of shape (batch_size, *shape)
Shape must be consistent with self.example_batch["task"]
pad_mask: (batch_size, window_size) Boolean mask that is False when the timestep corresponds to padding
train: whether to run in train mode
"""
_verify_shapes(
observations,
"observations",
self.example_batch["observation"],
starting_dim=2,
)
_verify_shapes(tasks, "tasks", self.example_batch["task"], starting_dim=1)
return self.module.apply(
{"params": self.params},
observations,
tasks,
pad_mask,
train=train,
method="octo_transformer",
)
@partial(jax.jit, static_argnames=("train", "sample_shape", "argmax"))
def sample_actions(
self,
observations: Data,
tasks: Data,
pad_mask: Optional[ArrayLike] = None,
train: bool = False,
argmax: bool = False,
sample_shape: Tuple[int, ...] = (), | rng: Optional[PRNGKey] = None, | 4 | 2023-12-13 09:58:56+00:00 | 4k |
LinShan-Bin/OccNeRF | utils/vox.py | [
{
"identifier": "geom",
"path": "utils/geom.py",
"snippet": "def eye_4x4(B, device='cuda'):\ndef safe_inverse(a): #parallel version\ndef safe_inverse_single(a):\ndef apply_4x4(RT, xyz):\ndef get_camM_T_camXs(origin_T_camXs, ind=0):\ndef split_rt_single(rt):\ndef split_rt(rt):\ndef merge_rt(r, t):\ndef xyd2pointcloud(xyd, pix_T_cam):\ndef pixels2camera(x, y, z, fx, fy, x0, y0):\ndef camera2pixels(xyz, pix_T_cam):\ndef scale_intrinsics(K, sx, sy):\ndef split_intrinsics(K):\ndef merge_intrinsics(fx, fy, x0, y0):\ndef merge_rtlist(rlist, tlist):\ndef split_lrtlist(lrtlist):\ndef merge_lrtlist(lenlist, rtlist):\ndef apply_4x4_to_lrtlist(Y_T_X, lrtlist_X):\ndef apply_4x4_to_lrt(Y_T_X, lrt_X):\ndef get_xyzlist_from_lenlist(lenlist):\ndef get_xyzlist_from_lrtlist(lrtlist, include_clist=False):\ndef get_clist_from_lrtlist(lrtlist):\ndef wrap2pi(rad_angle):\ndef unproject(cam2world, intrinsic, depth):\ndef reproject(cam2world_src, cam2world_tar, W, H, intrinsic, depth_src, depth_tar, color_tar, mask_tar):\n def make_grid(x, y):\ndef visualize_depth(depth, mask=None, depth_min=None, depth_max=None, direct=False):\ndef mat2pose_vec(matrix: torch.Tensor):\ndef square_distance(src, dst):\n B, _, _ = list(a.shape)\n B, N, _ = list(xyz.shape)\n B, S = list(origin_T_camXs.shape)[0:2]\n B, C, D = list(r.shape)\n B2, D2 = list(t.shape)\n B, N, C = list(xyd.shape)\n B = x.shape[0]\n B = list(z.shape)[0]\n EPS = 1e-4\n K = merge_intrinsics(fx, fy, x0, y0)\n B = list(fx.shape)[0]\n K = torch.zeros(B, 4, 4, dtype=torch.float32, device=fx.device)\n K[:,0,0] = fx\n K[:,1,1] = fy\n K[:,0,2] = x0\n K[:,1,2] = y0\n K[:,2,2] = 1.0\n K[:,3,3] = 1.0\n B, N, D, E = list(rlist.shape)\n B, N, F = list(tlist.shape)\n B, N, D = list(lrtlist.shape)\n B, N, D = list(lenlist.shape)\n B2, N2, E, F = list(rtlist.shape)\n B, N, D = list(lrtlist_X.shape)\n B2, E, F = list(Y_T_X.shape)\n B, D = list(lrt_X.shape)\n B2, E, F = list(Y_T_X.shape)\n B, N, D = list(lenlist.shape)\n B, N, D = list(lrtlist.shape)\n B, N, D = list(lrtlist.shape)\n B, N, _ = src.shape\n _, M, _ = dst.shape"
},
{
"identifier": "basic",
"path": "utils/basic.py",
"snippet": "EPS = 1e-6\n B_, S = shapelist[:2]\n BS = shapelist[0]\n S = int(BS/B)\ndef strnum(x):\ndef matmul2(mat1, mat2):\ndef pack_seqdim(tensor, B):\ndef unpack_seqdim(tensor, B):\ndef reduce_masked_mean(x, mask, dim=None, keepdim=False):\ndef meshgrid3d(B, Z, Y, X, stack=False, norm=False, device='cuda'):\ndef gridcloud3d(B, Z, Y, X, norm=False, device='cuda'):\ndef normalize_grid2d(grid_y, grid_x, Y, X, clamp_extreme=True):"
},
{
"identifier": "render",
"path": "utils/render.py",
"snippet": "def get_rays(H, W, K, c2w, inverse_y, flip_x, flip_y, mode='center'):\ndef ndc_rays(H, W, focal, near, rays_o, rays_d):\ndef get_rays_of_a_view(H, W, K, c2w, ndc, inverse_y, flip_x, flip_y, mode='center'):\ndef cumprod_exclusive(p):\ndef get_ray_marching_ray(alpha):\ndef sample_ray(self, rays_o, rays_d, near, far, stepsize, xyz_min, xyz_max, voxel_size, is_train=False):\n def __init__(self, init_val, beta_min=0.0001):\n def forward(self, sdf, beta=None):\n def get_beta(self):\n def __init__(self, init_val, beta_min=0.0001):\n def forward(self, sdf, beta=None):\n def get_beta(self):\n def __init__(self, init_val):\n def forward(self, x):\n def get_variance(self):\nclass SigmoidDensity(nn.Module): # alpha * Laplace(loc=0, scale=beta).cdf(-sdf)\nclass LaplaceDensity(nn.Module): # alpha * Laplace(loc=0, scale=beta).cdf(-sdf)\nclass SingleVarianceNetwork(nn.Module):"
}
] | import pdb
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from utils import geom
from utils import basic
from utils import render | 2,826 |
def world2contracted(xyz_world, pc_range_roi=[-52, -52, 0, 52, 52, 6], ratio=0.8):
"""
Convert 3D world coordinates to a contracted coordinate system based on a specified ROI.
Args:
xyz_world (torch.Tensor): Input tensor with shape [..., 3] representing 3D world coordinates.
pc_range_roi (list, optional): List of 6 elements defining the ROI. Default is [-52, -52, 0, 52, 52, 6].
Returns:
torch.Tensor: Tensor with shape [..., 3] representing coordinates in the contracted system.
"""
xyz_min = torch.tensor(pc_range_roi[:3]).to(xyz_world).reshape([1]*len(xyz_world.shape[:-1]) + [3])
xyz_max = torch.tensor(pc_range_roi[3:]).to(xyz_world).reshape([1]*len(xyz_world.shape[:-1]) + [3])
t = ratio / (1 - ratio)
xyz_scaled = (2 * (xyz_world - xyz_min) / (xyz_max - xyz_min) - 1) * t
xyz_abs = torch.abs(xyz_scaled)
xyz_contracted = torch.where(
xyz_abs <= t,
xyz_scaled,
xyz_scaled.sign() * (1.0 + t - 1.0/(xyz_abs + 1 - t))
)
return xyz_contracted / (t + 1) # range: [-1, 1]
def contracted2world(xyz_contracted, pc_range_roi=[-80, -80, -3, 80, 80, 8], ratio=0.8):
"""
Convert 3D contracted coordinates back to the world coordinate system based on a specified ROI.
Args:
xyz_contracted (torch.Tensor): Input tensor with shape [..., 3] representing 3D contracted coordinates.
pc_range_roi (list, optional): List of 6 elements defining the ROI. Default is [-52, -52, 0, 52, 52, 6].
Returns:
torch.Tensor: Tensor with shape [..., 3] representing coordinates in the world system.
"""
xyz_min = torch.tensor(pc_range_roi[:3]).to(xyz_contracted).reshape([1]*len(xyz_contracted.shape[:-1]) + [3])
xyz_max = torch.tensor(pc_range_roi[3:]).to(xyz_contracted).reshape([1]*len(xyz_contracted.shape[:-1]) + [3])
t = ratio / (1 - ratio)
xyz_ = xyz_contracted * (t + 1)
xyz_abs = torch.abs(xyz_)
xyz_scaled = torch.where(
xyz_abs <= t,
xyz_,
xyz_.sign() * (t - 1.0 + 1.0/(t + 1 - xyz_abs))
) / t
xyz_world = 0.5 * (xyz_scaled + 1) * (xyz_max - xyz_min) + xyz_min
return xyz_world
class Vox_util(nn.Module):
def __init__(self, Z, Y, X, scene_centroid, bounds, position = 'embedding', length_pose_encoding = 3, opt = None, pad=None, assert_cube=False):
super(Vox_util, self).__init__()
self.opt = opt
self.XMIN, self.XMAX, self.YMIN, self.YMAX, self.ZMIN, self.ZMAX = bounds
self.Z, self.Y, self.X = Z, Y, X # 16, 256, 256
self.max_depth = math.sqrt(self.XMAX*self.XMAX + self.YMAX*self.YMAX + self.ZMAX*self.ZMAX)
self.pc_range_roi = [self.opt.real_size[0], self.opt.real_size[2], self.opt.real_size[4], \
self.opt.real_size[1], self.opt.real_size[3], self.opt.real_size[5]] # [x_min, y_min, z_min, x_max, y_max, z_max]
scene_centroid = scene_centroid.detach().cpu().numpy()
x_centroid, y_centroid, z_centroid = scene_centroid[0]
self.XMIN += x_centroid
self.XMAX += x_centroid
self.YMIN += y_centroid
self.YMAX += y_centroid
self.ZMIN += z_centroid
self.ZMAX += z_centroid
self.default_vox_size_X = (self.XMAX-self.XMIN)/float(X)
self.default_vox_size_Y = (self.YMAX-self.YMIN)/float(Y)
self.default_vox_size_Z = (self.ZMAX-self.ZMIN)/float(Z)
if pad:
Z_pad, Y_pad, X_pad = pad
self.ZMIN -= self.default_vox_size_Z * Z_pad
self.ZMAX += self.default_vox_size_Z * Z_pad
self.YMIN -= self.default_vox_size_Y * Y_pad
self.YMAX += self.default_vox_size_Y * Y_pad
self.XMIN -= self.default_vox_size_X * X_pad
self.XMAX += self.default_vox_size_X * X_pad
# for embedding
self.length_pose_encoding = length_pose_encoding
self.position = position
self.register_buffer('posfreq', torch.FloatTensor([(2 ** i) for i in range(length_pose_encoding)]))
if assert_cube:
# we assume cube voxels
if (not np.isclose(self.default_vox_size_X, self.default_vox_size_Y)) or (not np.isclose(self.default_vox_size_X, self.default_vox_size_Z)):
print('Z, Y, X', Z, Y, X)
print('bounds for this iter:',
'X = %.2f to %.2f' % (self.XMIN, self.XMAX),
'Y = %.2f to %.2f' % (self.YMIN, self.YMAX),
'Z = %.2f to %.2f' % (self.ZMIN, self.ZMAX),
)
print('self.default_vox_size_X', self.default_vox_size_X)
print('self.default_vox_size_Y', self.default_vox_size_Y)
print('self.default_vox_size_Z', self.default_vox_size_Z)
assert(np.isclose(self.default_vox_size_X, self.default_vox_size_Y))
assert(np.isclose(self.default_vox_size_X, self.default_vox_size_Z))
def Ref2Mem(self, xyz, Z, Y, X, assert_cube=False):
# xyz is B x N x 3, in ref coordinates
# transforms ref coordinates into mem coordinates
B, N, C = list(xyz.shape)
device = xyz.device
assert(C==3)
mem_T_ref = self.get_mem_T_ref(B, Z, Y, X, assert_cube=assert_cube, device=device)
|
def world2contracted(xyz_world, pc_range_roi=[-52, -52, 0, 52, 52, 6], ratio=0.8):
"""
Convert 3D world coordinates to a contracted coordinate system based on a specified ROI.
Args:
xyz_world (torch.Tensor): Input tensor with shape [..., 3] representing 3D world coordinates.
pc_range_roi (list, optional): List of 6 elements defining the ROI. Default is [-52, -52, 0, 52, 52, 6].
Returns:
torch.Tensor: Tensor with shape [..., 3] representing coordinates in the contracted system.
"""
xyz_min = torch.tensor(pc_range_roi[:3]).to(xyz_world).reshape([1]*len(xyz_world.shape[:-1]) + [3])
xyz_max = torch.tensor(pc_range_roi[3:]).to(xyz_world).reshape([1]*len(xyz_world.shape[:-1]) + [3])
t = ratio / (1 - ratio)
xyz_scaled = (2 * (xyz_world - xyz_min) / (xyz_max - xyz_min) - 1) * t
xyz_abs = torch.abs(xyz_scaled)
xyz_contracted = torch.where(
xyz_abs <= t,
xyz_scaled,
xyz_scaled.sign() * (1.0 + t - 1.0/(xyz_abs + 1 - t))
)
return xyz_contracted / (t + 1) # range: [-1, 1]
def contracted2world(xyz_contracted, pc_range_roi=[-80, -80, -3, 80, 80, 8], ratio=0.8):
"""
Convert 3D contracted coordinates back to the world coordinate system based on a specified ROI.
Args:
xyz_contracted (torch.Tensor): Input tensor with shape [..., 3] representing 3D contracted coordinates.
pc_range_roi (list, optional): List of 6 elements defining the ROI. Default is [-52, -52, 0, 52, 52, 6].
Returns:
torch.Tensor: Tensor with shape [..., 3] representing coordinates in the world system.
"""
xyz_min = torch.tensor(pc_range_roi[:3]).to(xyz_contracted).reshape([1]*len(xyz_contracted.shape[:-1]) + [3])
xyz_max = torch.tensor(pc_range_roi[3:]).to(xyz_contracted).reshape([1]*len(xyz_contracted.shape[:-1]) + [3])
t = ratio / (1 - ratio)
xyz_ = xyz_contracted * (t + 1)
xyz_abs = torch.abs(xyz_)
xyz_scaled = torch.where(
xyz_abs <= t,
xyz_,
xyz_.sign() * (t - 1.0 + 1.0/(t + 1 - xyz_abs))
) / t
xyz_world = 0.5 * (xyz_scaled + 1) * (xyz_max - xyz_min) + xyz_min
return xyz_world
class Vox_util(nn.Module):
def __init__(self, Z, Y, X, scene_centroid, bounds, position = 'embedding', length_pose_encoding = 3, opt = None, pad=None, assert_cube=False):
super(Vox_util, self).__init__()
self.opt = opt
self.XMIN, self.XMAX, self.YMIN, self.YMAX, self.ZMIN, self.ZMAX = bounds
self.Z, self.Y, self.X = Z, Y, X # 16, 256, 256
self.max_depth = math.sqrt(self.XMAX*self.XMAX + self.YMAX*self.YMAX + self.ZMAX*self.ZMAX)
self.pc_range_roi = [self.opt.real_size[0], self.opt.real_size[2], self.opt.real_size[4], \
self.opt.real_size[1], self.opt.real_size[3], self.opt.real_size[5]] # [x_min, y_min, z_min, x_max, y_max, z_max]
scene_centroid = scene_centroid.detach().cpu().numpy()
x_centroid, y_centroid, z_centroid = scene_centroid[0]
self.XMIN += x_centroid
self.XMAX += x_centroid
self.YMIN += y_centroid
self.YMAX += y_centroid
self.ZMIN += z_centroid
self.ZMAX += z_centroid
self.default_vox_size_X = (self.XMAX-self.XMIN)/float(X)
self.default_vox_size_Y = (self.YMAX-self.YMIN)/float(Y)
self.default_vox_size_Z = (self.ZMAX-self.ZMIN)/float(Z)
if pad:
Z_pad, Y_pad, X_pad = pad
self.ZMIN -= self.default_vox_size_Z * Z_pad
self.ZMAX += self.default_vox_size_Z * Z_pad
self.YMIN -= self.default_vox_size_Y * Y_pad
self.YMAX += self.default_vox_size_Y * Y_pad
self.XMIN -= self.default_vox_size_X * X_pad
self.XMAX += self.default_vox_size_X * X_pad
# for embedding
self.length_pose_encoding = length_pose_encoding
self.position = position
self.register_buffer('posfreq', torch.FloatTensor([(2 ** i) for i in range(length_pose_encoding)]))
if assert_cube:
# we assume cube voxels
if (not np.isclose(self.default_vox_size_X, self.default_vox_size_Y)) or (not np.isclose(self.default_vox_size_X, self.default_vox_size_Z)):
print('Z, Y, X', Z, Y, X)
print('bounds for this iter:',
'X = %.2f to %.2f' % (self.XMIN, self.XMAX),
'Y = %.2f to %.2f' % (self.YMIN, self.YMAX),
'Z = %.2f to %.2f' % (self.ZMIN, self.ZMAX),
)
print('self.default_vox_size_X', self.default_vox_size_X)
print('self.default_vox_size_Y', self.default_vox_size_Y)
print('self.default_vox_size_Z', self.default_vox_size_Z)
assert(np.isclose(self.default_vox_size_X, self.default_vox_size_Y))
assert(np.isclose(self.default_vox_size_X, self.default_vox_size_Z))
def Ref2Mem(self, xyz, Z, Y, X, assert_cube=False):
# xyz is B x N x 3, in ref coordinates
# transforms ref coordinates into mem coordinates
B, N, C = list(xyz.shape)
device = xyz.device
assert(C==3)
mem_T_ref = self.get_mem_T_ref(B, Z, Y, X, assert_cube=assert_cube, device=device) | xyz = geom.apply_4x4(mem_T_ref, xyz) | 0 | 2023-12-14 15:00:21+00:00 | 4k |
modelscope/richdreamer | threestudio/models/geometry/implicit_volume.py | [
{
"identifier": "BaseGeometry",
"path": "threestudio/models/geometry/base.py",
"snippet": "class BaseGeometry(BaseModule):\n @dataclass\n class Config(BaseModule.Config):\n pass\n\n cfg: Config\n\n @staticmethod\n def create_from(\n other: \"BaseGeometry\", cfg: Optional[Union[dict, DictConfig]] = None, **kwargs\n ) -> \"BaseGeometry\":\n raise TypeError(\n f\"Cannot create {BaseGeometry.__name__} from {other.__class__.__name__}\"\n )\n\n def export(self, *args, **kwargs) -> Dict[str, Any]:\n return {}"
},
{
"identifier": "BaseImplicitGeometry",
"path": "threestudio/models/geometry/base.py",
"snippet": "class BaseImplicitGeometry(BaseGeometry):\n @dataclass\n class Config(BaseGeometry.Config):\n radius: float = 1.0\n isosurface: bool = True\n isosurface_method: str = \"mt\"\n isosurface_resolution: int = 128\n isosurface_threshold: Union[float, str] = 0.0\n isosurface_chunk: int = 0\n isosurface_coarse_to_fine: bool = True\n isosurface_deformable_grid: bool = False\n isosurface_remove_outliers: bool = True\n isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01\n\n cfg: Config\n\n def configure(self) -> None:\n self.bbox: Float[Tensor, \"2 3\"]\n self.register_buffer(\n \"bbox\",\n torch.as_tensor(\n [\n [-self.cfg.radius, -self.cfg.radius, -self.cfg.radius],\n [self.cfg.radius, self.cfg.radius, self.cfg.radius],\n ],\n dtype=torch.float32,\n ),\n )\n self.isosurface_helper: Optional[IsosurfaceHelper] = None\n self.unbounded: bool = False\n\n def _initilize_isosurface_helper(self):\n if self.cfg.isosurface and self.isosurface_helper is None:\n if self.cfg.isosurface_method == \"mc-cpu\":\n self.isosurface_helper = MarchingCubeCPUHelper(\n self.cfg.isosurface_resolution\n ).to(self.device)\n elif self.cfg.isosurface_method == \"mt\":\n self.isosurface_helper = MarchingTetrahedraHelper(\n self.cfg.isosurface_resolution,\n f\"load/tets/{self.cfg.isosurface_resolution}_tets.npz\",\n ).to(self.device)\n else:\n raise AttributeError(\n \"Unknown isosurface method {self.cfg.isosurface_method}\"\n )\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n raise NotImplementedError\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n # return the value of the implicit field, could be density / signed distance\n # also return a deformation field if the grid vertices can be optimized\n raise NotImplementedError\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n # return the value of the implicit field, where the zero level set represents the surface\n raise NotImplementedError\n\n def _isosurface(self, bbox: Float[Tensor, \"2 3\"], fine_stage: bool = False) -> Mesh:\n def batch_func(x):\n # scale to bbox as the input vertices are in [0, 1]\n field, deformation = self.forward_field(\n scale_tensor(\n x.to(bbox.device), self.isosurface_helper.points_range, bbox\n ),\n )\n field = field.to(\n x.device\n ) # move to the same device as the input (could be CPU)\n if deformation is not None:\n deformation = deformation.to(x.device)\n return field, deformation\n\n assert self.isosurface_helper is not None\n\n field, deformation = chunk_batch(\n batch_func,\n self.cfg.isosurface_chunk,\n self.isosurface_helper.grid_vertices,\n )\n\n threshold: float\n\n if isinstance(self.cfg.isosurface_threshold, float):\n threshold = self.cfg.isosurface_threshold\n elif self.cfg.isosurface_threshold == \"auto\":\n eps = 1.0e-5\n threshold = field[field > eps].mean().item()\n threestudio.info(\n f\"Automatically determined isosurface threshold: {threshold}\"\n )\n else:\n raise TypeError(\n f\"Unknown isosurface_threshold {self.cfg.isosurface_threshold}\"\n )\n\n level = self.forward_level(field, threshold)\n mesh: Mesh = self.isosurface_helper(level, deformation=deformation)\n mesh.v_pos = scale_tensor(\n mesh.v_pos, self.isosurface_helper.points_range, bbox\n ) # scale to bbox as the grid vertices are in [0, 1]\n mesh.add_extra(\"bbox\", bbox)\n\n if 
self.cfg.isosurface_remove_outliers:\n # remove outliers components with small number of faces\n # only enabled when the mesh is not differentiable\n mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold)\n\n return mesh\n\n def isosurface(self) -> Mesh:\n if not self.cfg.isosurface:\n raise NotImplementedError(\n \"Isosurface is not enabled in the current configuration\"\n )\n self._initilize_isosurface_helper()\n if self.cfg.isosurface_coarse_to_fine:\n threestudio.debug(\"First run isosurface to get a tight bounding box ...\")\n with torch.no_grad():\n mesh_coarse = self._isosurface(self.bbox)\n vmin, vmax = mesh_coarse.v_pos.amin(dim=0), mesh_coarse.v_pos.amax(dim=0)\n vmin_ = (vmin - (vmax - vmin) * 0.1).max(self.bbox[0])\n vmax_ = (vmax + (vmax - vmin) * 0.1).min(self.bbox[1])\n threestudio.debug(\"Run isosurface again with the tight bounding box ...\")\n mesh = self._isosurface(torch.stack([vmin_, vmax_], dim=0), fine_stage=True)\n else:\n mesh = self._isosurface(self.bbox)\n return mesh"
},
{
"identifier": "contract_to_unisphere",
"path": "threestudio/models/geometry/base.py",
"snippet": "def contract_to_unisphere(\n x: Float[Tensor, \"... 3\"], bbox: Float[Tensor, \"2 3\"], unbounded: bool = False\n) -> Float[Tensor, \"... 3\"]:\n if unbounded:\n x = scale_tensor(x, bbox, (0, 1))\n x = x * 2 - 1 # aabb is at [-1, 1]\n mag = x.norm(dim=-1, keepdim=True)\n mask = mag.squeeze(-1) > 1\n x[mask] = (2 - 1 / mag[mask]) * (x[mask] / mag[mask])\n x = x / 4 + 0.5 # [-inf, inf] is at [0, 1]\n else:\n x = scale_tensor(x, bbox, (0, 1))\n return x"
},
{
"identifier": "get_encoding",
"path": "threestudio/models/networks.py",
"snippet": "def get_encoding(n_input_dims: int, config) -> nn.Module:\n # input suppose to be range [0, 1]\n encoding: nn.Module\n if config.otype == \"ProgressiveBandFrequency\":\n encoding = ProgressiveBandFrequency(n_input_dims, config_to_primitive(config))\n elif config.otype == \"ProgressiveBandHashGrid\":\n encoding = ProgressiveBandHashGrid(n_input_dims, config_to_primitive(config))\n else:\n encoding = TCNNEncoding(n_input_dims, config_to_primitive(config))\n encoding = CompositeEncoding(\n encoding,\n include_xyz=config.get(\"include_xyz\", False),\n xyz_scale=2.0,\n xyz_offset=-1.0,\n ) # FIXME: hard coded\n return encoding"
},
{
"identifier": "get_mlp",
"path": "threestudio/models/networks.py",
"snippet": "def get_mlp(n_input_dims, n_output_dims, config) -> nn.Module:\n network: nn.Module\n if config.otype == \"VanillaMLP\":\n network = VanillaMLP(n_input_dims, n_output_dims, config_to_primitive(config))\n elif config.otype == \"SphereInitVanillaMLP\":\n network = SphereInitVanillaMLP(\n n_input_dims, n_output_dims, config_to_primitive(config)\n )\n else:\n assert (\n config.get(\"sphere_init\", False) is False\n ), \"sphere_init=True only supported by VanillaMLP\"\n network = TCNNNetwork(n_input_dims, n_output_dims, config_to_primitive(config))\n return network"
},
{
"identifier": "get_activation",
"path": "threestudio/utils/ops.py",
"snippet": "def get_activation(name) -> Callable:\n if name is None:\n return lambda x: x\n name = name.lower()\n if name == \"none\":\n return lambda x: x\n elif name == \"lin2srgb\":\n return lambda x: torch.where(\n x > 0.0031308,\n torch.pow(torch.clamp(x, min=0.0031308), 1.0 / 2.4) * 1.055 - 0.055,\n 12.92 * x,\n ).clamp(0.0, 1.0)\n elif name == \"exp\":\n return lambda x: torch.exp(x)\n elif name == \"shifted_exp\":\n return lambda x: torch.exp(x - 1.0)\n elif name == \"trunc_exp\":\n return trunc_exp\n elif name == \"shifted_trunc_exp\":\n return lambda x: trunc_exp(x - 1.0)\n elif name == \"sigmoid\":\n return lambda x: torch.sigmoid(x)\n elif name == \"tanh\":\n return lambda x: torch.tanh(x)\n elif name == \"shifted_softplus\":\n return lambda x: F.softplus(x - 1.0)\n elif name == \"scale_-11_01\":\n return lambda x: x * 0.5 + 0.5\n else:\n try:\n return getattr(F, name)\n except AttributeError:\n raise ValueError(f\"Unknown activation function: {name}\")"
}
] | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import threestudio
from dataclasses import dataclass, field
from threestudio.models.geometry.base import (BaseGeometry,
BaseImplicitGeometry,
contract_to_unisphere,)
from threestudio.models.networks import get_encoding, get_mlp
from threestudio.utils.ops import get_activation
from threestudio.utils.typing import * | 2,944 |
@threestudio.register("implicit-volume")
class ImplicitVolume(BaseImplicitGeometry):
@dataclass
class Config(BaseImplicitGeometry.Config):
n_input_dims: int = 3
n_feature_dims: int = 3
density_activation: Optional[str] = "softplus"
density_bias: Union[float, str] = "blob_magic3d"
density_blob_scale: float = 10.0
density_blob_std: float = 0.5
pos_encoding_config: dict = field(
default_factory=lambda: {
"otype": "HashGrid",
"n_levels": 16,
"n_features_per_level": 2,
"log2_hashmap_size": 19,
"base_resolution": 16,
"per_level_scale": 1.447269237440378,
}
)
mlp_network_config: dict = field(
default_factory=lambda: {
"otype": "VanillaMLP",
"activation": "ReLU",
"output_activation": "none",
"n_neurons": 64,
"n_hidden_layers": 1,
}
)
normal_type: Optional[
str
] = "finite_difference" # in ['pred', 'finite_difference', 'finite_difference_laplacian']
finite_difference_normal_eps: float = 0.01
# automatically determine the threshold
isosurface_threshold: Union[float, str] = 25.0
cfg: Config
def configure(self) -> None:
super().configure()
|
@threestudio.register("implicit-volume")
class ImplicitVolume(BaseImplicitGeometry):
@dataclass
class Config(BaseImplicitGeometry.Config):
n_input_dims: int = 3
n_feature_dims: int = 3
density_activation: Optional[str] = "softplus"
density_bias: Union[float, str] = "blob_magic3d"
density_blob_scale: float = 10.0
density_blob_std: float = 0.5
pos_encoding_config: dict = field(
default_factory=lambda: {
"otype": "HashGrid",
"n_levels": 16,
"n_features_per_level": 2,
"log2_hashmap_size": 19,
"base_resolution": 16,
"per_level_scale": 1.447269237440378,
}
)
mlp_network_config: dict = field(
default_factory=lambda: {
"otype": "VanillaMLP",
"activation": "ReLU",
"output_activation": "none",
"n_neurons": 64,
"n_hidden_layers": 1,
}
)
normal_type: Optional[
str
] = "finite_difference" # in ['pred', 'finite_difference', 'finite_difference_laplacian']
finite_difference_normal_eps: float = 0.01
# automatically determine the threshold
isosurface_threshold: Union[float, str] = 25.0
cfg: Config
def configure(self) -> None:
super().configure() | self.encoding = get_encoding( | 3 | 2023-12-06 07:53:11+00:00 | 4k |
rehg-lab/RAVE | annotator/mmpkg/mmcv/runner/base_runner.py | [
{
"identifier": "is_module_wrapper",
"path": "annotator/mmpkg/mmcv/parallel/utils.py",
"snippet": "def is_module_wrapper(module):\n \"\"\"Check if a module is a module wrapper.\n\n The following 3 modules in MMCV (and their subclasses) are regarded as\n module wrappers: DataParallel, DistributedDataParallel,\n MMDistributedDataParallel (the deprecated version). You may add you own\n module wrapper by registering it to mmcv.parallel.MODULE_WRAPPERS.\n\n Args:\n module (nn.Module): The module to be checked.\n\n Returns:\n bool: True if the input module is a module wrapper.\n \"\"\"\n module_wrappers = tuple(MODULE_WRAPPERS.module_dict.values())\n return isinstance(module, module_wrappers)"
},
{
"identifier": "load_checkpoint",
"path": "annotator/mmpkg/mmcv/runner/checkpoint.py",
"snippet": "@classmethod\ndef load_checkpoint(cls, filename, map_location=None, logger=None):\n \"\"\"load checkpoint through URL scheme path.\n\n Args:\n filename (str): checkpoint file name with given prefix\n map_location (str, optional): Same as :func:`torch.load`.\n Default: None\n logger (:mod:`logging.Logger`, optional): The logger for message.\n Default: None\n\n Returns:\n dict or OrderedDict: The loaded checkpoint.\n \"\"\"\n\n checkpoint_loader = cls._get_checkpoint_loader(filename)\n class_name = checkpoint_loader.__name__\n mmcv.print_log(\n f'load checkpoint from {class_name[10:]} path: {filename}', logger)\n return checkpoint_loader(filename, map_location)"
},
{
"identifier": "get_dist_info",
"path": "annotator/mmpkg/mmcv/runner/dist_utils.py",
"snippet": "def get_dist_info():\n if dist.is_available() and dist.is_initialized():\n rank = dist.get_rank()\n world_size = dist.get_world_size()\n else:\n rank = 0\n world_size = 1\n return rank, world_size"
},
{
"identifier": "HOOKS",
"path": "annotator/mmpkg/mmcv/runner/hooks/hook.py",
"snippet": "HOOKS = Registry('hook')"
},
{
"identifier": "Hook",
"path": "annotator/mmpkg/mmcv/runner/hooks/hook.py",
"snippet": "class Hook:\n stages = ('before_run', 'before_train_epoch', 'before_train_iter',\n 'after_train_iter', 'after_train_epoch', 'before_val_epoch',\n 'before_val_iter', 'after_val_iter', 'after_val_epoch',\n 'after_run')\n\n def before_run(self, runner):\n pass\n\n def after_run(self, runner):\n pass\n\n def before_epoch(self, runner):\n pass\n\n def after_epoch(self, runner):\n pass\n\n def before_iter(self, runner):\n pass\n\n def after_iter(self, runner):\n pass\n\n def before_train_epoch(self, runner):\n self.before_epoch(runner)\n\n def before_val_epoch(self, runner):\n self.before_epoch(runner)\n\n def after_train_epoch(self, runner):\n self.after_epoch(runner)\n\n def after_val_epoch(self, runner):\n self.after_epoch(runner)\n\n def before_train_iter(self, runner):\n self.before_iter(runner)\n\n def before_val_iter(self, runner):\n self.before_iter(runner)\n\n def after_train_iter(self, runner):\n self.after_iter(runner)\n\n def after_val_iter(self, runner):\n self.after_iter(runner)\n\n def every_n_epochs(self, runner, n):\n return (runner.epoch + 1) % n == 0 if n > 0 else False\n\n def every_n_inner_iters(self, runner, n):\n return (runner.inner_iter + 1) % n == 0 if n > 0 else False\n\n def every_n_iters(self, runner, n):\n return (runner.iter + 1) % n == 0 if n > 0 else False\n\n def end_of_epoch(self, runner):\n return runner.inner_iter + 1 == len(runner.data_loader)\n\n def is_last_epoch(self, runner):\n return runner.epoch + 1 == runner._max_epochs\n\n def is_last_iter(self, runner):\n return runner.iter + 1 == runner._max_iters\n\n def get_triggered_stages(self):\n trigger_stages = set()\n for stage in Hook.stages:\n if is_method_overridden(stage, Hook, self):\n trigger_stages.add(stage)\n\n # some methods will be triggered in multi stages\n # use this dict to map method to stages.\n method_stages_map = {\n 'before_epoch': ['before_train_epoch', 'before_val_epoch'],\n 'after_epoch': ['after_train_epoch', 'after_val_epoch'],\n 'before_iter': ['before_train_iter', 'before_val_iter'],\n 'after_iter': ['after_train_iter', 'after_val_iter'],\n }\n\n for method, map_stages in method_stages_map.items():\n if is_method_overridden(method, Hook, self):\n trigger_stages.update(map_stages)\n\n return [stage for stage in Hook.stages if stage in trigger_stages]"
},
{
"identifier": "LogBuffer",
"path": "annotator/mmpkg/mmcv/runner/log_buffer.py",
"snippet": "class LogBuffer:\n\n def __init__(self):\n self.val_history = OrderedDict()\n self.n_history = OrderedDict()\n self.output = OrderedDict()\n self.ready = False\n\n def clear(self):\n self.val_history.clear()\n self.n_history.clear()\n self.clear_output()\n\n def clear_output(self):\n self.output.clear()\n self.ready = False\n\n def update(self, vars, count=1):\n assert isinstance(vars, dict)\n for key, var in vars.items():\n if key not in self.val_history:\n self.val_history[key] = []\n self.n_history[key] = []\n self.val_history[key].append(var)\n self.n_history[key].append(count)\n\n def average(self, n=0):\n \"\"\"Average latest n values or all values.\"\"\"\n assert n >= 0\n for key in self.val_history:\n values = np.array(self.val_history[key][-n:])\n nums = np.array(self.n_history[key][-n:])\n avg = np.sum(values * nums) / np.sum(nums)\n self.output[key] = avg\n self.ready = True"
},
{
"identifier": "Priority",
"path": "annotator/mmpkg/mmcv/runner/priority.py",
"snippet": "class Priority(Enum):\n \"\"\"Hook priority levels.\n\n +--------------+------------+\n | Level | Value |\n +==============+============+\n | HIGHEST | 0 |\n +--------------+------------+\n | VERY_HIGH | 10 |\n +--------------+------------+\n | HIGH | 30 |\n +--------------+------------+\n | ABOVE_NORMAL | 40 |\n +--------------+------------+\n | NORMAL | 50 |\n +--------------+------------+\n | BELOW_NORMAL | 60 |\n +--------------+------------+\n | LOW | 70 |\n +--------------+------------+\n | VERY_LOW | 90 |\n +--------------+------------+\n | LOWEST | 100 |\n +--------------+------------+\n \"\"\"\n\n HIGHEST = 0\n VERY_HIGH = 10\n HIGH = 30\n ABOVE_NORMAL = 40\n NORMAL = 50\n BELOW_NORMAL = 60\n LOW = 70\n VERY_LOW = 90\n LOWEST = 100"
},
{
"identifier": "get_priority",
"path": "annotator/mmpkg/mmcv/runner/priority.py",
"snippet": "def get_priority(priority):\n \"\"\"Get priority value.\n\n Args:\n priority (int or str or :obj:`Priority`): Priority.\n\n Returns:\n int: The priority value.\n \"\"\"\n if isinstance(priority, int):\n if priority < 0 or priority > 100:\n raise ValueError('priority must be between 0 and 100')\n return priority\n elif isinstance(priority, Priority):\n return priority.value\n elif isinstance(priority, str):\n return Priority[priority.upper()].value\n else:\n raise TypeError('priority must be an integer or Priority enum value')"
},
{
"identifier": "get_time_str",
"path": "annotator/mmpkg/mmcv/runner/utils.py",
"snippet": "def get_time_str():\n return time.strftime('%Y%m%d_%H%M%S', time.localtime())"
}
] | import copy
import logging
import os.path as osp
import warnings
import torch
import annotator.mmpkg.mmcv as mmcv
from abc import ABCMeta, abstractmethod
from torch.optim import Optimizer
from ..parallel import is_module_wrapper
from .checkpoint import load_checkpoint
from .dist_utils import get_dist_info
from .hooks import HOOKS, Hook
from .log_buffer import LogBuffer
from .priority import Priority, get_priority
from .utils import get_time_str | 2,979 | # Copyright (c) OpenMMLab. All rights reserved.
class BaseRunner(metaclass=ABCMeta):
"""The base class of Runner, a training helper for PyTorch.
All subclasses should implement the following APIs:
- ``run()``
- ``train()``
- ``val()``
- ``save_checkpoint()``
Args:
model (:obj:`torch.nn.Module`): The model to be run.
batch_processor (callable): A callable method that process a data
batch. The interface of this method should be
`batch_processor(model, data, train_mode) -> dict`
optimizer (dict or :obj:`torch.optim.Optimizer`): It can be either an
optimizer (in most cases) or a dict of optimizers (in models that
requires more than one optimizer, e.g., GAN).
work_dir (str, optional): The working directory to save checkpoints
and logs. Defaults to None.
logger (:obj:`logging.Logger`): Logger used during training.
Defaults to None. (The default value is just for backward
compatibility)
meta (dict | None): A dict records some import information such as
environment info and seed, which will be logged in logger hook.
Defaults to None.
max_epochs (int, optional): Total training epochs.
max_iters (int, optional): Total training iterations.
"""
def __init__(self,
model,
batch_processor=None,
optimizer=None,
work_dir=None,
logger=None,
meta=None,
max_iters=None,
max_epochs=None):
if batch_processor is not None:
if not callable(batch_processor):
raise TypeError('batch_processor must be callable, '
f'but got {type(batch_processor)}')
warnings.warn('batch_processor is deprecated, please implement '
'train_step() and val_step() in the model instead.')
            # raise an error if `batch_processor` is not None and
# `model.train_step()` exists.
if is_module_wrapper(model):
_model = model.module
else:
_model = model
if hasattr(_model, 'train_step') or hasattr(_model, 'val_step'):
raise RuntimeError(
'batch_processor and model.train_step()/model.val_step() '
'cannot be both available.')
else:
assert hasattr(model, 'train_step')
# check the type of `optimizer`
if isinstance(optimizer, dict):
for name, optim in optimizer.items():
if not isinstance(optim, Optimizer):
raise TypeError(
f'optimizer must be a dict of torch.optim.Optimizers, '
f'but optimizer["{name}"] is a {type(optim)}')
elif not isinstance(optimizer, Optimizer) and optimizer is not None:
raise TypeError(
f'optimizer must be a torch.optim.Optimizer object '
f'or dict or None, but got {type(optimizer)}')
# check the type of `logger`
if not isinstance(logger, logging.Logger):
raise TypeError(f'logger must be a logging.Logger object, '
f'but got {type(logger)}')
# check the type of `meta`
if meta is not None and not isinstance(meta, dict):
raise TypeError(
f'meta must be a dict or None, but got {type(meta)}')
self.model = model
self.batch_processor = batch_processor
self.optimizer = optimizer
self.logger = logger
self.meta = meta
# create work_dir
if mmcv.is_str(work_dir):
self.work_dir = osp.abspath(work_dir)
mmcv.mkdir_or_exist(self.work_dir)
elif work_dir is None:
self.work_dir = None
else:
raise TypeError('"work_dir" must be a str or None')
# get model name from the model class
if hasattr(self.model, 'module'):
self._model_name = self.model.module.__class__.__name__
else:
self._model_name = self.model.__class__.__name__
self._rank, self._world_size = get_dist_info()
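        # get_dist_info() returns the process rank and world size; in a non-distributed
        # run this is simply (0, 1).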
| # Copyright (c) OpenMMLab. All rights reserved.
class BaseRunner(metaclass=ABCMeta):
"""The base class of Runner, a training helper for PyTorch.
All subclasses should implement the following APIs:
- ``run()``
- ``train()``
- ``val()``
- ``save_checkpoint()``
Args:
model (:obj:`torch.nn.Module`): The model to be run.
        batch_processor (callable): A callable method that processes a data
batch. The interface of this method should be
`batch_processor(model, data, train_mode) -> dict`
optimizer (dict or :obj:`torch.optim.Optimizer`): It can be either an
optimizer (in most cases) or a dict of optimizers (in models that
            require more than one optimizer, e.g., GAN).
work_dir (str, optional): The working directory to save checkpoints
and logs. Defaults to None.
logger (:obj:`logging.Logger`): Logger used during training.
Defaults to None. (The default value is just for backward
compatibility)
        meta (dict | None): A dict that records some important information such as
environment info and seed, which will be logged in logger hook.
Defaults to None.
max_epochs (int, optional): Total training epochs.
max_iters (int, optional): Total training iterations.
"""
def __init__(self,
model,
batch_processor=None,
optimizer=None,
work_dir=None,
logger=None,
meta=None,
max_iters=None,
max_epochs=None):
if batch_processor is not None:
if not callable(batch_processor):
raise TypeError('batch_processor must be callable, '
f'but got {type(batch_processor)}')
warnings.warn('batch_processor is deprecated, please implement '
'train_step() and val_step() in the model instead.')
            # raise an error if `batch_processor` is not None and
# `model.train_step()` exists.
if is_module_wrapper(model):
_model = model.module
else:
_model = model
if hasattr(_model, 'train_step') or hasattr(_model, 'val_step'):
raise RuntimeError(
'batch_processor and model.train_step()/model.val_step() '
'cannot be both available.')
else:
assert hasattr(model, 'train_step')
# check the type of `optimizer`
if isinstance(optimizer, dict):
for name, optim in optimizer.items():
if not isinstance(optim, Optimizer):
raise TypeError(
f'optimizer must be a dict of torch.optim.Optimizers, '
f'but optimizer["{name}"] is a {type(optim)}')
elif not isinstance(optimizer, Optimizer) and optimizer is not None:
raise TypeError(
f'optimizer must be a torch.optim.Optimizer object '
f'or dict or None, but got {type(optimizer)}')
# check the type of `logger`
if not isinstance(logger, logging.Logger):
raise TypeError(f'logger must be a logging.Logger object, '
f'but got {type(logger)}')
# check the type of `meta`
if meta is not None and not isinstance(meta, dict):
raise TypeError(
f'meta must be a dict or None, but got {type(meta)}')
self.model = model
self.batch_processor = batch_processor
self.optimizer = optimizer
self.logger = logger
self.meta = meta
# create work_dir
if mmcv.is_str(work_dir):
self.work_dir = osp.abspath(work_dir)
mmcv.mkdir_or_exist(self.work_dir)
elif work_dir is None:
self.work_dir = None
else:
raise TypeError('"work_dir" must be a str or None')
# get model name from the model class
if hasattr(self.model, 'module'):
self._model_name = self.model.module.__class__.__name__
else:
self._model_name = self.model.__class__.__name__
self._rank, self._world_size = get_dist_info() | self.timestamp = get_time_str() | 8 | 2023-12-05 02:51:53+00:00 | 4k |
worldcoin/open-iris | tests/unit_tests/nodes/eye_properties_estimation/test_occlusion_calculator.py | [
{
"identifier": "area_of_circular_segment",
"path": "tests/unit_tests/utils.py",
"snippet": "def area_of_circular_segment(circle_radius: float, delta_height: float) -> float:\n \"\"\"Compute the area of a circular segment (see source for definition).\n\n Source: https://en.wikipedia.org/wiki/Circular_segment\n\n Args:\n circle_radius (float): Radius of the circle (R).\n delta_height (float): distance between the center of the segment and the base of the secant, i.e. apothem (d).\n\n Returns:\n float: area of the circular segment\n \"\"\"\n if delta_height > circle_radius:\n return 0.0\n area = circle_radius**2 * np.arccos(delta_height / circle_radius) - delta_height * np.sqrt(\n circle_radius**2 - delta_height**2\n )\n return area"
},
{
"identifier": "generate_arc",
"path": "tests/unit_tests/utils.py",
"snippet": "def generate_arc(\n radius: float, center_x: float, center_y: float, from_angle: float, to_angle: float, num_points: int = 1000\n) -> np.ndarray:\n angles = np.linspace(from_angle, to_angle, num_points, endpoint=not (from_angle == 0.0 and to_angle == 2 * np.pi))\n\n circle_xs = radius * np.cos(angles) + center_x\n circle_ys = radius * np.sin(angles) + center_y\n\n return np.column_stack([circle_xs, circle_ys])"
},
{
"identifier": "rotated_asymmetric_rectangle",
"path": "tests/unit_tests/utils.py",
"snippet": "def rotated_asymmetric_rectangle(\n center_x: float, center_y: float, semi_width: float, upper_height: float, lower_height: float, angle: float\n) -> np.ndarray:\n \"\"\"Compute a rotated rectangle with different upper and lower semi-heights.\n\n Args:\n center_x (float): X coordinates of the center of the asymmetric rectangle.\n center_y (float): Y coordinates of the center of the asymmetric rectangle\n semi_width (float): half of the rectangle width.\n upper_height (float): distance from the center of the rectangle to the upper edge.\n lower_height (float): distance from the center of the rectangle to the lower edge.\n angle (float): angle of rotation in radians.\n\n Returns:\n np.ndarray: rotated rectangle array.\n \"\"\"\n return np.array(\n [\n [\n center_x + semi_width * np.cos(angle) + upper_height * np.cos(np.pi / 2 + angle),\n center_y + semi_width * np.sin(angle) + upper_height * np.sin(np.pi / 2 + angle),\n ],\n [\n center_x + semi_width * np.cos(angle) - lower_height * np.cos(np.pi / 2 + angle),\n center_y + semi_width * np.sin(angle) - lower_height * np.sin(np.pi / 2 + angle),\n ],\n [\n center_x - semi_width * np.cos(angle) - lower_height * np.cos(np.pi / 2 + angle),\n center_y - semi_width * np.sin(angle) - lower_height * np.sin(np.pi / 2 + angle),\n ],\n [\n center_x - semi_width * np.cos(angle) + upper_height * np.cos(np.pi / 2 + angle),\n center_y - semi_width * np.sin(angle) + upper_height * np.sin(np.pi / 2 + angle),\n ],\n ]\n )"
}
] | import math
import numpy as np
import pytest
from iris.io.dataclasses import EyeCenters, EyeOrientation, GeometryPolygons, NoiseMask
from iris.nodes.eye_properties_estimation.occlusion_calculator import OcclusionCalculator
from tests.unit_tests.utils import area_of_circular_segment, generate_arc, rotated_asymmetric_rectangle | 2,744 | EyeOrientation(angle=eye_orientation_angle),
EyeCenters(pupil_x=0.0, pupil_y=0.0, iris_x=0.0, iris_y=0.0),
)
result = np.vstack([result_xs, result_ys]).T
assert np.mean(np.abs(np.sort(result) - np.sort(expected_result))) < 0.5
@pytest.mark.parametrize(
"quantile_angle,upper_noise_distance,lower_noise_distance,upper_eyelid_distance,lower_eyelid_distance,eye_orientation",
[
(90, 200, 200, 200, 200, 0),
(30, 200, 200, 200, 200, 0),
(90, 200, 200, 200, 200, np.pi / 4),
(30, 200, 200, 200, 200, np.pi / 4),
(90, 100, 200, 200, 200, 0),
(90, 100, 200, 200, 200, np.pi / 6),
(30, 200, 100, 200, 200, -np.pi / 6),
(90, 0, 200, 200, 200, np.pi / 6),
(90, 100, 100, 200, 200, np.pi / 6),
(90, 0, 0, 200, 200, -np.pi / 6),
(30, 0, 0, 200, 200, -np.pi / 6),
(30, 50, 200, 200, 200, -np.pi / 6),
(90, 200, 200, 100, 100, -np.pi / 6),
(30, 200, 200, 0, 100, -np.pi / 6),
(30, 200, 200, 0, 0, -np.pi / 6),
(0, 200, 200, 0, 0, -np.pi / 6),
(45, 80, 10, 60, 50, -np.pi / 2),
],
ids=[
"occ90 - no occlusion - 0 degrees",
"occ30 - no occlusion - 0 degrees",
"occ90 - no occlusion - 45 degrees",
"occ30 - no occlusion - 45 degrees",
"occ90 - upper eyelashes half closed - 0 degrees",
"occ90 - upper eyelashes half closed - 30 degrees",
"occ30 - lower eyelashes half closed - -30 degrees",
"occ90 - upper eyelashes closed - 30 degrees",
"occ90 - both eyelashes half closed",
"occ90 - eye completely closed (eyelashes)",
"occ30 - eye completely closed (eyelashes)",
"occ30 - upper eyelashes half occluded",
"occ90 - both eyelids half occluded",
"occ30 - upper eyelid occluded",
"occ30 - eye completely closed (eyelids)",
"occ0",
"occ45 - some eyelash and eyelid occlusion - 90 degrees",
],
)
def test_occlusion_calculation(
quantile_angle: float,
upper_noise_distance: int,
lower_noise_distance: int,
upper_eyelid_distance: int,
lower_eyelid_distance: int,
eye_orientation: float,
) -> None:
"""This function tests the occlusion_calculator in an exhaustive number of eye configurations.
Args:
quantile_angle (float): quantile of the occlusion, e.g. 90, in degrees.
upper_noise_distance (int): distance between the center of the iris and the upper eyelashes in pixels.
lower_noise_distance (int): distance between the center of the iris and the lower eyelashes in pixels.
upper_eyelid_distance (int): distance between the center of the iris and the upper eyelid in pixels.
lower_eyelid_distance (int): distance between the center of the iris and the lower eyelid in pixels.
eye_orientation (float): eye orientation in radians.
"""
# Extra hardcoded parameters
img_w, img_h = 1440, 1080
img_center_x, img_center_y = img_w / 2, img_h / 2
iris_radius = 200
pupil_radius = 50
# Mathematically computing the expected occlusion fraction
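    # area_of_circular_segment(R, d) (see tests/unit_tests/utils.py above) uses the standard
    # circular-segment area A = R**2 * arccos(d / R) - d * sqrt(R**2 - d**2), and returns 0 when d > R.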
theta_occlusion = 2 * (np.pi / 2 - quantile_angle * 2 * np.pi / 360)
quantile_area_removed = iris_radius**2 / 2 * (theta_occlusion - np.sin(theta_occlusion))
area_upper_eyelashes = area_of_circular_segment(iris_radius, upper_noise_distance)
area_lower_eyelashes = area_of_circular_segment(iris_radius, lower_noise_distance)
area_upper_eyelid = area_of_circular_segment(iris_radius, upper_eyelid_distance)
area_lower_eyelid = area_of_circular_segment(iris_radius, lower_eyelid_distance)
pupil_area_not_included_in_masks = (
np.pi * pupil_radius**2
- max(
area_of_circular_segment(pupil_radius, upper_noise_distance),
area_of_circular_segment(pupil_radius, upper_eyelid_distance),
)
- max(
area_of_circular_segment(pupil_radius, lower_noise_distance),
area_of_circular_segment(pupil_radius, lower_eyelid_distance),
)
)
expected_visible_fraction = (
np.pi * iris_radius**2
- pupil_area_not_included_in_masks
- max(quantile_area_removed, area_upper_eyelid, area_upper_eyelashes)
- max(quantile_area_removed, area_lower_eyelid, area_lower_eyelashes)
) / (np.pi * iris_radius**2 - np.pi * pupil_radius**2 - 2 * quantile_area_removed)
if np.isnan(expected_visible_fraction):
expected_visible_fraction = 0.0
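    # Rough reading of the ratio above (interpretation, not from the source): the numerator is
    # the iris area that stays visible after removing the pupil and, per side, the largest of the
    # eyelid / eyelash / quantile cut-offs; the denominator is the quantile-restricted iris annulus.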
# Constructing the mock data
mock_eye_orientation = EyeOrientation(angle=eye_orientation)
mock_eye_centers = EyeCenters(pupil_x=img_center_x, pupil_y=img_center_y, iris_x=img_center_x, iris_y=img_center_y)
mock_pupil = generate_arc(
radius=pupil_radius,
center_x=img_center_x,
center_y=img_center_y,
from_angle=0.0,
to_angle=2 * np.pi,
num_points=360,
)
mock_iris = generate_arc(
radius=iris_radius,
center_x=img_center_x,
center_y=img_center_y,
from_angle=0.0,
to_angle=2 * np.pi,
num_points=360,
)
|
@pytest.fixture
def algorithm() -> OcclusionCalculator:
return OcclusionCalculator(quantile_angle=30.0)
@pytest.mark.parametrize(
"quantile_angle,eye_orientation_angle,expected_result",
[
(
90.0,
np.radians(10.0),
generate_arc(1.0, 0.0, 0.0, from_angle=0.0, to_angle=2 * np.pi, num_points=360),
),
(
30.0,
np.radians(10.0),
np.concatenate(
[
generate_arc(1.0, 0.0, 0.0, from_angle=np.radians(0), to_angle=np.radians(40), num_points=40),
generate_arc(1.0, 0.0, 0.0, from_angle=np.radians(340), to_angle=np.radians(360), num_points=20),
generate_arc(1.0, 0.0, 0.0, from_angle=np.radians(160), to_angle=np.radians(220), num_points=60),
]
),
),
],
ids=["90 degrees", "30 degrees"],
)
def test_get_quantile_points(
quantile_angle: float,
eye_orientation_angle: np.float64,
expected_result: np.ndarray,
) -> None:
mock_iris_coords = generate_arc(
radius=1.0, center_x=0.0, center_y=0.0, from_angle=0.0, to_angle=2 * np.pi, num_points=360
)
algorithm = OcclusionCalculator(quantile_angle=quantile_angle)
result_xs, result_ys = algorithm._get_quantile_points(
mock_iris_coords,
EyeOrientation(angle=eye_orientation_angle),
EyeCenters(pupil_x=0.0, pupil_y=0.0, iris_x=0.0, iris_y=0.0),
)
result = np.vstack([result_xs, result_ys]).T
assert np.mean(np.abs(np.sort(result) - np.sort(expected_result))) < 0.5
@pytest.mark.parametrize(
"quantile_angle,upper_noise_distance,lower_noise_distance,upper_eyelid_distance,lower_eyelid_distance,eye_orientation",
[
(90, 200, 200, 200, 200, 0),
(30, 200, 200, 200, 200, 0),
(90, 200, 200, 200, 200, np.pi / 4),
(30, 200, 200, 200, 200, np.pi / 4),
(90, 100, 200, 200, 200, 0),
(90, 100, 200, 200, 200, np.pi / 6),
(30, 200, 100, 200, 200, -np.pi / 6),
(90, 0, 200, 200, 200, np.pi / 6),
(90, 100, 100, 200, 200, np.pi / 6),
(90, 0, 0, 200, 200, -np.pi / 6),
(30, 0, 0, 200, 200, -np.pi / 6),
(30, 50, 200, 200, 200, -np.pi / 6),
(90, 200, 200, 100, 100, -np.pi / 6),
(30, 200, 200, 0, 100, -np.pi / 6),
(30, 200, 200, 0, 0, -np.pi / 6),
(0, 200, 200, 0, 0, -np.pi / 6),
(45, 80, 10, 60, 50, -np.pi / 2),
],
ids=[
"occ90 - no occlusion - 0 degrees",
"occ30 - no occlusion - 0 degrees",
"occ90 - no occlusion - 45 degrees",
"occ30 - no occlusion - 45 degrees",
"occ90 - upper eyelashes half closed - 0 degrees",
"occ90 - upper eyelashes half closed - 30 degrees",
"occ30 - lower eyelashes half closed - -30 degrees",
"occ90 - upper eyelashes closed - 30 degrees",
"occ90 - both eyelashes half closed",
"occ90 - eye completely closed (eyelashes)",
"occ30 - eye completely closed (eyelashes)",
"occ30 - upper eyelashes half occluded",
"occ90 - both eyelids half occluded",
"occ30 - upper eyelid occluded",
"occ30 - eye completely closed (eyelids)",
"occ0",
"occ45 - some eyelash and eyelid occlusion - 90 degrees",
],
)
def test_occlusion_calculation(
quantile_angle: float,
upper_noise_distance: int,
lower_noise_distance: int,
upper_eyelid_distance: int,
lower_eyelid_distance: int,
eye_orientation: float,
) -> None:
"""This function tests the occlusion_calculator in an exhaustive number of eye configurations.
Args:
quantile_angle (float): quantile of the occlusion, e.g. 90, in degrees.
upper_noise_distance (int): distance between the center of the iris and the upper eyelashes in pixels.
lower_noise_distance (int): distance between the center of the iris and the lower eyelashes in pixels.
upper_eyelid_distance (int): distance between the center of the iris and the upper eyelid in pixels.
lower_eyelid_distance (int): distance between the center of the iris and the lower eyelid in pixels.
eye_orientation (float): eye orientation in radians.
"""
# Extra hardcoded parameters
img_w, img_h = 1440, 1080
img_center_x, img_center_y = img_w / 2, img_h / 2
iris_radius = 200
pupil_radius = 50
# Mathematically computing the expected occlusion fraction
theta_occlusion = 2 * (np.pi / 2 - quantile_angle * 2 * np.pi / 360)
quantile_area_removed = iris_radius**2 / 2 * (theta_occlusion - np.sin(theta_occlusion))
area_upper_eyelashes = area_of_circular_segment(iris_radius, upper_noise_distance)
area_lower_eyelashes = area_of_circular_segment(iris_radius, lower_noise_distance)
area_upper_eyelid = area_of_circular_segment(iris_radius, upper_eyelid_distance)
area_lower_eyelid = area_of_circular_segment(iris_radius, lower_eyelid_distance)
pupil_area_not_included_in_masks = (
np.pi * pupil_radius**2
- max(
area_of_circular_segment(pupil_radius, upper_noise_distance),
area_of_circular_segment(pupil_radius, upper_eyelid_distance),
)
- max(
area_of_circular_segment(pupil_radius, lower_noise_distance),
area_of_circular_segment(pupil_radius, lower_eyelid_distance),
)
)
expected_visible_fraction = (
np.pi * iris_radius**2
- pupil_area_not_included_in_masks
- max(quantile_area_removed, area_upper_eyelid, area_upper_eyelashes)
- max(quantile_area_removed, area_lower_eyelid, area_lower_eyelashes)
) / (np.pi * iris_radius**2 - np.pi * pupil_radius**2 - 2 * quantile_area_removed)
if np.isnan(expected_visible_fraction):
expected_visible_fraction = 0.0
# Constructing the mock data
mock_eye_orientation = EyeOrientation(angle=eye_orientation)
mock_eye_centers = EyeCenters(pupil_x=img_center_x, pupil_y=img_center_y, iris_x=img_center_x, iris_y=img_center_y)
mock_pupil = generate_arc(
radius=pupil_radius,
center_x=img_center_x,
center_y=img_center_y,
from_angle=0.0,
to_angle=2 * np.pi,
num_points=360,
)
mock_iris = generate_arc(
radius=iris_radius,
center_x=img_center_x,
center_y=img_center_y,
from_angle=0.0,
to_angle=2 * np.pi,
num_points=360,
) | mock_eyeball = rotated_asymmetric_rectangle( | 2 | 2023-12-09 22:43:09+00:00 | 4k |
DiffusionLight/DiffusionLight | relighting/pipeline_inpaintonly.py | [
{
"identifier": "custom_prepare_latents",
"path": "relighting/pipeline_utils.py",
"snippet": "def custom_prepare_latents(\n self,\n batch_size,\n num_channels_latents,\n height,\n width,\n dtype,\n device,\n generator,\n latents=None,\n image=None,\n timestep=None,\n is_strength_max=True,\n use_noise_moving=True,\n return_noise=False,\n return_image_latents=False,\n newx=0,\n newy=0,\n newr=256,\n current_seed=None,\n ):\n shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)\n if isinstance(generator, list) and len(generator) != batch_size:\n raise ValueError(\n f\"You have passed a list of generators of length {len(generator)}, but requested an effective batch\"\n f\" size of {batch_size}. Make sure the batch size matches the length of the generators.\"\n )\n\n if (image is None or timestep is None) and not is_strength_max:\n raise ValueError(\n \"Since strength < 1. initial latents are to be initialised as a combination of Image + Noise.\"\n \"However, either the image or the noise timestep has not been provided.\"\n )\n\n if image.shape[1] == 4:\n image_latents = image.to(device=device, dtype=dtype)\n elif return_image_latents or (latents is None and not is_strength_max):\n image = image.to(device=device, dtype=dtype)\n image_latents = self._encode_vae_image(image=image, generator=generator)\n\n if latents is None and use_noise_moving:\n # random big noise map\n noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)\n noise = expand_noise(noise, shape, seed=current_seed, device=device, dtype=dtype)\n \n # ensure noise is the same regardless of inpainting location (top-left corner notation)\n newys = [newy] if not isinstance(newy, list) else newy\n newxs = [newx] if not isinstance(newx, list) else newx\n big_noise = noise.clone()\n prev_noise = None\n for newy, newx in zip(newys, newxs):\n # find patch location within big noise map\n sy = big_noise.shape[2] // 4 + ((512 - 128) - newy) // self.vae_scale_factor\n sx = big_noise.shape[3] // 4 + ((512 - 128) - newx) // self.vae_scale_factor\n\n if prev_noise is not None:\n new_noise = big_noise[:, :, sy:sy+shape[2], sx:sx+shape[3]]\n\n ball_mask = torch.zeros(shape, device=device, dtype=bool)\n top_left = (newy // self.vae_scale_factor, newx // self.vae_scale_factor)\n bottom_right = (top_left[0] + newr // self.vae_scale_factor, top_left[1] + newr // self.vae_scale_factor) # fixed ball size r = 256\n ball_mask[:, :, top_left[0]:bottom_right[0], top_left[1]:bottom_right[1]] = True\n\n noise = prev_noise.clone()\n noise[ball_mask] = new_noise[ball_mask]\n else:\n noise = big_noise[:, :, sy:sy+shape[2], sx:sx+shape[3]]\n\n prev_noise = noise.clone()\n\n # if strength is 1. then initialise the latents to noise, else initial to image + noise\n latents = noise if is_strength_max else self.scheduler.add_noise(image_latents, noise, timestep)\n # if pure noise then scale the initial latents by the Scheduler's init sigma\n latents = latents * self.scheduler.init_noise_sigma if is_strength_max else latents\n elif latents is None:\n noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)\n latents = image_latents.to(device)\n else:\n noise = latents.to(device)\n latents = noise * self.scheduler.init_noise_sigma\n\n outputs = (latents,)\n\n if return_noise:\n outputs += (noise,)\n\n if return_image_latents:\n outputs += (image_latents,)\n\n return outputs"
},
{
"identifier": "custom_prepare_mask_latents",
"path": "relighting/pipeline_utils.py",
"snippet": "def custom_prepare_mask_latents(\n self, mask, masked_image, batch_size, height, width, dtype, device, generator, do_classifier_free_guidance\n):\n # resize the mask to latents shape as we concatenate the mask to the latents\n # we do that before converting to dtype to avoid breaking in case we're using cpu_offload\n # and half precision\n mask = torch.nn.functional.interpolate(\n mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor),\n mode=\"bilinear\", align_corners=False #PURE: We add this to avoid sharp border of the ball\n )\n mask = mask.to(device=device, dtype=dtype)\n\n # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method\n if mask.shape[0] < batch_size:\n if not batch_size % mask.shape[0] == 0:\n raise ValueError(\n \"The passed mask and the required batch size don't match. Masks are supposed to be duplicated to\"\n f\" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number\"\n \" of masks that you pass is divisible by the total requested batch size.\"\n )\n mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1)\n\n mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask\n\n masked_image_latents = None\n if masked_image is not None:\n masked_image = masked_image.to(device=device, dtype=dtype)\n masked_image_latents = self._encode_vae_image(masked_image, generator=generator)\n if masked_image_latents.shape[0] < batch_size:\n if not batch_size % masked_image_latents.shape[0] == 0:\n raise ValueError(\n \"The passed images and the required batch size don't match. Images are supposed to be duplicated\"\n f\" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed.\"\n \" Make sure the number of images that you pass is divisible by the total requested batch size.\"\n )\n masked_image_latents = masked_image_latents.repeat(\n batch_size // masked_image_latents.shape[0], 1, 1, 1\n )\n\n masked_image_latents = (\n torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents\n )\n\n # aligning device to prevent device errors when concating it with the latent model input\n masked_image_latents = masked_image_latents.to(device=device, dtype=dtype)\n\n return mask, masked_image_latents"
},
{
"identifier": "rescale_noise_cfg",
"path": "relighting/pipeline_utils.py",
"snippet": "def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):\n \"\"\"\n Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and\n Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4\n \"\"\"\n std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)\n std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)\n # rescale the results from guidance (fixes overexposure)\n noise_pred_rescaled = noise_cfg * (std_text / std_cfg)\n # mix with the original results from guidance by factor guidance_rescale to avoid \"plain looking\" images\n noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg\n return noise_cfg"
}
] | import torch
from typing import List, Union, Dict, Any, Callable, Optional, Tuple
from diffusers.image_processor import PipelineImageInput
from diffusers import StableDiffusionInpaintPipeline, StableDiffusionXLInpaintPipeline
from diffusers.models import AsymmetricAutoencoderKL
from diffusers.pipelines.stable_diffusion.pipeline_output import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput
from relighting.pipeline_utils import custom_prepare_latents, custom_prepare_mask_latents, rescale_noise_cfg | 2,392 |
class CustomStableDiffusionInpaintPipeline(StableDiffusionInpaintPipeline):
@torch.no_grad()
def __call__(
self,
prompt: Union[str, List[str]] = None,
image: PipelineImageInput = None,
mask_image: PipelineImageInput = None,
masked_image_latents: torch.FloatTensor = None,
height: Optional[int] = None,
width: Optional[int] = None,
strength: float = 1.0,
num_inference_steps: int = 50,
guidance_scale: float = 7.5,
negative_prompt: Optional[Union[str, List[str]]] = None,
num_images_per_prompt: Optional[int] = 1,
eta: float = 0.0,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
latents: Optional[torch.FloatTensor] = None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
output_type: Optional[str] = "pil",
return_dict: bool = True,
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
callback_steps: int = 1,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
newx: int = 0,
newy: int = 0,
newr: int = 256,
current_seed=0,
use_noise_moving=True,
):
# OVERWRITE METHODS
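        # `function.__get__(self, cls)` binds a plain function to this instance via the
        # descriptor protocol, so these assignments shadow the parent pipeline's
        # prepare_mask_latents / prepare_latents methods with the custom helpers.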
self.prepare_mask_latents = custom_prepare_mask_latents.__get__(self, CustomStableDiffusionInpaintPipeline)
|
class CustomStableDiffusionInpaintPipeline(StableDiffusionInpaintPipeline):
@torch.no_grad()
def __call__(
self,
prompt: Union[str, List[str]] = None,
image: PipelineImageInput = None,
mask_image: PipelineImageInput = None,
masked_image_latents: torch.FloatTensor = None,
height: Optional[int] = None,
width: Optional[int] = None,
strength: float = 1.0,
num_inference_steps: int = 50,
guidance_scale: float = 7.5,
negative_prompt: Optional[Union[str, List[str]]] = None,
num_images_per_prompt: Optional[int] = 1,
eta: float = 0.0,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
latents: Optional[torch.FloatTensor] = None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
output_type: Optional[str] = "pil",
return_dict: bool = True,
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
callback_steps: int = 1,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
newx: int = 0,
newy: int = 0,
newr: int = 256,
current_seed=0,
use_noise_moving=True,
):
# OVERWRITE METHODS
self.prepare_mask_latents = custom_prepare_mask_latents.__get__(self, CustomStableDiffusionInpaintPipeline) | self.prepare_latents = custom_prepare_latents.__get__(self, CustomStableDiffusionInpaintPipeline) | 0 | 2023-12-07 14:03:31+00:00 | 4k |
laixintao/mactop | mactop/panels/m1_gpu.py | [
{
"identifier": "DynamicText",
"path": "mactop/widgets/dynamic_text.py",
"snippet": "class DynamicText(Static):\n value = reactive(None)\n\n DEFAULT_CSS = \"\"\"\n DynamicText {\n layout: horizontal;\n }\n \n \"\"\"\n\n def __init__(\n self,\n prefix_label,\n update_fn,\n value_render_fn,\n update_interval,\n warning_threshold=None,\n error_threshold=None,\n *args,\n **kwargs,\n ) -> None:\n super().__init__(*args, **kwargs)\n\n self.update_fn = update_fn\n self.value_render_fn = value_render_fn\n self.update_interval = update_interval\n self.prefix_label = prefix_label\n\n self.warning_threshold = warning_threshold\n self.error_threshold = error_threshold\n\n def on_mount(self) -> None:\n self.set_interval(self.update_interval, self.update_value)\n\n def update_value(self) -> None:\n result = self.update_fn()\n if result is not None:\n self.value = result\n\n def watch_value(self, value) -> None:\n if value is not None:\n try:\n number = self.query_one(\"Static.value\")\n except textual.css.query.NoMatches:\n logger.warning(\n \"Can not found DOM element in Static.value in DynamicText\"\n )\n return\n rendered_str = self.value_render_fn(value)\n number.update(rendered_str)\n\n def compose(self) -> ComposeResult:\n yield Label(f\"{self.prefix_label}\", classes=\"label\")\n yield Static(\"loading\", classes=\"value\")"
},
{
"identifier": "metrics",
"path": "mactop/metrics_store.py",
"snippet": "class ProcessorType(enum.Enum):\nclass Smc:\nclass PowerMetricsBattery:\nclass Netowrk:\nclass CPU:\nclass M1GPU:\nclass CPUCore:\nclass ProcessorPackage:\nclass M1CPUCluster:\nclass M1ProcessorPackage:\nclass ProcessorIntel:\nclass Disk:\nclass PowerMetrics:\nclass AdapterDetails:\nclass AppleSmartBattery:\nclass IORegMetrics:\nclass CPUTimesPercent:\nclass SwapMemory:\nclass VirtualMemory:\nclass LoadAvg:\nclass PsutilMetrics:\nclass Metrics:\n INTEL = \"intel\"\n M1 = \"M1\"\n def get_core(self, core_index):\n def get_psutilmetrics(self):\n def set_psutilmetrics(self, p: PsutilMetrics):\n def get_powermetrics(self):\n def set_powermetrics(self, metrics):\n def get_ioregmetrics(self):\n def set_ioregmetrics(self, metrics):"
},
{
"identifier": "BaseStatic",
"path": "mactop/panels/_base.py",
"snippet": "class BaseStatic(Static):\n def __init__(self, refresh_interval, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.refresh_interval = float(refresh_interval)"
},
{
"identifier": "const",
"path": "mactop/const.py",
"snippet": "COLOR_USER=\"green\"\nCOLOR_NICE=\"blue\"\nCOLOR_SYSTEM=\"#006400\"\nCOLOR_IDLE=\"#2F4F4F\"\nCOLOR_C_STATE=\"#008000\"\nCOLOR_P_STATE=\"#FF8C00\""
},
{
"identifier": "LabeledSparkline",
"path": "mactop/widgets/labeled_sparkline.py",
"snippet": "class LabeledSparkline(Static):\n value = reactive(None)\n\n DEFAULT_CSS = \"\"\"\n LabeledSparkline {\n layout: horizontal;\n height: 1;\n }\n LabeledSparkline > Sparkline {\n width: 1fr;\n }\n\n LabeledSparkline .sparkline--max-color {\n color: $warning;\n }\n LabeledSparkline .sparkline--min-color {\n color: $warning 50%;\n }\n\n ReversedSparkline {\n text-style: reverse;\n }\n \"\"\"\n\n def __init__(\n self,\n prefix_label,\n update_fn: Callable[[], List[float]],\n value_render_fn,\n update_interval=1.0,\n sparkline_reverse=False,\n *args,\n **kwargs,\n ) -> None:\n super().__init__(*args, **kwargs)\n\n self.update_fn = update_fn\n self.value_render_fn = value_render_fn\n self.update_interval = update_interval\n self.prefix_label = prefix_label\n self.sparkline_reverse = sparkline_reverse\n\n def on_mount(self) -> None:\n self.set_interval(self.update_interval, self.update_value)\n\n def update_value(self) -> None:\n result = self.update_fn()\n if result is not None:\n self.value = copy.copy(result)\n\n def watch_value(self, value) -> None:\n if not value:\n return\n last = value[-1]\n\n try:\n number_widget = self.query_one(\"Static.sparklineValue\")\n number_str = self.value_render_fn(last)\n number_widget.styles.width = len(number_str)\n number_widget.update(number_str)\n\n sparkline = self.query_one(\".sparkline-chart\")\n except textual.css.query.NoMatches:\n logger.warning(\n \"Can not found DOM element in Sparkline\"\n )\n return\n sparkline.data = value\n\n def compose(self) -> ComposeResult:\n yield Label(f\"{self.prefix_label} \", classes=\"sparklineLabel\")\n if self.sparkline_reverse:\n yield ReversedSparkline(self.value, classes=\"sparkline-chart\")\n else:\n Sparkline.DEFAULT_CSS = \"\"\n yield Sparkline(self.value, classes=\"sparkline-chart\")\n yield Static(\" \", classes=\"sparklineValue\")"
},
{
"identifier": "LabeledColorBar",
"path": "mactop/widgets/labeled_colorbar.py",
"snippet": "class LabeledColorBar(Static):\n percentages = reactive(None)\n\n DEFAULT_CSS = \"\"\"\n LabeledColorBar {\n layout: horizontal;\n }\n LabeledColorBar > ColorBar {\n width: 1fr;\n }\n \"\"\"\n\n def __init__(\n self,\n prefix_label,\n color_choices,\n update_interval,\n percentages_update_fn: Callable[[], List[float]],\n value_render_fn: Callable[[List[float]], str],\n *args,\n **kwargs,\n ) -> None:\n super().__init__(*args, **kwargs)\n\n self.percentages_update_fn = percentages_update_fn\n self.color_choices = color_choices\n self.update_interval = update_interval\n self.prefix_label = prefix_label\n self.value_render_fn = value_render_fn\n\n def on_mount(self) -> None:\n self.set_interval(self.update_interval, self.update_percentages)\n\n def update_percentages(self) -> None:\n result = self.percentages_update_fn()\n if result is not None:\n self.percentages = copy.copy(result)\n\n def watch_percentages(self, percentages) -> None:\n if not percentages:\n return\n\n try:\n number_widget = self.query_one(\".colorbar-value\")\n except textual.css.query.NoMatches:\n logger.warning(\n \"Can not found DOM element in .colorbar-value in LabeledColorBar\"\n )\n return\n number_str = self.value_render_fn(percentages)\n number_widget.styles.width = len(number_str)\n number_widget.update(number_str)\n\n colorbar = self.query_one(\"ColorBar\")\n colorbar.percentages = percentages\n\n def compose(self) -> ComposeResult:\n yield Label(f\"{self.prefix_label}\", classes=\"colorbar-label\")\n yield ColorBar(self.color_choices)\n yield Static(\" \", classes=\"colorbar-value\")"
}
] | from textual.app import ComposeResult
from mactop.widgets import DynamicText
from mactop.metrics_store import metrics
from ._base import BaseStatic
from mactop import const
from mactop.widgets import LabeledColorBar, LabeledSparkline | 2,134 |
def refresh_callback(*_):
gpu_freq = metrics.get_powermetrics().m1_gpu.freq_hz
return gpu_freq
class GPUFreqText(BaseStatic):
BORDER_TITLE = "GPU Freq"
def __init__(self, label="GPU Freq: ", *args, **kwargs):
super().__init__(*args, **kwargs)
self.label = label
def compose(self) -> ComposeResult:
yield DynamicText(
prefix_label=self.label,
update_fn=refresh_callback,
value_render_fn=lambda x: f"{x:.2f}MHz",
classes="gpu-freq-text",
update_interval=self.refresh_interval,
)
def get_gpu_usage():
idle = metrics.get_powermetrics().m1_gpu.idle_ratio
if idle is None:
return [0, 1]
busy = 1 - idle
return [busy, idle]
def display_gpu_ration(x):
if not x:
return "NA%"
return f"{x[0]*100:.2f}%"
class GPUUsageBarPanel(BaseStatic):
def __init__(
self,
color_busy=const.COLOR_USER,
color_idle=const.COLOR_IDLE,
label="GPU: ",
*args,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self.color_busy = color_busy
self.color_idle = color_idle
self.label = label
def compose(self) -> ComposeResult:
|
def refresh_callback(*_):
gpu_freq = metrics.get_powermetrics().m1_gpu.freq_hz
return gpu_freq
class GPUFreqText(BaseStatic):
BORDER_TITLE = "GPU Freq"
def __init__(self, label="GPU Freq: ", *args, **kwargs):
super().__init__(*args, **kwargs)
self.label = label
def compose(self) -> ComposeResult:
yield DynamicText(
prefix_label=self.label,
update_fn=refresh_callback,
value_render_fn=lambda x: f"{x:.2f}MHz",
classes="gpu-freq-text",
update_interval=self.refresh_interval,
)
def get_gpu_usage():
idle = metrics.get_powermetrics().m1_gpu.idle_ratio
if idle is None:
return [0, 1]
busy = 1 - idle
return [busy, idle]
def display_gpu_ration(x):
if not x:
return "NA%"
return f"{x[0]*100:.2f}%"
class GPUUsageBarPanel(BaseStatic):
def __init__(
self,
color_busy=const.COLOR_USER,
color_idle=const.COLOR_IDLE,
label="GPU: ",
*args,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self.color_busy = color_busy
self.color_idle = color_idle
self.label = label
def compose(self) -> ComposeResult: | yield LabeledColorBar( | 5 | 2023-12-05 09:12:42+00:00 | 4k |
eliphatfs/zerorf | lib/core/mesh_gui.py | [
{
"identifier": "load_pose",
"path": "lib/datasets/shapenet_srn.py",
"snippet": "def load_pose(path):\n pose = np.loadtxt(path, dtype=np.float32, delimiter=' ').reshape(4, 4)\n return torch.from_numpy(pose)"
},
{
"identifier": "load_intrinsics",
"path": "lib/datasets/shapenet_srn.py",
"snippet": "def load_intrinsics(path):\n with open(path, 'r') as file:\n f, cx, cy, _ = map(float, file.readline().split())\n grid_barycenter = list(map(float, file.readline().split()))\n scale = float(file.readline())\n height, width = map(int, file.readline().split())\n fx = fy = f\n return fx, fy, cx, cy, height, width"
}
] | import copy
import numpy as np
import torch
import torch.nn.functional as F
import dearpygui.dearpygui as dpg
from scipy.spatial.transform import Rotation as R
from mmgen.models.builder import build_module
from mmgen.apis import set_random_seed # isort:skip # noqa
from lib.datasets.shapenet_srn import load_pose, load_intrinsics | 3,331 | ### register texture
with dpg.texture_registry(show=False):
dpg.add_raw_texture(self.W, self.H, self.render_buffer, format=dpg.mvFormat_Float_rgb, tag='_texture')
### register window
# the rendered image, as the primary window
with dpg.window(tag='_primary_window', width=self.W, height=self.H):
# add the texture
dpg.add_image('_texture')
dpg.set_primary_window('_primary_window', True)
def update_camera_status():
if self.debug:
dpg.set_value('_log_pose', self.active_cam.pose2str())
dpg.set_value('fov', self.active_cam.fovy)
dpg.set_value('radius', self.active_cam.radius)
euler = self.active_cam.euler
dpg.set_value('roll', euler[0])
dpg.set_value('elevation', euler[1])
dpg.set_value('azimuth', euler[2])
center = self.active_cam.center
dpg.set_value('center_x', center[0])
dpg.set_value('center_y', center[1])
dpg.set_value('center_z', center[2])
# control window
with dpg.window(label='Control', tag='_control_window', width=380, height=self.H, pos=[self.W, 0]):
# button theme
with dpg.theme() as theme_button:
with dpg.theme_component(dpg.mvButton):
dpg.add_theme_color(dpg.mvThemeCol_Button, (23, 3, 18))
dpg.add_theme_color(dpg.mvThemeCol_ButtonHovered, (51, 3, 47))
dpg.add_theme_color(dpg.mvThemeCol_ButtonActive, (83, 18, 83))
dpg.add_theme_style(dpg.mvStyleVar_FrameRounding, 5)
dpg.add_theme_style(dpg.mvStyleVar_FramePadding, 3, 3)
# time
with dpg.group(horizontal=True):
dpg.add_text('Infer time: ')
dpg.add_text('no data', tag='_log_infer_time')
with dpg.collapsing_header(label='Camera options', default_open=True):
def callback_set_cam(sender, app_data):
self.active_cam = getattr(self, app_data + '_cam')
update_camera_status()
self.need_update = True
def callback_reset_camera(sender, app_data):
self.active_cam.fovy = self.default_cam_fovy
self.active_cam.radius = self.default_cam_radius
self.active_cam.set_euler(self.default_cam_euler)
self.active_cam.center = np.array([0, 0, 0], dtype=np.float32)
update_camera_status()
self.need_update = True
with dpg.group(horizontal=True):
dpg.add_combo(
['default'], label='camera', width=150,
default_value=self.active_cam.name, callback=callback_set_cam, tag='cam_combo')
dpg.add_button(label='Reset camera', callback=callback_reset_camera)
def callback_set_fovy(sender, app_data):
self.active_cam.fovy = app_data
update_camera_status()
self.need_update = True
def callback_set_cam_r(sender, app_data):
self.active_cam.radius = app_data
update_camera_status()
self.need_update = True
def callback_set_euler(sender, app_data, axis):
euler = self.active_cam.euler
euler[axis] = app_data
self.active_cam.set_euler(euler)
update_camera_status()
self.need_update = True
def callback_set_center(sender, app_data, axis):
self.active_cam.center[axis] = app_data
update_camera_status()
self.need_update = True
dpg.add_slider_float(
label='FoV (vertical)', min_value=1, max_value=120, clamped=True, format='%.1f deg',
default_value=self.active_cam.fovy, callback=callback_set_fovy, tag='fov')
dpg.add_slider_float(
label='radius', min_value=1.0, max_value=5.0, format='%.2f',
default_value=self.active_cam.radius, callback=callback_set_cam_r, tag='radius')
dpg.add_slider_float(
label='azimuth', min_value=-180, max_value=180, clamped=True, format='%.1f deg',
default_value=self.active_cam.euler[2],
callback=lambda x, y: callback_set_euler(x, y, 2), tag='azimuth')
dpg.add_slider_float(
label='elevation', min_value=-89, max_value=89, clamped=True, format='%.1f deg',
default_value=self.active_cam.euler[1],
callback=lambda x, y: callback_set_euler(x, y, 1), tag='elevation')
dpg.add_slider_float(
label='roll', min_value=-180, max_value=180, clamped=True, format='%.1f deg',
default_value=self.active_cam.euler[0],
callback=lambda x, y: callback_set_euler(x, y, 0), tag='roll')
dpg.add_text('Orbit center:')
with dpg.group(horizontal=True):
dpg.add_input_float(
width=110, format='x: %.2f', tag='center_x',
default_value=self.active_cam.center[0], callback=lambda x, y: callback_set_center(x, y, 0))
dpg.add_input_float(
width=110, format='y: %.2f', tag='center_y',
default_value=self.active_cam.center[1], callback=lambda x, y: callback_set_center(x, y, 1))
dpg.add_input_float(
width=110, format='z: %.2f', tag='center_z',
default_value=self.active_cam.center[2], callback=lambda x, y: callback_set_center(x, y, 2))
def callback_load_intrinsic(sender, app_data):
| # modified from torch-ngp
class OrbitCamera:
def __init__(self, name, W, H, r=2., fovy=60., euler=[0, 0, 0]):
self.name = name
self.W = W
self.H = H
self.radius = r # camera distance from center
self.fovy = fovy # in degree
self.center = np.array([0, 0, 0], dtype=np.float32) # look at this point
self.default_rot = R.from_quat([0.5, -0.5, 0.5, -0.5])
self.rot = copy.deepcopy(self.default_rot)
self.up = np.array([0, 0, 1], dtype=np.float32) # need to be normalized!
self.set_euler(euler)
# pose
@property
def pose(self):
# first move camera to radius
res = np.eye(4, dtype=np.float32)
res[2, 3] -= self.radius
# rotate
rot = np.eye(4, dtype=np.float32)
rot[:3, :3] = self.rot.as_matrix()
res = rot @ res
# translate
res[:3, 3] -= self.center
return res
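        # Net effect: pose = translate(-center) @ rot @ translate(0, 0, -radius), placing the
        # camera `radius` away from the orbit center with orientation `rot`.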
def set_pose(self, pose):
self.rot = R.from_matrix(pose[:3, :3])
self.center = -pose[:3, 3] - self.rot.as_matrix()[:3, 2] * self.radius
@property
def intrinsics(self):
focal = self.H / (2 * np.tan(np.radians(self.fovy) / 2))
return np.array([focal, focal, self.W / 2, self.H / 2])
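    # Pinhole intrinsics as [fx, fy, cx, cy]: focal length from the vertical field of view,
    # f = H / (2 * tan(fovy / 2)), with the principal point at the image center.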
@property
def euler(self):
return (self.rot * self.default_rot.inv()).as_euler('xyz', degrees=True)
def set_euler(self, euler):
self.rot = R.from_euler('xyz', euler, degrees=True) * self.default_rot
def orbit(self, dx, dy):
# rotate along camera up/side axis!
        side = self.rot.as_matrix()[:3, 0]  # camera side (x) axis; already normalized
rotvec_x = self.up * np.radians(-0.1 * dx)
rotvec_y = side * np.radians(-0.1 * dy)
self.rot = R.from_rotvec(rotvec_x) * R.from_rotvec(rotvec_y) * self.rot
def scale(self, delta):
self.radius *= 1.1 ** (-delta)
def pan(self, dx, dy, dz=0):
# pan in camera coordinate system (careful on the sensitivity!)
self.center += 0.0005 * self.rot.as_matrix()[:3, :3] @ np.array([dx, dy, dz])
def pose2str(self):
with np.printoptions(precision=3, suppress=True):
return str(self.pose)
class MeshGUI:
default_cam_fovy = 52.0
default_cam_radius = 2.6
default_cam_euler = [0.0, 23.0, -47.4]
def __init__(self, mesh, renderer, W=512, H=512, debug=True):
self.W = W
self.H = H
self.default_cam = OrbitCamera(
'default', W, H, r=self.default_cam_radius, fovy=self.default_cam_fovy, euler=self.default_cam_euler)
self.active_cam = self.default_cam
self.debug = debug
self.bg_color = torch.ones(3, dtype=torch.float32) # default white bg
self.step = 0 # training step
self.mesh = mesh
self.renderer = renderer
self.video_sec = 4
self.video_fps = 30
self.video_res = 256
self.render_buffer = np.zeros((self.H, self.W, 3), dtype=np.float32)
self.need_update = True # camera moved, should reset accumulation
        self.mode = 'image' # choose from ['image', 'depth', 'alpha', 'normal']
self.image_enhancer = build_module(dict(
type='SRVGGNetCompact',
# num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=4, act_type='prelu',
num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=32, upscale=4, act_type='prelu',
# pretrained='https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-animevideov3.pth'
pretrained='https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth'
)).half().eval().requires_grad_(False)
if torch.cuda.is_available():
self.image_enhancer.cuda()
self.use_image_enhancer = False
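        # When use_image_enhancer is turned on, test_gui() renders at half resolution and this
        # 4x Real-ESRGAN compact model upsamples the result before resizing back to (H, W).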
self.extrinsic_ndc_scale = 2.0 # default shapenet dataset value
dpg.create_context()
if self.debug:
dpg.configure_app(manual_callback_management=True)
self.register_dpg()
self.test_step()
def __del__(self):
dpg.destroy_context()
def prepare_buffer(self, outputs):
if self.mode == 'image':
return outputs['image']
elif self.mode == 'depth':
return np.expand_dims(outputs['depth'], -1).repeat(3, -1)
elif self.mode == 'alpha':
return np.expand_dims(outputs['alpha'], -1).repeat(3, -1)
elif self.mode == 'normal':
return outputs['normal']
else:
raise ValueError(f'Unknown mode {self.mode}')
def test_gui(self, pose, intrinsics, W, H):
with torch.no_grad():
if self.use_image_enhancer and self.mode == 'image':
rH, rW = H // 2, W // 2
intrinsics = intrinsics / 2
else:
rH, rW = H, W
results = self.renderer(
[self.mesh],
torch.tensor(pose, dtype=torch.float32, device=self.mesh.device)[None, None],
torch.tensor(intrinsics, dtype=torch.float32, device=self.mesh.device)[None, None],
rH, rW)
image = results['rgba'][..., :3] + self.bg_color.to(results['rgba']) * (1 - results['rgba'][..., 3:])
if self.use_image_enhancer and self.mode == 'image':
image = self.image_enhancer(image[0].half().permute(0, 3, 1, 2))
image = F.interpolate(image, size=(H, W), mode='area').permute(0, 2, 3, 1)[None].float()
results = dict(
image=image[0, 0].cpu().numpy(),
alpha=results['rgba'][0, 0, :, :, 3].cpu().numpy(),
depth=results['depth'][0, 0].cpu().numpy(),
normal=results['normal'][0, 0].cpu().numpy())
return results
def test_step(self):
if self.need_update:
starter, ender = torch.cuda.Event(enable_timing=True), torch.cuda.Event(enable_timing=True)
starter.record()
outputs = self.test_gui(
self.active_cam.pose, self.active_cam.intrinsics,
self.W, self.H)
ender.record()
torch.cuda.synchronize()
t = starter.elapsed_time(ender)
self.render_buffer = np.ascontiguousarray(self.prepare_buffer(outputs))
self.need_update = False
dpg.set_value('_log_infer_time', f'{t:.4f}ms ({int(1000 / t)} FPS)')
dpg.set_value('_texture', self.render_buffer)
def register_dpg(self):
### register texture
with dpg.texture_registry(show=False):
dpg.add_raw_texture(self.W, self.H, self.render_buffer, format=dpg.mvFormat_Float_rgb, tag='_texture')
### register window
# the rendered image, as the primary window
with dpg.window(tag='_primary_window', width=self.W, height=self.H):
# add the texture
dpg.add_image('_texture')
dpg.set_primary_window('_primary_window', True)
def update_camera_status():
if self.debug:
dpg.set_value('_log_pose', self.active_cam.pose2str())
dpg.set_value('fov', self.active_cam.fovy)
dpg.set_value('radius', self.active_cam.radius)
euler = self.active_cam.euler
dpg.set_value('roll', euler[0])
dpg.set_value('elevation', euler[1])
dpg.set_value('azimuth', euler[2])
center = self.active_cam.center
dpg.set_value('center_x', center[0])
dpg.set_value('center_y', center[1])
dpg.set_value('center_z', center[2])
# control window
with dpg.window(label='Control', tag='_control_window', width=380, height=self.H, pos=[self.W, 0]):
# button theme
with dpg.theme() as theme_button:
with dpg.theme_component(dpg.mvButton):
dpg.add_theme_color(dpg.mvThemeCol_Button, (23, 3, 18))
dpg.add_theme_color(dpg.mvThemeCol_ButtonHovered, (51, 3, 47))
dpg.add_theme_color(dpg.mvThemeCol_ButtonActive, (83, 18, 83))
dpg.add_theme_style(dpg.mvStyleVar_FrameRounding, 5)
dpg.add_theme_style(dpg.mvStyleVar_FramePadding, 3, 3)
# time
with dpg.group(horizontal=True):
dpg.add_text('Infer time: ')
dpg.add_text('no data', tag='_log_infer_time')
with dpg.collapsing_header(label='Camera options', default_open=True):
def callback_set_cam(sender, app_data):
self.active_cam = getattr(self, app_data + '_cam')
update_camera_status()
self.need_update = True
def callback_reset_camera(sender, app_data):
self.active_cam.fovy = self.default_cam_fovy
self.active_cam.radius = self.default_cam_radius
self.active_cam.set_euler(self.default_cam_euler)
self.active_cam.center = np.array([0, 0, 0], dtype=np.float32)
update_camera_status()
self.need_update = True
with dpg.group(horizontal=True):
dpg.add_combo(
['default'], label='camera', width=150,
default_value=self.active_cam.name, callback=callback_set_cam, tag='cam_combo')
dpg.add_button(label='Reset camera', callback=callback_reset_camera)
def callback_set_fovy(sender, app_data):
self.active_cam.fovy = app_data
update_camera_status()
self.need_update = True
def callback_set_cam_r(sender, app_data):
self.active_cam.radius = app_data
update_camera_status()
self.need_update = True
def callback_set_euler(sender, app_data, axis):
euler = self.active_cam.euler
euler[axis] = app_data
self.active_cam.set_euler(euler)
update_camera_status()
self.need_update = True
def callback_set_center(sender, app_data, axis):
self.active_cam.center[axis] = app_data
update_camera_status()
self.need_update = True
dpg.add_slider_float(
label='FoV (vertical)', min_value=1, max_value=120, clamped=True, format='%.1f deg',
default_value=self.active_cam.fovy, callback=callback_set_fovy, tag='fov')
dpg.add_slider_float(
label='radius', min_value=1.0, max_value=5.0, format='%.2f',
default_value=self.active_cam.radius, callback=callback_set_cam_r, tag='radius')
dpg.add_slider_float(
label='azimuth', min_value=-180, max_value=180, clamped=True, format='%.1f deg',
default_value=self.active_cam.euler[2],
callback=lambda x, y: callback_set_euler(x, y, 2), tag='azimuth')
dpg.add_slider_float(
label='elevation', min_value=-89, max_value=89, clamped=True, format='%.1f deg',
default_value=self.active_cam.euler[1],
callback=lambda x, y: callback_set_euler(x, y, 1), tag='elevation')
dpg.add_slider_float(
label='roll', min_value=-180, max_value=180, clamped=True, format='%.1f deg',
default_value=self.active_cam.euler[0],
callback=lambda x, y: callback_set_euler(x, y, 0), tag='roll')
dpg.add_text('Orbit center:')
with dpg.group(horizontal=True):
dpg.add_input_float(
width=110, format='x: %.2f', tag='center_x',
default_value=self.active_cam.center[0], callback=lambda x, y: callback_set_center(x, y, 0))
dpg.add_input_float(
width=110, format='y: %.2f', tag='center_y',
default_value=self.active_cam.center[1], callback=lambda x, y: callback_set_center(x, y, 1))
dpg.add_input_float(
width=110, format='z: %.2f', tag='center_z',
default_value=self.active_cam.center[2], callback=lambda x, y: callback_set_center(x, y, 2))
def callback_load_intrinsic(sender, app_data): | fx, fy, cx, cy, h, w = load_intrinsics(app_data['file_path_name']) | 1 | 2023-12-14 03:29:28+00:00 | 4k |
geopavlakos/hamer | hamer/models/components/pose_transformer.py | [
{
"identifier": "AdaptiveLayerNorm1D",
"path": "hamer/models/components/t_cond_mlp.py",
"snippet": "class AdaptiveLayerNorm1D(torch.nn.Module):\n def __init__(self, data_dim: int, norm_cond_dim: int):\n super().__init__()\n if data_dim <= 0:\n raise ValueError(f\"data_dim must be positive, but got {data_dim}\")\n if norm_cond_dim <= 0:\n raise ValueError(f\"norm_cond_dim must be positive, but got {norm_cond_dim}\")\n self.norm = torch.nn.LayerNorm(\n data_dim\n ) # TODO: Check if elementwise_affine=True is correct\n self.linear = torch.nn.Linear(norm_cond_dim, 2 * data_dim)\n torch.nn.init.zeros_(self.linear.weight)\n torch.nn.init.zeros_(self.linear.bias)\n\n def forward(self, x: torch.Tensor, t: torch.Tensor) -> torch.Tensor:\n # x: (batch, ..., data_dim)\n # t: (batch, norm_cond_dim)\n # return: (batch, data_dim)\n x = self.norm(x)\n alpha, beta = self.linear(t).chunk(2, dim=-1)\n\n # Add singleton dimensions to alpha and beta\n if x.dim() > 2:\n alpha = alpha.view(alpha.shape[0], *([1] * (x.dim() - 2)), alpha.shape[1])\n beta = beta.view(beta.shape[0], *([1] * (x.dim() - 2)), beta.shape[1])\n\n return x * (1 + alpha) + beta"
},
{
"identifier": "FrequencyEmbedder",
"path": "hamer/models/components/t_cond_mlp.py",
"snippet": "class FrequencyEmbedder(torch.nn.Module):\n def __init__(self, num_frequencies, max_freq_log2):\n super().__init__()\n frequencies = 2 ** torch.linspace(0, max_freq_log2, steps=num_frequencies)\n self.register_buffer(\"frequencies\", frequencies)\n\n def forward(self, x):\n # x should be of size (N,) or (N, D)\n N = x.size(0)\n if x.dim() == 1: # (N,)\n x = x.unsqueeze(1) # (N, D) where D=1\n x_unsqueezed = x.unsqueeze(-1) # (N, D, 1)\n scaled = self.frequencies.view(1, 1, -1) * x_unsqueezed # (N, D, num_frequencies)\n s = torch.sin(scaled)\n c = torch.cos(scaled)\n embedded = torch.cat([s, c, x_unsqueezed], dim=-1).view(\n N, -1\n ) # (N, D * 2 * num_frequencies + D)\n return embedded"
},
{
"identifier": "normalization_layer",
"path": "hamer/models/components/t_cond_mlp.py",
"snippet": "def normalization_layer(norm: Optional[str], dim: int, norm_cond_dim: int = -1):\n if norm == \"batch\":\n return torch.nn.BatchNorm1d(dim)\n elif norm == \"layer\":\n return torch.nn.LayerNorm(dim)\n elif norm == \"ada\":\n assert norm_cond_dim > 0, f\"norm_cond_dim must be positive, got {norm_cond_dim}\"\n return AdaptiveLayerNorm1D(dim, norm_cond_dim)\n elif norm is None:\n return torch.nn.Identity()\n else:\n raise ValueError(f\"Unknown norm: {norm}\")"
}
] | from inspect import isfunction
from typing import Callable, Optional
from einops import rearrange
from einops.layers.torch import Rearrange
from torch import nn
from .t_cond_mlp import (
AdaptiveLayerNorm1D,
FrequencyEmbedder,
normalization_layer,
)
import torch | 2,871 | self.layers.append(
nn.ModuleList(
[
PreNorm(dim, sa, norm=norm, norm_cond_dim=norm_cond_dim),
PreNorm(dim, ff, norm=norm, norm_cond_dim=norm_cond_dim),
]
)
)
def forward(self, x: torch.Tensor, *args):
for attn, ff in self.layers:
x = attn(x, *args) + x
x = ff(x, *args) + x
return x
class TransformerCrossAttn(nn.Module):
def __init__(
self,
dim: int,
depth: int,
heads: int,
dim_head: int,
mlp_dim: int,
dropout: float = 0.0,
norm: str = "layer",
norm_cond_dim: int = -1,
context_dim: Optional[int] = None,
):
super().__init__()
self.layers = nn.ModuleList([])
for _ in range(depth):
sa = Attention(dim, heads=heads, dim_head=dim_head, dropout=dropout)
ca = CrossAttention(
dim, context_dim=context_dim, heads=heads, dim_head=dim_head, dropout=dropout
)
ff = FeedForward(dim, mlp_dim, dropout=dropout)
self.layers.append(
nn.ModuleList(
[
PreNorm(dim, sa, norm=norm, norm_cond_dim=norm_cond_dim),
PreNorm(dim, ca, norm=norm, norm_cond_dim=norm_cond_dim),
PreNorm(dim, ff, norm=norm, norm_cond_dim=norm_cond_dim),
]
)
)
def forward(self, x: torch.Tensor, *args, context=None, context_list=None):
if context_list is None:
context_list = [context] * len(self.layers)
if len(context_list) != len(self.layers):
raise ValueError(f"len(context_list) != len(self.layers) ({len(context_list)} != {len(self.layers)})")
for i, (self_attn, cross_attn, ff) in enumerate(self.layers):
x = self_attn(x, *args) + x
x = cross_attn(x, *args, context=context_list[i]) + x
x = ff(x, *args) + x
return x
class DropTokenDropout(nn.Module):
def __init__(self, p: float = 0.1):
super().__init__()
if p < 0 or p > 1:
raise ValueError(
"dropout probability has to be between 0 and 1, " "but got {}".format(p)
)
self.p = p
def forward(self, x: torch.Tensor):
# x: (batch_size, seq_len, dim)
if self.training and self.p > 0:
zero_mask = torch.full_like(x[0, :, 0], self.p).bernoulli().bool()
# TODO: permutation idx for each batch using torch.argsort
if zero_mask.any():
x = x[:, ~zero_mask, :]
return x
class ZeroTokenDropout(nn.Module):
def __init__(self, p: float = 0.1):
super().__init__()
if p < 0 or p > 1:
raise ValueError(
"dropout probability has to be between 0 and 1, " "but got {}".format(p)
)
self.p = p
def forward(self, x: torch.Tensor):
# x: (batch_size, seq_len, dim)
if self.training and self.p > 0:
zero_mask = torch.full_like(x[:, :, 0], self.p).bernoulli().bool()
# Zero-out the masked tokens
x[zero_mask, :] = 0
return x
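# Note: DropTokenDropout above removes the selected tokens entirely (the sequence becomes
# shorter), while ZeroTokenDropout keeps the sequence length and only zeroes the chosen tokens.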
class TransformerEncoder(nn.Module):
def __init__(
self,
num_tokens: int,
token_dim: int,
dim: int,
depth: int,
heads: int,
mlp_dim: int,
dim_head: int = 64,
dropout: float = 0.0,
emb_dropout: float = 0.0,
emb_dropout_type: str = "drop",
emb_dropout_loc: str = "token",
norm: str = "layer",
norm_cond_dim: int = -1,
token_pe_numfreq: int = -1,
):
super().__init__()
if token_pe_numfreq > 0:
token_dim_new = token_dim * (2 * token_pe_numfreq + 1)
self.to_token_embedding = nn.Sequential(
Rearrange("b n d -> (b n) d", n=num_tokens, d=token_dim),
|
# from .vit import Attention, FeedForward
def exists(val):
return val is not None
def default(val, d):
if exists(val):
return val
return d() if isfunction(d) else d
class PreNorm(nn.Module):
def __init__(self, dim: int, fn: Callable, norm: str = "layer", norm_cond_dim: int = -1):
super().__init__()
self.norm = normalization_layer(norm, dim, norm_cond_dim)
self.fn = fn
def forward(self, x: torch.Tensor, *args, **kwargs):
if isinstance(self.norm, AdaptiveLayerNorm1D):
return self.fn(self.norm(x, *args), **kwargs)
else:
return self.fn(self.norm(x), **kwargs)
class FeedForward(nn.Module):
def __init__(self, dim, hidden_dim, dropout=0.0):
super().__init__()
self.net = nn.Sequential(
nn.Linear(dim, hidden_dim),
nn.GELU(),
nn.Dropout(dropout),
nn.Linear(hidden_dim, dim),
nn.Dropout(dropout),
)
def forward(self, x):
return self.net(x)
class Attention(nn.Module):
def __init__(self, dim, heads=8, dim_head=64, dropout=0.0):
super().__init__()
inner_dim = dim_head * heads
project_out = not (heads == 1 and dim_head == dim)
self.heads = heads
self.scale = dim_head**-0.5
self.attend = nn.Softmax(dim=-1)
self.dropout = nn.Dropout(dropout)
self.to_qkv = nn.Linear(dim, inner_dim * 3, bias=False)
self.to_out = (
nn.Sequential(nn.Linear(inner_dim, dim), nn.Dropout(dropout))
if project_out
else nn.Identity()
)
def forward(self, x):
qkv = self.to_qkv(x).chunk(3, dim=-1)
q, k, v = map(lambda t: rearrange(t, "b n (h d) -> b h n d", h=self.heads), qkv)
dots = torch.matmul(q, k.transpose(-1, -2)) * self.scale
attn = self.attend(dots)
attn = self.dropout(attn)
out = torch.matmul(attn, v)
out = rearrange(out, "b h n d -> b n (h d)")
return self.to_out(out)
class CrossAttention(nn.Module):
def __init__(self, dim, context_dim=None, heads=8, dim_head=64, dropout=0.0):
super().__init__()
inner_dim = dim_head * heads
project_out = not (heads == 1 and dim_head == dim)
self.heads = heads
self.scale = dim_head**-0.5
self.attend = nn.Softmax(dim=-1)
self.dropout = nn.Dropout(dropout)
context_dim = default(context_dim, dim)
self.to_kv = nn.Linear(context_dim, inner_dim * 2, bias=False)
self.to_q = nn.Linear(dim, inner_dim, bias=False)
self.to_out = (
nn.Sequential(nn.Linear(inner_dim, dim), nn.Dropout(dropout))
if project_out
else nn.Identity()
)
def forward(self, x, context=None):
context = default(context, x)
k, v = self.to_kv(context).chunk(2, dim=-1)
q = self.to_q(x)
q, k, v = map(lambda t: rearrange(t, "b n (h d) -> b h n d", h=self.heads), [q, k, v])
dots = torch.matmul(q, k.transpose(-1, -2)) * self.scale
attn = self.attend(dots)
attn = self.dropout(attn)
out = torch.matmul(attn, v)
out = rearrange(out, "b h n d -> b n (h d)")
return self.to_out(out)
class Transformer(nn.Module):
def __init__(
self,
dim: int,
depth: int,
heads: int,
dim_head: int,
mlp_dim: int,
dropout: float = 0.0,
norm: str = "layer",
norm_cond_dim: int = -1,
):
super().__init__()
self.layers = nn.ModuleList([])
for _ in range(depth):
sa = Attention(dim, heads=heads, dim_head=dim_head, dropout=dropout)
ff = FeedForward(dim, mlp_dim, dropout=dropout)
self.layers.append(
nn.ModuleList(
[
PreNorm(dim, sa, norm=norm, norm_cond_dim=norm_cond_dim),
PreNorm(dim, ff, norm=norm, norm_cond_dim=norm_cond_dim),
]
)
)
def forward(self, x: torch.Tensor, *args):
for attn, ff in self.layers:
x = attn(x, *args) + x
x = ff(x, *args) + x
return x
class TransformerCrossAttn(nn.Module):
def __init__(
self,
dim: int,
depth: int,
heads: int,
dim_head: int,
mlp_dim: int,
dropout: float = 0.0,
norm: str = "layer",
norm_cond_dim: int = -1,
context_dim: Optional[int] = None,
):
super().__init__()
self.layers = nn.ModuleList([])
for _ in range(depth):
sa = Attention(dim, heads=heads, dim_head=dim_head, dropout=dropout)
ca = CrossAttention(
dim, context_dim=context_dim, heads=heads, dim_head=dim_head, dropout=dropout
)
ff = FeedForward(dim, mlp_dim, dropout=dropout)
self.layers.append(
nn.ModuleList(
[
PreNorm(dim, sa, norm=norm, norm_cond_dim=norm_cond_dim),
PreNorm(dim, ca, norm=norm, norm_cond_dim=norm_cond_dim),
PreNorm(dim, ff, norm=norm, norm_cond_dim=norm_cond_dim),
]
)
)
def forward(self, x: torch.Tensor, *args, context=None, context_list=None):
if context_list is None:
context_list = [context] * len(self.layers)
if len(context_list) != len(self.layers):
raise ValueError(f"len(context_list) != len(self.layers) ({len(context_list)} != {len(self.layers)})")
for i, (self_attn, cross_attn, ff) in enumerate(self.layers):
x = self_attn(x, *args) + x
x = cross_attn(x, *args, context=context_list[i]) + x
x = ff(x, *args) + x
return x
class DropTokenDropout(nn.Module):
def __init__(self, p: float = 0.1):
super().__init__()
if p < 0 or p > 1:
raise ValueError(
"dropout probability has to be between 0 and 1, " "but got {}".format(p)
)
self.p = p
def forward(self, x: torch.Tensor):
# x: (batch_size, seq_len, dim)
if self.training and self.p > 0:
zero_mask = torch.full_like(x[0, :, 0], self.p).bernoulli().bool()
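                # Note: zero_mask picks whole token positions to drop; the same positions are removed for every sample in the batch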
# TODO: permutation idx for each batch using torch.argsort
if zero_mask.any():
x = x[:, ~zero_mask, :]
return x
class ZeroTokenDropout(nn.Module):
def __init__(self, p: float = 0.1):
super().__init__()
if p < 0 or p > 1:
raise ValueError(
"dropout probability has to be between 0 and 1, " "but got {}".format(p)
)
self.p = p
def forward(self, x: torch.Tensor):
# x: (batch_size, seq_len, dim)
if self.training and self.p > 0:
zero_mask = torch.full_like(x[:, :, 0], self.p).bernoulli().bool()
# Zero-out the masked tokens
x[zero_mask, :] = 0
return x
class TransformerEncoder(nn.Module):
def __init__(
self,
num_tokens: int,
token_dim: int,
dim: int,
depth: int,
heads: int,
mlp_dim: int,
dim_head: int = 64,
dropout: float = 0.0,
emb_dropout: float = 0.0,
emb_dropout_type: str = "drop",
emb_dropout_loc: str = "token",
norm: str = "layer",
norm_cond_dim: int = -1,
token_pe_numfreq: int = -1,
):
super().__init__()
if token_pe_numfreq > 0:
token_dim_new = token_dim * (2 * token_pe_numfreq + 1)
self.to_token_embedding = nn.Sequential(
Rearrange("b n d -> (b n) d", n=num_tokens, d=token_dim), | FrequencyEmbedder(token_pe_numfreq, token_pe_numfreq - 1), | 1 | 2023-12-08 09:07:07+00:00 | 4k |
rogeriochaves/driver | driver/executor.py | [
{
"identifier": "extract_high_level_plan_and_actions",
"path": "driver/brain.py",
"snippet": "def extract_high_level_plan_and_actions(input: str):\n pattern = r\"^A\\. High.?level([\\s\\S]*?)^B\\.\\s*([\\s\\S]*)\"\n\n match = re.search(pattern, input, re.DOTALL | re.MULTILINE | re.IGNORECASE)\n\n if match:\n between_tokens = match.group(1).strip()\n after_token_b = match.group(2).strip()\n\n return str(between_tokens), str(after_token_b)\n else:\n return None, None"
},
{
"identifier": "extract_structured_actions",
"path": "driver/brain.py",
"snippet": "def extract_structured_actions(input: str):\n actions = heuristics_extract_structured_actions(input)\n if not actions:\n actions = llm_structured_actions(input)\n return actions"
},
{
"identifier": "plan_next_step_actions",
"path": "driver/brain.py",
"snippet": "def plan_next_step_actions(context: Context, image_path: str):\n print_action(\"Looking at the screen to plan next steps\")\n print(\"Analyzing...\")\n\n initial_user_prompt = f\"\"\"\\\n Task: {context['task']}\n\n Here is a screenshot of the screen, tagged with labels like A1, A2, A3 on each interactive item, please do two things:\n\n A. High level list of steps to follow, using a numbered list in english text\n\n B. A list of actions to execute, being one of [CLICK A1] to click the A1 button for example, \\\n [TYPE \"message\"] to type \"message\", [PRESS ENTER] to \\\n press keys ENTER or shortcuts like CMD+F if needed, and [REFRESH] to end the list and get a new screenshot of the screen. \\\n Those are the ONLY options you have, work with that. If you need to switch apps,\n use [PRESS CMD+SPACE] to open the spotlight and then [REFRESH]. \\\n If you want to click or type on an element that is not on the screen, issue a [REFRESH] first. \\\n \"\"\"\n\n next_step_user_prompt = f\"\"\"\\\n Alright, I have executed the previous actions, let me share you the updated screenshot, so you can plan the next actions.\n Describe what you are seeing, and describe where it might have gone wrong, because usually the screen changes and we have to course correct.\n As a reminder my goal is: {context['task']}.\n\n Please create a list with the next actions to take if any (options are [CLICK <LABEL>], [TYPE \"<TEXT>\"], [SHORTCUT <shortcut>] or [REFRESH]).\n Pay extra attention to the labels on the screen, you need to name them correctly, and a place you want to click is not labeled, choose another one.\n \"\"\"\n\n user_prompt = (\n initial_user_prompt if len(context[\"history\"]) == 0 else next_step_user_prompt\n )\n\n user_message: List[ChatCompletionMessageParam] = [\n {\n \"role\": \"user\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": user_prompt,\n },\n {\n \"type\": \"image_url\",\n \"image_url\": {\n \"url\": image_to_base64(image_path),\n \"detail\": \"high\",\n },\n },\n ],\n }\n ]\n\n history = context[\"history\"]\n\n system_message: List[ChatCompletionMessageParam] = [\n {\n \"role\": \"system\",\n \"content\": \"\"\"\\\n You are an AI agent with capacity to see the user's screen, click buttons and type text.\n The user will ask you to do things, you will see the user screen, then you will first think\n in high level what are the steps you will need to follow to carry the task. Then, you will\n be given annotated screenshots with codes mapping buttons and texts on the screen, which you\n can choose to click and proceed, type in an input field, and get a refreshed update of the\n screen to continue until the task is completed. You are always very short and concise in your writing.\"\"\",\n },\n ]\n\n model = \"gpt-4-vision-preview\"\n messages = system_message + history + user_message\n\n response = client.chat.completions.create(\n model=model,\n messages=messages,\n stream=True,\n max_tokens=600,\n )\n\n content = \"\"\n for chunk in response:\n if delta := chunk.choices[0].delta.content:\n print(delta, end=\"\", flush=True)\n content += delta\n\n context[\"history\"].append(\n {\n \"role\": \"user\",\n \"content\": user_prompt,\n }\n )\n context[\"history\"].append(\n {\n \"role\": \"assistant\",\n \"content\": content,\n }\n )\n\n log_cost(\n model=model,\n messages=system_message + history,\n completion=content,\n image={\n \"text\": user_prompt,\n \"path\": image_path,\n \"detail\": \"high\",\n },\n )\n\n return content"
},
{
"identifier": "print_action",
"path": "driver/logger.py",
"snippet": "def print_action(str: str):\n print(Fore.YELLOW + \"\\n\\n> \" + str + \"\\n\" + Style.RESET_ALL)"
},
{
"identifier": "annotate_image",
"path": "driver/annotator.py",
"snippet": "def annotate_image(input_image_path, debug: DebugConfig):\n ocr_result = ocr_text_detection(input_image_path, debug)\n\n components = detect_components(\n input_image_path,\n ocr_result,\n showOCR=debug[\"ocr\"],\n showUIED=debug[\"uied\"],\n )\n\n original_image = Image.open(input_image_path)\n size = {\"width\": original_image.width, \"height\": original_image.height}\n img_multiplier_factor: ImgMultiplierFactor = {\n \"height\": components[\"img_shape\"][0] / size[\"height\"],\n \"width\": components[\"img_shape\"][1] / size[\"width\"],\n }\n\n label_counter = 1\n label_prefix = \"A\"\n drawn_positions = []\n label_map: LabelMap = {}\n\n label_width = 48 if is_retina_display() else 24\n label_height = 24 if is_retina_display() else 12\n\n # Most likely the biggest components are the most important ones to be clicked on the screen,\n # and seems like GPT-V will be biased anyway towards choosing A1, A2, etc the early labels,\n # so we try to play into that and label from the biggest to the smallest components\n sorted_components = sorted(\n components[\"compos\"],\n key=lambda x: (x.row_max - x.row_min) * (x.col_max - x.col_min),\n reverse=True\n )\n\n for component in sorted_components:\n if component.text_content and len(component.text_content) < 2:\n continue\n\n component_position = {\n \"x\": round(component.col_min / img_multiplier_factor[\"width\"]),\n \"y\": round(component.row_min / img_multiplier_factor[\"height\"]),\n \"x2\": round(component.col_max / img_multiplier_factor[\"width\"]),\n \"y2\": round(component.row_max / img_multiplier_factor[\"height\"]),\n }\n component_width = component_position[\"x2\"] - component_position[\"x\"]\n component_height = component_position[\"y2\"] - component_position[\"y\"]\n\n if component_height < label_height:\n continue\n\n label_position = (\n round(component_position[\"x\"] - label_width / 2),\n round(component_position[\"y\"] - label_height / 2),\n )\n\n # Draw label in the center of the component for big components\n big_component = 200 if is_retina_display() else 100\n if component_width > big_component and component_height > big_component:\n label_position = (\n round(component_position[\"x\"] + component_width / 2 - label_width),\n round(component_position[\"y\"] + component_height / 2 - label_height),\n )\n\n too_close = any(\n abs(label_position[0] - pos[0]) < label_width\n and abs(label_position[1] - pos[1]) < label_height * 2\n for pos in drawn_positions\n )\n if too_close:\n continue\n\n if label_counter > 9:\n label_counter = 1\n next_char = chr(ord(label_prefix[-1]) + 1)\n if next_char == \"I\":\n next_char = \"J\" # Skip 'I' to avoid confusion with 'l'\n if label_prefix[-1] == \"Z\":\n label_prefix += \"A\"\n else:\n label_prefix = label_prefix[:-1] + next_char\n label = f\"{label_prefix}{label_counter}\"\n draw_square(\n original_image,\n label_position,\n label,\n width=label_width,\n height=label_height,\n )\n drawn_positions.append(label_position)\n label_map[label] = {\n \"text\": component.text_content or \"\",\n \"position\": (\n component_position[\"x\"],\n component_position[\"y\"],\n ),\n \"size\": (\n component_width,\n component_height,\n ),\n }\n label_counter += 1\n\n os.makedirs(\"./output/annotated\", exist_ok=True)\n output_image_path = f\"./output/annotated/{os.path.basename(input_image_path)}\"\n original_image.save(output_image_path)\n\n print(f\"{len(label_map.keys())} elements found on the screen\", end=\"\")\n if debug[\"annotations\"]:\n show_image(\"Annotated\", 
cv2.imread(output_image_path))\n\n return label_map, output_image_path, img_multiplier_factor"
},
{
"identifier": "Action",
"path": "driver/types.py",
"snippet": "class Click(TypedDict):\nclass Type(TypedDict):\nclass Press(TypedDict):\nclass Refresh(TypedDict):\nclass LabelMapItem(TypedDict):\nclass ImgMultiplierFactor(TypedDict):\nclass DebugConfig(TypedDict):\nclass Context(TypedDict):\nclass Vertex:\nclass BoundingPoly:\nclass TextAnnotation:\nclass AnnotatedImage:"
},
{
"identifier": "is_retina_display",
"path": "driver/utils.py",
"snippet": "def is_retina_display():\n return is_retina"
}
] | import os
import subprocess
import sys
import time
import pyautogui
import pyperclip
import pygetwindow
from typing import List
from driver.brain import (
extract_high_level_plan_and_actions,
extract_structured_actions,
plan_next_step_actions,
)
from driver.logger import print_action
from driver.annotator import annotate_image
from driver.types import Action, DebugConfig, LabelMap, Context, LabelMapItem
from colorama import Fore, Style
from driver.utils import is_retina_display | 2,572 |
def take_screenshot():
screenshot = pyautogui.screenshot()
os.makedirs("./output", exist_ok=True)
screenshot.save("./output/screenshot.png")
return "./output/screenshot.png"
def start(task: str, debug: DebugConfig):
screenshot = take_screenshot()
label_map, output_image_path, img_multiplier_factor = annotate_image(
screenshot, debug=debug
)
|
def take_screenshot():
screenshot = pyautogui.screenshot()
os.makedirs("./output", exist_ok=True)
screenshot.save("./output/screenshot.png")
return "./output/screenshot.png"
def start(task: str, debug: DebugConfig):
screenshot = take_screenshot()
label_map, output_image_path, img_multiplier_factor = annotate_image(
screenshot, debug=debug
)
| context: Context = { | 5 | 2023-12-10 17:18:28+00:00 | 4k |
baidubce/app-builder | appbuilder/core/components/retriever/bes_retriever.py | [
{
"identifier": "Component",
"path": "appbuilder/core/component.py",
"snippet": "class ComponentArguments(BaseModel):\nclass Component:\n def extract_values_to_dict(self):\n def __init__(self,\n meta: Optional[ComponentArguments] = ComponentArguments(),\n secret_key: Optional[str] = None,\n gateway: str = \"\"\n ):\n def __call__(self, *inputs, **kwargs):\n def run(self, *inputs, **kwargs):\n def batch(self, *args, **kwargs) -> List[Message]:\n async def arun(self, *args, **kwargs) -> Optional[Message]:\n async def abatch(self, *args, **kwargs) -> List[Message]:\n def _trace(self, **data) -> None:\n def _debug(self, **data) -> None:"
},
{
"identifier": "Embedding",
"path": "appbuilder/core/components/embeddings/component.py",
"snippet": "class Embedding(EmbeddingBaseComponent):\n \"\"\"\n Embedding\n\n Embedding-V1是基于百度文心大模型技术的文本表示模型,将文本转化为用数值表示的向量形式,用于文本检索、信息推荐、知识挖掘等场景。\n\n Examples:\n\n .. code-block:: python\n\n import appbuilder\n from appbuilder import Message\n\n os.environ[\"APPBUILDER_TOKEN\"] = '...'\n\n embedding = appbuilder.Embedding()\n\n embedding_single = embedding(Message(\"hello world!\"))\n\n embedding_batch = embedding.batch(Message([\"hello\", \"world\"]))\n \"\"\"\n\n name: str = \"embedding\"\n version: str = \"v1\"\n\n meta = EmbeddingArgs\n accepted_models = [\"Embedding-V1\"]\n\n base_urls = {\n 'Embedding-V1' : \"/v1/bce/wenxinworkshop/ai_custom/v1/embeddings/embedding-v1\"\n }\n\n def __init__(self, model=\"Embedding-V1\"):\n \"\"\"Embedding\"\"\"\n\n if model not in self.accepted_models:\n raise ModelNotSupportedException(f\"Model {model} not supported, only support {self.accepted_models}\")\n\n if model in self.base_urls:\n self.base_url = self.base_urls[model]\n else:\n raise ModelNotSupportedException(f\"Model {model} is not yet supported, only support {self.base_urls.keys()}\")\n\n super().__init__(self.meta)\n\n def _check_response_json(self, data: dict):\n \"\"\"\n check_response_json for embedding\n \"\"\"\n\n self.http_client.check_response_json(data)\n if \"error_code\" in data and \"error_msg\" in data:\n raise AppBuilderServerException(\n service_err_code=data['error_code'],\n service_err_message=data['error_msg'],\n )\n\n def _request(self, payload: dict) -> dict:\n \"\"\"\n request to gateway\n \"\"\"\n headers = self.http_client.auth_header()\n headers[\"Content-Type\"] = \"application/json\"\n resp = self.http_client.session.post(\n url=self.http_client.service_url(self.base_url),\n headers=headers,\n json=payload,\n )\n self.http_client.check_response_header(resp)\n self._check_response_json(resp.json())\n\n return resp.json()\n\n def _batchify(self, texts: List[str], batch_size: int = 16) -> List[List[str]]:\n \"\"\"\n batchify input text list\n \"\"\"\n\n if batch_size > 16:\n raise ValueError(f\"The max Embedding batch_size is 16, but got {batch_size}\")\n\n return [\n texts[i : i + batch_size] for i in range(0, len(texts), batch_size)\n ]\n\n def _batch(self, texts: List[str]) -> Message[List[List[float]]]:\n \"\"\"\n batch run implement\n \"\"\"\n\n batches = self._batchify(texts)\n results = []\n for batch in batches:\n result = self._request({\"input\": batch})\n results.extend(result['data'])\n results = Message([result['embedding'] for result in results])\n\n return results\n\n def run(self, text: Union[Message[str], str]) -> Message[List[float]]:\n \"\"\"\n run\n \"\"\"\n \n _text = text if isinstance(text, str) else text.content\n\n return Message(self._batch([_text]).content[0])\n\n def batch(self, texts: Union[Message[List[str]], List[str]]) -> Message[List[List[float]]]:\n \"\"\"\n batch run\n \"\"\"\n\n _texts = texts if isinstance(texts, list) else texts.content\n\n return self._batch(_texts)"
},
{
"identifier": "GATEWAY_URL",
"path": "appbuilder/core/constants.py",
"snippet": "GATEWAY_URL = \"https://appbuilder.baidu.com\""
},
{
"identifier": "logger",
"path": "appbuilder/utils/logger_util.py",
"snippet": "LOGGING_CONFIG = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'standard': {\n 'format': '[%(asctime)s.%(msecs)03d] %(filename)s [line:%(lineno)d] %(levelname)s [%(logid)s] %(message)s',\n },\n },\n 'handlers': {\n 'console': {\n 'level': 'INFO',\n 'class': 'logging.StreamHandler',\n 'formatter': 'standard',\n 'stream': 'ext://sys.stdout', # Use standard output\n },\n },\n 'loggers': {\n 'appbuilder': {\n 'handlers': ['console'],\n 'level': 'INFO',\n 'propagate': True,\n },\n }\n}\nclass LoggerWithLoggerId(logging.LoggerAdapter):\n def __init__(self, logger, extra, loglevel):\n def set_auto_logid(self):\n def set_logid(self, logid):\n def get_logid(self):\n def level(self):\n def process(self, msg, kwargs):\ndef _setup_logging():"
}
] | import importlib
import os
import random
import string
from typing import Dict, Any
from appbuilder.core.component import Component, Message
from appbuilder.core.components.embeddings.component import Embedding
from appbuilder.core.constants import GATEWAY_URL
from appbuilder.utils.logger_util import logger
from elasticsearch import Elasticsearch, helpers | 2,895 | """
secret_key = os.getenv("APPBUILDER_TOKEN")
if not secret_key.startswith("Bearer"):
secret_key = "Bearer {}".format(secret_key)
gateway = os.getenv("GATEWAY_URL") if os.getenv("GATEWAY_URL") else GATEWAY_URL
connection_params = {
"hosts": [gateway + self.prefix + self.base_es_url + cluster_id],
"http_auth": (user_name, password),
"headers": {'X-Appbuilder-Authorization': f"{secret_key}"}
}
bes_client = self.es(**connection_params)
try:
bes_client.info()
except Exception as e:
logger.error("connecting to bes error: {}".format(e))
raise ConnectionError(e)
return bes_client
def as_retriever(self):
"""
        Convert to a retriever
"""
return BESRetriever(embedding=self.embedding, index_name=self.index_name, bes_client=self.bes_client,
index_type=self.index_type)
@staticmethod
def create_index_mappings(index_type, vector_dims):
"""
        Create the index mapping
"""
mappings = {
'properties': {
"vector": {
"type": "bpack_vector",
"dims": vector_dims,
},
}
}
if index_type == "hnsw":
mappings["properties"]["vector"]["index_type"] = "hnsw"
mappings["properties"]["vector"]["space_type"] = "cosine"
mappings["properties"]["vector"]["parameters"] = {"m": 4, "ef_construction": 200}
return mappings
def add_segments(self, segments: Message, metadata=""):
"""
        Insert data into BES
        Args:
            segments (Message): the text segments to insert
        Returns:
"""
segment_vectors = self.embedding.batch(segments)
segment_vectors = segment_vectors.content
vector_dims = len(segment_vectors[0])
segments = segments.content
documents = [
{"_index": self.index_name,
"_source": {"text": segment, "vector": vector, "metadata": metadata,
"id": BESVectorStoreIndex.generate_id()}}
for segment, vector in zip(segments, segment_vectors)]
mappings = BESVectorStoreIndex.create_index_mappings(self.index_type, vector_dims)
self.bes_client.indices.create(index=self.index_name,
body={"settings": {"index": {"knn": True}}, "mappings": mappings})
self.helpers.bulk(self.bes_client, documents)
@classmethod
def from_segments(cls, segments, cluster_id, user_name, password, embedding=None, **kwargs):
"""
        Create a BES vector index from text segments
        Args:
            segments: the split text segments
            cluster_id: BES cluster ID
            user_name: BES user name
            password: BES user password
            embedding: embedding tool for the text segments
            kwargs: other initialization parameters
        Returns:
            a BES index instance
"""
if embedding is None:
embedding = Embedding()
index_name = kwargs.get("index_name", None)
index_type = kwargs.get("index_type", "hnsw")
prefix = kwargs.get("prefix", "/rpc/2.0/cloud_hub")
vector_index = cls(cluster_id, user_name, password, embedding, index_name, index_type, prefix)
vector_index.add_segments(segments)
return vector_index
def delete_all_segments(self):
"""
        Delete all contents of the index
"""
query = {
'query': {
'match_all': {}
}
}
resp = self.bes_client.delete_by_query(index=self.index_name, body=query)
logger.debug("deleted {} documents in index {}".format(resp['deleted'], self.index_name))
def get_all_segments(self):
"""
        Get all contents of the index
"""
query = {
'query': {
'match_all': {}
}
}
return self.bes_client.search(index=self.index_name, body=query)
| # Copyright (c) 2023 Baidu, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
"""
Retriever based on Baidu ES
"""
class BESVectorStoreIndex:
"""
    BES vector store retrieval tool
"""
base_es_url: str = "/v1/bce/bes/cluster/"
def __init__(self, cluster_id, user_name, password, embedding=None, index_name=None,
index_type="hnsw", prefix="/rpc/2.0/cloud_hub"):
if embedding is None:
embedding = Embedding()
self.embedding = embedding
self.index_name = index_name if index_name else BESVectorStoreIndex.generate_id()
self.index_type = index_type
self.prefix = prefix
self._es = None
self._helpers = None
self.bes_client = self._create_bes_client(cluster_id, user_name, password)
@property
def es(self):
self._lazy_import_es()
return self._es
@property
def helpers(self):
self._lazy_import_es()
return self._helpers
def _lazy_import_es(self):
if self._es is None or self._helpers is None:
try:
self._es = Elasticsearch
self._helpers = helpers
except ImportError:
raise ImportError("Elasticsearch module is not installed. "
"Please install it using 'pip install elasticsearch==7.11.0'.")
@staticmethod
def generate_id(length=16):
"""
        Generate a random ID
"""
return ''.join(random.choices(string.ascii_lowercase + string.digits, k=length))
def _create_bes_client(self, cluster_id, user_name, password):
"""
        Create a BES client
"""
secret_key = os.getenv("APPBUILDER_TOKEN")
if not secret_key.startswith("Bearer"):
secret_key = "Bearer {}".format(secret_key)
gateway = os.getenv("GATEWAY_URL") if os.getenv("GATEWAY_URL") else GATEWAY_URL
connection_params = {
"hosts": [gateway + self.prefix + self.base_es_url + cluster_id],
"http_auth": (user_name, password),
"headers": {'X-Appbuilder-Authorization': f"{secret_key}"}
}
bes_client = self.es(**connection_params)
try:
bes_client.info()
except Exception as e:
logger.error("connecting to bes error: {}".format(e))
raise ConnectionError(e)
return bes_client
def as_retriever(self):
"""
        Convert to a retriever
"""
return BESRetriever(embedding=self.embedding, index_name=self.index_name, bes_client=self.bes_client,
index_type=self.index_type)
@staticmethod
def create_index_mappings(index_type, vector_dims):
"""
        Create the index mapping
"""
mappings = {
'properties': {
"vector": {
"type": "bpack_vector",
"dims": vector_dims,
},
}
}
if index_type == "hnsw":
mappings["properties"]["vector"]["index_type"] = "hnsw"
mappings["properties"]["vector"]["space_type"] = "cosine"
mappings["properties"]["vector"]["parameters"] = {"m": 4, "ef_construction": 200}
return mappings
def add_segments(self, segments: Message, metadata=""):
"""
        Insert data into BES
        Args:
            segments (Message): the text segments to insert
        Returns:
"""
segment_vectors = self.embedding.batch(segments)
segment_vectors = segment_vectors.content
vector_dims = len(segment_vectors[0])
segments = segments.content
documents = [
{"_index": self.index_name,
"_source": {"text": segment, "vector": vector, "metadata": metadata,
"id": BESVectorStoreIndex.generate_id()}}
for segment, vector in zip(segments, segment_vectors)]
mappings = BESVectorStoreIndex.create_index_mappings(self.index_type, vector_dims)
self.bes_client.indices.create(index=self.index_name,
body={"settings": {"index": {"knn": True}}, "mappings": mappings})
self.helpers.bulk(self.bes_client, documents)
@classmethod
def from_segments(cls, segments, cluster_id, user_name, password, embedding=None, **kwargs):
"""
        Create a BES vector index from text segments
        Args:
            segments: the split text segments
            cluster_id: BES cluster ID
            user_name: BES user name
            password: BES user password
            embedding: embedding tool for the text segments
            kwargs: other initialization parameters
        Returns:
            a BES index instance
"""
if embedding is None:
embedding = Embedding()
index_name = kwargs.get("index_name", None)
index_type = kwargs.get("index_type", "hnsw")
prefix = kwargs.get("prefix", "/rpc/2.0/cloud_hub")
vector_index = cls(cluster_id, user_name, password, embedding, index_name, index_type, prefix)
vector_index.add_segments(segments)
return vector_index
def delete_all_segments(self):
"""
        Delete all contents of the index
"""
query = {
'query': {
'match_all': {}
}
}
resp = self.bes_client.delete_by_query(index=self.index_name, body=query)
logger.debug("deleted {} documents in index {}".format(resp['deleted'], self.index_name))
def get_all_segments(self):
"""
        Get all contents of the index
"""
query = {
'query': {
'match_all': {}
}
}
return self.bes_client.search(index=self.index_name, body=query)
| class BESRetriever(Component): | 0 | 2023-12-05 01:48:12+00:00 | 4k |
corfyi/UCMCTrack | demo.py | [
{
"identifier": "UCMCTrack",
"path": "tracker/ucmc.py",
"snippet": "class UCMCTrack(object):\n def __init__(self,a1,a2,wx, wy,vmax, max_age, fps, dataset, high_score, use_cmc,detector = None):\n self.wx = wx\n self.wy = wy\n self.vmax = vmax\n self.dataset = dataset\n self.high_score = high_score\n self.max_age = max_age\n self.a1 = a1\n self.a2 = a2\n self.dt = 1.0/fps\n\n self.use_cmc = use_cmc\n\n self.trackers = []\n self.confirmed_idx = []\n self.coasted_idx = []\n self.tentative_idx = []\n\n self.detector = detector\n\n\n def update(self, dets,frame_id):\n \n self.data_association(dets,frame_id)\n \n self.associate_tentative(dets)\n \n self.initial_tentative(dets)\n \n self.delete_old_trackers()\n \n self.update_status(dets)\n \n def data_association(self, dets,frame_id):\n # Separate detections into high score and low score\n detidx_high = []\n detidx_low = []\n for i in range(len(dets)):\n if dets[i].conf >= self.high_score:\n detidx_high.append(i)\n else:\n detidx_low.append(i)\n\n # Predcit new locations of tracks\n for track in self.trackers:\n track.predict()\n if self.use_cmc:\n x,y = self.detector.cmc(track.kf.x[0,0],track.kf.x[2,0],track.w,track.h,frame_id)\n track.kf.x[0,0] = x\n track.kf.x[2,0] = y\n \n trackidx_remain = []\n self.detidx_remain = []\n\n # Associate high score detections with tracks\n trackidx = self.confirmed_idx + self.coasted_idx\n num_det = len(detidx_high)\n num_trk = len(trackidx)\n\n for trk in self.trackers:\n trk.detidx = -1\n\n if num_det*num_trk > 0:\n cost_matrix = np.zeros((num_det, num_trk))\n for i in range(num_det):\n det_idx = detidx_high[i]\n for j in range(num_trk):\n trk_idx = trackidx[j]\n cost_matrix[i,j] = self.trackers[trk_idx].distance(dets[det_idx].y, dets[det_idx].R)\n \n matched_indices,unmatched_a,unmatched_b = linear_assignment(cost_matrix, self.a1)\n \n for i in unmatched_a:\n self.detidx_remain.append(detidx_high[i])\n for i in unmatched_b:\n trackidx_remain.append(trackidx[i])\n \n for i,j in matched_indices:\n det_idx = detidx_high[i]\n trk_idx = trackidx[j]\n self.trackers[trk_idx].update(dets[det_idx].y, dets[det_idx].R)\n self.trackers[trk_idx].death_count = 0\n self.trackers[trk_idx].detidx = det_idx\n self.trackers[trk_idx].status = TrackStatus.Confirmed\n dets[det_idx].track_id = self.trackers[trk_idx].id\n\n else:\n self.detidx_remain = detidx_high\n trackidx_remain = trackidx\n\n \n # Associate low score detections with remain tracks\n num_det = len(detidx_low)\n num_trk = len(trackidx_remain)\n if num_det*num_trk > 0:\n cost_matrix = np.zeros((num_det, num_trk))\n for i in range(num_det):\n det_idx = detidx_low[i]\n for j in range(num_trk):\n trk_idx = trackidx_remain[j]\n cost_matrix[i,j] = self.trackers[trk_idx].distance(dets[det_idx].y, dets[det_idx].R)\n \n matched_indices,unmatched_a,unmatched_b = linear_assignment(cost_matrix,self.a2)\n \n\n for i in unmatched_b:\n trk_idx = trackidx_remain[i]\n self.trackers[trk_idx].status = TrackStatus.Coasted\n # self.trackers[trk_idx].death_count += 1\n self.trackers[trk_idx].detidx = -1\n\n for i,j in matched_indices:\n det_idx = detidx_low[i]\n trk_idx = trackidx_remain[j]\n self.trackers[trk_idx].update(dets[det_idx].y, dets[det_idx].R)\n self.trackers[trk_idx].death_count = 0\n self.trackers[trk_idx].detidx = det_idx\n self.trackers[trk_idx].status = TrackStatus.Confirmed\n dets[det_idx].track_id = self.trackers[trk_idx].id\n\n\n def associate_tentative(self, dets):\n num_det = len(self.detidx_remain)\n num_trk = len(self.tentative_idx)\n\n cost_matrix = np.zeros((num_det, num_trk))\n for i in range(num_det):\n det_idx = 
self.detidx_remain[i]\n for j in range(num_trk):\n trk_idx = self.tentative_idx[j]\n cost_matrix[i,j] = self.trackers[trk_idx].distance(dets[det_idx].y, dets[det_idx].R)\n \n matched_indices,unmatched_a,unmatched_b = linear_assignment(cost_matrix,self.a1)\n\n for i,j in matched_indices:\n det_idx = self.detidx_remain[i]\n trk_idx = self.tentative_idx[j]\n self.trackers[trk_idx].update(dets[det_idx].y, dets[det_idx].R)\n self.trackers[trk_idx].death_count = 0\n self.trackers[trk_idx].birth_count += 1\n self.trackers[trk_idx].detidx = det_idx\n dets[det_idx].track_id = self.trackers[trk_idx].id\n if self.trackers[trk_idx].birth_count >= 2:\n self.trackers[trk_idx].birth_count = 0\n self.trackers[trk_idx].status = TrackStatus.Confirmed\n\n for i in unmatched_b:\n trk_idx = self.tentative_idx[i]\n # self.trackers[trk_idx].death_count += 1\n self.trackers[trk_idx].detidx = -1\n\n \n unmatched_detidx = []\n for i in unmatched_a:\n unmatched_detidx.append(self.detidx_remain[i])\n self.detidx_remain = unmatched_detidx\n\n \n \n def initial_tentative(self,dets):\n for i in self.detidx_remain: \n self.trackers.append(KalmanTracker(dets[i].y,dets[i].R,self.wx,self.wy,self.vmax, dets[i].bb_width,dets[i].bb_height,self.dt))\n self.trackers[-1].status = TrackStatus.Tentative\n self.trackers[-1].detidx = i\n self.detidx_remain = []\n\n def delete_old_trackers(self):\n i = len(self.trackers)\n for trk in reversed(self.trackers):\n trk.death_count += 1\n i -= 1 \n if ( trk.status == TrackStatus.Coasted and trk.death_count >= self.max_age) or ( trk.status == TrackStatus.Tentative and trk.death_count >= 2):\n self.trackers.pop(i)\n\n def update_status(self,dets):\n self.confirmed_idx = []\n self.coasted_idx = []\n self.tentative_idx = []\n for i in range(len(self.trackers)):\n\n detidx = self.trackers[i].detidx\n if detidx >= 0 and detidx < len(dets):\n self.trackers[i].h = dets[detidx].bb_height\n self.trackers[i].w = dets[detidx].bb_width\n\n if self.trackers[i].status == TrackStatus.Confirmed:\n self.confirmed_idx.append(i)\n elif self.trackers[i].status == TrackStatus.Coasted:\n self.coasted_idx.append(i)\n elif self.trackers[i].status == TrackStatus.Tentative:\n self.tentative_idx.append(i)"
},
{
"identifier": "Mapper",
"path": "detector/mapper.py",
"snippet": "class Mapper(object):\n def __init__(self, campara_file,dataset= \"kitti\"):\n self.A = np.zeros((3, 3))\n if dataset == \"kitti\":\n self.KiKo, self.is_ok = readKittiCalib(campara_file)\n z0 = -1.73\n else:\n self.KiKo, self.is_ok = readCamParaFile(campara_file)\n z0 = 0\n\n self.A[:, :2] = self.KiKo[:, :2]\n self.A[:, 2] = z0 * self.KiKo[:, 2] + self.KiKo[:, 3]\n self.InvA = np.linalg.inv(self.A)\n\n def uv2xy(self, uv, sigma_uv):\n if self.is_ok == False:\n return None, None\n\n uv1 = np.zeros((3, 1))\n uv1[:2,:] = uv\n uv1[2,:] = 1\n b = np.dot(self.InvA, uv1)\n gamma = 1 / b[2,:]\n C = gamma * self.InvA[:2, :2] - (gamma**2) * b[:2,:] * self.InvA[2, :2]\n xy = b[:2,:] * gamma\n sigma_xy = np.dot(np.dot(C, sigma_uv), C.T)\n return xy, sigma_xy\n \n def xy2uv(self,x,y):\n if self.is_ok == False:\n return None, None\n xy1 = np.zeros((3, 1))\n xy1[0,0] = x\n xy1[1,0] = y\n xy1[2,0] = 1\n uv1 = np.dot(self.A, xy1)\n return uv1[0,0]/uv1[2,0],uv1[1,0]/uv1[2,0]\n \n def mapto(self,box):\n uv = np.array([[box[0]+box[2]/2], [box[1]+box[3]]])\n u_err,v_err = getUVError(box)\n sigma_uv = np.identity(2)\n sigma_uv[0,0] = u_err*u_err\n sigma_uv[1,1] = v_err*v_err\n y,R = self.uv2xy(uv, sigma_uv)\n return y,R"
}
] | from ultralytics import YOLO
from tracker.ucmc import UCMCTrack
from detector.mapper import Mapper
import os,cv2
import argparse
import numpy as np | 3,527 |
# Define a Detection class with fields id, bb_left, bb_top, bb_width, bb_height, conf, det_class
class Detection:
def __init__(self, id, bb_left = 0, bb_top = 0, bb_width = 0, bb_height = 0, conf = 0, det_class = 0):
self.id = id
self.bb_left = bb_left
self.bb_top = bb_top
self.bb_width = bb_width
self.bb_height = bb_height
self.conf = conf
self.det_class = det_class
self.track_id = 0
self.y = np.zeros((2, 1))
self.R = np.eye(4)
def __str__(self):
return 'd{}, bb_box:[{},{},{},{}], conf={:.2f}, class{}, uv:[{:.0f},{:.0f}], mapped to:[{:.1f},{:.1f}]'.format(
self.id, self.bb_left, self.bb_top, self.bb_width, self.bb_height, self.conf, self.det_class,
self.bb_left+self.bb_width/2,self.bb_top+self.bb_height,self.y[0,0],self.y[1,0])
def __repr__(self):
return self.__str__()
# Detector class: gets object detection results from the YOLO detector
class Detector:
def __init__(self):
self.seq_length = 0
self.gmc = None
def load(self,cam_para_file):
self.mapper = Mapper(cam_para_file,"MOT17")
self.model = YOLO('pretrained/yolov8x.pt')
def get_dets(self, img,conf_thresh = 0,det_classes = [0]):
dets = []
        # Convert the frame from BGR to RGB (OpenCV uses BGR format)
frame = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        # Run inference with the detection model
results = self.model(frame,imgsz = 1088)
det_id = 0
for box in results[0].boxes:
conf = box.conf.cpu().numpy()[0]
bbox = box.xyxy.cpu().numpy()[0]
cls_id = box.cls.cpu().numpy()[0]
w = bbox[2] - bbox[0]
h = bbox[3] - bbox[1]
if w <= 10 and h <= 10 or cls_id not in det_classes or conf <= conf_thresh:
continue
            # Create a new Detection object
det = Detection(det_id)
det.bb_left = bbox[0]
det.bb_top = bbox[1]
det.bb_width = w
det.bb_height = h
det.conf = conf
det.det_class = cls_id
det.y,det.R = self.mapper.mapto([det.bb_left,det.bb_top,det.bb_width,det.bb_height])
det_id += 1
dets.append(det)
return dets
def main(args):
class_list = [2,5,7]
cap = cv2.VideoCapture(args.video)
    # Get the video fps
fps = cap.get(cv2.CAP_PROP_FPS)
    # Get the video width and height
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
video_out = cv2.VideoWriter('output/output.mp4', cv2.VideoWriter_fourcc(*'mp4v'), fps, (width, height))
    # Open an OpenCV window with the specified width and height
cv2.namedWindow("demo", cv2.WINDOW_NORMAL)
cv2.resizeWindow("demo", width, height)
detector = Detector()
detector.load(args.cam_para)
|
# Define a Detection class with fields id, bb_left, bb_top, bb_width, bb_height, conf, det_class
class Detection:
def __init__(self, id, bb_left = 0, bb_top = 0, bb_width = 0, bb_height = 0, conf = 0, det_class = 0):
self.id = id
self.bb_left = bb_left
self.bb_top = bb_top
self.bb_width = bb_width
self.bb_height = bb_height
self.conf = conf
self.det_class = det_class
self.track_id = 0
self.y = np.zeros((2, 1))
self.R = np.eye(4)
def __str__(self):
return 'd{}, bb_box:[{},{},{},{}], conf={:.2f}, class{}, uv:[{:.0f},{:.0f}], mapped to:[{:.1f},{:.1f}]'.format(
self.id, self.bb_left, self.bb_top, self.bb_width, self.bb_height, self.conf, self.det_class,
self.bb_left+self.bb_width/2,self.bb_top+self.bb_height,self.y[0,0],self.y[1,0])
def __repr__(self):
return self.__str__()
# Detector class: gets object detection results from the YOLO detector
class Detector:
def __init__(self):
self.seq_length = 0
self.gmc = None
def load(self,cam_para_file):
self.mapper = Mapper(cam_para_file,"MOT17")
self.model = YOLO('pretrained/yolov8x.pt')
def get_dets(self, img,conf_thresh = 0,det_classes = [0]):
dets = []
        # Convert the frame from BGR to RGB (OpenCV uses BGR format)
frame = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        # Run inference with the detection model
results = self.model(frame,imgsz = 1088)
det_id = 0
for box in results[0].boxes:
conf = box.conf.cpu().numpy()[0]
bbox = box.xyxy.cpu().numpy()[0]
cls_id = box.cls.cpu().numpy()[0]
w = bbox[2] - bbox[0]
h = bbox[3] - bbox[1]
if w <= 10 and h <= 10 or cls_id not in det_classes or conf <= conf_thresh:
continue
            # Create a new Detection object
det = Detection(det_id)
det.bb_left = bbox[0]
det.bb_top = bbox[1]
det.bb_width = w
det.bb_height = h
det.conf = conf
det.det_class = cls_id
det.y,det.R = self.mapper.mapto([det.bb_left,det.bb_top,det.bb_width,det.bb_height])
det_id += 1
dets.append(det)
return dets
def main(args):
class_list = [2,5,7]
cap = cv2.VideoCapture(args.video)
    # Get the video fps
fps = cap.get(cv2.CAP_PROP_FPS)
    # Get the video width and height
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
video_out = cv2.VideoWriter('output/output.mp4', cv2.VideoWriter_fourcc(*'mp4v'), fps, (width, height))
    # Open an OpenCV window with the specified width and height
cv2.namedWindow("demo", cv2.WINDOW_NORMAL)
cv2.resizeWindow("demo", width, height)
detector = Detector()
detector.load(args.cam_para)
| tracker = UCMCTrack(args.a, args.a, args.wx, args.wy, args.vmax, args.cdt, fps, "MOT", args.high_score,False,None) | 0 | 2023-12-12 07:29:20+00:00 | 4k |
ingra14m/Specular-Gaussians | scene/gaussian_model.py | [
{
"identifier": "inverse_sigmoid",
"path": "utils/general_utils.py",
"snippet": "def inverse_sigmoid(x):\n return torch.log(x / (1 - x))"
},
{
"identifier": "get_expon_lr_func",
"path": "utils/general_utils.py",
"snippet": "def get_expon_lr_func(\n lr_init, lr_final, lr_delay_steps=0, lr_delay_mult=1.0, max_steps=1000000\n):\n \"\"\"\n Copied from Plenoxels\n\n Continuous learning rate decay function. Adapted from JaxNeRF\n The returned rate is lr_init when step=0 and lr_final when step=max_steps, and\n is log-linearly interpolated elsewhere (equivalent to exponential decay).\n If lr_delay_steps>0 then the learning rate will be scaled by some smooth\n function of lr_delay_mult, such that the initial learning rate is\n lr_init*lr_delay_mult at the beginning of optimization but will be eased back\n to the normal learning rate when steps>lr_delay_steps.\n :param conf: config subtree 'lr' or similar\n :param max_steps: int, the number of steps during optimization.\n :return HoF which takes step as input\n \"\"\"\n\n def helper(step):\n if step < 0 or (lr_init == 0.0 and lr_final == 0.0):\n # Disable this parameter\n return 0.0\n if lr_delay_steps > 0:\n # A kind of reverse cosine decay.\n delay_rate = lr_delay_mult + (1 - lr_delay_mult) * np.sin(\n 0.5 * np.pi * np.clip(step / lr_delay_steps, 0, 1)\n )\n else:\n delay_rate = 1.0\n t = np.clip(step / max_steps, 0, 1)\n log_lerp = np.exp(np.log(lr_init) * (1 - t) + np.log(lr_final) * t)\n return delay_rate * log_lerp\n\n return helper"
},
{
"identifier": "build_rotation",
"path": "utils/general_utils.py",
"snippet": "def build_rotation(r):\n norm = torch.sqrt(r[:, 0] * r[:, 0] + r[:, 1] * r[:, 1] + r[:, 2] * r[:, 2] + r[:, 3] * r[:, 3])\n\n q = r / norm[:, None]\n\n R = torch.zeros((q.size(0), 3, 3), device='cuda')\n\n r = q[:, 0]\n x = q[:, 1]\n y = q[:, 2]\n z = q[:, 3]\n\n R[:, 0, 0] = 1 - 2 * (y * y + z * z)\n R[:, 0, 1] = 2 * (x * y - r * z)\n R[:, 0, 2] = 2 * (x * z + r * y)\n R[:, 1, 0] = 2 * (x * y + r * z)\n R[:, 1, 1] = 1 - 2 * (x * x + z * z)\n R[:, 1, 2] = 2 * (y * z - r * x)\n R[:, 2, 0] = 2 * (x * z - r * y)\n R[:, 2, 1] = 2 * (y * z + r * x)\n R[:, 2, 2] = 1 - 2 * (x * x + y * y)\n return R"
},
{
"identifier": "get_linear_noise_func",
"path": "utils/general_utils.py",
"snippet": "def get_linear_noise_func(\n lr_init, lr_final, lr_delay_steps=0, lr_delay_mult=1.0, max_steps=1000000\n):\n \"\"\"\n Copied from Plenoxels\n\n Continuous learning rate decay function. Adapted from JaxNeRF\n The returned rate is lr_init when step=0 and lr_final when step=max_steps, and\n is log-linearly interpolated elsewhere (equivalent to exponential decay).\n If lr_delay_steps>0 then the learning rate will be scaled by some smooth\n function of lr_delay_mult, such that the initial learning rate is\n lr_init*lr_delay_mult at the beginning of optimization but will be eased back\n to the normal learning rate when steps>lr_delay_steps.\n :param conf: config subtree 'lr' or similar\n :param max_steps: int, the number of steps during optimization.\n :return HoF which takes step as input\n \"\"\"\n\n def helper(step):\n if step < 0 or (lr_init == 0.0 and lr_final == 0.0):\n # Disable this parameter\n return 0.0\n if lr_delay_steps > 0:\n # A kind of reverse cosine decay.\n delay_rate = lr_delay_mult + (1 - lr_delay_mult) * np.sin(\n 0.5 * np.pi * np.clip(step / lr_delay_steps, 0, 1)\n )\n else:\n delay_rate = 1.0\n t = np.clip(step / max_steps, 0, 1)\n log_lerp = lr_init * (1 - t) + lr_final * t\n return delay_rate * log_lerp\n\n return helper"
},
{
"identifier": "mkdir_p",
"path": "utils/system_utils.py",
"snippet": "def mkdir_p(folder_path):\n # Creates a directory. equivalent to using mkdir -p on the command line\n try:\n makedirs(folder_path)\n except OSError as exc: # Python >2.5\n if exc.errno == EEXIST and path.isdir(folder_path):\n pass\n else:\n raise"
},
{
"identifier": "RGB2SH",
"path": "utils/sh_utils.py",
"snippet": "def RGB2SH(rgb):\n return (rgb - 0.5) / C0"
},
{
"identifier": "BasicPointCloud",
"path": "utils/graphics_utils.py",
"snippet": "class BasicPointCloud(NamedTuple):\n points: np.array\n colors: np.array\n normals: np.array"
},
{
"identifier": "strip_symmetric",
"path": "utils/general_utils.py",
"snippet": "def strip_symmetric(sym):\n return strip_lowerdiag(sym)"
},
{
"identifier": "build_scaling_rotation",
"path": "utils/general_utils.py",
"snippet": "def build_scaling_rotation(s, r):\n L = torch.zeros((s.shape[0], 3, 3), dtype=torch.float, device=\"cuda\")\n R = build_rotation(r)\n\n L[:, 0, 0] = s[:, 0]\n L[:, 1, 1] = s[:, 1]\n L[:, 2, 2] = s[:, 2]\n\n L = R @ L\n return L"
},
{
"identifier": "flip_align_view",
"path": "utils/general_utils.py",
"snippet": "def flip_align_view(normal, viewdir):\n # normal: (N, 3), viewdir: (N, 3)\n dotprod = torch.sum(\n normal * -viewdir, dim=-1, keepdims=True) # (N, 1)\n non_flip = dotprod >= 0 # (N, 1)\n normal_flipped = normal * torch.where(non_flip, 1, -1) # (N, 3)\n return normal_flipped, non_flip"
},
{
"identifier": "get_minimum_axis",
"path": "utils/general_utils.py",
"snippet": "def get_minimum_axis(scales, rotations):\n sorted_idx = torch.argsort(scales, descending=False, dim=-1)\n R = build_rotation(rotations)\n R_sorted = torch.gather(R, dim=1, index=sorted_idx[:, :, None].repeat(1, 1, 3)).squeeze()\n x_axis = R_sorted[:, 0, :] # normalized by defaut\n\n return x_axis"
}
] | import torch
import numpy as np
import os
from utils.general_utils import inverse_sigmoid, get_expon_lr_func, build_rotation, get_linear_noise_func
from torch import nn
from utils.system_utils import mkdir_p
from plyfile import PlyData, PlyElement
from utils.sh_utils import RGB2SH
from simple_knn._C import distCUDA2
from utils.graphics_utils import BasicPointCloud
from utils.general_utils import strip_symmetric, build_scaling_rotation, flip_align_view, get_minimum_axis | 2,512 | #
# Copyright (C) 2023, Inria
# GRAPHDECO research group, https://team.inria.fr/graphdeco
# All rights reserved.
#
# This software is free for non-commercial, research and evaluation use
# under the terms of the LICENSE.md file.
#
# For inquiries contact [email protected]
#
class GaussianModel:
def __init__(self, sh_degree: int, asg_degree: int):
def build_covariance_from_scaling_rotation(scaling, scaling_modifier, rotation):
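            # Covariance = (R S)(R S)^T from per-axis scaling and quaternion rotation; only the 6 unique symmetric entries are returned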
L = build_scaling_rotation(scaling_modifier * scaling, rotation)
actual_covariance = L @ L.transpose(1, 2)
symm = strip_symmetric(actual_covariance)
return symm
self.active_sh_degree = 0
self.max_sh_degree = sh_degree
self.max_asg_degree = asg_degree
self._xyz = torch.empty(0)
self._features_dc = torch.empty(0)
self._features_rest = torch.empty(0)
self._scaling = torch.empty(0)
self._rotation = torch.empty(0)
self._opacity = torch.empty(0)
self.max_radii2D = torch.empty(0)
self.xyz_gradient_accum = torch.empty(0)
self._features_asg = torch.empty(0)
self._normal = torch.empty(0)
self._normal2 = torch.empty(0)
self._roughness = torch.empty(0)
self._albedo = torch.empty(0)
self._metallic = torch.empty(0)
self.optimizer = None
self.scaling_activation = torch.exp
self.scaling_inverse_activation = torch.log
self.covariance_activation = build_covariance_from_scaling_rotation
self.opacity_activation = torch.sigmoid
self.inverse_opacity_activation = inverse_sigmoid
self.rotation_activation = torch.nn.functional.normalize
@property
def get_asg_features(self):
return self._features_asg
@property
def get_roughness(self):
return self._roughness
@property
def get_albedo(self):
return self._albedo
@property
def get_metallic(self):
return self._metallic
@property
def get_scaling(self):
return self.scaling_activation(self._scaling)
@property
def get_rotation(self):
return self.rotation_activation(self._rotation)
@property
def get_xyz(self):
return self._xyz
@property
def get_features(self):
features_dc = self._features_dc
features_rest = self._features_rest
return torch.cat((features_dc, features_rest), dim=1)
@property
def get_opacity(self):
return self.opacity_activation(self._opacity)
def get_covariance(self, scaling_modifier=1):
return self.covariance_activation(self.get_scaling, scaling_modifier, self._rotation)
def get_normal(self, dir_pp_normalized=None, return_delta=False):
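        # The per-Gaussian normal is approximated by the ellipsoid's shortest scaling axis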
normal_axis = self.get_minimum_axis
| #
# Copyright (C) 2023, Inria
# GRAPHDECO research group, https://team.inria.fr/graphdeco
# All rights reserved.
#
# This software is free for non-commercial, research and evaluation use
# under the terms of the LICENSE.md file.
#
# For inquiries contact [email protected]
#
class GaussianModel:
def __init__(self, sh_degree: int, asg_degree: int):
def build_covariance_from_scaling_rotation(scaling, scaling_modifier, rotation):
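            # Covariance = (R S)(R S)^T from per-axis scaling and quaternion rotation; only the 6 unique symmetric entries are returned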
L = build_scaling_rotation(scaling_modifier * scaling, rotation)
actual_covariance = L @ L.transpose(1, 2)
symm = strip_symmetric(actual_covariance)
return symm
self.active_sh_degree = 0
self.max_sh_degree = sh_degree
self.max_asg_degree = asg_degree
self._xyz = torch.empty(0)
self._features_dc = torch.empty(0)
self._features_rest = torch.empty(0)
self._scaling = torch.empty(0)
self._rotation = torch.empty(0)
self._opacity = torch.empty(0)
self.max_radii2D = torch.empty(0)
self.xyz_gradient_accum = torch.empty(0)
self._features_asg = torch.empty(0)
self._normal = torch.empty(0)
self._normal2 = torch.empty(0)
self._roughness = torch.empty(0)
self._albedo = torch.empty(0)
self._metallic = torch.empty(0)
self.optimizer = None
self.scaling_activation = torch.exp
self.scaling_inverse_activation = torch.log
self.covariance_activation = build_covariance_from_scaling_rotation
self.opacity_activation = torch.sigmoid
self.inverse_opacity_activation = inverse_sigmoid
self.rotation_activation = torch.nn.functional.normalize
@property
def get_asg_features(self):
return self._features_asg
@property
def get_roughness(self):
return self._roughness
@property
def get_albedo(self):
return self._albedo
@property
def get_metallic(self):
return self._metallic
@property
def get_scaling(self):
return self.scaling_activation(self._scaling)
@property
def get_rotation(self):
return self.rotation_activation(self._rotation)
@property
def get_xyz(self):
return self._xyz
@property
def get_features(self):
features_dc = self._features_dc
features_rest = self._features_rest
return torch.cat((features_dc, features_rest), dim=1)
@property
def get_opacity(self):
return self.opacity_activation(self._opacity)
def get_covariance(self, scaling_modifier=1):
return self.covariance_activation(self.get_scaling, scaling_modifier, self._rotation)
def get_normal(self, dir_pp_normalized=None, return_delta=False):
normal_axis = self.get_minimum_axis | normal_axis, positive = flip_align_view(normal_axis, dir_pp_normalized) | 9 | 2023-12-12 14:59:01+00:00 | 4k |
Artiprocher/DiffSynth-Studio | diffsynth/models/sd_vae_decoder.py | [
{
"identifier": "Attention",
"path": "diffsynth/models/attention.py",
"snippet": "class Attention(torch.nn.Module):\n\n def __init__(self, q_dim, num_heads, head_dim, kv_dim=None, bias_q=False, bias_kv=False, bias_out=False):\n super().__init__()\n dim_inner = head_dim * num_heads\n kv_dim = kv_dim if kv_dim is not None else q_dim\n self.num_heads = num_heads\n self.head_dim = head_dim\n\n self.to_q = torch.nn.Linear(q_dim, dim_inner, bias=bias_q)\n self.to_k = torch.nn.Linear(kv_dim, dim_inner, bias=bias_kv)\n self.to_v = torch.nn.Linear(kv_dim, dim_inner, bias=bias_kv)\n self.to_out = torch.nn.Linear(dim_inner, q_dim, bias=bias_out)\n\n def torch_forward(self, hidden_states, encoder_hidden_states=None, attn_mask=None):\n if encoder_hidden_states is None:\n encoder_hidden_states = hidden_states\n\n batch_size = encoder_hidden_states.shape[0]\n\n q = self.to_q(hidden_states)\n k = self.to_k(encoder_hidden_states)\n v = self.to_v(encoder_hidden_states)\n\n q = q.view(batch_size, -1, self.num_heads, self.head_dim).transpose(1, 2)\n k = k.view(batch_size, -1, self.num_heads, self.head_dim).transpose(1, 2)\n v = v.view(batch_size, -1, self.num_heads, self.head_dim).transpose(1, 2)\n\n hidden_states = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=attn_mask)\n hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, self.num_heads * self.head_dim)\n hidden_states = hidden_states.to(q.dtype)\n\n hidden_states = self.to_out(hidden_states)\n\n return hidden_states\n \n def xformers_forward(self, hidden_states, encoder_hidden_states=None, attn_mask=None):\n if encoder_hidden_states is None:\n encoder_hidden_states = hidden_states\n\n q = self.to_q(hidden_states)\n k = self.to_k(encoder_hidden_states)\n v = self.to_v(encoder_hidden_states)\n\n q = rearrange(q, \"b f (n d) -> (b n) f d\", n=self.num_heads)\n k = rearrange(k, \"b f (n d) -> (b n) f d\", n=self.num_heads)\n v = rearrange(v, \"b f (n d) -> (b n) f d\", n=self.num_heads)\n\n if attn_mask is not None:\n hidden_states = low_version_attention(q, k, v, attn_bias=attn_mask)\n else:\n import xformers.ops as xops\n hidden_states = xops.memory_efficient_attention(q, k, v)\n hidden_states = rearrange(hidden_states, \"(b n) f d -> b f (n d)\", n=self.num_heads)\n\n hidden_states = hidden_states.to(q.dtype)\n hidden_states = self.to_out(hidden_states)\n\n return hidden_states\n\n def forward(self, hidden_states, encoder_hidden_states=None, attn_mask=None):\n return self.torch_forward(hidden_states, encoder_hidden_states=encoder_hidden_states, attn_mask=attn_mask)"
},
{
"identifier": "ResnetBlock",
"path": "diffsynth/models/sd_unet.py",
"snippet": "class ResnetBlock(torch.nn.Module):\n def __init__(self, in_channels, out_channels, temb_channels=None, groups=32, eps=1e-5):\n super().__init__()\n self.norm1 = torch.nn.GroupNorm(num_groups=groups, num_channels=in_channels, eps=eps, affine=True)\n self.conv1 = torch.nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)\n if temb_channels is not None:\n self.time_emb_proj = torch.nn.Linear(temb_channels, out_channels)\n self.norm2 = torch.nn.GroupNorm(num_groups=groups, num_channels=out_channels, eps=eps, affine=True)\n self.conv2 = torch.nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1)\n self.nonlinearity = torch.nn.SiLU()\n self.conv_shortcut = None\n if in_channels != out_channels:\n self.conv_shortcut = torch.nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0, bias=True)\n\n def forward(self, hidden_states, time_emb, text_emb, res_stack, **kwargs):\n x = hidden_states\n x = self.norm1(x)\n x = self.nonlinearity(x)\n x = self.conv1(x)\n if time_emb is not None:\n emb = self.nonlinearity(time_emb)\n emb = self.time_emb_proj(emb)[:, :, None, None]\n x = x + emb\n x = self.norm2(x)\n x = self.nonlinearity(x)\n x = self.conv2(x)\n if self.conv_shortcut is not None:\n hidden_states = self.conv_shortcut(hidden_states)\n hidden_states = hidden_states + x\n return hidden_states, time_emb, text_emb, res_stack"
},
{
"identifier": "UpSampler",
"path": "diffsynth/models/sd_unet.py",
"snippet": "class UpSampler(torch.nn.Module):\n def __init__(self, channels):\n super().__init__()\n self.conv = torch.nn.Conv2d(channels, channels, 3, padding=1)\n\n def forward(self, hidden_states, time_emb, text_emb, res_stack, **kwargs):\n hidden_states = torch.nn.functional.interpolate(hidden_states, scale_factor=2.0, mode=\"nearest\")\n hidden_states = self.conv(hidden_states)\n return hidden_states, time_emb, text_emb, res_stack"
},
{
"identifier": "TileWorker",
"path": "diffsynth/models/tiler.py",
"snippet": "class TileWorker:\n def __init__(self):\n pass\n\n\n def mask(self, height, width, border_width):\n # Create a mask with shape (height, width).\n # The centre area is filled with 1, and the border line is filled with values in range (0, 1].\n x = torch.arange(height).repeat(width, 1).T\n y = torch.arange(width).repeat(height, 1)\n mask = torch.stack([x + 1, height - x, y + 1, width - y]).min(dim=0).values\n mask = (mask / border_width).clip(0, 1)\n return mask\n\n\n def tile(self, model_input, tile_size, tile_stride, tile_device, tile_dtype):\n # Convert a tensor (b, c, h, w) to (b, c, tile_size, tile_size, tile_num)\n batch_size, channel, _, _ = model_input.shape\n model_input = model_input.to(device=tile_device, dtype=tile_dtype)\n unfold_operator = torch.nn.Unfold(\n kernel_size=(tile_size, tile_size),\n stride=(tile_stride, tile_stride)\n )\n model_input = unfold_operator(model_input)\n model_input = model_input.view((batch_size, channel, tile_size, tile_size, -1))\n\n return model_input\n\n\n def tiled_inference(self, forward_fn, model_input, tile_batch_size, inference_device, inference_dtype, tile_device, tile_dtype):\n # Call y=forward_fn(x) for each tile\n tile_num = model_input.shape[-1]\n model_output_stack = []\n\n for tile_id in range(0, tile_num, tile_batch_size):\n\n # process input\n tile_id_ = min(tile_id + tile_batch_size, tile_num)\n x = model_input[:, :, :, :, tile_id: tile_id_]\n x = x.to(device=inference_device, dtype=inference_dtype)\n x = rearrange(x, \"b c h w n -> (n b) c h w\")\n\n # process output\n y = forward_fn(x)\n y = rearrange(y, \"(n b) c h w -> b c h w n\", n=tile_id_-tile_id)\n y = y.to(device=tile_device, dtype=tile_dtype)\n model_output_stack.append(y)\n\n model_output = torch.concat(model_output_stack, dim=-1)\n return model_output\n\n\n def io_scale(self, model_output, tile_size):\n # Determine the size modification happend in forward_fn\n # We only consider the same scale on height and width.\n io_scale = model_output.shape[2] / tile_size\n return io_scale\n \n\n def untile(self, model_output, height, width, tile_size, tile_stride, border_width, tile_device, tile_dtype):\n # The reversed function of tile\n mask = self.mask(tile_size, tile_size, border_width)\n mask = mask.to(device=tile_device, dtype=tile_dtype)\n mask = rearrange(mask, \"h w -> 1 1 h w 1\")\n model_output = model_output * mask\n\n fold_operator = torch.nn.Fold(\n output_size=(height, width),\n kernel_size=(tile_size, tile_size),\n stride=(tile_stride, tile_stride)\n )\n mask = repeat(mask[0, 0, :, :, 0], \"h w -> 1 (h w) n\", n=model_output.shape[-1])\n model_output = rearrange(model_output, \"b c h w n -> b (c h w) n\")\n model_output = fold_operator(model_output) / fold_operator(mask)\n\n return model_output\n\n\n def tiled_forward(self, forward_fn, model_input, tile_size, tile_stride, tile_batch_size=1, tile_device=\"cpu\", tile_dtype=torch.float32, border_width=None):\n # Prepare\n inference_device, inference_dtype = model_input.device, model_input.dtype\n height, width = model_input.shape[2], model_input.shape[3]\n border_width = int(tile_stride*0.5) if border_width is None else border_width\n\n # tile\n model_input = self.tile(model_input, tile_size, tile_stride, tile_device, tile_dtype)\n\n # inference\n model_output = self.tiled_inference(forward_fn, model_input, tile_batch_size, inference_device, inference_dtype, tile_device, tile_dtype)\n\n # resize\n io_scale = self.io_scale(model_output, tile_size)\n height, width = int(height*io_scale), 
int(width*io_scale)\n tile_size, tile_stride = int(tile_size*io_scale), int(tile_stride*io_scale)\n border_width = int(border_width*io_scale)\n\n # untile\n model_output = self.untile(model_output, height, width, tile_size, tile_stride, border_width, tile_device, tile_dtype)\n \n # Done!\n model_output = model_output.to(device=inference_device, dtype=inference_dtype)\n return model_output"
}
] | import torch
from .attention import Attention
from .sd_unet import ResnetBlock, UpSampler
from .tiler import TileWorker | 3,000 |
class VAEAttentionBlock(torch.nn.Module):
def __init__(self, num_attention_heads, attention_head_dim, in_channels, num_layers=1, norm_num_groups=32, eps=1e-5):
super().__init__()
inner_dim = num_attention_heads * attention_head_dim
self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=eps, affine=True)
self.transformer_blocks = torch.nn.ModuleList([
Attention(
inner_dim,
num_attention_heads,
attention_head_dim,
bias_q=True,
bias_kv=True,
bias_out=True
)
for d in range(num_layers)
])
def forward(self, hidden_states, time_emb, text_emb, res_stack):
batch, _, height, width = hidden_states.shape
residual = hidden_states
hidden_states = self.norm(hidden_states)
inner_dim = hidden_states.shape[1]
hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * width, inner_dim)
for block in self.transformer_blocks:
hidden_states = block(hidden_states)
hidden_states = hidden_states.reshape(batch, height, width, inner_dim).permute(0, 3, 1, 2).contiguous()
hidden_states = hidden_states + residual
return hidden_states, time_emb, text_emb, res_stack
class SDVAEDecoder(torch.nn.Module):
def __init__(self):
super().__init__()
self.scaling_factor = 0.18215
self.post_quant_conv = torch.nn.Conv2d(4, 4, kernel_size=1)
self.conv_in = torch.nn.Conv2d(4, 512, kernel_size=3, padding=1)
self.blocks = torch.nn.ModuleList([
# UNetMidBlock2D
|
class VAEAttentionBlock(torch.nn.Module):
def __init__(self, num_attention_heads, attention_head_dim, in_channels, num_layers=1, norm_num_groups=32, eps=1e-5):
super().__init__()
inner_dim = num_attention_heads * attention_head_dim
self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=eps, affine=True)
self.transformer_blocks = torch.nn.ModuleList([
Attention(
inner_dim,
num_attention_heads,
attention_head_dim,
bias_q=True,
bias_kv=True,
bias_out=True
)
for d in range(num_layers)
])
def forward(self, hidden_states, time_emb, text_emb, res_stack):
batch, _, height, width = hidden_states.shape
residual = hidden_states
hidden_states = self.norm(hidden_states)
inner_dim = hidden_states.shape[1]
hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * width, inner_dim)
for block in self.transformer_blocks:
hidden_states = block(hidden_states)
hidden_states = hidden_states.reshape(batch, height, width, inner_dim).permute(0, 3, 1, 2).contiguous()
hidden_states = hidden_states + residual
return hidden_states, time_emb, text_emb, res_stack
class SDVAEDecoder(torch.nn.Module):
def __init__(self):
super().__init__()
self.scaling_factor = 0.18215
self.post_quant_conv = torch.nn.Conv2d(4, 4, kernel_size=1)
self.conv_in = torch.nn.Conv2d(4, 512, kernel_size=3, padding=1)
self.blocks = torch.nn.ModuleList([
# UNetMidBlock2D | ResnetBlock(512, 512, eps=1e-6), | 1 | 2023-12-07 16:52:15+00:00 | 4k |
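The TileWorker snippet quoted in this record implements sliding-window ("tiled") inference: the input is unfolded into overlapping tiles, the wrapped forward is run tile by tile, and the outputs are folded back together under a feathered border mask to hide seams. A minimal usage sketch, assuming the class as quoted above; the import path follows this record's "path" field, and `decode_fn`, the tensor shapes, and the tile sizes are illustrative only:

```python
import torch
from diffsynth.models.tiler import TileWorker  # assumed import path, per the record's "path" field

def decode_fn(x):
    # stand-in for a VAE decoder forward: 8x spatial upsample, same channel count (toy only)
    return torch.nn.functional.interpolate(x, scale_factor=8, mode="nearest")

latents = torch.randn(1, 4, 128, 128)
worker = TileWorker()
# tile_size/tile_stride define the sliding window over the latent grid;
# border_width defaults to half the stride and feathers the overlap between tiles
decoded = worker.tiled_forward(decode_fn, latents, tile_size=64, tile_stride=32, tile_batch_size=1)
print(decoded.shape)  # (1, 4, 1024, 1024) for this toy decode_fn
```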
vikhyat/mixtral-inference | main.py | [
{
"identifier": "RotatingBufferCache",
"path": "mixtral/cache.py",
"snippet": "class RotatingBufferCache:\n \"\"\"\n This is an example that implements a less naive rotating buffer cache, allowing for variable length sequences.\n Allocated cache is rectangular which is wasteful (see PagedAttention for better mechanisms)\n \"\"\"\n def __init__(self, n_layers: int, max_batch_size: int, sliding_window: int, n_kv_heads: int, head_dim: int):\n\n self.sliding_window = sliding_window\n self.n_kv_heads = n_kv_heads\n self.head_dim = head_dim\n\n self.cache_k = torch.empty((\n n_layers,\n max_batch_size,\n sliding_window,\n n_kv_heads,\n head_dim\n ))\n self.cache_v = torch.empty((\n n_layers,\n max_batch_size,\n sliding_window,\n n_kv_heads,\n head_dim\n ))\n # holds the valid length for each batch element in the cache\n self.kv_seqlens = None\n\n def get_view(self, layer_id: int, metadata: RotatingCacheInputMetadata) -> CacheView:\n return CacheView(self.cache_k[layer_id], self.cache_v[layer_id], metadata, self.kv_seqlens)\n\n def reset(self):\n self.kv_seqlens = None\n\n def init_kvseqlens(self, batch_size: int):\n self.kv_seqlens = torch.zeros((batch_size,), device=self.device, dtype=torch.long)\n\n @property\n def device(self):\n return self.cache_k.device\n\n def to(self, device: torch.device, dtype: torch.dtype):\n self.cache_k = self.cache_k.to(device=device, dtype=dtype)\n self.cache_v = self.cache_v.to(device=device, dtype=dtype)\n\n return self\n\n def update_seqlens(self, seqlens: List[int]):\n self.kv_seqlens += torch.tensor(seqlens, device=self.device, dtype=torch.long)\n\n def get_input_metadata(self, seqlens: List[int]) -> RotatingCacheInputMetadata:\n \"\"\"\n inpput = seqlens [5,7,2] // seqpos [0, 1, 3] // sliding_window 3\n --> only cache last 3 tokens in each sequence\n - to_cache_mask = [0 0 1 1 1 | 0 0 0 0 1 1 1 | 1 1]\n - cached_elements = [3 | 3 | 2]\n --> absolute positions are used for rope\n - positions = [0 1 2 3 4 | 1 2 3 4 5 6 7 | 3 4]\n --> cache positions are positions cache_masked, modulo sliding_window + batch_idx * sliding_window\n - cache_positions = [2 0 1 | 5 3 4 | 6 7]\n \"\"\"\n if self.kv_seqlens is None:\n self.init_kvseqlens(len(seqlens))\n assert len(seqlens) == len(self.kv_seqlens), f\"Batch size is {len(self.kv_seqlens)}, got {len(seqlens)}, did you forget to reset cache?\"\n seqpos = self.kv_seqlens.tolist()\n\n assert len(seqlens) > 0, seqlens\n masks = [\n [x >= seqlen - self.sliding_window for x in range(seqlen)]\n for seqlen in seqlens\n ]\n to_cache_mask = torch.tensor(sum(masks, []), device=self.device, dtype=torch.bool)\n cached_elements = torch.tensor([sum(mask) for mask in masks], device=self.device, dtype=torch.long)\n positions = torch.cat([torch.arange(pos, pos + seqlen) for pos, seqlen in zip(seqpos, seqlens)]).to(device=self.device, dtype=torch.long)\n batch_idx = torch.tensor(sum([[i]*seqlen for i, seqlen in enumerate(seqlens)], []), device=self.device, dtype=torch.long)\n cache_positions = positions % self.sliding_window + batch_idx * self.sliding_window\n\n first_prefill = seqpos[0] == 0\n subsequent_prefill = any(seqlen > 1 for seqlen in seqlens)\n if first_prefill:\n assert all([pos == 0 for pos in seqpos]), (seqpos)\n mask = BlockDiagonalCausalMask.from_seqlens(seqlens).make_local_attention(self.sliding_window)\n elif subsequent_prefill:\n mask = BlockDiagonalMask.from_seqlens(\n q_seqlen=seqlens,\n kv_seqlen=[s + cached_s.clamp(max=self.sliding_window).item() for (s, cached_s) in zip(seqlens, self.kv_seqlens)]\n ).make_local_attention_from_bottomright(self.sliding_window)\n else:\n mask = 
BlockDiagonalCausalWithOffsetPaddedKeysMask.from_seqlens(\n q_seqlen=seqlens,\n kv_padding=self.sliding_window,\n kv_seqlen=(self.kv_seqlens + cached_elements).clamp(max=self.sliding_window).tolist()\n )\n\n return RotatingCacheInputMetadata(\n positions=positions,\n to_cache_mask=to_cache_mask,\n cached_elements=cached_elements,\n cache_positions=cache_positions[to_cache_mask],\n prefill=first_prefill or subsequent_prefill,\n mask=mask,\n seqlens=seqlens,\n )"
},
{
"identifier": "Transformer",
"path": "mixtral/model.py",
"snippet": "class Transformer(nn.Module):\n def __init__(self, args: ModelArgs, devices: List[str], dtype=torch.float16):\n super().__init__()\n self.args = args\n self.vocab_size = args.vocab_size\n self.n_layers = args.n_layers\n assert self.vocab_size > 0\n\n self.tok_embeddings = nn.Embedding(args.vocab_size, args.dim, device='meta', dtype=dtype)\n self.tok_embeddings.to_empty(device=devices[0])\n\n self.layers = torch.nn.ModuleList(\n [\n TransformerBlock(args=args, device=devices[(i * len(devices)) // args.n_layers], dtype=dtype)\n for i in range(args.n_layers)\n ]\n )\n\n self.norm = RMSNorm(args.dim, eps=args.norm_eps).to(devices[0], dtype=dtype)\n\n self.output = nn.Linear(\n args.dim,\n args.vocab_size,\n bias=False,\n device='meta',\n dtype=dtype\n )\n self.output.to_empty(device=devices[0])\n\n self.freqs_cis = precompute_freqs_cis(self.args.head_dim, 128_000, 1e6).to(devices[0])\n\n @property\n def dtype(self) -> torch.dtype:\n return self.tok_embeddings.weight.dtype\n\n @property\n def device(self) -> torch.device:\n return self.tok_embeddings.weight.device\n\n def forward_partial(\n self,\n input_ids: torch.Tensor,\n seqlens: List[int],\n cache: Optional[RotatingBufferCache]=None,\n ) -> torch.Tensor:\n assert len(seqlens) <= self.args.max_batch_size, f\"Max batch size is {self.args.max_batch_size}, got batch size of {len(seqlens)}\"\n assert sum(seqlens) == input_ids.shape[0], (sum(seqlens), input_ids.shape[0])\n if cache is not None:\n input_metadata = cache.get_input_metadata(seqlens)\n else:\n input_metadata = SimpleInputMetadata.from_seqlens(seqlens, self.device)\n h = self.tok_embeddings(input_ids)\n freqs_cis = self.freqs_cis[input_metadata.positions]\n\n for layer_id, layer in enumerate(self.layers):\n cache_view = None if cache is None else cache.get_view(layer_id, input_metadata)\n h = layer(h, freqs_cis, cache_view)\n \n h = h.to(self.norm.weight.device)\n \n if cache is not None:\n cache.update_seqlens(seqlens)\n\n return self.norm(h)\n\n def forward(\n self,\n input_ids: torch.Tensor,\n seqlens: List[int],\n cache: Optional[RotatingBufferCache]=None,\n ) -> torch.Tensor:\n return self.output(self.forward_partial(\n input_ids, seqlens, cache=cache\n )).float()\n\n @staticmethod\n def from_folder(folder: Path, max_batch_size: int = 1, devices=['cuda'], dtype=torch.float16) -> \"Transformer\":\n with open(folder / 'params.json', 'r') as f:\n model_args = ModelArgs(**json.loads(f.read()))\n model_args.max_batch_size = max_batch_size\n model = Transformer(model_args, devices, dtype=dtype)\n loaded = torch.load(folder / 'consolidated.00.pth')\n model.load_state_dict(loaded)\n return model"
},
{
"identifier": "Tokenizer",
"path": "mixtral/tokenizer.py",
"snippet": "class Tokenizer:\n def __init__(self, model_path: str):\n assert Path(model_path).exists(), model_path\n self._model = SentencePieceProcessor(model_file=model_path)\n assert self._model.vocab_size() == self._model.get_piece_size()\n\n @property\n def n_words(self) -> int:\n return self._model.vocab_size()\n\n @property\n def bos_id(self) -> int:\n return self._model.bos_id()\n\n @property\n def eos_id(self) -> int:\n return self._model.eos_id()\n\n @property\n def pad_id(self) -> int:\n return self._model.pad_id()\n\n def encode(self, s: str, bos: bool = True) -> List[int]:\n assert isinstance(s, str)\n t = self._model.encode(s)\n if bos:\n t = [self.bos_id, *t]\n return t\n\n def decode(self, t: List[int]) -> str:\n return self._model.decode(t)"
}
] | from mixtral.cache import RotatingBufferCache
from typing import List
from pathlib import Path
from mixtral.model import Transformer
from mixtral.tokenizer import Tokenizer
import torch | 2,770 |
def sample_top_p(probs: torch.Tensor, p: float):
assert 0 <= p <= 1
probs_sort, probs_idx = torch.sort(probs, dim=-1, descending=True)
probs_sum = torch.cumsum(probs_sort, dim=-1)
mask = probs_sum - probs_sort > p
probs_sort[mask] = 0.0
probs_sort.div_(probs_sort.sum(dim=-1, keepdim=True))
next_token = torch.multinomial(probs_sort, num_samples=1)
return torch.gather(probs_idx, -1, next_token)
def sample(logits: torch.Tensor, temperature: float, top_p: float):
if temperature > 0:
probs = torch.softmax(logits / temperature, dim=-1)
next_token = sample_top_p(probs, top_p)
else:
next_token = torch.argmax(logits, dim=-1).unsqueeze(0)
return next_token.reshape(-1)
@torch.inference_mode()
def generate(prompts: List[str], model: Transformer, tokenizer: Tokenizer, *, max_tokens: int, chunk_size: int = None, temperature: float = 0.7, stdout=False):
model = model.eval()
B, V = len(prompts), model.args.vocab_size
# Tokenize
encoded_prompts = [tokenizer.encode(prompt, bos=True) for prompt in prompts]
seqlens = [len(x) for x in encoded_prompts]
# Cache
cache_window = max(seqlens) + max_tokens
|
def sample_top_p(probs: torch.Tensor, p: float):
assert 0 <= p <= 1
probs_sort, probs_idx = torch.sort(probs, dim=-1, descending=True)
probs_sum = torch.cumsum(probs_sort, dim=-1)
mask = probs_sum - probs_sort > p
probs_sort[mask] = 0.0
probs_sort.div_(probs_sort.sum(dim=-1, keepdim=True))
next_token = torch.multinomial(probs_sort, num_samples=1)
return torch.gather(probs_idx, -1, next_token)
def sample(logits: torch.Tensor, temperature: float, top_p: float):
if temperature > 0:
probs = torch.softmax(logits / temperature, dim=-1)
next_token = sample_top_p(probs, top_p)
else:
next_token = torch.argmax(logits, dim=-1).unsqueeze(0)
return next_token.reshape(-1)
@torch.inference_mode()
def generate(prompts: List[str], model: Transformer, tokenizer: Tokenizer, *, max_tokens: int, chunk_size: int = None, temperature: float = 0.7, stdout=False):
model = model.eval()
B, V = len(prompts), model.args.vocab_size
# Tokenize
encoded_prompts = [tokenizer.encode(prompt, bos=True) for prompt in prompts]
seqlens = [len(x) for x in encoded_prompts]
# Cache
cache_window = max(seqlens) + max_tokens | cache = RotatingBufferCache(model.args.n_layers, model.args.max_batch_size, cache_window, model.args.n_kv_heads, model.args.head_dim) | 0 | 2023-12-08 22:48:32+00:00 | 4k |
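The cropped code above samples tokens with nucleus (top-p) filtering: probabilities are sorted, everything outside the smallest set whose cumulative mass reaches `p` is zeroed, the remainder is renormalized, and one token is drawn per row. A self-contained sketch of the same rule (the logits and sizes below are made up for illustration):

```python
import torch

def sample_top_p(probs: torch.Tensor, p: float) -> torch.Tensor:
    probs_sort, probs_idx = torch.sort(probs, dim=-1, descending=True)
    probs_sum = torch.cumsum(probs_sort, dim=-1)
    # drop every token once the cumulative mass *excluding* the current token exceeds p
    probs_sort[probs_sum - probs_sort > p] = 0.0
    probs_sort.div_(probs_sort.sum(dim=-1, keepdim=True))
    next_token = torch.multinomial(probs_sort, num_samples=1)
    return torch.gather(probs_idx, -1, next_token)

logits = torch.randn(2, 32000)                 # (batch, vocab) dummy logits
probs = torch.softmax(logits / 0.7, dim=-1)    # temperature 0.7, matching the default above
print(sample_top_p(probs, p=0.8).shape)        # (2, 1) sampled token ids
```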
u2seg/U2Seg | u2seg/UnsupervisedSelectiveLabeling/shared/utils/nn_utils_imagenet.py | [
{
"identifier": "cfg",
"path": "u2seg/UnsupervisedSelectiveLabeling/shared/utils/config_utils.py",
"snippet": "class CustomFormatter(logging.Formatter):\n FORMATS = {\n logging.DEBUG: format,\n logging.INFO: grey + format + reset,\n logging.WARNING: yellow + format + reset,\n logging.ERROR: red + format + reset,\n logging.CRITICAL: bold_red + format + reset\n }\n def format(self, record):\ndef init(default_config_file):\ndef is_notebook():\ndef init_cfg(default_config_file=None):"
},
{
"identifier": "get_transform",
"path": "u2seg/UnsupervisedSelectiveLabeling/shared/utils/nn_utils.py",
"snippet": "class ModelEMA(object):\n def __init__(self, model, decay):\n def update(self, model):\ndef seed_everything(seed):\ndef save_npy(filename, content):\ndef load_npy(filename, allow_pickle=False):\ndef get_transform(transform_name):\ndef single_model(state_dict):\ndef get_feats_list(model, train_memory_loader, CLIP=False, feat_dim=None, recompute=False, dataparallel=False, force_no_extra_kwargs=False, **kwargs):\ndef kNN(x_train, x_test, K=20):\ndef partitioned_kNN(feats_list, K=20, recompute=False, partitions_size=130000, verify=False):\n def get_sampled_data(ind):\n def dist(a, b):\ndef KMeans(x, seed, K=10, Niter=10, init_inds=None, verbose=True, force_no_lazy_tensor=False):\n def _LazyTensor(x): return x\ndef run_kMeans(feats_list, num_centroids, final_sample_num, Niter, recompute=False, use_cuda=True, seed=None, force_no_lazy_tensor=False, save=True):\ndef get_selection_without_reg(cluster_labels, neighbors_dist, centroid_ordering, final_sample_num):\ndef get_selection_without_reg_outliers(data, neighbors_dist, cluster_labels, num_centroids, iters=1, final_sample_num=None, w=1, momentum=0.5, horizon_num=256, alpha=1, exclude_same_cluster=False, verbose=False):\ndef get_selection(selection_fn, *args, seed=None, recompute=True, save=True, pass_seed=False, **kwargs):\n N, D = x.shape # Number of samples, dimension of the ambient space"
}
] | import os
import random
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.datasets as datasets
import torchvision.transforms as transforms
import pandas as pd
import clip
from glob import glob
from PIL import Image, ImageFilter
from pykeops.torch import LazyTensor
from tqdm import tqdm
from .config_utils import cfg
from .nn_utils import get_transform, normalization_kwargs_dict | 3,125 |
# Credit: https://github.com/amazon-research/exponential-moving-average-normalization
def build_hidden_head(num_mlp, dim_mlp, dim, normed=False):
modules = []
for _ in range(1, num_mlp):
modules.append(nn.Linear(dim_mlp, dim_mlp))
modules.append(nn.ReLU())
modules.append(nn.Linear(dim_mlp, dim))
if normed:
modules.append(L2NormLayer())
return nn.Sequential(*modules)
# Credit: https://github.com/wvangansbeke/Unsupervised-Classification/blob/master/data/imagenet.py
class ImageNet(datasets.ImageFolder):
def __init__(self, root, split='train', transform=None):
super(ImageNet, self).__init__(root=os.path.join(root, split),
transform=None)
self.transform = transform
self.split = split
self.resize = transforms.Resize(256)
def __len__(self):
return len(self.imgs)
def __getitem__(self, index):
path, target = self.imgs[index]
with open(path, 'rb') as f:
img = Image.open(f).convert('RGB')
im_size = img.size
img = self.resize(img)
if self.transform is not None:
img = self.transform(img)
out = {'image': img, 'target': target, 'meta': {
'im_size': im_size, 'index': index}}
return out
def get_image(self, index):
path, target = self.imgs[index]
with open(path, 'rb') as f:
img = Image.open(f).convert('RGB')
img = self.resize(img)
return img
# class ImageNetSubset(torch.utils.data.Dataset):
# def __init__(self, subset_file, root, split='train',
# transform=None, return_dict=True):
# super(ImageNetSubset, self).__init__()
# self.root = os.path.join(root, split)
# self.transform = transform
# self.split = split
# # Read the subset of classes to include (sorted)
# with open(subset_file, 'r') as f:
# result = f.read().splitlines()
# subdirs, class_names = [], []
# for line in result:
# subdir, class_name = line.split(' ', 1)
# subdirs.append(subdir)
# class_names.append(class_name)
# # Gather the files (sorted)
# imgs = []
# targets = []
# for i, subdir in enumerate(subdirs):
# # subdir_path = os.path.join(self.root, subdir)
# files = sorted(glob(os.path.join(self.root, subdir, '*.JPEG')))
# for f in files:
# imgs.append((f, i))
# targets.append(i)
# self.imgs = imgs
# self.classes = class_names
# self.targets = targets
# self.resize = transforms.Resize(256)
# self.return_dict = return_dict
# def get_image(self, index):
# path, target = self.imgs[index]
# with open(path, 'rb') as f:
# img = Image.open(f).convert('RGB')
# img = self.resize(img)
# return img
# def __len__(self):
# return len(self.imgs)
# def __getitem__(self, index):
# path, target = self.imgs[index]
# with open(path, 'rb') as f:
# img = Image.open(f).convert('RGB')
# im_size = img.size
# if self.return_dict:
# img = self.resize(img)
# class_name = self.classes[target]
# if self.transform is not None:
# img = self.transform(img)
# if self.return_dict:
# out = {'image': img, 'target': target, 'meta': {
# 'im_size': im_size, 'index': index, 'class_name': class_name}}
# return out
# return img, target
def train_dataset_imagenet(transform_name, add_memory_bank_dataset=False):
# Uses MoCov2 aug: https://github.com/facebookresearch/moco/blob/main/main_moco.py
if transform_name == "imagenet" or transform_name == "imagenet100":
| # Datasets
def get_clip_model(name):
model, preprocess = clip.load(name)
model.eval()
model.cuda()
return model, preprocess
def get_img_path_from_full_path(full_path):
img_path = full_path[0]
sep = "/"
img_path = sep.join(img_path.split(sep)[-2:])
return img_path
def gen_csv_data(save_filename, selected_inds, train_memory_dataset, gen_rem=False):
if isinstance(selected_inds, torch.Tensor):
print("***Please use numpy array as selected_inds***")
# gen_rem: generate remaining data by excluding the selected data
if gen_rem:
rem_set = set(range(len(train_memory_dataset.imgs)))
rem_set = rem_set - set(list(selected_inds))
selected_inds = np.array(list(rem_set))
selected_inds = np.sort(selected_inds)
print(len(selected_inds))
d = []
for ind in selected_inds:
d.append([ind, get_img_path_from_full_path(train_memory_dataset.imgs[ind])])
filename = "{}.csv".format(save_filename)
assert not os.path.exists(filename), "path {} exists".format(filename)
df = pd.DataFrame(data=d, columns=["Index", "ImageID"])
df.to_csv(filename, index=False)
def save_data(gen_mode, stratified_density_selected_data_output, ours_filename_part, feats_list, final_sample_num, chosen_percent, train_memory_dataset):
print("Generation mode:", gen_mode)
if gen_mode == "ours":
selected_inds = stratified_density_selected_data_output
filename_part = ours_filename_part
elif gen_mode == "random":
np.random.seed(0)
selected_inds = np.random.choice(feats_list.size(0), size=(final_sample_num,), replace=False)
filename_part = "random"
else:
raise ValueError("gen_mode: " + gen_mode)
for gen_rem in [False, True]:
if gen_rem:
filename = "train_{}p_gen_{}_index".format(100 - chosen_percent, filename_part)
else:
filename = "train_{}p_gen_{}_index".format(chosen_percent, filename_part)
filename = os.path.join(cfg.RUN_DIR, filename)
print("Filename:", filename)
gen_csv_data(filename, selected_inds, train_memory_dataset, gen_rem=gen_rem)
def get_sample_info_imagenet1(final_sample_num):
if final_sample_num == 4:
# 0.3 percent
num_centroids = 4
chosen_percent = 0.3
else:
raise ValueError(final_sample_num)
return num_centroids, chosen_percent
def get_sample_info_imagenet100(final_sample_num):
if final_sample_num == 400:
# 0.3 percent
num_centroids = 400
chosen_percent = 0.3
elif final_sample_num == 4000:
# 0.3 percent
num_centroids = 4000
chosen_percent = 3
elif final_sample_num == 8000:
# 0.3 percent
num_centroids = 8000
chosen_percent = 6
elif final_sample_num == 16000:
# 0.3 percent
num_centroids = 16000
chosen_percent = 12
elif final_sample_num == 32000:
# 0.3 percent
num_centroids = 32000
chosen_percent = 24
elif final_sample_num == 64000:
# 0.3 percent
num_centroids = 64000
chosen_percent = 48
elif final_sample_num == 100:
# 0.3 percent
num_centroids = 100
chosen_percent = 0.075
elif final_sample_num == 3200:
# 0.3 percent
num_centroids = 3200
chosen_percent = 2.4
elif final_sample_num == 200:
# 0.3 percent
num_centroids = 200
chosen_percent = 0.15
elif final_sample_num == 800:
# 0.3 percent
num_centroids = 800
chosen_percent = 0.6
elif final_sample_num == 1200:
# 0.3 percent
num_centroids = 1200
chosen_percent = 0.9
elif final_sample_num == 1600:
# 0.3 percent
num_centroids = 1600
chosen_percent = 1.2
elif final_sample_num == 300:
# 0.3 percent
num_centroids = 300
chosen_percent = 0.225
elif final_sample_num == 200:
# 0.3 percent
num_centroids = 200
chosen_percent = 0.15
elif final_sample_num == 100:
# 0.3 percent
num_centroids = 400
chosen_percent = 0.075
else:
raise ValueError(final_sample_num)
return num_centroids, chosen_percent
def get_sample_info_coco(final_sample_num):
if final_sample_num == 80:
# 0.3 percent
num_centroids = 80
chosen_percent = 0.016
elif final_sample_num == 300:
# 0.3 percent
num_centroids = 300
chosen_percent = 0.06
elif final_sample_num == 800:
# 0.3 percent
num_centroids = 800
chosen_percent = 0.16
elif final_sample_num == 2911:
# 0.3 percent
num_centroids = 2911
chosen_percent = 0.582
else:
raise ValueError(final_sample_num)
return num_centroids, chosen_percent
def get_sample_info_imagenet(final_sample_num):
if final_sample_num == 12820:
# 1 percent
num_centroids = 12900
chosen_percent = 1
if final_sample_num == 1282:
# 0.1 percent
num_centroids = 1290
chosen_percent = 0.1
elif final_sample_num == 2911:
# 0.2 percent
num_centroids = 2911
chosen_percent = 0.2
elif final_sample_num == 290:
# 0.2 percent
num_centroids = 290
chosen_percent = 0.02
elif final_sample_num == 300:
# 0.2 percent
num_centroids = 300
chosen_percent = 0.021
elif final_sample_num == 800:
# 0.2 percent
num_centroids = 800
chosen_percent = 0.06
elif final_sample_num == 80:
# 0.2 percent
num_centroids = 80
chosen_percent = 0.006
elif final_sample_num == 1600:
# 0.2 percent
num_centroids = 1600
chosen_percent = 0.12
else:
raise ValueError(final_sample_num)
return num_centroids, chosen_percent
def get_selection_with_reg_imagenet_outliers(data, neighbors_dist, cluster_labels, num_centroids,
iters=1, final_sample_num=None, w=1, momentum=0.5, horizon_num=256, alpha=1, exclude_same_cluster=False, verbose=False):
# Intuition: horizon_num = dimension * 2
cluster_labels_cuda = cluster_labels.cuda()
neighbors_dist_cuda = neighbors_dist.cuda()
selection_regularizer = torch.zeros_like(neighbors_dist_cuda)
data = data.cuda()
N, D = data.shape # Number of samples, dimension of the ambient space
data_expanded_lazy = LazyTensor(data.view(N, 1, D))
for iter_ind in tqdm(range(iters)):
selected_inds = []
if verbose:
print("Computing selected ids")
print("selection_regularizer", selection_regularizer)
for iter_ind in tqdm(range(iters)):
selected_inds = []
if verbose:
print("Computing selected ids")
print("selection_regularizer", selection_regularizer)
for cls_ind in range(num_centroids):
if len(selected_inds) == final_sample_num:
break
match_arr = cluster_labels_cuda == cls_ind
match = torch.where(match_arr)[0]
if len(match) == 0:
continue
scores = 1 / neighbors_dist_cuda[match_arr] - w * selection_regularizer[match_arr]
if iter_ind != 0 and cls_ind == 0 and verbose:
print("original score:", (1 / neighbors_dist_cuda[match_arr]).mean(),
"regularizer adjustment:", (w * selection_regularizer[match_arr]).mean())
max_dist_ind = scores.argmin() # select the sample that is farthest away (lowest score)
selected_inds.append(match[max_dist_ind].item())
selected_inds = torch.tensor(selected_inds)
if iter_ind < iters - 1: # Not the last iteration
if verbose:
print("Updating selection regularizer")
selected_data = data[selected_inds]
if not exclude_same_cluster:
# This is square distances: (N_full, N_selected)
# data: (N_full, 1, dim)
# selected_data: (1, N_selected, dim)
new_selection_regularizer = (
(data_expanded_lazy - selected_data[None, :, :]) ** 2).sum(dim=-1)
new_selection_regularizer = new_selection_regularizer.Kmin(
horizon_num, dim=1)
if verbose:
print("new_selection_regularizer shape:",
new_selection_regularizer.shape)
print("Max:", new_selection_regularizer.max())
print("Mean:", new_selection_regularizer.mean())
# Distance to oneself should be ignored
new_selection_regularizer[new_selection_regularizer == 0] = 1e10
else:
# This is square distances: (N_full, N_selected)
# data: (N_full, 1, dim)
# selected_data: (1, N_selected, dim)
# We take the horizon_num samples with the min distance to the other centroids
new_selection_regularizer = (
(data_expanded_lazy - selected_data[None, :, :]) ** 2).sum(dim=-1)
# indices within selected data
new_selection_regularizer, selected_data_ind = new_selection_regularizer.Kmin_argKmin(horizon_num,
dim=1, backend="GPU")
if verbose:
print("new_selection_regularizer shape:",
new_selection_regularizer.shape)
print("Max:", new_selection_regularizer.max())
print("Mean:", new_selection_regularizer.mean())
# Distance to the instance in the same cluster should be ignored (including oneself if the sample is currently selected)
# **NOTE**: if some clusters are skipped, select_data_ind may not match cluster_labels
# This does not happen in 0.2% case, but could happen in 1% case.
same_cluster_selected_data_ind_mask = (
selected_data_ind == cluster_labels_cuda.view((-1, 1))).float()
# It's true that if cluster is not in the list, some instances will have one more regularizer item, but this is a small contribution.
new_selection_regularizer = (1 - same_cluster_selected_data_ind_mask) * \
new_selection_regularizer + same_cluster_selected_data_ind_mask * 1e10
assert not torch.any(new_selection_regularizer == 0), "{}".format(
torch.where(new_selection_regularizer == 0))
if verbose:
print("Min:", new_selection_regularizer.min())
# selection_regularizer: N_full
if alpha != 1:
new_selection_regularizer = (
1 / new_selection_regularizer ** alpha).sum(dim=1)
else:
new_selection_regularizer = (
1 / new_selection_regularizer).sum(dim=1)
selection_regularizer = selection_regularizer * \
momentum + new_selection_regularizer * (1 - momentum)
del cluster_labels_cuda
del neighbors_dist_cuda
del data
# import pdb
# pdb.set_trace()
print("selected_inds:"+str(len(selected_inds)))
print("final_sample_num:"+str(final_sample_num))
# assert len(selected_inds) == final_sample_num
return selected_inds.numpy()
def get_selection_with_reg_imagenet(data, neighbors_dist, cluster_labels, num_centroids,
iters=1, final_sample_num=None, w=1, momentum=0.5, horizon_num=256, alpha=1, exclude_same_cluster=False, verbose=False):
# Intuition: horizon_num = dimension * 2
cluster_labels_cuda = cluster_labels.cuda()
neighbors_dist_cuda = neighbors_dist.cuda()
selection_regularizer = torch.zeros_like(neighbors_dist_cuda)
data = data.cuda()
N, D = data.shape # Number of samples, dimension of the ambient space
data_expanded_lazy = LazyTensor(data.view(N, 1, D))
for iter_ind in tqdm(range(iters)):
selected_inds = []
if verbose:
print("Computing selected ids")
print("selection_regularizer", selection_regularizer)
for cls_ind in range(num_centroids):
if len(selected_inds) == final_sample_num:
break
match_arr = cluster_labels_cuda == cls_ind
match = torch.where(match_arr)[0]
if len(match) == 0:
continue
# scores in the selection process
# No prior:
# scores = 1 / neighbors_dist[match_arr]
scores = 1 / \
neighbors_dist_cuda[match_arr] - w * \
selection_regularizer[match_arr]
if iter_ind != 0 and cls_ind == 0 and verbose:
print("original score:", (1 / neighbors_dist_cuda[match_arr]).mean(),
"regularizer adjustment:", (w * selection_regularizer[match_arr]).mean())
min_dist_ind = scores.argmax()
# min_dist_ind = scores.argmax()
selected_inds.append(match[min_dist_ind].item())
selected_inds = torch.tensor(selected_inds)
if iter_ind < iters - 1: # Not the last iteration
if verbose:
print("Updating selection regularizer")
selected_data = data[selected_inds]
if not exclude_same_cluster:
# This is square distances: (N_full, N_selected)
# data: (N_full, 1, dim)
# selected_data: (1, N_selected, dim)
new_selection_regularizer = (
(data_expanded_lazy - selected_data[None, :, :]) ** 2).sum(dim=-1)
new_selection_regularizer = new_selection_regularizer.Kmin(
horizon_num, dim=1)
if verbose:
print("new_selection_regularizer shape:",
new_selection_regularizer.shape)
print("Max:", new_selection_regularizer.max())
print("Mean:", new_selection_regularizer.mean())
# Distance to oneself should be ignored
new_selection_regularizer[new_selection_regularizer == 0] = 1e10
else:
# This is square distances: (N_full, N_selected)
# data: (N_full, 1, dim)
# selected_data: (1, N_selected, dim)
# We take the horizon_num samples with the min distance to the other centroids
new_selection_regularizer = (
(data_expanded_lazy - selected_data[None, :, :]) ** 2).sum(dim=-1)
# indices within selected data
new_selection_regularizer, selected_data_ind = new_selection_regularizer.Kmin_argKmin(horizon_num,
dim=1, backend="GPU")
if verbose:
print("new_selection_regularizer shape:",
new_selection_regularizer.shape)
print("Max:", new_selection_regularizer.max())
print("Mean:", new_selection_regularizer.mean())
# Distance to the instance in the same cluster should be ignored (including oneself if the sample is currently selected)
# **NOTE**: if some clusters are skipped, select_data_ind may not match cluster_labels
# This does not happen in 0.2% case, but could happen in 1% case.
same_cluster_selected_data_ind_mask = (
selected_data_ind == cluster_labels_cuda.view((-1, 1))).float()
# It's true that if cluster is not in the list, some instances will have one more regularizer item, but this is a small contribution.
new_selection_regularizer = (1 - same_cluster_selected_data_ind_mask) * \
new_selection_regularizer + same_cluster_selected_data_ind_mask * 1e10
assert not torch.any(new_selection_regularizer == 0), "{}".format(
torch.where(new_selection_regularizer == 0))
if verbose:
print("Min:", new_selection_regularizer.min())
# selection_regularizer: N_full
if alpha != 1:
new_selection_regularizer = (
1 / new_selection_regularizer ** alpha).sum(dim=1)
else:
new_selection_regularizer = (
1 / new_selection_regularizer).sum(dim=1)
selection_regularizer = selection_regularizer * \
momentum + new_selection_regularizer * (1 - momentum)
del cluster_labels_cuda
del neighbors_dist_cuda
del data
# import pdb
# pdb.set_trace()
assert len(selected_inds) == final_sample_num
return selected_inds.numpy()
# Credit: MoCov2 https://github.com/facebookresearch/moco/blob/main/moco/loader.py
class GaussianBlur(object):
"""Gaussian blur augmentation in SimCLR https://arxiv.org/abs/2002.05709"""
def __init__(self, sigma=[.1, 2.]):
self.sigma = sigma
def __call__(self, x):
sigma = random.uniform(self.sigma[0], self.sigma[1])
x = x.filter(ImageFilter.GaussianBlur(radius=sigma))
return x
class L2NormLayer(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return F.normalize(x, dim=1)
# Credit: https://github.com/amazon-research/exponential-moving-average-normalization
def build_hidden_head(num_mlp, dim_mlp, dim, normed=False):
modules = []
for _ in range(1, num_mlp):
modules.append(nn.Linear(dim_mlp, dim_mlp))
modules.append(nn.ReLU())
modules.append(nn.Linear(dim_mlp, dim))
if normed:
modules.append(L2NormLayer())
return nn.Sequential(*modules)
# Credit: https://github.com/wvangansbeke/Unsupervised-Classification/blob/master/data/imagenet.py
class ImageNet(datasets.ImageFolder):
def __init__(self, root, split='train', transform=None):
super(ImageNet, self).__init__(root=os.path.join(root, split),
transform=None)
self.transform = transform
self.split = split
self.resize = transforms.Resize(256)
def __len__(self):
return len(self.imgs)
def __getitem__(self, index):
path, target = self.imgs[index]
with open(path, 'rb') as f:
img = Image.open(f).convert('RGB')
im_size = img.size
img = self.resize(img)
if self.transform is not None:
img = self.transform(img)
out = {'image': img, 'target': target, 'meta': {
'im_size': im_size, 'index': index}}
return out
def get_image(self, index):
path, target = self.imgs[index]
with open(path, 'rb') as f:
img = Image.open(f).convert('RGB')
img = self.resize(img)
return img
# class ImageNetSubset(torch.utils.data.Dataset):
# def __init__(self, subset_file, root, split='train',
# transform=None, return_dict=True):
# super(ImageNetSubset, self).__init__()
# self.root = os.path.join(root, split)
# self.transform = transform
# self.split = split
# # Read the subset of classes to include (sorted)
# with open(subset_file, 'r') as f:
# result = f.read().splitlines()
# subdirs, class_names = [], []
# for line in result:
# subdir, class_name = line.split(' ', 1)
# subdirs.append(subdir)
# class_names.append(class_name)
# # Gather the files (sorted)
# imgs = []
# targets = []
# for i, subdir in enumerate(subdirs):
# # subdir_path = os.path.join(self.root, subdir)
# files = sorted(glob(os.path.join(self.root, subdir, '*.JPEG')))
# for f in files:
# imgs.append((f, i))
# targets.append(i)
# self.imgs = imgs
# self.classes = class_names
# self.targets = targets
# self.resize = transforms.Resize(256)
# self.return_dict = return_dict
# def get_image(self, index):
# path, target = self.imgs[index]
# with open(path, 'rb') as f:
# img = Image.open(f).convert('RGB')
# img = self.resize(img)
# return img
# def __len__(self):
# return len(self.imgs)
# def __getitem__(self, index):
# path, target = self.imgs[index]
# with open(path, 'rb') as f:
# img = Image.open(f).convert('RGB')
# im_size = img.size
# if self.return_dict:
# img = self.resize(img)
# class_name = self.classes[target]
# if self.transform is not None:
# img = self.transform(img)
# if self.return_dict:
# out = {'image': img, 'target': target, 'meta': {
# 'im_size': im_size, 'index': index, 'class_name': class_name}}
# return out
# return img, target
def train_dataset_imagenet(transform_name, add_memory_bank_dataset=False):
# Uses MoCov2 aug: https://github.com/facebookresearch/moco/blob/main/main_moco.py
if transform_name == "imagenet" or transform_name == "imagenet100": | normalization_kwargs = normalization_kwargs_dict[transform_name] | 1 | 2023-12-05 01:13:31+00:00 | 4k |
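The selection routines in this record score each sample as density minus redundancy: `1 / kNN-distance` rewards samples in dense regions, while `w * selection_regularizer` penalizes samples close to what has already been picked, and the best-scoring sample per k-means cluster is kept. A toy sketch of that per-cluster rule with synthetic tensors (sizes and values are illustrative):

```python
import torch

num_points, num_clusters, w = 100, 5, 1.0
neighbors_dist = torch.rand(num_points) + 1e-3            # kNN distance per sample (density proxy)
cluster_labels = torch.randint(0, num_clusters, (num_points,))
selection_regularizer = torch.zeros(num_points)            # updated across iterations in the real code

selected = []
for c in range(num_clusters):
    idx = torch.where(cluster_labels == c)[0]
    if len(idx) == 0:
        continue
    scores = 1.0 / neighbors_dist[idx] - w * selection_regularizer[idx]
    selected.append(idx[scores.argmax()].item())            # densest, least-redundant sample in the cluster
print(selected)
```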
upfusion3d/upfusion | control_net/ldm/modules/diffusionmodules/openaimodel.py | [
{
"identifier": "checkpoint",
"path": "control_net/ldm/modules/diffusionmodules/util.py",
"snippet": "def checkpoint(func, inputs, params, flag):\n \"\"\"\n Evaluate a function without caching intermediate activations, allowing for\n reduced memory at the expense of extra compute in the backward pass.\n :param func: the function to evaluate.\n :param inputs: the argument sequence to pass to `func`.\n :param params: a sequence of parameters `func` depends on but does not\n explicitly take as arguments.\n :param flag: if False, disable gradient checkpointing.\n \"\"\"\n if flag:\n args = tuple(inputs) + tuple(params)\n return CheckpointFunction.apply(func, len(inputs), *args)\n else:\n return func(*inputs)"
},
{
"identifier": "conv_nd",
"path": "control_net/ldm/modules/diffusionmodules/util.py",
"snippet": "def conv_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D convolution module.\n \"\"\"\n if dims == 1:\n return nn.Conv1d(*args, **kwargs)\n elif dims == 2:\n return nn.Conv2d(*args, **kwargs)\n elif dims == 3:\n return nn.Conv3d(*args, **kwargs)\n raise ValueError(f\"unsupported dimensions: {dims}\")"
},
{
"identifier": "linear",
"path": "control_net/ldm/modules/diffusionmodules/util.py",
"snippet": "def linear(*args, **kwargs):\n \"\"\"\n Create a linear module.\n \"\"\"\n return nn.Linear(*args, **kwargs)"
},
{
"identifier": "avg_pool_nd",
"path": "control_net/ldm/modules/diffusionmodules/util.py",
"snippet": "def avg_pool_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D average pooling module.\n \"\"\"\n if dims == 1:\n return nn.AvgPool1d(*args, **kwargs)\n elif dims == 2:\n return nn.AvgPool2d(*args, **kwargs)\n elif dims == 3:\n return nn.AvgPool3d(*args, **kwargs)\n raise ValueError(f\"unsupported dimensions: {dims}\")"
},
{
"identifier": "zero_module",
"path": "control_net/ldm/modules/diffusionmodules/util.py",
"snippet": "def zero_module(module):\n \"\"\"\n Zero out the parameters of a module and return it.\n \"\"\"\n for p in module.parameters():\n p.detach().zero_()\n return module"
},
{
"identifier": "normalization",
"path": "control_net/ldm/modules/diffusionmodules/util.py",
"snippet": "def normalization(channels):\n \"\"\"\n Make a standard normalization layer.\n :param channels: number of input channels.\n :return: an nn.Module for normalization.\n \"\"\"\n return GroupNorm32(32, channels)"
},
{
"identifier": "timestep_embedding",
"path": "control_net/ldm/modules/diffusionmodules/util.py",
"snippet": "def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False):\n \"\"\"\n Create sinusoidal timestep embeddings.\n :param timesteps: a 1-D Tensor of N indices, one per batch element.\n These may be fractional.\n :param dim: the dimension of the output.\n :param max_period: controls the minimum frequency of the embeddings.\n :return: an [N x dim] Tensor of positional embeddings.\n \"\"\"\n if not repeat_only:\n half = dim // 2\n freqs = torch.exp(\n -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half\n ).to(device=timesteps.device)\n args = timesteps[:, None].float() * freqs[None]\n embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)\n if dim % 2:\n embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)\n else:\n embedding = repeat(timesteps, 'b -> b d', d=dim)\n return embedding"
},
{
"identifier": "SpatialTransformer",
"path": "control_net/ldm/modules/attention.py",
"snippet": "class SpatialTransformer(nn.Module):\n \"\"\"\n Transformer block for image-like data.\n First, project the input (aka embedding)\n and reshape to b, t, d.\n Then apply standard transformer action.\n Finally, reshape to image\n NEW: use_linear for more efficiency instead of the 1x1 convs\n \"\"\"\n def __init__(self, in_channels, n_heads, d_head,\n depth=1, dropout=0., context_dim=None,\n disable_self_attn=False, use_linear=False,\n use_checkpoint=True):\n super().__init__()\n if exists(context_dim) and not isinstance(context_dim, list):\n context_dim = [context_dim]\n self.in_channels = in_channels\n inner_dim = n_heads * d_head\n self.norm = Normalize(in_channels)\n if not use_linear:\n self.proj_in = nn.Conv2d(in_channels,\n inner_dim,\n kernel_size=1,\n stride=1,\n padding=0)\n else:\n self.proj_in = nn.Linear(in_channels, inner_dim)\n\n self.transformer_blocks = nn.ModuleList(\n [BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim[d],\n disable_self_attn=disable_self_attn, checkpoint=use_checkpoint)\n for d in range(depth)]\n )\n if not use_linear:\n self.proj_out = zero_module(nn.Conv2d(inner_dim,\n in_channels,\n kernel_size=1,\n stride=1,\n padding=0))\n else:\n self.proj_out = zero_module(nn.Linear(in_channels, inner_dim))\n self.use_linear = use_linear\n\n def forward(self, x, context=None):\n # note: if no context is given, cross-attention defaults to self-attention\n if not isinstance(context, list):\n context = [context]\n b, c, h, w = x.shape\n x_in = x\n x = self.norm(x)\n if not self.use_linear:\n x = self.proj_in(x)\n x = rearrange(x, 'b c h w -> b (h w) c').contiguous()\n if self.use_linear:\n x = self.proj_in(x)\n for i, block in enumerate(self.transformer_blocks):\n x = block(x, context=context[i])\n if self.use_linear:\n x = self.proj_out(x)\n x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w).contiguous()\n if not self.use_linear:\n x = self.proj_out(x)\n return x + x_in"
},
{
"identifier": "SLTQueryTransformer",
"path": "control_net/ldm/modules/attention.py",
"snippet": "class SLTQueryTransformer(nn.Module):\n \"\"\"\n Transformer block for image-like data.\n First, project the input (aka embedding)\n and reshape to b, t, d.\n Then apply standard transformer action.\n Finally, reshape to image\n NEW: use_linear for more efficiency instead of the 1x1 convs\n \"\"\"\n def __init__(self, in_channels, n_heads, d_head,\n depth=1, dropout=0., context_dim=None,\n disable_self_attn=False, use_linear=False,\n use_checkpoint=True):\n super().__init__()\n if exists(context_dim) and not isinstance(context_dim, list):\n context_dim = [context_dim]\n\n # self.in_channels = in_channels\n in_channels_with_rays = in_channels + 6 # TODO: Verify\n\n inner_dim = n_heads * d_head\n self.norm = Normalize(in_channels)\n if not use_linear:\n self.proj_in = nn.Conv2d(in_channels_with_rays,\n inner_dim,\n kernel_size=1,\n stride=1,\n padding=0)\n else:\n self.proj_in = nn.Linear(in_channels_with_rays, inner_dim)\n\n self.transformer_blocks = nn.ModuleList(\n [BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim[d],\n disable_self_attn=disable_self_attn, checkpoint=use_checkpoint)\n for d in range(depth)]\n )\n if not use_linear:\n self.proj_out = zero_module(nn.Conv2d(inner_dim,\n in_channels,\n kernel_size=1,\n stride=1,\n padding=0))\n else:\n self.proj_out = zero_module(nn.Linear(in_channels, inner_dim))\n self.use_linear = use_linear\n\n @staticmethod\n def _get_ray_condition(query_cameras, x_shape, device):\n\n b, _, h, w = x_shape\n\n if query_cameras[0] is not None:\n grid_rays, _ = get_grid_rays_gpu(\n query_cameras, image_size=(h, w), min_x=1, max_x=-1,\n min_y=1, max_y=-1\n ) # (B, H*W, 6)\n\n plucker_rays = get_plucker_parameterization(grid_rays) # (B, H*W, 6)\n plucker_rays = torch.reshape(plucker_rays, (b, h, w, 6))\n plucker_rays = torch.permute(plucker_rays, (0, 3, 1, 2)).contiguous()\n\n else:\n plucker_rays = torch.zeros((b, 6, h, w), dtype=torch.float32).to(device)\n\n return plucker_rays # (B, 6, H, W)\n\n def forward(self, x, slt, query_cameras):\n # slt is the Set Latent Representation\n if not isinstance(slt, list):\n slt = [slt]\n b, c, h, w = x.shape\n x_in = x\n x = self.norm(x)\n\n # NOTE: Current design decision is to not apply group norm on rays.\n # TODO: Ask if the above decision is fine or not.\n rays = self._get_ray_condition(query_cameras, x.shape, x.device)\n x = torch.cat((x, rays), dim = 1) # (B, C+6, H, W)\n\n if not self.use_linear:\n x = self.proj_in(x)\n x = rearrange(x, 'b c h w -> b (h w) c').contiguous()\n if self.use_linear:\n x = self.proj_in(x)\n for i, block in enumerate(self.transformer_blocks):\n x = block(x, context=slt[i])\n if self.use_linear:\n x = self.proj_out(x)\n x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w).contiguous()\n if not self.use_linear:\n x = self.proj_out(x)\n return x + x_in"
},
{
"identifier": "exists",
"path": "control_net/ldm/util.py",
"snippet": "def exists(x):\n return x is not None"
}
] | from abc import abstractmethod
from control_net.ldm.modules.diffusionmodules.util import (
checkpoint,
conv_nd,
linear,
avg_pool_nd,
zero_module,
normalization,
timestep_embedding,
)
from control_net.ldm.modules.attention import SpatialTransformer, SLTQueryTransformer
from control_net.ldm.util import exists
from omegaconf.listconfig import ListConfig
import math
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as F | 3,154 |
# dummy replace
def convert_module_to_f16(x):
pass
def convert_module_to_f32(x):
pass
## go
class AttentionPool2d(nn.Module):
"""
Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py
"""
def __init__(
self,
spacial_dim: int,
embed_dim: int,
num_heads_channels: int,
output_dim: int = None,
):
super().__init__()
self.positional_embedding = nn.Parameter(th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5)
self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1)
self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1)
self.num_heads = embed_dim // num_heads_channels
self.attention = QKVAttention(self.num_heads)
def forward(self, x):
b, c, *_spatial = x.shape
x = x.reshape(b, c, -1) # NC(HW)
x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1) # NC(HW+1)
x = x + self.positional_embedding[None, :, :].to(x.dtype) # NC(HW+1)
x = self.qkv_proj(x)
x = self.attention(x)
x = self.c_proj(x)
return x[:, :, 0]
class TimestepBlock(nn.Module):
"""
Any module where forward() takes timestep embeddings as a second argument.
"""
@abstractmethod
def forward(self, x, emb):
"""
Apply the module to `x` given `emb` timestep embeddings.
"""
class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
"""
A sequential module that passes timestep embeddings to the children that
support it as an extra input.
"""
def forward(self, x, emb, context=None, slt=None, query_cameras=None):
for layer in self:
if isinstance(layer, TimestepBlock):
x = layer(x, emb)
elif isinstance(layer, SpatialTransformer):
x = layer(x, context)
|
# dummy replace
def convert_module_to_f16(x):
pass
def convert_module_to_f32(x):
pass
## go
class AttentionPool2d(nn.Module):
"""
Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py
"""
def __init__(
self,
spacial_dim: int,
embed_dim: int,
num_heads_channels: int,
output_dim: int = None,
):
super().__init__()
self.positional_embedding = nn.Parameter(th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5)
self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1)
self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1)
self.num_heads = embed_dim // num_heads_channels
self.attention = QKVAttention(self.num_heads)
def forward(self, x):
b, c, *_spatial = x.shape
x = x.reshape(b, c, -1) # NC(HW)
x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1) # NC(HW+1)
x = x + self.positional_embedding[None, :, :].to(x.dtype) # NC(HW+1)
x = self.qkv_proj(x)
x = self.attention(x)
x = self.c_proj(x)
return x[:, :, 0]
class TimestepBlock(nn.Module):
"""
Any module where forward() takes timestep embeddings as a second argument.
"""
@abstractmethod
def forward(self, x, emb):
"""
Apply the module to `x` given `emb` timestep embeddings.
"""
class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
"""
A sequential module that passes timestep embeddings to the children that
support it as an extra input.
"""
def forward(self, x, emb, context=None, slt=None, query_cameras=None):
for layer in self:
if isinstance(layer, TimestepBlock):
x = layer(x, emb)
elif isinstance(layer, SpatialTransformer):
x = layer(x, context) | elif isinstance(layer, SLTQueryTransformer): | 8 | 2023-12-12 00:49:11+00:00 | 4k |
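The `timestep_embedding` helper listed in this record's context builds the standard sinusoidal embedding that the timestep-conditioned blocks above consume (after it passes through the UNet's time-embedding MLP). A standalone sketch of the same computation; the dimension and timestep values here are illustrative:

```python
import math
import torch

def timestep_embedding(timesteps: torch.Tensor, dim: int, max_period: int = 10000) -> torch.Tensor:
    half = dim // 2
    freqs = torch.exp(-math.log(max_period) * torch.arange(half, dtype=torch.float32) / half)
    args = timesteps[:, None].float() * freqs[None]
    emb = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
    if dim % 2:  # pad odd dimensions with a zero column
        emb = torch.cat([emb, torch.zeros_like(emb[:, :1])], dim=-1)
    return emb

t = torch.tensor([0, 250, 999])
print(timestep_embedding(t, dim=320).shape)  # (3, 320)
```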
modelscope/normal-depth-diffusion | tools/draw_imgs/draw_vae_epoch_curve.py | [
{
"identifier": "Txt2ImgIterableBaseDataset",
"path": "ldm/data/base.py",
"snippet": "class Txt2ImgIterableBaseDataset(IterableDataset):\n '''\n Define an interface to make the IterableDatasets for text2img data chainable\n '''\n\n def __init__(self, num_records=0, valid_ids=None, size=256):\n super().__init__()\n self.num_records = num_records\n self.valid_ids = valid_ids\n self.sample_ids = valid_ids\n self.size = size\n\n print(\n f'{self.__class__.__name__} dataset contains {self.__len__()} examples.'\n )\n\n def __len__(self):\n return self.num_records\n\n @abstractmethod\n def __iter__(self):\n pass"
},
{
"identifier": "instantiate_from_config",
"path": "ldm/util.py",
"snippet": "def instantiate_from_config(config):\n if not 'target' in config:\n\n print(config)\n if config == '__is_first_stage__':\n return None\n elif config == '__is_unconditional__':\n return None\n raise KeyError('Expected key `target` to instantiate.')\n return get_obj_from_str(config['target'])(**config.get('params', dict()))"
}
] | import argparse
import csv
import datetime
import glob
import importlib
import multiprocessing
import os
import pdb
import sys
import time
import warnings
import numpy as np
import pytorch_lightning as pl
import torch
import torchvision
import pudb
import signal
from functools import partial
from ldm.data.base import Txt2ImgIterableBaseDataset
from ldm.util import instantiate_from_config
from omegaconf import OmegaConf
from packaging import version
from PIL import Image
from pytorch_lightning import seed_everything
from pytorch_lightning.callbacks import (Callback, LearningRateMonitor,
ModelCheckpoint)
from pytorch_lightning.loggers import TensorBoardLogger
from pytorch_lightning.trainer import Trainer
from pytorch_lightning.utilities import rank_zero_info
from torch import autocast
from torch.utils.data import DataLoader, Dataset, Subset, random_split
from pytorch_lightning.utilities import rank_zero_only
from pytorch_lightning.plugins.precision import MixedPrecisionPlugin
from pytorch_lightning.utilities.distributed import rank_zero_only | 1,664 | type=str2bool,
nargs='?',
const=True,
default=False,
help='enable post-mortem debugging',
)
parser.add_argument(
'-s',
'--seed',
type=int,
default=23,
help='seed for seed_everything',
)
parser.add_argument(
'-f',
'--postfix',
type=str,
default='',
help='post-postfix for default name',
)
parser.add_argument(
'-l',
'--logdir',
type=str,
default='logs',
help='directory for logging dat shit',
)
parser.add_argument(
'--scale_lr',
type=str2bool,
nargs='?',
const=True,
default=True,
help='scale base-lr by ngpu * batch_size * n_accumulate',
)
parser.add_argument(
'--resume_epoch',
type=str,
help='resume_epoch',
)
return parser
def nondefault_trainer_args(opt):
parser = argparse.ArgumentParser()
parser = Trainer.add_argparse_args(parser)
args = parser.parse_args([])
return sorted(k for k in vars(args) if getattr(opt, k) != getattr(args, k))
class WrappedDataset(Dataset):
"""Wraps an arbitrary object with __len__ and __getitem__ into a pytorch dataset"""
def __init__(self, dataset):
self.data = dataset
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
return self.data[idx]
def worker_init_fn(_):
worker_info = torch.utils.data.get_worker_info()
dataset = worker_info.dataset
worker_id = worker_info.id
if isinstance(dataset, Txt2ImgIterableBaseDataset):
split_size = dataset.num_records // worker_info.num_workers
# reset num_records to the true number to retain reliable length information
dataset.sample_ids = dataset.valid_ids[worker_id
* split_size:(worker_id + 1)
* split_size]
current_id = np.random.choice(len(np.random.get_state()[1]), 1)
return np.random.seed(np.random.get_state()[1][current_id] + worker_id)
else:
return np.random.seed(np.random.get_state()[1][0] + worker_id)
class DataModuleFromConfig(pl.LightningDataModule):
def __init__(self,
batch_size,
train=None,
validation=None,
test=None,
predict=None,
wrap=False,
num_workers=None,
shuffle_test_loader=False,
use_worker_init_fn=False,
shuffle_val_dataloader=False):
super().__init__()
self.batch_size = batch_size
self.dataset_configs = dict()
self.num_workers = num_workers if num_workers is not None else min(
batch_size * 2, multiprocessing.cpu_count())
self.use_worker_init_fn = use_worker_init_fn
if train is not None:
self.dataset_configs['train'] = train
self.train_dataloader = self._train_dataloader
if validation is not None:
self.dataset_configs['validation'] = validation
self.val_dataloader = partial(
self._val_dataloader, shuffle=shuffle_val_dataloader)
if test is not None:
self.dataset_configs['test'] = test
self.test_dataloader = partial(
self._test_dataloader, shuffle=shuffle_test_loader)
if predict is not None:
self.dataset_configs['predict'] = predict
self.predict_dataloader = self._predict_dataloader
self.wrap = wrap
def prepare_data(self):
self.datasets = dict(
| '''
using to test the difference between
'''
if version.parse(pl.__version__) > version.parse('1.4.2'):
else:
warnings.filterwarnings('ignore')
def get_parser(**parser_kwargs):
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
parser = argparse.ArgumentParser(**parser_kwargs)
parser.add_argument(
'-n',
'--name',
type=str,
const=True,
default='',
nargs='?',
help='postfix for logdir',
)
parser.add_argument(
'-r',
'--resume',
type=str,
const=True,
default='',
nargs='?',
help='resume from logdir or checkpoint in logdir',
)
parser.add_argument(
'-b',
'--base',
nargs='*',
metavar='base_config.yaml',
help='paths to base configs. Loaded from left-to-right. '
'Parameters can be overwritten or added with command-line options of the form `--key value`.',
default=list(),
)
parser.add_argument(
'-t',
'--train',
type=str2bool,
const=True,
default=False,
nargs='?',
help='train',
)
parser.add_argument(
'--no-test',
type=str2bool,
const=True,
default=False,
nargs='?',
help='disable test',
)
parser.add_argument(
'-p', '--project', help='name of new or path to existing project')
parser.add_argument(
'-d',
'--debug',
type=str2bool,
nargs='?',
const=True,
default=False,
help='enable post-mortem debugging',
)
parser.add_argument(
'-s',
'--seed',
type=int,
default=23,
help='seed for seed_everything',
)
parser.add_argument(
'-f',
'--postfix',
type=str,
default='',
help='post-postfix for default name',
)
parser.add_argument(
'-l',
'--logdir',
type=str,
default='logs',
help='directory for logging dat shit',
)
parser.add_argument(
'--scale_lr',
type=str2bool,
nargs='?',
const=True,
default=True,
help='scale base-lr by ngpu * batch_size * n_accumulate',
)
parser.add_argument(
'--resume_epoch',
type=str,
help='resume_epoch',
)
return parser
def nondefault_trainer_args(opt):
parser = argparse.ArgumentParser()
parser = Trainer.add_argparse_args(parser)
args = parser.parse_args([])
return sorted(k for k in vars(args) if getattr(opt, k) != getattr(args, k))
class WrappedDataset(Dataset):
"""Wraps an arbitrary object with __len__ and __getitem__ into a pytorch dataset"""
def __init__(self, dataset):
self.data = dataset
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
return self.data[idx]
def worker_init_fn(_):
worker_info = torch.utils.data.get_worker_info()
dataset = worker_info.dataset
worker_id = worker_info.id
if isinstance(dataset, Txt2ImgIterableBaseDataset):
split_size = dataset.num_records // worker_info.num_workers
# reset num_records to the true number to retain reliable length information
dataset.sample_ids = dataset.valid_ids[worker_id
* split_size:(worker_id + 1)
* split_size]
current_id = np.random.choice(len(np.random.get_state()[1]), 1)
return np.random.seed(np.random.get_state()[1][current_id] + worker_id)
else:
return np.random.seed(np.random.get_state()[1][0] + worker_id)
class DataModuleFromConfig(pl.LightningDataModule):
def __init__(self,
batch_size,
train=None,
validation=None,
test=None,
predict=None,
wrap=False,
num_workers=None,
shuffle_test_loader=False,
use_worker_init_fn=False,
shuffle_val_dataloader=False):
super().__init__()
self.batch_size = batch_size
self.dataset_configs = dict()
self.num_workers = num_workers if num_workers is not None else min(
batch_size * 2, multiprocessing.cpu_count())
self.use_worker_init_fn = use_worker_init_fn
if train is not None:
self.dataset_configs['train'] = train
self.train_dataloader = self._train_dataloader
if validation is not None:
self.dataset_configs['validation'] = validation
self.val_dataloader = partial(
self._val_dataloader, shuffle=shuffle_val_dataloader)
if test is not None:
self.dataset_configs['test'] = test
self.test_dataloader = partial(
self._test_dataloader, shuffle=shuffle_test_loader)
if predict is not None:
self.dataset_configs['predict'] = predict
self.predict_dataloader = self._predict_dataloader
self.wrap = wrap
def prepare_data(self):
self.datasets = dict( | (k, instantiate_from_config(self.dataset_configs[k])) | 1 | 2023-12-06 07:29:34+00:00 | 4k |
facebookresearch/DCI | reproduction/crowdsourcing/annotate/preprocessing/preprocess_assets_segev.py | [
{
"identifier": "get_groups_simple",
"path": "reproduction/crowdsourcing/annotate/preprocessing/mask_creation_utils.py",
"snippet": "TARGET_STEP = 100\nSKIP_LOGGING = True\nclass GroupItem(TypedDict):\nclass FinalGroup(TypedDict):\ndef jitter(size: float) -> float:\ndef bound(v, lo, hi):\ndef _load_final_group_from_json(json_dict) -> FinalGroup:\ndef load_final_group_from_json(json_dict) -> FinalGrouping:\ndef get_grid(\n step: int, \n top_left: Point, \n bottom_right: Point, \n noise: Optional[float] = None\n) -> List[Point]:\ndef get_missing_points_greedy(mask: np.ndarray, min_size: int) -> List[Point]:\ndef get_points_from_canny_greedy(\n image: np.ndarray, \n distance_threshold: int = 40, \n jitter_amount: int = 40,\n num_extra: int = 3,\n) -> List[Point]:\ndef predict_all(\n predictor: \"SamPredictor\", \n image: np.ndarray, \n step: int = TARGET_STEP, \n top_left: Optional[Point] = None, \n bottom_right: Optional[Point] = None, \n containing_mask: Optional[np.ndarray] = None\n) -> Dict[Point, List[EfficientMask]]:\ndef predict_for_points(\n predictor: \"SamPredictor\", \n points: List[Point],\n) -> Dict[Point, List[EfficientMask]]:\ndef predict_for_bounded_points(\n predictor: \"SamPredictor\", \n image: np.ndarray, \n points: List[Point], \n mask: EfficientMask,\n) -> Dict[Point, List[EfficientMask]]:\ndef get_canny_masks(\n predictor: \"SamPredictor\", \n image: np.ndarray, \n distance_threshold: int = 40, \n jitter_amount: int = 40\n):\ndef process_best_largest(\n results: Dict[Point, List[EfficientMask]], \n penalty_gap: float = 0.2,\n) -> Dict[Point, Dict[MaskMergeKey, EfficientMask]]:\ndef get_groups(\n processed_results: Dict[Point, Dict[MaskMergeKey, EfficientMask]], \n merge_key: MaskMergeKey = 'best', \n groups: Optional[GroupDict] = None,\n) -> GroupDict:\ndef get_groups_simple(\n sam_results: List[EfficientMask],\n) -> FinalGrouping:\ndef print_groups(groups: FinalGrouping) -> None:\n def _get_group_map(curr_g: FinalGrouping) -> Dict[Union[int, str], Any]:\ndef refine_groups_simple(groups: FinalGrouping, merge_thresh = 0.03) -> FinalGrouping:\ndef first_iteration_groups(\n predictor: \"SamPredictor\",\n processed_results: Dict[Point, Dict[MaskMergeKey, EfficientMask]], \n step: int, \n merge_key: MaskMergeKey = \"largest\",\n) -> GroupDict:\ndef get_subgroup_mask_lists(\n groups: GroupDict, \n base_masks: Dict[Point, List[EfficientMask]], \n canny_masks: Dict[Point, List[EfficientMask]], \n score_cutoff: float = 0.7, \n retain_best: bool = False,\n) -> GroupDict:\ndef compute_subgroups(\n group_mask_item: GroupItem, \n contained_in_thresh: float = 0.90, \n outer_sim_thresh: float = 0.77, \n mutual_sim_thresh: float = 0.85, \n retain_best: bool = False,\n) -> GroupDict:\ndef add_points_in_mask(\n predictor: \"SamPredictor\", \n image: np.ndarray, \n item: GroupItem, \n score_cutoff: float = 0.7,\n num_points = 5,\n) -> GroupItem:\ndef compute_subgroup_recursively(\n predictor: \"SamPredictor\", \n image: np.ndarray, \n group_mask_item: GroupItem, \n score_cutoff: float = 0.7, \n contained_in_thresh: float = 0.90, \n outer_sim_thresh: float = 0.77, \n mutual_sim_thresh: float = 0.85, \n retain_best: bool = False, \n depth: int = 0,\n) -> FinalGroup:\ndef compute_group_tree(\n predictor: \"SamPredictor\", \n image: np.ndarray, \n score_cutoff: float = 0.7, \n contained_in_thresh: float = 0.9, \n outer_sim_thresh: float = 0.8, \n mutual_sim_thresh: float = 0.9, \n retain_best: bool = False,\n) -> FinalGrouping:"
},
{
"identifier": "EfficientMask",
"path": "reproduction/crowdsourcing/annotate/preprocessing/efficient_mask.py",
"snippet": "class EfficientMask():\n \"\"\"Class for more efficient mask mask over full numpy ndarrays\"\"\"\n def __init__(self, mask: np.ndarray, score: float, size: Optional[int] = None):\n self.mask = mask\n self.score = score\n self._size: Optional[int] = size\n self._tlbr: Optional[Tuple[Point, Point]] = None\n \n def __repr__(self) -> str:\n return f\"<EM : {self.get_size()}, {self.get_tlbr()}>\"\n \n def _reset_cache(self):\n self._tlbr = None\n self._size = None\n \n def set_to(self, other: \"EfficientMask\"):\n \"\"\"Set this mask's values to that of other\"\"\"\n self.mask = other.mask\n self.score = other.score\n self._size = other._size\n self._tlbr = other._tlbr\n \n def get_tlbr(self) -> Tuple[Point, Point]:\n \"\"\"Return the top left and bottom right bounds of this mask\"\"\"\n if self._tlbr is None:\n try:\n np_where = np.where(self.mask == True)\n left = np.min(np_where[1])\n right = np.max(np_where[1]) + 1\n top = np.min(np_where[0])\n bottom = np.max(np_where[0]) + 1\n except ValueError:\n top, left, bottom, right = (0, 0, 0, 0)\n self._tlbr = ((cast(Ydm, top), cast(Xdm, left)), (cast(Ydm, bottom), cast(Xdm, right)))\n return self._tlbr\n \n def get_size(self) -> int:\n \"\"\"Return the total number of true pixels in this mask\"\"\"\n if self._size is None:\n (top, left), (bottom, right) = self.get_tlbr()\n self._size = np.sum(self.mask[top:bottom,left:right]*1)\n return self._size\n \n def get_density(self) -> float:\n \"\"\"Provide rough density with number of pixels and bbox size\"\"\"\n size = self.get_size()\n (t, l), (b, r) = self.get_tlbr()\n area = (b-t) * (r-l) + 1\n return size / area\n \n def dense_score(self) -> float:\n \"\"\"Return the score times the density, a heuristic for quality\"\"\"\n return self.score * math.sqrt(self.get_density())\n \n def _bbox_overlaps(self, other: \"EfficientMask\") -> bool:\n \"\"\"Check points of opposite diagonals in each other bbox\"\"\"\n (t1, l1), (b1, r1) = self.get_tlbr()\n (t2, l2), (b2, r2) = other.get_tlbr()\n return (\n point_in_box(t1, l1, other.get_tlbr()) or \n point_in_box(b1, r1, other.get_tlbr()) or \n point_in_box(t2, r2, self.get_tlbr()) or \n point_in_box(b2, l2, self.get_tlbr()) \n )\n \n def _get_overlap_submask(self, other: \"EfficientMask\") -> np.ndarray:\n \"\"\"Get a classic ndarray of pixels in the overlap between this and other\"\"\"\n if not self._bbox_overlaps(other):\n return np.array([])\n (t1, l1), (b1, r1) = self.get_tlbr()\n (t2, l2), (b2, r2) = other.get_tlbr()\n maxt, maxl = max(t1, t2), max(l1, l2)\n minb, minr = min(b1, b2), min(r1, r2)\n return (self.mask[maxt:minb,maxl:minr]*1 + other.mask[maxt:minb,maxl:minr]*1 == 2)\n \n def _get_xor_submask(self, other: \"EfficientMask\") -> np.ndarray:\n \"\"\"Get a classic ndarray of pixels in the xor between this and other\"\"\"\n if not self._bbox_overlaps(other):\n return np.array([])\n (t1, l1), (b1, r1) = self.get_tlbr()\n (t2, l2), (b2, r2) = other.get_tlbr()\n mint, minl = min(t1, t2), min(l1, l2)\n maxb, maxr = max(b1, b2), max(r1, r2)\n return (self.mask[mint:maxb,minl:maxr]*1 + other.mask[mint:maxb,minl:maxr]*1 == 1)\n \n def intersect(self, other: \"EfficientMask\") -> \"EfficientMask\":\n \"\"\"Return an efficient mask of the overlap between this and other\"\"\"\n res = np.full(self.mask.shape, False)\n submask = self._get_overlap_submask(other)\n if len(submask) != 0:\n (t1, l1), (b1, r1) = self.get_tlbr()\n (t2, l2), (b2, r2) = other.get_tlbr()\n maxt, maxl = max(t1, t2), max(l1, l2)\n minb, minr = min(b1, b2), min(r1, r2)\n 
res[maxt:minb,maxl:minr] = submask\n return EfficientMask(res, (self.score + other.score)/2)\n\n def mostly_contained_in(self, out_mask: \"EfficientMask\", thresh: float = 0.95) -> bool:\n \"\"\"Returns True if thresh of self's pixels are in out_mask\"\"\"\n size_in = self.get_size() + 1\n overlap = mask_size(self._get_overlap_submask(out_mask))\n return overlap / size_in > thresh\n \n def overlaps_threshold(self, other: \"EfficientMask\", thresh: float = 0.50) -> bool:\n \"\"\"Returns true if over thresh of either mask is contained in the other\"\"\"\n size_1 = self.get_size() + 1\n size_2 = other.get_size() + 1\n overlap = mask_size(self._get_overlap_submask(other))\n return overlap / size_1 > thresh or overlap / size_2 > thresh\n \n def near_equivalent_to(self, other: \"EfficientMask\", thresh: float = 0.96) -> bool:\n \"\"\"Return true if these two masks have prop overlapping pixels > thresh\"\"\"\n size_1 = self.get_size() + 1\n size_2 = other.get_size() + 1\n if size_1 / size_2 < thresh or size_2 / size_1 < thresh:\n return False\n difference = mask_size(self._get_xor_submask(other))\n if (difference / size_1) > (1-thresh) or (difference / size_2) > (1-thresh):\n return False\n return True\n \n def union(self, other: \"EfficientMask\") -> \"EfficientMask\":\n \"\"\"Return a new efficient mask unioning these\"\"\"\n new_mask = self.mask * 1\n (t2, l2), (b2, r2) = other.get_tlbr()\n new_mask[t2:b2,l2:r2] += other.mask[t2:b2,l2:r2]*1\n return EfficientMask(\n mask=cast(np.ndarray, new_mask > 0),\n score=(self.score + other.score) / 2, # may be more appropriate as weighted mask sizes\n )\n\n def subtract(self, other: \"EfficientMask\") -> \"EfficientMask\":\n \"\"\"Subtract the other mask from this one\"\"\"\n new_mask = self.mask * 1\n (t2, l2), (b2, r2) = other.get_tlbr()\n new_mask[t2:b2,l2:r2] -= other.mask[t2:b2,l2:r2]*1\n return EfficientMask(\n mask=cast(np.ndarray, new_mask == 1),\n score=self.score,\n )"
}
] | import time
import sys
import numpy as np
import os
import base64
import cv2
import json
from segment_anything import sam_model_registry
from segment_anything.automatic_mask_generator import SamAutomaticMaskGenerator
from .mask_creation_utils import get_groups_simple, refine_groups_simple, FinalGrouping, FinalGroup, get_points_from_canny_greedy
from .efficient_mask import EfficientMask
from PIL import Image
from io import BytesIO
from typing import TypedDict, List | 3,393 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the CC-BY-NC license found in the
# LICENSE file in the root directory of this source tree.
LOW = 5000 # Low value into the images array to start at
HIGH = 12000 # High value in images array to go to
SETEV_MODEL_ROOT = 'FILL_ME' # TODO fill in
ANNOTATE_ROOT = os.path.dirname(os.path.dirname(__file__))
SOURCE_DIR = os.path.join(ANNOTATE_ROOT, "assets/images")
OUT_DIR = os.path.join(ANNOTATE_ROOT, "assets/masks")
class SAMResult(TypedDict):
segmentation: np.ndarray # the mask itself
bbox: List[float] #XYWH of the mask
area: int # area of the mask
predicted_iou: float # model predicted quality
point_coords: List[List[float]] # coords of this point
stability_score: float # model stability score
crop_box: List[float] # image crop used to generate this mask, XYWH
| #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the CC-BY-NC license found in the
# LICENSE file in the root directory of this source tree.
LOW = 5000 # Low value into the images array to start at
HIGH = 12000 # High value in images array to go to
SETEV_MODEL_ROOT = 'FILL_ME' # TODO fill in
ANNOTATE_ROOT = os.path.dirname(os.path.dirname(__file__))
SOURCE_DIR = os.path.join(ANNOTATE_ROOT, "assets/images")
OUT_DIR = os.path.join(ANNOTATE_ROOT, "assets/masks")
class SAMResult(TypedDict):
segmentation: np.ndarray # the mask itself
bbox: List[float] #XYWH of the mask
area: int # area of the mask
predicted_iou: float # model predicted quality
point_coords: List[List[float]] # coords of this point
stability_score: float # model stability score
crop_box: List[float] # image crop used to generate this mask, XYWH
| def fold_group_tree(g: FinalGrouping): | 0 | 2023-12-13 16:16:48+00:00 | 4k |
daswer123/xtts-webui | scripts/funcs.py | [
{
"identifier": "denoise",
"path": "scripts/resemble_enhance/enhancer/inference.py",
"snippet": "@torch.inference_mode()\ndef denoise(dwav, sr, device, run_dir=None):\n enhancer = load_enhancer(run_dir, device)\n return inference(model=enhancer.denoiser, dwav=dwav, sr=sr, device=device)"
},
{
"identifier": "enhance",
"path": "scripts/resemble_enhance/enhancer/inference.py",
"snippet": "@torch.inference_mode()\ndef enhance(chunk_seconds, chunks_overlap,dwav, sr, device, nfe=32, solver=\"midpoint\", lambd=0.5, tau=0.5, run_dir=None):\n assert 0 < nfe <= 128, f\"nfe must be in (0, 128], got {nfe}\"\n assert solver in (\"midpoint\", \"rk4\", \"euler\"), f\"solver must be in ('midpoint', 'rk4', 'euler'), got {solver}\"\n assert 0 <= lambd <= 1, f\"lambd must be in [0, 1], got {lambd}\"\n assert 0 <= tau <= 1, f\"tau must be in [0, 1], got {tau}\"\n enhancer = load_enhancer(run_dir, device)\n enhancer.configurate_(nfe=nfe, solver=solver, lambd=lambd, tau=tau)\n return inference(model=enhancer, chunk_seconds=chunk_seconds, overlap_seconds=chunks_overlap, dwav=dwav, sr=sr, device=device)"
}
] | import gc
import torchaudio
import torch
import numpy as np
import os
import ffmpeg
import shutil
import uuid
import subprocess
import soundfile as sf
import noisereduce
import tempfile
from scripts.resemble_enhance.enhancer.inference import denoise, enhance
from scipy.io import wavfile
from pathlib import Path
from pedalboard import (
Pedalboard,
NoiseGate,
Compressor,
LowShelfFilter,
Gain,
) | 2,345 | Gain(gain_db=0),
])
reduced_noise = noisereduce.reduce_noise(y=audio_data,
sr=sample_rate,
stationary=True,
prop_decrease=0.75)
processed_audio = board(reduced_noise.astype('float32'), sample_rate)
# processed_audio = board(audio_data.astype('float32'), sample_rate)
# Create a temporary file for the processed audio
with tempfile.NamedTemporaryFile(suffix='.wav', delete=False) as temp_file:
sf.write(temp_file.name, processed_audio.T if processed_audio.ndim >
1 else processed_audio, sample_rate)
temp_file_path = temp_file.name
# Defining an output file name with a new extension in the same folder
output_path = f"{os.path.splitext(audio_path)[0]}_improved.{type_audio}"
# Convert the processed wav file to the target format using FFmpeg
stream = (
ffmpeg
.input(temp_file_path)
.output(output_path)
.overwrite_output()
.run_async(pipe_stdout=True, pipe_stderr=True)
)
out, err = stream.communicate()
if stream.returncode != 0:
raise Exception(f"FFmpeg error:\n{err.decode()}")
# Deleting a temporary wav file after it has been used
os.unlink(temp_file_path)
return output_path
def cut_audio(input_wav_path, duration):
output_wav_path = input_wav_path.with_name(
f"{input_wav_path.stem}_cut{input_wav_path.suffix}")
try:
(
ffmpeg
.input(str(input_wav_path))
.output(str(output_wav_path), t=duration)
.run(overwrite_output=True)
)
except ffmpeg.Error as e: # Catching specific ffmpeg Error here.
# Check if stderr or stdout have been captured before trying to decode.
stderr = e.stderr.decode('utf8') if e.stderr else "No stderr"
stdout = e.stdout.decode('utf8') if e.stdout else "No stdout"
print(f"stdout: {stdout}")
# More detailed error information will be printed/logged here.
print(f"stderr: {stderr}")
raise # Re-raise exception after logging details
return output_wav_path
# RESEMBLE ENHANCE
def save_audio(out_folder, file_name, rate, audio_data):
os.makedirs(out_folder, exist_ok=True)
file_path = os.path.join(out_folder, file_name)
with open(file_path, 'wb') as f:
wavfile.write(f, rate, audio_data)
return file_path
def clear_gpu_cache():
# del model
gc.collect()
if torch.cuda.is_available():
torch.cuda.empty_cache()
def resemble_enhance_audio(audio_path,
use_enhance,
use_denoise=False,
solver='Midpoint',
nfe=64,
tau=0.5,
chunk_seconds=8,
chunks_overlap=1,
denoising=False,
output_type="wav",
output_folder=""):
if audio_path is None:
return None, None
device = 'cuda' if torch.cuda.is_available() else 'cpu'
dwav, orig_sr = torchaudio.load(audio_path)
dwav = dwav.mean(dim=0).to(device)
denoise_path = None
enhance_path = None
if use_denoise:
wav1, new_sr_1 = denoise(dwav.cpu(), orig_sr, device)
denoise_file_name = f"{Path(audio_path).stem}_denoise.{output_type}"
out_folder = Path("./output") / output_folder
denoise_path = save_audio(
out_folder, denoise_file_name, new_sr_1, wav1.numpy())
if use_enhance:
lambd = 0.9 if denoising else 0.1
solver = solver.lower()
nfe = int(nfe)
|
def save_audio_to_wav(rate, y, this_dir, max_duration=None):
    # Determine the bit depth of the source audio.
bit_depth = y.dtype.itemsize * 8
# Convert to 16-bit data if necessary.
if not (bit_depth == 16):
if bit_depth == 32:
audio_data = np.asarray(
y / np.max(np.abs(y)) * 32767, dtype=np.int16)
elif bit_depth == 24:
audio_data = np.asarray(
(y / (2**8)) // (2**(bit_depth - 16)), dtype=np.int16)
        else:  # For other bit depths, apply the general min-max normalization.
max_val = float(np.iinfo(np.int16).max)
min_val = float(np.iinfo(np.int16).min)
audio_data = np.asarray(
((y - y.min()) / (y.max() - y.min())) * (max_val - min_val) + min_val, dtype=np.int16)
else:
# If the data is already in int16 format, use it directly.
audio_data = np.asarray(y, dtype=np.int16)
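    # Illustrative numbers only: the scaling above maps other bit depths onto the int16
    # range, e.g. a 32-bit float sample of 0.5 with a peak of 1.0 becomes
    # int(0.5 * 32767) = 16383 after the cast to np.int16.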
temp_folder = Path(this_dir) / 'temp'
# print(rate,y)
os.makedirs(temp_folder, exist_ok=True)
wav_name = f'speaker_ref_{uuid.uuid4()}.wav'
original_wav_path = str(temp_folder / wav_name)
# Save the audio data to a file without changing the sampling rate.
wavfile.write(original_wav_path, rate, audio_data)
if max_duration is not None and max_duration != 0:
output_wav_path = str(temp_folder / f'cut_{wav_name}')
(
ffmpeg.input(original_wav_path)
.output(output_wav_path, t=max_duration)
.run(overwrite_output=True, quiet=True)
)
os.remove(original_wav_path)
return output_wav_path
return original_wav_path
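# Hypothetical usage sketch (names and values are examples, not taken from the callers):
#   path = save_audio_to_wav(24000, samples, this_dir=".", max_duration=10)
# where `samples` is a 1-D numpy array; the file lands in ./temp/ as
# speaker_ref_<uuid>.wav, or cut_speaker_ref_<uuid>.wav when max_duration is set.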
def resample_audio(input_wav_path, this_dir, target_rate=22050):
temp_folder = Path(this_dir) / 'temp'
temp_folder.mkdir(parents=True, exist_ok=True)
output_wav_name = f"resampled_audio_{uuid.uuid4()}.wav"
output_wav_path = temp_folder / output_wav_name
(
ffmpeg
.input(str(input_wav_path))
.output(str(output_wav_path), ar=target_rate, acodec='pcm_s16le', ac=1)
.run(overwrite_output=True, quiet=True)
)
return str(output_wav_path)
def improve_ref_audio(input_wav_path, this_dir):
input_wav_path = Path(input_wav_path)
this_dir = Path(this_dir)
temp_folder = Path(this_dir) / 'temp'
temp_folder.mkdir(parents=True, exist_ok=True)
# Generating output file name
out_filename = temp_folder / f"{input_wav_path.stem}_refined.wav"
print(input_wav_path)
# Applying filters to an audio stream using ffmpeg-python
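    # The chain band-limits the speech (lowpass/highpass), reverses the clip, strips what
    # was trailing silence, reverses back, then strips leading silence - i.e. silence is
    # trimmed from both ends of the reference audio.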
(
ffmpeg
.input(str(input_wav_path))
.filter('lowpass', frequency=8000)
.filter('highpass', frequency=75)
.filter_('areverse')
.filter_('silenceremove', start_periods=1, start_silence=0, start_threshold=0.02)
.filter_('areverse')
.filter_('silenceremove', start_periods=1, start_silence=0, start_threshold=0.02)
.output(str(out_filename))
.overwrite_output()
.run(quiet=True)
)
return str(out_filename)
def move_and_rename_file(file_path, target_folder_path, new_file_name):
# Make sure that the new file name contains the correct .wav extension
if not new_file_name.lower().endswith('.wav'):
new_file_name += '.wav'
# Create Path objects for easy handling of paths
file_path = Path(file_path)
target_folder_path = Path(target_folder_path)
# Creating a target directory if it does not exist
target_folder_path.mkdir(parents=True, exist_ok=True)
# Full path to the new file in the destination folder
target_file_path = target_folder_path / new_file_name
# Move and rename a file
file_path.rename(target_file_path)
def improve_and_convert_audio(audio_path, type_audio):
# Read audio file and apply effects via Pedalboard
audio_data, sample_rate = sf.read(audio_path)
board = Pedalboard([
NoiseGate(threshold_db=-30, ratio=1.5, release_ms=250),
Compressor(threshold_db=12, ratio=2.5),
LowShelfFilter(cutoff_frequency_hz=400, gain_db=5),
Gain(gain_db=0),
])
reduced_noise = noisereduce.reduce_noise(y=audio_data,
sr=sample_rate,
stationary=True,
prop_decrease=0.75)
processed_audio = board(reduced_noise.astype('float32'), sample_rate)
# processed_audio = board(audio_data.astype('float32'), sample_rate)
# Create a temporary file for the processed audio
with tempfile.NamedTemporaryFile(suffix='.wav', delete=False) as temp_file:
sf.write(temp_file.name, processed_audio.T if processed_audio.ndim >
1 else processed_audio, sample_rate)
temp_file_path = temp_file.name
# Defining an output file name with a new extension in the same folder
output_path = f"{os.path.splitext(audio_path)[0]}_improved.{type_audio}"
# Convert the processed wav file to the target format using FFmpeg
stream = (
ffmpeg
.input(temp_file_path)
.output(output_path)
.overwrite_output()
.run_async(pipe_stdout=True, pipe_stderr=True)
)
out, err = stream.communicate()
if stream.returncode != 0:
raise Exception(f"FFmpeg error:\n{err.decode()}")
# Deleting a temporary wav file after it has been used
os.unlink(temp_file_path)
return output_path
def cut_audio(input_wav_path, duration):
output_wav_path = input_wav_path.with_name(
f"{input_wav_path.stem}_cut{input_wav_path.suffix}")
try:
(
ffmpeg
.input(str(input_wav_path))
.output(str(output_wav_path), t=duration)
.run(overwrite_output=True)
)
except ffmpeg.Error as e: # Catching specific ffmpeg Error here.
# Check if stderr or stdout have been captured before trying to decode.
stderr = e.stderr.decode('utf8') if e.stderr else "No stderr"
stdout = e.stdout.decode('utf8') if e.stdout else "No stdout"
print(f"stdout: {stdout}")
# More detailed error information will be printed/logged here.
print(f"stderr: {stderr}")
raise # Re-raise exception after logging details
return output_wav_path
# RESEMBLE ENHANCE
def save_audio(out_folder, file_name, rate, audio_data):
os.makedirs(out_folder, exist_ok=True)
file_path = os.path.join(out_folder, file_name)
with open(file_path, 'wb') as f:
wavfile.write(f, rate, audio_data)
return file_path
def clear_gpu_cache():
# del model
gc.collect()
if torch.cuda.is_available():
torch.cuda.empty_cache()
def resemble_enhance_audio(audio_path,
use_enhance,
use_denoise=False,
solver='Midpoint',
nfe=64,
tau=0.5,
chunk_seconds=8,
chunks_overlap=1,
denoising=False,
output_type="wav",
output_folder=""):
if audio_path is None:
return None, None
device = 'cuda' if torch.cuda.is_available() else 'cpu'
dwav, orig_sr = torchaudio.load(audio_path)
dwav = dwav.mean(dim=0).to(device)
denoise_path = None
enhance_path = None
if use_denoise:
wav1, new_sr_1 = denoise(dwav.cpu(), orig_sr, device)
denoise_file_name = f"{Path(audio_path).stem}_denoise.{output_type}"
out_folder = Path("./output") / output_folder
denoise_path = save_audio(
out_folder, denoise_file_name, new_sr_1, wav1.numpy())
if use_enhance:
lambd = 0.9 if denoising else 0.1
solver = solver.lower()
nfe = int(nfe)
| wav2, new_sr_2 = enhance(dwav=dwav.cpu(), sr=orig_sr, device=device, | 1 | 2023-12-14 06:34:12+00:00 | 4k |
FrozenBurning/PrimDiffusion | dva/io.py | [
{
"identifier": "AttrDict",
"path": "dva/attr_dict.py",
"snippet": "class AttrDict:\n def __init__(self, entries):\n self.add_entries_(entries)\n\n def keys(self):\n return self.__dict__.keys()\n\n def values(self):\n return self.__dict__.values()\n\n def __getitem__(self, key):\n return self.__dict__[key]\n\n def __setitem__(self, key, value):\n self.__dict__[key] = value\n\n def __delitem__(self, key):\n return self.__dict__.__delitem__(key)\n\n def __contains__(self, key):\n return key in self.__dict__\n\n def __repr__(self):\n return self.__dict__.__repr__()\n\n def __getattr__(self, attr):\n if attr.startswith(\"__\"):\n return self.__getattribute__(attr)\n return self.__dict__[attr]\n\n def items(self):\n return self.__dict__.items()\n\n def __iter__(self):\n return iter(self.items())\n\n def add_entries_(self, entries, overwrite=True):\n for key, value in entries.items():\n if key not in self.__dict__:\n if isinstance(value, dict):\n self.__dict__[key] = AttrDict(value)\n else:\n self.__dict__[key] = value\n else:\n if isinstance(value, dict):\n self.__dict__[key].add_entries_(entries=value, overwrite=overwrite)\n elif overwrite or self.__dict__[key] is None:\n self.__dict__[key] = value\n\n def serialize(self):\n return json.dumps(self, default=self.obj_to_dict, indent=4)\n\n def obj_to_dict(self, obj):\n return obj.__dict__\n\n def get(self, key, default=None):\n return self.__dict__.get(key, default)"
},
{
"identifier": "compute_v2uv",
"path": "dva/geom.py",
"snippet": "def compute_v2uv(n_verts, vi, vti, n_max=4):\n \"\"\"Computes mapping from vertex indices to texture indices.\n\n Args:\n vi: [F, 3], triangles\n vti: [F, 3], texture triangles\n n_max: int, max number of texture locations\n\n Returns:\n [n_verts, n_max], texture indices\n \"\"\"\n v2uv_dict = {}\n for i_v, i_uv in zip(vi.reshape(-1), vti.reshape(-1)):\n v2uv_dict.setdefault(i_v, set()).add(i_uv)\n assert len(v2uv_dict) == n_verts\n v2uv = np.zeros((n_verts, n_max), dtype=np.int32)\n for i in range(n_verts):\n vals = sorted(list(v2uv_dict[i]))\n v2uv[i, :] = vals[0]\n v2uv[i, : len(vals)] = np.array(vals)\n return v2uv"
},
{
"identifier": "compute_neighbours",
"path": "dva/geom.py",
"snippet": "def compute_neighbours(n_verts, vi, n_max_values=10):\n \"\"\"Computes first-ring neighbours given vertices and faces.\"\"\"\n n_vi = vi.shape[0]\n\n adj = {i: set() for i in range(n_verts)}\n for i in range(n_vi):\n for idx in vi[i]:\n adj[idx] |= set(vi[i]) - set([idx])\n\n nbs_idxs = np.tile(np.arange(n_verts)[:, np.newaxis], (1, n_max_values))\n nbs_weights = np.zeros((n_verts, n_max_values), dtype=np.float32)\n\n for idx in range(n_verts):\n n_values = min(len(adj[idx]), n_max_values)\n nbs_idxs[idx, :n_values] = np.array(list(adj[idx]))[:n_values]\n nbs_weights[idx, :n_values] = -1.0 / n_values\n\n return nbs_idxs, nbs_weights"
}
] | import json
import cv2
import numpy as np
import copy
import importlib
import pickle
import os
from typing import Any, Dict
from dva.attr_dict import AttrDict
from dva.geom import compute_v2uv, compute_neighbours | 1,758 | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
def load_module(module_name, class_name=None, silent: bool = False):
module = importlib.import_module(module_name)
return getattr(module, class_name) if class_name else module
def load_class(class_name):
return load_module(*class_name.rsplit(".", 1))
def load_from_config(config, **kwargs):
"""Instantiate an object given a config and arguments."""
assert "class_name" in config and "module_name" not in config
config = copy.deepcopy(config)
class_name = config.pop("class_name")
object_class = load_class(class_name)
return object_class(**config, **kwargs)
def load_opencv_calib(extrin_path, intrin_path):
cameras = {}
fse = cv2.FileStorage()
fse.open(extrin_path, cv2.FileStorage_READ)
fsi = cv2.FileStorage()
fsi.open(intrin_path, cv2.FileStorage_READ)
names = [
fse.getNode("names").at(c).string() for c in range(fse.getNode("names").size())
]
for camera in names:
rot = fse.getNode(f"R_{camera}").mat()
R = fse.getNode(f"Rot_{camera}").mat()
T = fse.getNode(f"T_{camera}").mat()
R_pred = cv2.Rodrigues(rot)[0]
assert np.all(np.isclose(R_pred, R))
K = fsi.getNode(f"K_{camera}").mat()
cameras[camera] = {
"Rt": np.concatenate([R, T], axis=1).astype(np.float32),
"K": K.astype(np.float32),
}
return cameras
def load_smpl_params(params):
return {
k: np.array(v[0], dtype=np.float32) for k, v in params[0].items() if k != "id"
}
def load_smpl_topology(data_struct) -> Dict[str, Any]:
# TODO: compute_
topology = {
"vi": data_struct["f"].astype(np.int64),
"vti": data_struct["ft"].astype(np.int64),
"vt": data_struct["vt"].astype(np.float32),
"n_verts": data_struct["v_template"].shape[0],
}
topology["v2uv"] = compute_v2uv(
topology["n_verts"], topology["vi"], topology["vti"]
)
nbs_idxs, nbs_weights = compute_neighbours(
topology["v"].shape[0], topology["vi"], 8
)
topology.update({"nbs_idxs": nbs_idxs, "nbs_weights": nbs_weights})
return {
"topology": topology,
"lbs_template_verts": data_struct["v_template"].astype(np.float32),
}
def read_pickle(pkl_path):
with open(pkl_path, 'rb') as f:
u = pickle._Unpickler(f)
u.encoding = 'latin1'
return u.load()
def load_static_assets_crossid_smpl(config):
# with chumpy dependency!!!
data_struct = read_pickle(config.data.smpl_topology)
vt = np.load(os.path.join(os.path.dirname(config.data.smpl_topology), 'basicModel_vt.npy'))
ft = np.load(os.path.join(os.path.dirname(config.data.smpl_topology), 'basicModel_ft.npy'))
n_verts = data_struct["v_template"].shape[0]
| # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
def load_module(module_name, class_name=None, silent: bool = False):
module = importlib.import_module(module_name)
return getattr(module, class_name) if class_name else module
def load_class(class_name):
return load_module(*class_name.rsplit(".", 1))
def load_from_config(config, **kwargs):
"""Instantiate an object given a config and arguments."""
assert "class_name" in config and "module_name" not in config
config = copy.deepcopy(config)
class_name = config.pop("class_name")
object_class = load_class(class_name)
return object_class(**config, **kwargs)
def load_opencv_calib(extrin_path, intrin_path):
cameras = {}
fse = cv2.FileStorage()
fse.open(extrin_path, cv2.FileStorage_READ)
fsi = cv2.FileStorage()
fsi.open(intrin_path, cv2.FileStorage_READ)
names = [
fse.getNode("names").at(c).string() for c in range(fse.getNode("names").size())
]
for camera in names:
rot = fse.getNode(f"R_{camera}").mat()
R = fse.getNode(f"Rot_{camera}").mat()
T = fse.getNode(f"T_{camera}").mat()
R_pred = cv2.Rodrigues(rot)[0]
assert np.all(np.isclose(R_pred, R))
K = fsi.getNode(f"K_{camera}").mat()
cameras[camera] = {
"Rt": np.concatenate([R, T], axis=1).astype(np.float32),
"K": K.astype(np.float32),
}
return cameras
def load_smpl_params(params):
return {
k: np.array(v[0], dtype=np.float32) for k, v in params[0].items() if k != "id"
}
def load_smpl_topology(data_struct) -> Dict[str, Any]:
# TODO: compute_
topology = {
"vi": data_struct["f"].astype(np.int64),
"vti": data_struct["ft"].astype(np.int64),
"vt": data_struct["vt"].astype(np.float32),
"n_verts": data_struct["v_template"].shape[0],
}
topology["v2uv"] = compute_v2uv(
topology["n_verts"], topology["vi"], topology["vti"]
)
nbs_idxs, nbs_weights = compute_neighbours(
topology["v"].shape[0], topology["vi"], 8
)
topology.update({"nbs_idxs": nbs_idxs, "nbs_weights": nbs_weights})
return {
"topology": topology,
"lbs_template_verts": data_struct["v_template"].astype(np.float32),
}
def read_pickle(pkl_path):
with open(pkl_path, 'rb') as f:
u = pickle._Unpickler(f)
u.encoding = 'latin1'
return u.load()
def load_static_assets_crossid_smpl(config):
# with chumpy dependency!!!
data_struct = read_pickle(config.data.smpl_topology)
vt = np.load(os.path.join(os.path.dirname(config.data.smpl_topology), 'basicModel_vt.npy'))
ft = np.load(os.path.join(os.path.dirname(config.data.smpl_topology), 'basicModel_ft.npy'))
n_verts = data_struct["v_template"].shape[0]
| topology = AttrDict( | 0 | 2023-12-06 05:12:55+00:00 | 4k |
Nearcyan/papers.day | scrape_abs.py | [
{
"identifier": "ArxivPaper",
"path": "backend/models.py",
"snippet": "class ArxivPaper(models.Model):\n created_at = models.DateTimeField(auto_now_add=True)\n modified_at = models.DateTimeField(auto_now=True)\n arxiv_id = models.CharField(max_length=20, unique=True)\n\n # fields scraped from the paper page:\n title = models.CharField(max_length=255, db_index=True)\n abstract = models.TextField(db_index=True)\n authors = models.ManyToManyField(Author)\n primary_subject = models.ForeignKey(Subject, on_delete=models.CASCADE, null=True, blank=True)\n subjects = models.ManyToManyField(Subject, related_name=\"papers\")\n comment = models.TextField(null=True, blank=True)\n doi = models.CharField(max_length=255, null=True, blank=True)\n journal_ref = models.CharField(max_length=255, null=True, blank=True)\n publication_date = models.DateField()\n\n # fields we create\n summary = models.TextField(db_index=True)\n total_author_citations = models.IntegerField(default=0, db_index=True)\n citations = models.IntegerField(default=0, db_index=True)\n\n # file fields\n pdf = models.FileField(upload_to=\"pdfs\", null=True, blank=True)\n screenshot = models.ImageField(upload_to=\"screenshots\", null=True, blank=True)\n source_tar = models.FileField(upload_to=\"tar_sources\", null=True, blank=True)\n images = models.ManyToManyField(PaperImage, related_name=\"paper_images\")\n sources = models.ManyToManyField(PaperSource, related_name=\"paper_sources\")\n\n def abstract_link(self) -> str:\n return f\"https://arxiv.org/abs/{self.arxiv_id}\"\n\n def pdf_link(self) -> str:\n return f\"https://arxiv.org/pdf/{self.arxiv_id}.pdf\"\n\n def source_link(self) -> str:\n return f\"https://arxiv.org/e-print/{self.arxiv_id}\"\n\n def __str__(self):\n return self.title"
},
{
"identifier": "Author",
"path": "backend/models.py",
"snippet": "class Author(models.Model):\n created_at = models.DateTimeField(auto_now_add=True)\n modified_at = models.DateTimeField(auto_now=True)\n\n name = models.CharField(max_length=255, db_index=True)\n affiliation = models.CharField(max_length=255, null=True, blank=True, db_index=True)\n email = models.EmailField(null=True, blank=True)\n email_domain = models.CharField(max_length=255, null=True, blank=True, db_index=True)\n citations = models.IntegerField(default=0, db_index=True)\n scholar_id = models.CharField(max_length=255, null=True, blank=True)\n\n def __str__(self):\n return self.name"
},
{
"identifier": "Subject",
"path": "backend/models.py",
"snippet": "class Subject(models.Model):\n created_at = models.DateTimeField(auto_now_add=True)\n modified_at = models.DateTimeField(auto_now=True)\n\n short_name = models.CharField(max_length=255)\n full_name = models.CharField(max_length=255)\n\n def __str__(self):\n return self.full_name"
},
{
"identifier": "PaperImage",
"path": "backend/models.py",
"snippet": "class PaperImage(models.Model):\n created_at = models.DateTimeField(auto_now_add=True)\n modified_at = models.DateTimeField(auto_now=True)\n\n image = models.ImageField(upload_to=\"images\")\n paper = models.ForeignKey(\"ArxivPaper\", on_delete=models.CASCADE)"
},
{
"identifier": "PaperSource",
"path": "backend/models.py",
"snippet": "class PaperSource(models.Model):\n created_at = models.DateTimeField(auto_now_add=True)\n modified_at = models.DateTimeField(auto_now=True)\n\n content = models.TextField()\n paper = models.ForeignKey(\"ArxivPaper\", on_delete=models.CASCADE)"
}
] | import argparse
import shutil
import tempfile
import django
import fitz
import random
import requests
import re
import tarfile
import os
from openai import OpenAI
from scholarly import scholarly # if this breaks, run pip install --upgrade httpx
from scholarly import ProxyGenerator
from datetime import datetime
from bs4 import BeautifulSoup
from django.core.files.base import ContentFile
from django.conf import settings
from backend.models import ArxivPaper, Author, Subject, PaperImage, PaperSource | 3,589 | jref = None
comments = soup.find('td', class_='tablecell comments')
if comments:
comments = comments.get_text(strip=True)
comments = re.sub(r'Comments:', '', comments)
comments = re.sub(r'\n', '', comments)
comments = re.sub(r' ', '', comments)
print(f'[{arxiv_id}] Comments: {comments}')
else:
comments = None
doi = soup.find('td', class_='tablecell arxivdoi')
if doi:
doi = doi.find('a')
doi = doi.get_text(strip=True)
doi = re.sub(r'DOI:', '', doi)
doi = re.sub(r'\n', '', doi)
doi = re.sub(r' ', '', doi)
print(f'[{arxiv_id}] DOI: {doi}')
else:
doi = None
# Get the date
date_tag = soup.find('div', class_='dateline')
date_string = date_tag.get_text(strip=True)
date_string = re.sub(r' \(v.*\)', '', date_string)
date_match = re.search(r'\[Submitted on (.+)\]', date_string)
if date_match:
date_string = date_match.group(1)
date = datetime.strptime(date_string, '%d %b %Y').date()
else:
date = None
# Download the pdf
pdf_url = f'https://arxiv.org/pdf/{arxiv_id}.pdf'
try:
pdf_response = requests.get(pdf_url)
if pdf_response.status_code != 200:
print(f'[{arxiv_id}] Error occurred while downloading pdf from {pdf_url}')
return None
except Exception as e:
print(f'[{arxiv_id}] Error occurred while downloading pdf from {pdf_url}: {e}')
return None
pdf_content = pdf_response.content
pdf_file = ContentFile(pdf_content, name=f'{arxiv_id}.pdf')
# Download the source
source_url = f'https://arxiv.org/e-print/{arxiv_id}'
try:
source_response = requests.get(source_url)
print(f'[{arxiv_id}] Downloading source from {source_url}')
if source_response.status_code != 200:
print(f'[{arxiv_id}] Error occurred while downloading source from {source_url}')
return None
except Exception as e:
print(f'[{arxiv_id}] Error occurred while downloading source from {source_url}: {e}')
return None
source_content = source_response.content
source_tar = ContentFile(source_content, name=f'{arxiv_id}.tar.gz')
paper = ArxivPaper.objects.create(title=title, abstract=abstract, publication_date=date, arxiv_id=arxiv_id, doi=doi,
pdf=pdf_file, primary_subject=prim_subject, journal_ref=jref, comment=comments,
source_tar=source_tar)
# extract the source:
temp_dir = tempfile.mkdtemp()
try:
extract_tar_gz(paper.source_tar.path, temp_dir)
# grab all images from the source:
images = create_image_objects(temp_dir, paper)
for image in images:
paper.images.add(image)
print(f'[{arxiv_id}] Added {len(images)} images')
sources = create_tex_files(temp_dir, paper)
for source in sources:
paper.sources.add(source)
print(f'[{arxiv_id}] Added {len(sources)} sources')
except Exception as e:
print(f'[{arxiv_id}] Error occurred while extracting source: {e}')
# not a fatal exception, some papers do not provide tar.gz files and the source can just be e.g. a pdf
finally:
delete_files(temp_dir)
# Get a screenshot
screenshot_path = get_paper_screenshot_from_pdf(paper.pdf.path)
if screenshot_path:
screenshot = ContentFile(open(screenshot_path, 'rb').read(), name=f'{arxiv_id}.png')
paper.screenshot = screenshot
os.remove(screenshot_path)
# get a summary
try:
summary = get_paper_summary_from_abstract(paper.abstract)
paper.summary = summary
paper.save()
except Exception as e:
print(f"Exception while generating completion: {e}")
paper.delete()
return None
# get number of citations
if google_scholar:
try:
search_query = scholarly.search_pubs(f'"{paper.title}"', patents=False, citations=False)
first_paper_result = next(search_query)
citations = first_paper_result['num_citations']
paper.citations = citations
paper.save()
print(f'[{arxiv_id}] Citations: {citations}')
if citations > 1000:
interesting_paper = True
print(f'[{arxiv_id}] Interesting paper: {citations} citations')
except Exception as e:
print(f'[{arxiv_id}] Could not find paper on Google Scholar')
total_author_citations = 0
for author_name in authors:
# get author if exists:
|
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'papers.settings')
django.setup()
def extract_tar_gz(file_path: str, output_dir: str) -> None:
"""
Extract a tar.gz file to the specified output directory
:param file_path: The path to the tar.gz file
:param output_dir: The directory to extract the tar.gz file to
:return: None
"""
with tarfile.open(file_path, 'r:gz') as tar:
tar.extractall(output_dir)
def create_image_objects(directory: str, paper) -> list:
"""
Given a directory which contains images, this function will create PaperImage objects for each image
:param directory: The directory containing the images
:return: The list of PaperImage objects
"""
image_files = [os.path.join(root, f) for root, _, files in os.walk(directory) for f in files if
f.lower().endswith(('.png', '.jpg', '.jpeg', '.gif'))]
images = []
for image_file in image_files:
image_path = os.path.join(directory, image_file)
with open(image_path, 'rb') as file:
filename = paper.arxiv_id + '_' + os.path.basename(image_path)
django_file = ContentFile(file.read(), name=filename)
image = PaperImage(image=django_file, paper=paper)
image.save()
images.append(image)
return images
def create_tex_files(directory: str, paper) -> list:
"""
Given a directory which contains tex files, this function will create PaperSource objects for each tex file
:param directory: The directory containing the tex files
:return: The list of PaperSource objects
"""
tex_files = [f for f in os.listdir(directory) if f.lower().endswith('.tex')]
sources = []
for tex_file in tex_files:
tex_path = os.path.join(directory, tex_file)
with open(tex_path, 'r') as f:
tex_content = f.read()
source = PaperSource(content=tex_content, paper=paper)
source.save()
sources.append(source)
return sources
def delete_files(directory: str) -> None:
"""
Delete all files in a directory
:param directory: The directory to delete the files from
:return: None
"""
for root, dirs, files in os.walk(directory):
for f in files:
os.unlink(os.path.join(root, f))
for d in dirs:
shutil.rmtree(os.path.join(root, d))
def get_paper_screenshot_from_pdf(pdf_path) -> str:
"""
Get a screenshot of the first page of the pdf
:param pdf_path: The path to the pdf
:return: The path to the screenshot
"""
try:
pdf = fitz.open(pdf_path)
page = pdf.load_page(0)
pix = page.get_pixmap(alpha=False)
random_int = random.randint(0, 1000000)
temp_filename = f'temp_{random_int}.png'
pix.save(temp_filename, "png")
return temp_filename
except Exception as e:
print(f'Error occurred while getting screenshot of pdf: {pdf_path}')
return None
def get_paper_summary_from_abstract(abstract: str) -> str:
"""
Get a summary of the paper from the abstract
:param abstract: The abstract of the paper
:return: The summary of the paper
"""
client = OpenAI()
client.api_key = settings.OPENAI_API_KEY
prompt = f"Summarize the following AI paper abstract in two sentences:\nAbstract: {abstract}\nSummary:"
response = client.completions.create(
model="text-davinci-003",
prompt=prompt,
temperature=0.9,
max_tokens=512,
top_p=1,
frequency_penalty=0,
presence_penalty=0.6,
)
summary = response.choices[0].text
return summary.strip()
def scrape_paper(arxiv_id, google_scholar=False):
"""
Scrape the paper with the given arxiv_id and save it to the database
:param arxiv_id: The arxiv_id of the paper
:param google_scholar: True if google scholar lookups should be performed, else false
:return: The saved ArxivPaper object
"""
# Send a GET request to the URL and retrieve the HTML content
url = f'https://arxiv.org/abs/{arxiv_id}'
if ArxivPaper.objects.filter(arxiv_id=arxiv_id).exists():
print(f'[{arxiv_id}] Paper with id {arxiv_id} already exists')
return None
else:
print(f'[{arxiv_id}] Scraping paper: {url}')
try:
response = requests.get(url)
html_content = response.content
except Exception as e:
print(f'[{arxiv_id}] Error occurred while scraping {url}')
return None
# Create a BeautifulSoup object to parse the HTML
soup = BeautifulSoup(html_content, 'html.parser')
# Get the title
title_tag = soup.find('h1', class_='title')
title = title_tag.get_text(strip=True)
title = re.sub(r'Title:', '', title)
print(f'[{arxiv_id}] Title: {title}')
# Get the abstract
abstract_tag = soup.find('blockquote', class_='abstract')
abstract = abstract_tag.get_text(strip=True)
# remove various things
abstract = re.sub(r'Abstract:', '', abstract)
abstract = re.sub(r'\n', ' ', abstract)
abstract = re.sub(r' ', ' ', abstract)
# Get the authors
author_div = soup.find('div', class_='authors')
author_tags = author_div.find_all('a')
authors = [author.get_text(strip=True) for author in author_tags]
# Get the primary subject
primary_subject = soup.find('span', class_='primary-subject').get_text(strip=True)
short_name = primary_subject.split('(')[1].replace(')', '').strip()
full_name = primary_subject.split('(')[0].strip()
print(f'[{arxiv_id}] Primary subject: {short_name} - {full_name}')
prim_subject = Subject.objects.filter(short_name=short_name).first()
if not prim_subject:
prim_subject = Subject.objects.create(short_name=short_name, full_name=full_name)
print(f'[{arxiv_id}] Creating subject: {short_name} - {full_name}')
# get everything inside of 'subjects' that is not in a <span>:
subject_div = soup.find('td', class_='subjects')
subject_text = subject_div.get_text(strip=True)
subject_text = re.sub(r'<span.*span>', '', subject_text)
subject_list = subject_text.split(';')
subject_list = [subject.strip() for subject in subject_list]
subjects = [subject for subject in subject_list if subject]
jref = soup.find('td', class_='tablecell jref')
if jref:
jref = jref.get_text(strip=True)
jref = re.sub(r'Journal ref:', '', jref)
jref = re.sub(r'\n', '', jref)
jref = re.sub(r' ', '', jref)
print(f'[{arxiv_id}] Journal ref: {jref}')
else:
jref = None
comments = soup.find('td', class_='tablecell comments')
if comments:
comments = comments.get_text(strip=True)
comments = re.sub(r'Comments:', '', comments)
comments = re.sub(r'\n', '', comments)
comments = re.sub(r' ', '', comments)
print(f'[{arxiv_id}] Comments: {comments}')
else:
comments = None
doi = soup.find('td', class_='tablecell arxivdoi')
if doi:
doi = doi.find('a')
doi = doi.get_text(strip=True)
doi = re.sub(r'DOI:', '', doi)
doi = re.sub(r'\n', '', doi)
doi = re.sub(r' ', '', doi)
print(f'[{arxiv_id}] DOI: {doi}')
else:
doi = None
# Get the date
date_tag = soup.find('div', class_='dateline')
date_string = date_tag.get_text(strip=True)
date_string = re.sub(r' \(v.*\)', '', date_string)
date_match = re.search(r'\[Submitted on (.+)\]', date_string)
if date_match:
date_string = date_match.group(1)
date = datetime.strptime(date_string, '%d %b %Y').date()
else:
date = None
# Download the pdf
pdf_url = f'https://arxiv.org/pdf/{arxiv_id}.pdf'
try:
pdf_response = requests.get(pdf_url)
if pdf_response.status_code != 200:
print(f'[{arxiv_id}] Error occurred while downloading pdf from {pdf_url}')
return None
except Exception as e:
print(f'[{arxiv_id}] Error occurred while downloading pdf from {pdf_url}: {e}')
return None
pdf_content = pdf_response.content
pdf_file = ContentFile(pdf_content, name=f'{arxiv_id}.pdf')
# Download the source
source_url = f'https://arxiv.org/e-print/{arxiv_id}'
try:
source_response = requests.get(source_url)
print(f'[{arxiv_id}] Downloading source from {source_url}')
if source_response.status_code != 200:
print(f'[{arxiv_id}] Error occurred while downloading source from {source_url}')
return None
except Exception as e:
print(f'[{arxiv_id}] Error occurred while downloading source from {source_url}: {e}')
return None
source_content = source_response.content
source_tar = ContentFile(source_content, name=f'{arxiv_id}.tar.gz')
paper = ArxivPaper.objects.create(title=title, abstract=abstract, publication_date=date, arxiv_id=arxiv_id, doi=doi,
pdf=pdf_file, primary_subject=prim_subject, journal_ref=jref, comment=comments,
source_tar=source_tar)
# extract the source:
temp_dir = tempfile.mkdtemp()
try:
extract_tar_gz(paper.source_tar.path, temp_dir)
# grab all images from the source:
images = create_image_objects(temp_dir, paper)
for image in images:
paper.images.add(image)
print(f'[{arxiv_id}] Added {len(images)} images')
sources = create_tex_files(temp_dir, paper)
for source in sources:
paper.sources.add(source)
print(f'[{arxiv_id}] Added {len(sources)} sources')
except Exception as e:
print(f'[{arxiv_id}] Error occurred while extracting source: {e}')
# not a fatal exception, some papers do not provide tar.gz files and the source can just be e.g. a pdf
finally:
delete_files(temp_dir)
# Get a screenshot
screenshot_path = get_paper_screenshot_from_pdf(paper.pdf.path)
if screenshot_path:
screenshot = ContentFile(open(screenshot_path, 'rb').read(), name=f'{arxiv_id}.png')
paper.screenshot = screenshot
os.remove(screenshot_path)
# get a summary
try:
summary = get_paper_summary_from_abstract(paper.abstract)
paper.summary = summary
paper.save()
except Exception as e:
print(f"Exception while generating completion: {e}")
paper.delete()
return None
# get number of citations
if google_scholar:
try:
search_query = scholarly.search_pubs(f'"{paper.title}"', patents=False, citations=False)
first_paper_result = next(search_query)
citations = first_paper_result['num_citations']
paper.citations = citations
paper.save()
print(f'[{arxiv_id}] Citations: {citations}')
if citations > 1000:
interesting_paper = True
print(f'[{arxiv_id}] Interesting paper: {citations} citations')
except Exception as e:
print(f'[{arxiv_id}] Could not find paper on Google Scholar')
total_author_citations = 0
for author_name in authors:
# get author if exists: | author = Author.objects.filter(name=author_name).first() | 1 | 2023-12-14 08:23:05+00:00 | 4k |
yanzq95/SGNet | train.py | [
{
"identifier": "Middlebury_dataset",
"path": "data/middlebury_dataloader.py",
"snippet": "class Middlebury_dataset(Dataset):\n \"\"\"RGB-D-D Dataset.\"\"\"\n\n def __init__(self, root_dir, scale=8, transform=None):\n \"\"\"\n Args:\n root_dir (string): Directory with all the images.\n scale (float): dataset scale\n transform (callable, optional): Optional transform to be applied on a sample.\n \"\"\"\n\n self.transform = transform\n self.scale = scale\n\n self.GTs = []\n self.RGBs = []\n \n list_dir = os.listdir(root_dir)\n for name in list_dir:\n if name.find('output_color') > -1:\n self.RGBs.append('%s/%s' % (root_dir, name))\n elif name.find('output_depth') > -1:\n self.GTs.append('%s/%s' % (root_dir, name))\n self.RGBs.sort()\n self.GTs.sort()\n\n def __len__(self):\n return len(self.GTs)\n\n def __getitem__(self, idx):\n \n image = np.array(Image.open(self.RGBs[idx]))\n gt = np.array(Image.open(self.GTs[idx]))\n assert gt.shape[0] == image.shape[0] and gt.shape[1] == image.shape[1]\n s = self.scale \n image = modcrop(image, s)\n gt = modcrop(gt, s)\n\n h, w = gt.shape[0], gt.shape[1]\n s = self.scale\n\n lr = np.array(Image.fromarray(gt).resize((w//s,h//s),Image.BICUBIC)).astype(np.float32)\n gt = gt / 255.0\n image = image / 255.0\n lr = lr / 255.0\n \n\n if self.transform:\n image = self.transform(image).float()\n gt = self.transform(np.expand_dims(gt,2))\n lr = self.transform(np.expand_dims(lr,2)).float()\n\n # sample = {'guidance': image, 'lr': lr, 'gt': gt, 'max':maxx, 'min': minn}\n sample = {'guidance': image, 'lr': lr, 'gt': gt}\n return sample"
},
{
"identifier": "calc_rmse",
"path": "utils.py",
"snippet": "def calc_rmse(a, b, minmax):\n a = a[6:-6, 6:-6]\n b = b[6:-6, 6:-6]\n \n a = a*(minmax[0]-minmax[1]) + minmax[1]\n b = b*(minmax[0]-minmax[1]) + minmax[1]\n a = a * 100\n b = b * 100\n \n return torch.sqrt(torch.mean(torch.pow(a-b,2)))"
},
{
"identifier": "rgbdd_calc_rmse",
"path": "utils.py",
"snippet": "def rgbdd_calc_rmse(gt, out, minmax):\n gt = gt[6:-6, 6:-6]\n out = out[6:-6, 6:-6]\n\n # gt = gt*(minmax[0]-minmax[1]) + minmax[1]\n out = out*(minmax[0]-minmax[1]) + minmax[1]\n gt = gt / 10.0\n out = out / 10.0\n \n return torch.sqrt(torch.mean(torch.pow(gt-out,2)))"
},
{
"identifier": "midd_calc_rmse",
"path": "utils.py",
"snippet": "def midd_calc_rmse(gt, out):\n gt = gt[6:-6, 6:-6]\n out = out[6:-6, 6:-6]\n gt = gt * 255.0\n out = out * 255.0\n \n return torch.sqrt(torch.mean(torch.pow(gt-out,2)))"
}
] | import os
import torch
import numpy as np
import argparse
import torch.optim as optim
import torch.nn as nn
import logging
import os
from numpy.core.fromnumeric import mean
from models.SGNet import *
from models.common import *
from data.nyu_dataloader import *
from data.rgbdd_dataloader import *
from data.middlebury_dataloader import Middlebury_dataset
from utils import calc_rmse, rgbdd_calc_rmse, midd_calc_rmse
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from tqdm import tqdm
from datetime import datetime | 2,353 | parser = argparse.ArgumentParser()
parser.add_argument('--scale', type=int, default=8, help='scale factor')
parser.add_argument('--lr', default='0.0001', type=float, help='learning rate')
parser.add_argument('--result', default='experiment', help='directory to save results')
parser.add_argument('--epoch', default=200, type=int, help='max epoch')
parser.add_argument('--device', default="0", type=str, help='which gpu to use')
parser.add_argument("--decay_iterations", type=list, default=[5e4, 1e5, 1.6e5], help="steps to start lr decay")
parser.add_argument("--num_feats", type=int, default=40, help="channel number of the middle hidden layer")
parser.add_argument("--gamma", type=float, default=0.2, help="decay rate of learning rate")
parser.add_argument("--root_dir", type=str, default='/opt/data/share/120106010699/nyu_data', help="root dir of dataset")
parser.add_argument("--batchsize", type=int, default=1, help="batchsize of training dataloader")
opt = parser.parse_args()
print(opt)
os.environ["CUDA_VISIBLE_DEVICES"] = opt.device
s = datetime.now().strftime('%Y%m%d%H%M%S')
dataset_name = opt.root_dir.split('/')[-1]
result_root = '%s/%s-lr_%s-s_%s-%s-b_%s' % (opt.result, s, opt.lr, opt.scale, dataset_name, opt.batchsize)
if not os.path.exists(result_root):
os.mkdir(result_root)
logging.basicConfig(filename='%s/train.log' % result_root, format='%(asctime)s %(message)s', level=logging.INFO)
logging.info(opt)
net = SGNet(num_feats=opt.num_feats, kernel_size=3, scale=opt.scale).cuda()
net_getFre = get_Fre()
net_grad = Get_gradient_nopadding_d()
criterion = nn.L1Loss()
optimizer = optim.Adam(net.parameters(), lr=opt.lr)
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=opt.decay_iterations, gamma=opt.gamma)
net.train()
data_transform = transforms.Compose([transforms.ToTensor()])
up = nn.Upsample(scale_factor=opt.scale, mode='bicubic')
if dataset_name == 'nyu_data':
test_minmax = np.load('%s/test_minmax.npy' % opt.root_dir)
train_dataset = NYU_v2_datset(root_dir=opt.root_dir, scale=opt.scale, transform=data_transform, train=True)
test_dataset = NYU_v2_datset(root_dir=opt.root_dir, scale=opt.scale, transform=data_transform, train=False)
if dataset_name == 'RGB-D-D':
train_dataset = NYU_v2_datset(root_dir='/data/SRData/NYU_v2', scale=opt.scale, transform=data_transform, train=True)
test_dataset = RGBDD_Dataset(root_dir=opt.root_dir, scale=opt.scale, downsample='bicubic', transform=data_transform,
train=False)
train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=opt.batchsize, shuffle=True, num_workers=8)
test_dataloader = torch.utils.data.DataLoader(test_dataset, batch_size=1, shuffle=False, num_workers=8)
max_epoch = opt.epoch
num_train = len(train_dataloader)
best_rmse = 10.0
best_epoch = 0
for epoch in range(max_epoch):
# ---------
# Training
# ---------
net.train()
running_loss = 0.0
t = tqdm(iter(train_dataloader), leave=True, total=len(train_dataloader))
for idx, data in enumerate(t):
batches_done = num_train * epoch + idx
optimizer.zero_grad()
guidance, lr, gt = data['guidance'].cuda(), data['lr'].cuda(), data['gt'].cuda()
out, out_grad = net((guidance, lr))
out_amp, out_pha = net_getFre(out)
gt_amp, gt_pha = net_getFre(gt)
gt_grad = net_grad(gt)
loss_grad1 = criterion(out_grad, gt_grad)
loss_fre_amp = criterion(out_amp, gt_amp)
loss_fre_pha = criterion(out_pha, gt_pha)
loss_fre = 0.5 * loss_fre_amp + 0.5 * loss_fre_pha
loss_spa = criterion(out, gt)
loss = loss_spa + 0.002 * loss_fre + 0.001 * loss_grad1
loss.backward()
optimizer.step()
scheduler.step()
running_loss += loss.data.item()
running_loss_50 = running_loss
if idx % 50 == 0:
running_loss_50 /= 50
t.set_description('[train epoch:%d] loss: %.8f' % (epoch + 1, running_loss_50))
t.refresh()
logging.info('epoch:%d iteration:%d running_loss:%.10f' % (epoch + 1, batches_done + 1, running_loss / num_train))
if (epoch % 2 == 0) and (epoch < 30):
with torch.no_grad():
net.eval()
if dataset_name == 'nyu_data':
rmse = np.zeros(449)
if dataset_name == 'RGB-D-D':
rmse = np.zeros(405)
t = tqdm(iter(test_dataloader), leave=True, total=len(test_dataloader))
for idx, data in enumerate(t):
if dataset_name == 'nyu_data':
guidance, lr, gt = data['guidance'].cuda(), data['lr'].cuda(), data['gt'].cuda()
out, out_grad = net((guidance, lr))
minmax = test_minmax[:, idx]
minmax = torch.from_numpy(minmax).cuda()
rmse[idx] = calc_rmse(gt[0, 0], out[0, 0], minmax)
if dataset_name == 'RGB-D-D':
guidance, lr, gt, max, min = data['guidance'].cuda(), data['lr'].cuda(), data['gt'].cuda(), data[
'max'].cuda(), data['min'].cuda()
out = net((guidance, lr))
minmax = [max, min]
|
parser = argparse.ArgumentParser()
parser.add_argument('--scale', type=int, default=8, help='scale factor')
parser.add_argument('--lr', default='0.0001', type=float, help='learning rate')
parser.add_argument('--result', default='experiment', help='directory to save results')
parser.add_argument('--epoch', default=200, type=int, help='max epoch')
parser.add_argument('--device', default="0", type=str, help='which gpu to use')
parser.add_argument("--decay_iterations", type=list, default=[5e4, 1e5, 1.6e5], help="steps to start lr decay")
parser.add_argument("--num_feats", type=int, default=40, help="channel number of the middle hidden layer")
parser.add_argument("--gamma", type=float, default=0.2, help="decay rate of learning rate")
parser.add_argument("--root_dir", type=str, default='/opt/data/share/120106010699/nyu_data', help="root dir of dataset")
parser.add_argument("--batchsize", type=int, default=1, help="batchsize of training dataloader")
opt = parser.parse_args()
print(opt)
os.environ["CUDA_VISIBLE_DEVICES"] = opt.device
s = datetime.now().strftime('%Y%m%d%H%M%S')
dataset_name = opt.root_dir.split('/')[-1]
result_root = '%s/%s-lr_%s-s_%s-%s-b_%s' % (opt.result, s, opt.lr, opt.scale, dataset_name, opt.batchsize)
if not os.path.exists(result_root):
os.mkdir(result_root)
logging.basicConfig(filename='%s/train.log' % result_root, format='%(asctime)s %(message)s', level=logging.INFO)
logging.info(opt)
net = SGNet(num_feats=opt.num_feats, kernel_size=3, scale=opt.scale).cuda()
net_getFre = get_Fre()
net_grad = Get_gradient_nopadding_d()
criterion = nn.L1Loss()
optimizer = optim.Adam(net.parameters(), lr=opt.lr)
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=opt.decay_iterations, gamma=opt.gamma)
net.train()
data_transform = transforms.Compose([transforms.ToTensor()])
up = nn.Upsample(scale_factor=opt.scale, mode='bicubic')
if dataset_name == 'nyu_data':
test_minmax = np.load('%s/test_minmax.npy' % opt.root_dir)
train_dataset = NYU_v2_datset(root_dir=opt.root_dir, scale=opt.scale, transform=data_transform, train=True)
test_dataset = NYU_v2_datset(root_dir=opt.root_dir, scale=opt.scale, transform=data_transform, train=False)
if dataset_name == 'RGB-D-D':
train_dataset = NYU_v2_datset(root_dir='/data/SRData/NYU_v2', scale=opt.scale, transform=data_transform, train=True)
test_dataset = RGBDD_Dataset(root_dir=opt.root_dir, scale=opt.scale, downsample='bicubic', transform=data_transform,
train=False)
train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=opt.batchsize, shuffle=True, num_workers=8)
test_dataloader = torch.utils.data.DataLoader(test_dataset, batch_size=1, shuffle=False, num_workers=8)
max_epoch = opt.epoch
num_train = len(train_dataloader)
best_rmse = 10.0
best_epoch = 0
for epoch in range(max_epoch):
# ---------
# Training
# ---------
net.train()
running_loss = 0.0
t = tqdm(iter(train_dataloader), leave=True, total=len(train_dataloader))
for idx, data in enumerate(t):
batches_done = num_train * epoch + idx
optimizer.zero_grad()
guidance, lr, gt = data['guidance'].cuda(), data['lr'].cuda(), data['gt'].cuda()
out, out_grad = net((guidance, lr))
out_amp, out_pha = net_getFre(out)
gt_amp, gt_pha = net_getFre(gt)
gt_grad = net_grad(gt)
loss_grad1 = criterion(out_grad, gt_grad)
loss_fre_amp = criterion(out_amp, gt_amp)
loss_fre_pha = criterion(out_pha, gt_pha)
loss_fre = 0.5 * loss_fre_amp + 0.5 * loss_fre_pha
loss_spa = criterion(out, gt)
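            # combined objective: spatial L1 plus small weights on the frequency (amplitude/phase) and gradient losses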
loss = loss_spa + 0.002 * loss_fre + 0.001 * loss_grad1
loss.backward()
optimizer.step()
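            # the LR scheduler is stepped every iteration, so decay_iterations milestones are iteration counts, not epochs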
scheduler.step()
running_loss += loss.data.item()
running_loss_50 = running_loss
if idx % 50 == 0:
running_loss_50 /= 50
t.set_description('[train epoch:%d] loss: %.8f' % (epoch + 1, running_loss_50))
t.refresh()
logging.info('epoch:%d iteration:%d running_loss:%.10f' % (epoch + 1, batches_done + 1, running_loss / num_train))
if (epoch % 2 == 0) and (epoch < 30):
with torch.no_grad():
net.eval()
if dataset_name == 'nyu_data':
rmse = np.zeros(449)
if dataset_name == 'RGB-D-D':
rmse = np.zeros(405)
t = tqdm(iter(test_dataloader), leave=True, total=len(test_dataloader))
for idx, data in enumerate(t):
if dataset_name == 'nyu_data':
guidance, lr, gt = data['guidance'].cuda(), data['lr'].cuda(), data['gt'].cuda()
out, out_grad = net((guidance, lr))
minmax = test_minmax[:, idx]
minmax = torch.from_numpy(minmax).cuda()
rmse[idx] = calc_rmse(gt[0, 0], out[0, 0], minmax)
if dataset_name == 'RGB-D-D':
guidance, lr, gt, max, min = data['guidance'].cuda(), data['lr'].cuda(), data['gt'].cuda(), data[
'max'].cuda(), data['min'].cuda()
out = net((guidance, lr))
minmax = [max, min] | rmse[idx] = rgbdd_calc_rmse(gt[0, 0], out[0, 0], minmax) | 2 | 2023-12-10 04:41:17+00:00 | 4k |
LSimon95/megatts2 | models/megatts2.py | [
{
"identifier": "MRTE",
"path": "modules/mrte.py",
"snippet": "class MRTE(nn.Module):\n def __init__(\n self, \n mel_bins: int = HIFIGAN_MEL_CHANNELS,\n mel_frames: int = HIFIGAN_HOP_LENGTH,\n attn_dim: int = 512,\n ff_dim: int = 1024,\n n_heads: int = 2,\n n_layers: int = 8,\n ge_kernel_size: int = 31,\n ge_hidden_sizes: List = [HIFIGAN_MEL_CHANNELS, 256, 256, 512, 512],\n ge_activation: str = 'ReLU',\n ge_out_channels: int = 512,\n duration_tokne_ms: float = (HIFIGAN_HOP_LENGTH / HIFIGAN_SR * 1000),\n phone_vocab_size: int = 320,\n dropout: float = 0.1,\n sample_rate: int = HIFIGAN_SR,\n ):\n super(MRTE, self).__init__()\n\n self.n_heads = n_heads\n \n self.phone_embedding = TokenEmbedding(\n dim_model=attn_dim,\n vocab_size=phone_vocab_size,\n dropout=dropout,\n )\n\n self.phone_pos_embedding = SinePositionalEmbedding(\n dim_model=attn_dim,\n dropout=dropout,\n )\n\n self.mel_embedding = nn.Linear(mel_bins, attn_dim)\n self.mel_pos_embedding = SinePositionalEmbedding(\n dim_model=attn_dim,\n dropout=dropout,\n )\n\n self.mel_encoder = TransformerEncoder(\n TransformerEncoderLayer(\n dim=attn_dim,\n ff_dim=ff_dim,\n conv_ff=True,\n n_heads=n_heads,\n dropout=dropout,\n ),\n num_layers=n_layers,\n )\n\n self.phone_encoder = TransformerEncoder(\n TransformerEncoderLayer(\n dim=attn_dim,\n ff_dim=ff_dim,\n conv_ff=True,\n n_heads=n_heads,\n dropout=dropout,\n ),\n num_layers=n_layers,\n )\n\n self.mha = MultiHeadAttention(\n qkv_dim=attn_dim,\n n_heads=n_heads,\n dropout=dropout,\n )\n self.norm = nn.LayerNorm(attn_dim)\n self.activation = nn.ReLU()\n\n self.compress_features = nn.Linear(attn_dim + ge_out_channels, ge_out_channels)\n\n self.ge = ConvNet(\n hidden_sizes = ge_hidden_sizes,\n kernel_size = ge_kernel_size,\n stack_size = 3,\n activation = ge_activation,\n avg_pooling = True\n )\n\n self.length_regulator = LengthRegulator(mel_frames, sample_rate, duration_tokne_ms)\n\n def forward(\n self,\n duration_tokens: torch.Tensor, # (B, T)\n phone: torch.Tensor, # (B, T)\n phone_lens: torch.Tensor, # (B,)\n mel: torch.Tensor, # (B, T, mel_bins)\n mel_lens: torch.Tensor, # (B,)\n ):\n \n phone_emb = self.phone_embedding(phone)\n phone_pos = self.phone_pos_embedding(phone_emb)\n\n mel_emb = self.mel_embedding(mel)\n mel_pos = self.mel_pos_embedding(mel_emb)\n\n mel_context = self.mel_encoder(mel_pos, mel_lens)\n phone_x = self.phone_encoder(phone_pos, phone_lens)\n\n phone_latent = self.mha(phone_x, kv=mel_context)\n phone_latent = self.norm(phone_latent)\n phone_latent = self.activation(phone_latent)\n\n mel = rearrange(mel, \"B T D -> B D T\")\n ge = self.ge(mel)\n ge = ge.unsqueeze(1).repeat(1, phone_latent.shape[1], 1)\n\n out = self.compress_features(torch.cat([ge, phone_latent], dim=-1))\n out = self.length_regulator(phone_latent, duration_tokens)\n return out"
},
{
"identifier": "VQProsodyEncoder",
"path": "modules/vqpe.py",
"snippet": "class VQProsodyEncoder(nn.Module):\n def __init__(\n self,\n hidden_sizes: List = [HIFIGAN_MEL_CHANNELS, 256, 256, 512, 512],\n kernel_size: int = 5,\n stack_size: int = 3,\n activation: str = 'ReLU',\n ):\n super(VQProsodyEncoder, self).__init__()\n\n self.convnet = ConvNet(\n hidden_sizes=hidden_sizes,\n kernel_size=kernel_size,\n stack_size=stack_size,\n activation=activation,\n )\n\n self.vq = ResidualVectorQuantizer(\n dimension=512,\n n_q=1,\n bins=1024,\n decay=0.99\n )\n\n def forward(\n self, \n mel: torch.Tensor, # (B, T, mel_bins)\n ):\n \n mel = rearrange(mel, \"B T D -> B D T\")\n ze = self.convnet(mel)\n zq, _, commit_loss = self.vq(ze)\n vq_loss = F.mse_loss(ze.detach(), zq)\n zq = rearrange(zq, \"B D T -> B T D\")\n return zq, commit_loss, vq_loss"
},
{
"identifier": "ConvNet",
"path": "modules/convnet.py",
"snippet": "class ConvNet(nn.Module):\n def __init__(\n self,\n hidden_sizes: List = [128, 256, 256, 512, 512],\n kernel_size: int = 5,\n stack_size: int = 3,\n activation: str = 'ReLU',\n avg_pooling: bool = False\n ):\n super(ConvNet, self).__init__()\n # First layer\n layers = [\n nn.Conv1d(\n hidden_sizes[0],\n hidden_sizes[0],\n kernel_size=kernel_size,\n padding=(kernel_size - 1) // 2\n ),\n ]\n # Middle layers\n for i in range(len(hidden_sizes)):\n if i == 0:\n layers += [\n ConvStack(\n hidden_sizes=[hidden_sizes[0]] * stack_size,\n kernel_size=kernel_size,\n activation=activation,\n )\n ]\n else:\n layers += [\n ResidualBlock(\n hidden_sizes=[hidden_sizes[i]] * stack_size,\n kernel_size=kernel_size,\n activation=activation,\n )\n ]\n # Upsample or downsample\n if (i != len(hidden_sizes) - 1) and (hidden_sizes[i] != hidden_sizes[i + 1]):\n layers += [\n nn.Conv1d(\n hidden_sizes[i],\n hidden_sizes[i + 1],\n kernel_size=kernel_size,\n padding=(kernel_size - 1) // 2\n ),\n getattr(nn, activation)(),\n ]\n # Last layer\n if avg_pooling:\n layers += [\n nn.AdaptiveAvgPool1d(1),\n nn.Flatten(),\n ]\n else:\n layers += [\n nn.Conv1d(\n hidden_sizes[-1],\n hidden_sizes[-1],\n kernel_size=kernel_size,\n padding=(kernel_size - 1) // 2\n ),\n ]\n\n self.layers = nn.Sequential(*layers)\n\n def forward(self, x):\n return self.layers(x)"
},
{
"identifier": "SinePositionalEmbedding",
"path": "modules/embedding.py",
"snippet": "class SinePositionalEmbedding(nn.Module):\n def __init__(\n self,\n dim_model: int,\n dropout: float = 0.0,\n scale: bool = False,\n alpha: bool = False,\n ):\n super().__init__()\n self.dim_model = dim_model\n self.x_scale = math.sqrt(dim_model) if scale else 1.0\n self.alpha = nn.Parameter(torch.ones(1), requires_grad=alpha)\n self.dropout = torch.nn.Dropout(p=dropout)\n\n self.reverse = False\n self.pe = None\n self.extend_pe(torch.tensor(0.0).expand(1, 4000))\n\n def extend_pe(self, x, offset = 0):\n \"\"\"Reset the positional encodings.\"\"\"\n x_size = x.size(1) + offset\n if self.pe is not None:\n if self.pe.size(1) >= x_size:\n if self.pe.dtype != x.dtype or self.pe.device != x.device:\n self.pe = self.pe.to(dtype=x.dtype, device=x.device)\n return\n pe = torch.zeros(x_size, self.dim_model)\n if self.reverse:\n position = torch.arange(\n x_size - 1, -1, -1.0, dtype=torch.float32\n ).unsqueeze(1)\n else:\n position = torch.arange(\n 0, x_size, dtype=torch.float32\n ).unsqueeze(1)\n div_term = torch.exp(\n torch.arange(0, self.dim_model, 2, dtype=torch.float32)\n * -(math.log(10000.0) / self.dim_model)\n )\n pe[:, 0::2] = torch.sin(position * div_term)\n pe[:, 1::2] = torch.cos(position * div_term)\n pe = pe.unsqueeze(0)\n self.pe = pe.to(device=x.device, dtype=x.dtype).detach()\n\n def forward(self, x: torch.Tensor, offset : int = 0) -> torch.Tensor:\n self.extend_pe(x, offset)\n output = x.unsqueeze(-1) if x.ndim == 2 else x\n output = output * self.x_scale + self.alpha * self.pe[:, offset : x.size(1) + offset]\n return self.dropout(output)"
},
{
"identifier": "TokenEmbedding",
"path": "modules/embedding.py",
"snippet": "class TokenEmbedding(nn.Module):\n def __init__(\n self,\n dim_model: int,\n vocab_size: int,\n dropout: float = 0.0,\n ):\n super().__init__()\n\n self.vocab_size = vocab_size\n self.dim_model = dim_model\n\n self.dropout = torch.nn.Dropout(p=dropout)\n self.word_embeddings = nn.Embedding(self.vocab_size, self.dim_model)\n\n @property\n def weight(self) -> torch.Tensor:\n return self.word_embeddings.weight\n\n def embedding(self, index: int) -> torch.Tensor:\n return self.word_embeddings.weight[index : index + 1]\n\n def forward(self, x: torch.Tensor):\n X = self.word_embeddings(x)\n X = self.dropout(X)\n\n return X"
},
{
"identifier": "TransformerEncoder",
"path": "modules/transformer.py",
"snippet": "class TransformerEncoder(nn.Module):\n def __init__(\n self,\n encoder_layer: TransformerEncoderLayer,\n num_layers: int,\n norm=None\n ):\n super().__init__()\n\n self.layers = _get_clones(encoder_layer, num_layers)\n self.num_layers = num_layers\n self.norm = norm\n\n def forward(\n self,\n x: torch.Tensor,\n x_lens: torch.Tensor,\n ) -> torch.Tensor:\n\n mask = make_attn_mask(x_lens, self.layers[0].n_heads)\n\n for layer in self.layers:\n x = layer(x, mask=mask)\n if self.norm is not None:\n x = self.norm(x)\n return x"
},
{
"identifier": "TransformerEncoderLayer",
"path": "modules/transformer.py",
"snippet": "class TransformerEncoderLayer(nn.Module):\n def __init__(self, dim, ff_dim, conv_ff=False, n_heads=8, dropout=0.):\n super().__init__()\n\n self.dim = dim\n self.conv_ff = conv_ff\n self.n_heads = n_heads\n\n self.norm1 = nn.LayerNorm(dim)\n self.norm2 = nn.LayerNorm(dim)\n\n self.attn = MultiHeadAttention(dim, n_heads=n_heads, dropout=dropout)\n\n self.dropout = nn.Dropout(dropout)\n\n if conv_ff:\n self.ff = nn.Sequential(\n nn.Conv1d(dim, ff_dim, kernel_size=5, padding=2),\n nn.ReLU(),\n nn.Conv1d(ff_dim, dim, kernel_size=5, padding=2),\n )\n else:\n self.ff = nn.Sequential(\n nn.Linear(dim, ff_dim),\n nn.ReLU(),\n self.dropout,\n nn.Linear(ff_dim, dim),\n )\n\n def forward(\n self,\n x: torch.Tensor,\n mask: torch.Tensor = None\n ):\n\n x = x + self.attn(self.norm1(x), mask=mask)\n if self.conv_ff:\n x = self.norm2(x)\n x = rearrange(x, 'B T D -> B D T')\n x = x + self.ff(x)\n x = rearrange(x, 'B D T -> B T D')\n else:\n x = x + self.ff(self.norm2(x))\n return x"
}
] | import torch
import torch.nn as nn
import torch.nn.functional as F
from modules.mrte import MRTE
from modules.vqpe import VQProsodyEncoder
from modules.convnet import ConvNet
from modules.embedding import SinePositionalEmbedding, TokenEmbedding
from modules.transformer import TransformerEncoder, TransformerEncoderLayer
from einops import rearrange | 3,193 |
class MegaVQ(nn.Module):
def __init__(
self,
mrte: MRTE,
|
class MegaVQ(nn.Module):
def __init__(
self,
mrte: MRTE, | vqpe: VQProsodyEncoder, | 1 | 2023-12-10 15:02:54+00:00 | 4k |
ml-stat-Sustech/TorchCP | tests/test_regression.py | [
{
"identifier": "ACI",
"path": "torchcp/regression/predictors/aci.py",
"snippet": "class ACI(SplitPredictor):\n \"\"\"\n Adaptive conformal inference (Gibbs et al., 2021)\n paper: https://arxiv.org/abs/2106.00170\n\n :param model: a pytorch model that can output the values of different quantiles.\n :param gamma: a step size parameter.\n \"\"\"\n\n def __init__(self, model, gamma):\n super().__init__(model)\n self.__gamma = gamma\n self.alpha_t = None\n\n def calculate_threshold(self, predicts, y_truth, alpha):\n if alpha >= 1 or alpha <= 0:\n raise ValueError(\"Significance level 'alpha' must be in (0,1).\")\n self.scores = torch.maximum(predicts[:, 0] - y_truth, y_truth - predicts[:, 1])\n self.alpha = alpha\n if self.alpha_t is None:\n self.alpha_t = alpha\n\n def predict(self, x, y_t=None, pred_interval_t=None):\n \"\"\"\n \n :param x: input features at the time t+1.\n :param y_t: the truth value at the time t.\n :param pred_interval_t: the prediction interval for the time t.\n \"\"\"\n self._model.eval()\n x = x.to(self._device)\n\n if y_t is None:\n err_t = self.alpha\n else:\n if len(y_t.shape) == 0:\n err_t = 1 if (y_t >= pred_interval_t[0]) & (y_t <= pred_interval_t[1]) else 0\n else:\n steps_t = len(y_t)\n w = torch.arange(steps_t).to(self._device)\n w = torch.pow(0.95, w)\n w = w / torch.sum(w)\n err = x.new_zeros(steps_t)\n for i in range(steps_t):\n err[i] = 1 if (y_t[i] >= pred_interval_t[i][0]) & (y_t[i] <= pred_interval_t[i][1]) else 0\n err_t = torch.sum(w * err)\n self.alpha_t = self.alpha_t + self.__gamma * (self.alpha - err_t)\n predicts_batch = self._model(x.to(self._device)).float()\n quantile = (1 - self.alpha_t) * (1 + 1 / self.scores.shape[0])\n if quantile > 1:\n quantile = 1\n q_hat = torch.quantile(self.scores, quantile)\n prediction_intervals = x.new_zeros(2)\n prediction_intervals[0] = predicts_batch[0] - q_hat\n prediction_intervals[1] = predicts_batch[1] + q_hat\n return prediction_intervals"
},
{
"identifier": "CQR",
"path": "torchcp/regression/predictors/cqr.py",
"snippet": "class CQR(SplitPredictor):\n \"\"\"\n Conformalized Quantile Regression (Romano et al., 2019)\n paper: https://arxiv.org/abs/1905.03222\n\n :param model: a pytorch model that can output alpha/2 and 1-alpha/2 quantile regression.\n \"\"\"\n\n def __init__(self, model):\n super().__init__(model)\n\n def calculate_threshold(self, predicts, y_truth, alpha):\n if alpha >= 1 or alpha <= 0:\n raise ValueError(\"Significance level 'alpha' must be in (0,1).\")\n self.scores = torch.maximum(predicts[:, 0] - y_truth, y_truth - predicts[:, 1])\n quantile = math.ceil((self.scores.shape[0] + 1) * (1 - alpha)) / self.scores.shape[0]\n if quantile > 1:\n quantile = 1\n self.q_hat = torch.quantile(self.scores, quantile)\n\n def predict(self, x_batch):\n self._model.eval()\n predicts_batch = self._model(x_batch.to(self._device)).float()\n if len(x_batch.shape) == 2:\n predicts_batch = self._model(x_batch.to(self._device)).float()\n prediction_intervals = x_batch.new_zeros((x_batch.shape[0], 2))\n prediction_intervals[:, 0] = predicts_batch[:, 0] - self.q_hat\n prediction_intervals[:, 1] = predicts_batch[:, 1] + self.q_hat\n else:\n prediction_intervals = torch.zeros(2)\n prediction_intervals[0] = predicts_batch[0] - self.q_hat\n prediction_intervals[1] = predicts_batch[1] + self.q_hat\n return prediction_intervals"
},
{
"identifier": "SplitPredictor",
"path": "torchcp/regression/predictors/split.py",
"snippet": "class SplitPredictor(object):\n \"\"\"\n Distribution-Free Predictive Inference For Regression (Lei et al., 2017)\n paper: https://arxiv.org/abs/1604.04173\n \n :param model: a pytorch model for regression.\n \"\"\"\n\n def __init__(self, model):\n self._model = model\n self._device = get_device(model)\n self._metric = Metrics()\n self.q_hat = None\n self.scores = None\n self.alpha = None\n\n def calibrate(self, cal_dataloader, alpha):\n self._model.eval()\n predicts_list = []\n y_truth_list = []\n with torch.no_grad():\n for examples in cal_dataloader:\n tmp_x, tmp_labels = examples[0].to(self._device), examples[1].to(self._device)\n tmp_predicts = self._model(tmp_x).detach()\n predicts_list.append(tmp_predicts)\n y_truth_list.append(tmp_labels)\n predicts = torch.cat(predicts_list).float().to(self._device)\n y_truth = torch.cat(y_truth_list).to(self._device)\n self.calculate_threshold(predicts, y_truth, alpha)\n\n def calculate_threshold(self, predicts, y_truth, alpha):\n if alpha >= 1 or alpha <= 0:\n raise ValueError(\"Significance level 'alpha' must be in (0,1).\")\n self.scores = torch.abs(predicts.reshape(-1) - y_truth)\n quantile = math.ceil((self.scores.shape[0] + 1) * (1 - alpha)) / self.scores.shape[0]\n if quantile > 1:\n quantile = 1\n self.q_hat = torch.quantile(self.scores, quantile)\n\n def predict(self, x_batch):\n self._model.eval()\n x_batch.to(self._device)\n with torch.no_grad():\n predicts_batch = self._model(x_batch).float().reshape(-1)\n prediction_intervals = x_batch.new_zeros((x_batch.shape[0], 2))\n prediction_intervals[:, 0] = predicts_batch - self.q_hat\n prediction_intervals[:, 1] = predicts_batch + self.q_hat\n\n return prediction_intervals\n\n def evaluate(self, data_loader):\n y_list = []\n predict_list = []\n with torch.no_grad():\n for examples in data_loader:\n tmp_x, tmp_y = examples[0].to(self._device), examples[1].to(self._device)\n tmp_prediction_intervals = self.predict(tmp_x)\n y_list.append(tmp_y)\n predict_list.append(tmp_prediction_intervals)\n\n predicts = torch.cat(predict_list).float().to(self._device)\n test_y = torch.cat(y_list).to(self._device)\n\n res_dict = {\"Coverage_rate\": self._metric('coverage_rate')(predicts, test_y),\n \"Average_size\": self._metric('average_size')(predicts)}\n return res_dict"
},
{
"identifier": "QuantileLoss",
"path": "torchcp/regression/loss/quantile.py",
"snippet": "class QuantileLoss(nn.Module):\n \"\"\"\n Pinball loss function (Romano et al., 2019).\n Paper: https://proceedings.neurips.cc/paper_files/paper/2019/file/5103c3584b063c431bd1268e9b5e76fb-Paper.pdf\n\n :param quantiles: a list of quantiles, such as :math: [alpha/2, 1-alpha/2].\n \"\"\"\n\n def __init__(self, quantiles):\n \"\"\"\n\n \"\"\"\n super().__init__()\n self.quantiles = quantiles\n\n def forward(self, preds, target):\n \"\"\" \n Compute the pinball loss.\n\n :param preds: the alpha/2 and 1-alpha/2 predictions of the model. The shape is batch x 2.\n :param target: the truth values. The shape is batch x 1.\n \"\"\"\n assert not target.requires_grad\n if preds.size(0) != target.size(0):\n raise IndexError(f\"Shape of preds must be equal to shape of target.\")\n losses = preds.new_zeros(len(self.quantiles))\n\n for i, q in enumerate(self.quantiles):\n errors = target - preds[:, i:i + 1]\n losses[i] = torch.sum(torch.max((q - 1) * errors, q * errors).squeeze(1))\n loss = torch.mean(losses)\n return loss"
},
{
"identifier": "Metrics",
"path": "torchcp/regression/utils/metrics.py",
"snippet": "class Metrics:\n def __call__(self, metric) -> Any:\n if metric not in METRICS_REGISTRY_REGRESSION.registered_names():\n raise NameError(f\"The metric: {metric} is not defined in TorchCP.\")\n return METRICS_REGISTRY_REGRESSION.get(metric)"
},
{
"identifier": "fix_randomness",
"path": "torchcp/utils/common.py",
"snippet": "def fix_randomness(seed=0):\n \"\"\"\n Fix the random seed for python, torch, numpy.\n\n :param seed: the random seed\n \"\"\"\n np.random.seed(seed=seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n random.seed(seed)"
}
] | import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import TensorDataset
from tqdm import tqdm
from sklearn.preprocessing import StandardScaler
from torchcp.regression.predictors import SplitPredictor,CQR,ACI
from torchcp.regression.loss import QuantileLoss
from torchcp.regression import Metrics
from torchcp.utils import fix_randomness
from utils import build_reg_data, build_regression_model | 3,114 |
def train(model, device, epoch, train_data_loader, criterion, optimizer):
for index, (tmp_x, tmp_y) in enumerate(train_data_loader):
outputs = model(tmp_x.to(device))
loss = criterion(outputs, tmp_y.unsqueeze(dim=1).to(device))
optimizer.zero_grad()
loss.backward()
optimizer.step()
def test_SplitPredictor():
##################################
# Preparing dataset
##################################
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
fix_randomness(seed=1)
X, y = build_reg_data()
indices = np.arange(X.shape[0])
np.random.shuffle(indices)
split_index1 = int(len(indices) * 0.4)
split_index2 = int(len(indices) * 0.6)
part1, part2, part3 = np.split(indices, [split_index1, split_index2])
scalerX = StandardScaler()
scalerX = scalerX.fit(X[part1, :])
train_dataset = TensorDataset(torch.from_numpy(scalerX.transform(X[part1, :])), torch.from_numpy(y[part1]))
cal_dataset = TensorDataset(torch.from_numpy(scalerX.transform(X[part2, :])), torch.from_numpy(y[part2]))
test_dataset = TensorDataset(torch.from_numpy(scalerX.transform(X[part3, :])), torch.from_numpy(y[part3]))
train_data_loader = torch.utils.data.DataLoader(train_dataset, batch_size=100, shuffle=True, pin_memory=True)
cal_data_loader = torch.utils.data.DataLoader(cal_dataset, batch_size=100, shuffle=False, pin_memory=True)
test_data_loader = torch.utils.data.DataLoader(test_dataset, batch_size=100, shuffle=False, pin_memory=True)
epochs = 100
alpha = 0.1
##################################
# Split Conformal Prediction
##################################
print("########################## SplitPredictor ###########################")
model = build_regression_model("NonLinearNet")(X.shape[1], 1, 64, 0.5).to(device)
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
for epoch in range(epochs):
train(model, device, epoch, train_data_loader, criterion, optimizer)
model.eval()
predictor = SplitPredictor(model)
predictor.calibrate(cal_data_loader, alpha)
print(predictor.evaluate(test_data_loader))
def test_time_series():
##################################
# Preparing dataset
##################################
device = torch.device("cuda:2" if torch.cuda.is_available() else "cpu")
fix_randomness(seed=2)
X, y = build_reg_data(data_name="synthetic")
num_examples = X.shape[0]
T0 = int(num_examples * 0.4)
train_dataset = TensorDataset(torch.from_numpy(X[:T0, :]), torch.from_numpy(y[:T0]))
train_data_loader = torch.utils.data.DataLoader(train_dataset, batch_size=100, shuffle=True, pin_memory=True)
alpha = 0.1
quantiles = [alpha / 2, 1 - alpha / 2]
model = build_regression_model("NonLinearNet")(X.shape[1], 2, 64, 0.5).to(device)
|
def train(model, device, epoch, train_data_loader, criterion, optimizer):
for index, (tmp_x, tmp_y) in enumerate(train_data_loader):
outputs = model(tmp_x.to(device))
loss = criterion(outputs, tmp_y.unsqueeze(dim=1).to(device))
optimizer.zero_grad()
loss.backward()
optimizer.step()
def test_SplitPredictor():
##################################
# Preparing dataset
##################################
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
fix_randomness(seed=1)
X, y = build_reg_data()
indices = np.arange(X.shape[0])
np.random.shuffle(indices)
split_index1 = int(len(indices) * 0.4)
split_index2 = int(len(indices) * 0.6)
part1, part2, part3 = np.split(indices, [split_index1, split_index2])
scalerX = StandardScaler()
scalerX = scalerX.fit(X[part1, :])
train_dataset = TensorDataset(torch.from_numpy(scalerX.transform(X[part1, :])), torch.from_numpy(y[part1]))
cal_dataset = TensorDataset(torch.from_numpy(scalerX.transform(X[part2, :])), torch.from_numpy(y[part2]))
test_dataset = TensorDataset(torch.from_numpy(scalerX.transform(X[part3, :])), torch.from_numpy(y[part3]))
train_data_loader = torch.utils.data.DataLoader(train_dataset, batch_size=100, shuffle=True, pin_memory=True)
cal_data_loader = torch.utils.data.DataLoader(cal_dataset, batch_size=100, shuffle=False, pin_memory=True)
test_data_loader = torch.utils.data.DataLoader(test_dataset, batch_size=100, shuffle=False, pin_memory=True)
epochs = 100
alpha = 0.1
##################################
# Split Conformal Prediction
##################################
print("########################## SplitPredictor ###########################")
model = build_regression_model("NonLinearNet")(X.shape[1], 1, 64, 0.5).to(device)
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
for epoch in range(epochs):
train(model, device, epoch, train_data_loader, criterion, optimizer)
model.eval()
predictor = SplitPredictor(model)
predictor.calibrate(cal_data_loader, alpha)
print(predictor.evaluate(test_data_loader))
def test_time_series():
##################################
# Preparing dataset
##################################
device = torch.device("cuda:2" if torch.cuda.is_available() else "cpu")
fix_randomness(seed=2)
X, y = build_reg_data(data_name="synthetic")
num_examples = X.shape[0]
T0 = int(num_examples * 0.4)
train_dataset = TensorDataset(torch.from_numpy(X[:T0, :]), torch.from_numpy(y[:T0]))
train_data_loader = torch.utils.data.DataLoader(train_dataset, batch_size=100, shuffle=True, pin_memory=True)
alpha = 0.1
quantiles = [alpha / 2, 1 - alpha / 2]
model = build_regression_model("NonLinearNet")(X.shape[1], 2, 64, 0.5).to(device) | criterion = QuantileLoss(quantiles) | 3 | 2023-12-06 09:08:41+00:00 | 4k |
vintagedave/Fontimize | tests.py | [
{
"identifier": "get_used_characters_in_html",
"path": "fontimize.py",
"snippet": "def get_used_characters_in_html(html : str) -> set[chr]:\n soup = BeautifulSoup(html, 'html.parser')\n text = soup.get_text()\n return get_used_characters_in_str(text)"
},
{
"identifier": "charPair",
"path": "fontimize.py",
"snippet": "class charPair:\n def __init__(self, first : chr, second : chr):\n self.first = first\n self.second = second\n\n def __str__(self):\n return \"[\" + self.first + \"-\" + self.second + \"]\" # Pairs are inclusive\n \n # For print()-ing\n def __repr__(self):\n return self.__str__()\n \n def __eq__(self, other):\n if isinstance(other, charPair):\n return self.first == other.first and self.second == other.second\n return False\n \n def get_range(self):\n if self.first == self.second:\n return _get_unicode_string(self.first)\n else:\n return _get_unicode_string(self.first) + '-' + _get_unicode_string(self.second, False) # Eg \"U+0061-0071\""
},
{
"identifier": "_get_char_ranges",
"path": "fontimize.py",
"snippet": "def _get_char_ranges(chars : list[chr]):\n chars.sort()\n if not chars:\n return []\n res : list[charPair] = []\n first : chr = chars[0]\n prev_seen : chr = first\n for c in chars[1:]:\n expected_next_char = chr(ord(prev_seen) + 1)\n if c != expected_next_char:\n # non-sequential, so time to start a new set\n pair = charPair(first, prev_seen)\n res.append(pair)\n first = c\n prev_seen = c\n # add final set if it hasn't been added yet\n if (not res) or (res[-1].second != prev_seen):\n pair = charPair(first, prev_seen)\n res.append(pair)\n\n return res"
},
{
"identifier": "optimise_fonts",
"path": "fontimize.py",
"snippet": "def optimise_fonts(text : str, fonts : list[str], fontpath : str = \"\", subsetname = \"FontimizeSubset\", verbose : bool = False, print_stats : bool = True) -> dict[str, typing.Any]:\n verbosity = 2 if verbose else 0 # ttf2web has 0, 1, 2, so match that to off and on\n\n res : dict[str, typing.Any] = {}\n res[\"css\"] = {} # at this level there are no CSS files, include just to prevent errors for API consumer\n\n characters = get_used_characters_in_str(text)\n\n char_list = list(characters)\n if verbosity >= 2:\n print(\"Characters:\")\n print(\" \" + str(char_list))\n res[\"chars\"] = characters # set of characters used in the input text\n\n char_ranges = _get_char_ranges(char_list)\n if verbosity >= 2:\n print(\"Character ranges:\")\n print(\" \" + str(char_ranges))\n \n uranges_str = ', '.join(r.get_range() for r in char_ranges)\n uranges = [[subsetname, uranges_str]] # subsetname here will be in the generated font, eg 'Arial.FontimizeSubset.woff2'\n if verbosity >= 2:\n print(\"Unicode ranges:\")\n print(\" \" + uranges_str) \n res[\"uranges\"] = uranges_str # list of unicode ranges matching the characters used in the input text\n\n # For each font, generate a new font file using only the used characters\n # By default, place it in the same folder as the respective font, unless fontpath is specified\n res[\"fonts\"] = {} # dict of old font path -> new font path\n for font in fonts:\n assetdir = fontpath if fontpath else path.dirname(font)\n t2w = TTF2Web(font, uranges, assetdir=assetdir)\n woff2_list = t2w.generateWoff2(verbosity=verbosity)\n # print(woff2_list)\n assert len(woff2_list) == 1 # We only expect one font file to be generated, per font input\n assert len(woff2_list[0]) == 2 # Pair of font, plus ranges -- we only care about [0], the font\n res[\"fonts\"][font] = woff2_list[0][0]\n\n if verbosity >= 2:\n print(\"Generated the following fonts from the originals:\")\n for k in res[\"fonts\"].keys():\n print(\" \" + k + \" ->\\n \" + res[\"fonts\"][k])\n\n if (verbosity >= 2) or print_stats:\n print(\"Results:\")\n print(\" Fonts processed: \" + str(len(res[\"fonts\"])))\n if (verbosity == 1): # If 2, printed above already\n print(\" Generated (use verbose output for input -> generated map):\")\n for k in res[\"fonts\"].keys():\n print(\" \" + res[\"fonts\"][k])\n sum_orig = _get_file_size_sum(list(res[\"fonts\"].keys()))\n sum_new = _get_file_size_sum(list(res[\"fonts\"].values())) \n print(\" Total original font size: \" + _file_size_to_readable(sum_orig))\n print(\" Total optimised font size: \" + _file_size_to_readable(sum_new))\n savings = sum_orig - sum_new;\n savings_percent = savings / sum_orig * 100 \n print(\" Savings: \" + _file_size_to_readable(savings) + \" less, which is \" + str(round(savings_percent, 1)) + \"%!\")\n print(\"Thankyou for using Fontimize!\") # A play on Font and Optimise, haha, so good pun clever. But seriously - hopefully a memorable name!\n\n return res"
},
{
"identifier": "optimise_fonts_for_files",
"path": "fontimize.py",
"snippet": "def optimise_fonts_for_files(files : list[str], font_output_dir = \"\", subsetname = \"FontimizeSubset\", verbose : bool = False, print_stats : bool = True, fonts : list[str] = [], addtl_text : str = \"\") -> dict[str, typing.Any]:\n if (len(files) == 0) and len(addtl_text) == 0: # If you specify any text, input files are optional -- note, not documented, used for cmd line app\n print(\"Error: No input files. Exiting.\")\n res = {\n \"css\" : [],\n \"fonts\" : [],\n \"chars\": set(),\n \"uranges\": []\n }\n \n text = addtl_text\n css_files : set[str] = set()\n font_files : set[str] = set()\n for f in fonts: # user-specified input font files\n font_files.add(f)\n\n for f in files:\n file_ext = pathlib.Path(f).suffix.lower()\n with open(f, 'r') as file:\n if file_ext == '.html' or file_ext == '.htm':\n html = file.read()\n soup = BeautifulSoup(html, 'html.parser')\n\n # Extract used text\n text += soup.get_text()\n\n # Extract CSS files the HTML references\n for link in soup.find_all('link', href=True):\n if 'css' in link['href']:\n css_ref = link['href']\n adjusted_css_path = _get_path(f, css_ref) # It'll be relative, so relative to the HTML file\n css_files.add(adjusted_css_path)\n else: # not HTML, treat as text\n text += file.read()\n\n # Sanity check that there is any text to process\n if len(text) == 0:\n print(\"Error: No text found in the input files or additional text. Exiting.\")\n res = {\n \"css\" : [],\n \"fonts\" : [],\n \"chars\": set(),\n \"uranges\": []\n }\n return res\n\n # Extract fonts from CSS files\n for css_file in css_files:\n with open(css_file, 'r') as file:\n css = file.read()\n\n # Extract the contents of all :before and :after CSS pseudo-elements; add these to the text\n pseudo_elements = _extract_pseudo_elements_content(css)\n for pe in pseudo_elements:\n text += pe\n\n # List of all fonts from @font-face src url: statements. This assumes they're all local files\n font_urls = _find_font_face_urls(css)\n for font_url in font_urls:\n # Only handle local files -- this does not support remote files\n adjusted_font_path = _get_path(adjusted_css_path, font_url) # Relative to the CSS file\n if path.isfile(adjusted_font_path):\n font_files.add(adjusted_font_path)\n else:\n # if verbose:\n print(\"Warning: Font file not found (may be remote not local?); skipping: \" + font_url + \" (resolved to \" + adjusted_font_path + \")\")\n\n if verbose:\n print(\"Found the following CSS files:\")\n for css_file in css_files:\n print(\" \" + css_file)\n\n print(\"Found the following fonts:\")\n for font_file in font_files:\n print(\" \" + font_file)\n\n # print(\"Found the following text:\")\n # print(text)\n \n if len(font_files) == 0:\n print(\"Error: No fonts found in the input files. Exiting.\")\n res = {\n \"css\" : css_files,\n \"fonts\" : [],\n \"chars\": set(),\n \"uranges\": []\n }\n return res\n\n res = optimise_fonts(text, font_files, fontpath=font_output_dir, subsetname=subsetname, verbose=verbose, print_stats=print_stats)\n res[\"css\"] = css_files\n return res;"
}
] | import os
import unittest
import sys
from unittest.mock import patch
from fontimize import get_used_characters_in_html, charPair, _get_char_ranges, optimise_fonts, optimise_fonts_for_files
from fontTools.ttLib import woff2, TTFont | 3,279 |
class TestGetUsedCharactersInHtml(unittest.TestCase):
def test_empty_html(self):
self.assertEqual(get_used_characters_in_html(''), set(' '))
def test_html_with_no_text(self):
self.assertEqual(get_used_characters_in_html('<html><body></body></html>'), set(' '))
def test_html_with_text(self):
self.assertEqual(get_used_characters_in_html('<html><body>Hello, World!</body></html>'), set('Hello, World!'))
def test_html_with_repeated_text(self):
self.assertEqual(get_used_characters_in_html('<html><body>Hello, World! Hello, World!</body></html>'), set('Hello, World!'))
def test_html_with_multiple_spans(self):
self.assertEqual(get_used_characters_in_html('<html><body><span>Hello</span><span>, </span><span>World!</span></body></html>'), set('Hello, World!'))
def test_html_with_multiple_divs(self):
self.assertEqual(get_used_characters_in_html('<html><body><div>Hello</div><div>, </div><div>World!</div></body></html>'), set('Hello, World!'))
def test_html_with_links(self):
self.assertEqual(get_used_characters_in_html('<html><body><a href="https://example.com">Hello, World!</a></body></html>'), set('Hello, World!'))
def test_html_with_nested_tags(self):
self.assertEqual(get_used_characters_in_html('<html><body><div><span>Hello, </span><a href="https://example.com">World!</a></span></div></body></html>'), set('Hello, World!'))
class TestCharPairs(unittest.TestCase):
def test_get_range_with_single_char(self):
self.assertEqual(charPair('a', 'a').get_range(), 'U+0061')
# Note that the second of the pair does not have the "U+" -- this caught me out
# with parse errors inside TTF2Web()
def test_get_range_with_two_chars(self):
self.assertEqual(charPair('a', 'b').get_range(), 'U+0061-0062')
def test_get_range_with_multiple_chars(self):
self.assertEqual(charPair('a', 'd').get_range(), 'U+0061-0064')
class TestCharRanges(unittest.TestCase):
def test_empty(self):
self.assertEqual(_get_char_ranges([]), [])
def test_single_char(self):
self.assertEqual(_get_char_ranges(['a']), [charPair('a', 'a')])
def test_two_sequential_chars(self):
self.assertEqual(_get_char_ranges(['a', 'b']), [charPair('a', 'b')])
def test_two_nonsequential_chars(self):
self.assertEqual(_get_char_ranges(['a', 'c']), [charPair('a', 'a'), charPair('c', 'c')])
def test_multiple_ranges(self):
self.assertEqual(_get_char_ranges(['a', 'b', 'd', 'e', 'f', 'h']), [charPair('a', 'b'), charPair('d', 'f'), charPair('h', 'h')])
# Used to verify the number of glyphs in a font matches the number of (unique!) characters in the test string
def _count_glyphs_in_font(fontpath):
# with open(fontpath, 'rb') as f:
# wfr = woff2.WOFF2Reader(f)
# cmap = font['cmap']
# return len(cmap.getBestCmap())
# font.flavor = None # Decompress the font data
font = TTFont(fontpath)#flavor='woff2')#, sfntReader=wfr)
font.flavor = None # Decompress the font data
num_glyphs = font['maxp'].numGlyphs # Use font.getGlyphOrder() and https://fontdrop.info to examine, if weird
return num_glyphs
# Does a named glyph exist in the font?
def _font_contains(fontpath, charname : str) -> bool:
font = TTFont(fontpath)
font.flavor = None # Decompress the font data
return charname in font.getGlyphOrder()
class TestOptimiseFonts(unittest.TestCase):
# Contains unique characters, none repeated, a couple of capitals, some symbols, and 26 lowercase
test_string = " ,.@QT_abcdefghijklmnopqrstuvwxyz"
def test_optimise_fonts_with_single_font(self):
|
class TestGetUsedCharactersInHtml(unittest.TestCase):
def test_empty_html(self):
self.assertEqual(get_used_characters_in_html(''), set(' '))
def test_html_with_no_text(self):
self.assertEqual(get_used_characters_in_html('<html><body></body></html>'), set(' '))
def test_html_with_text(self):
self.assertEqual(get_used_characters_in_html('<html><body>Hello, World!</body></html>'), set('Hello, World!'))
def test_html_with_repeated_text(self):
self.assertEqual(get_used_characters_in_html('<html><body>Hello, World! Hello, World!</body></html>'), set('Hello, World!'))
def test_html_with_multiple_spans(self):
self.assertEqual(get_used_characters_in_html('<html><body><span>Hello</span><span>, </span><span>World!</span></body></html>'), set('Hello, World!'))
def test_html_with_multiple_divs(self):
self.assertEqual(get_used_characters_in_html('<html><body><div>Hello</div><div>, </div><div>World!</div></body></html>'), set('Hello, World!'))
def test_html_with_links(self):
self.assertEqual(get_used_characters_in_html('<html><body><a href="https://example.com">Hello, World!</a></body></html>'), set('Hello, World!'))
def test_html_with_nested_tags(self):
self.assertEqual(get_used_characters_in_html('<html><body><div><span>Hello, </span><a href="https://example.com">World!</a></span></div></body></html>'), set('Hello, World!'))
class TestCharPairs(unittest.TestCase):
def test_get_range_with_single_char(self):
self.assertEqual(charPair('a', 'a').get_range(), 'U+0061')
# Note that the second of the pair does not have the "U+" -- this caught me out
# with parse errors inside TTF2Web()
def test_get_range_with_two_chars(self):
self.assertEqual(charPair('a', 'b').get_range(), 'U+0061-0062')
def test_get_range_with_multiple_chars(self):
self.assertEqual(charPair('a', 'd').get_range(), 'U+0061-0064')
class TestCharRanges(unittest.TestCase):
def test_empty(self):
self.assertEqual(_get_char_ranges([]), [])
def test_single_char(self):
self.assertEqual(_get_char_ranges(['a']), [charPair('a', 'a')])
def test_two_sequential_chars(self):
self.assertEqual(_get_char_ranges(['a', 'b']), [charPair('a', 'b')])
def test_two_nonsequential_chars(self):
self.assertEqual(_get_char_ranges(['a', 'c']), [charPair('a', 'a'), charPair('c', 'c')])
def test_multiple_ranges(self):
self.assertEqual(_get_char_ranges(['a', 'b', 'd', 'e', 'f', 'h']), [charPair('a', 'b'), charPair('d', 'f'), charPair('h', 'h')])
# Used to verify the number of glyphs in a font matches the number of (unique!) characters in the test string
def _count_glyphs_in_font(fontpath):
# with open(fontpath, 'rb') as f:
# wfr = woff2.WOFF2Reader(f)
# cmap = font['cmap']
# return len(cmap.getBestCmap())
# font.flavor = None # Decompress the font data
font = TTFont(fontpath)#flavor='woff2')#, sfntReader=wfr)
font.flavor = None # Decompress the font data
num_glyphs = font['maxp'].numGlyphs # Use font.getGlyphOrder() and https://fontdrop.info to examine, if weird
return num_glyphs
# Does a named glyph exist in the font?
def _font_contains(fontpath, charname : str) -> bool:
font = TTFont(fontpath)
font.flavor = None # Decompress the font data
return charname in font.getGlyphOrder()
class TestOptimiseFonts(unittest.TestCase):
# Contains unique characters, none repeated, a couple of capitals, some symbols, and 26 lowercase
test_string = " ,.@QT_abcdefghijklmnopqrstuvwxyz"
def test_optimise_fonts_with_single_font(self): | result = optimise_fonts(self.test_string, ['tests/Spirax-Regular.ttf'], fontpath='tests/output', verbose=False, print_stats=False) | 3 | 2023-12-07 13:23:46+00:00 | 4k |
wanghao-cst/Omni-VideoAssistant | llava/model/omni_arch.py | [
{
"identifier": "build_vision_tower",
"path": "llava/model/multimodal_encoder/builder.py",
"snippet": "def build_vision_tower(vision_tower_cfg, **kwargs):\n vision_tower = getattr(vision_tower_cfg, 'mm_vision_tower', getattr(vision_tower_cfg, 'vision_tower', None))\n is_absolute_path_exists = os.path.exists(vision_tower)\n if is_absolute_path_exists or vision_tower.startswith(\"openai\") or vision_tower.startswith(\"laion\"):\n return CLIPVisionTower(vision_tower, args=vision_tower_cfg, **kwargs)\n\n raise ValueError(f'Unknown vision tower: {vision_tower}')"
},
{
"identifier": "build_vision_projector",
"path": "llava/model/multimodal_projector/builder.py",
"snippet": "def build_vision_projector(config, delay_load=False, **kwargs):\n projector_type = getattr(config, 'mm_projector_type', 'linear')\n\n if projector_type == 'linear':\n return nn.Linear(config.mm_hidden_size, config.hidden_size)\n\n mlp_gelu_match = re.match(r'^mlp(\\d+)x_gelu$', projector_type)\n if mlp_gelu_match:\n mlp_depth = int(mlp_gelu_match.group(1))\n modules = [nn.Linear(config.mm_hidden_size, config.hidden_size)]\n for _ in range(1, mlp_depth):\n modules.append(nn.GELU())\n modules.append(nn.Linear(config.hidden_size, config.hidden_size))\n # import pdb;pdb.set_trace()\n return nn.Sequential(*modules)\n\n if projector_type == 'identity':\n return IdentityMap()\n\n raise ValueError(f'Unknown projector type: {projector_type}')"
},
{
"identifier": "IGNORE_INDEX",
"path": "llava/constants.py",
"snippet": "IGNORE_INDEX = -100"
},
{
"identifier": "IMAGE_TOKEN_INDEX",
"path": "llava/constants.py",
"snippet": "IMAGE_TOKEN_INDEX = -200"
},
{
"identifier": "DEFAULT_IMAGE_PATCH_TOKEN",
"path": "llava/constants.py",
"snippet": "DEFAULT_IMAGE_PATCH_TOKEN = \"<im_patch>\""
},
{
"identifier": "DEFAULT_IM_START_TOKEN",
"path": "llava/constants.py",
"snippet": "DEFAULT_IM_START_TOKEN = \"<im_start>\""
},
{
"identifier": "DEFAULT_IM_END_TOKEN",
"path": "llava/constants.py",
"snippet": "DEFAULT_IM_END_TOKEN = \"<im_end>\""
}
] | from abc import ABC, abstractmethod
from .multimodal_encoder.builder import build_vision_tower
from .multimodal_projector.builder import build_vision_projector
from llava.constants import IGNORE_INDEX, IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_PATCH_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
import torch
import torch.nn as nn | 3,409 | if pretrain_mm_mlp_adapter is not None:
mm_projector_weights = torch.load(pretrain_mm_mlp_adapter, map_location='cpu')
def get_w(weights, keyword):
return {k.split(keyword + '.')[1]: v for k, v in weights.items() if keyword in k} # weight:torch.Size([4096, 1024]) bias:torch.Size([4096])
# import pdb;pdb.set_trace()
self.mm_projector.load_state_dict(get_w(mm_projector_weights, 'mm_projector'))
# v1.5: mm_projector_weights['model.mm_projector.0.weight'].shape: torch.Size([4096, 1024])
# model.mm_projector.0.bias: torch.Size([4096]); model.mm_projector.2.weight: torch.Size([4096, 4096]); model.mm_projector.2.bias: torch.Size([4096])
if getattr(self, 'frames_conv', None) is None: ## Implement continue finetuning.
# self.frames_attn = MultiheadAttention(256*4096, num_heads)
# self.frames_conv = nn.Conv2d(4096, 4096, kernel_size=(12,1), stride=(10,1)) # b 4096 51 256
# self.frames_conv = nn.Conv2d(256, 256, kernel_size=(12,1), stride=(10,1)) # b 256 51 4096
self.frames_conv = nn.Conv2d(576, 576, kernel_size=(12,1), stride=(10,1)) # b 256 51 4096
# self.keyframes_attn = MultiheadAttention(256*4096, num_heads)
# import pdb;pdb.set_trace()
self.config.mm_video_fuser = 'frames_conv'
class OmniMetaForCausalLM(ABC):
@abstractmethod
def get_model(self):
pass
def get_vision_tower(self):
return self.get_model().get_vision_tower()
def encode_frames(self, frames):
frames_features = self.get_model().get_vision_tower()(frames) # torch.Size([276, 256, 1024])
frames_features = self.get_model().mm_projector(frames_features) # torch.Size([276, 256, 4096]) torch.float16
return frames_features
def prepare_inputs_labels_for_multimodal(
self, input_ids, attention_mask, past_key_values, labels, videos
):
vision_tower = self.get_vision_tower()
# import pdb;pdb.set_trace()
# frames_attn = self.get_model().frames_attn
frames_conv = self.get_model().frames_conv
# keyframes_attn = self.get_model().keyframes_attn
# import pdb;pdb.set_trace()
if vision_tower is None or videos is None or input_ids.shape[1] == 1: # False
if past_key_values is not None and vision_tower is not None and videos is not None and input_ids.shape[1] == 1:
attention_mask = torch.ones((attention_mask.shape[0], past_key_values[-1][-1].shape[-2] + 1), dtype=attention_mask.dtype, device=attention_mask.device)
return input_ids, attention_mask, past_key_values, None, labels
# videos = [torch.Size([51, 3, 224, 224]), torch.Size([79, 3, 224, 224]), torch.Size([60, 3, 224, 224]), torch.Size([86, 3, 224, 224])]
assert type(videos) is list or videos.ndim == 5 # True
concat_frames = torch.cat([video for video in videos], dim=0) # torch.Size([79, 3, 336, 336])
# import pdb;pdb.set_trace()
frames_features = self.encode_frames(concat_frames) # torch.Size([276, 256, 4096]) torch.Size([79, 576, 4096])
split_sizes = [video.shape[0] for video in videos] # [51, 79, 60, 86]
frames_features = torch.split(frames_features, split_sizes, dim=0) # (torch.Size([51, 256, 4096]), torch.Size([79, 256, 4096]), torch.Size([60, 256, 4096]), torch.Size([86, 256, 4096]))
# import pdb;pdb.set_trace()
# frames_features = [x.flatten(0, 1) for x in frames_features]
key_frames_feature = []
for frame_feature in frames_features:
# import pdb;pdb.set_trace()
frame_feature = frame_feature.unsqueeze(0) # b 51 256 4096
frame_feature = frame_feature.permute(0,2,1,3) # b 256 51 4096
# short video
if frame_feature.shape[2] >= 12:
frame_feature = frames_conv(frame_feature) # torch.Size([1, 256, 4, 4096])
frame_feature = frame_feature.squeeze(0).permute(1,0,2) # torch.Size([4, 256, 4096])
# key_frames_feature.append(frame_feature[:6])
# import pdb;pdb.set_trace()
num_frames = frame_feature.shape[0]
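            # keep at most 6 roughly evenly spaced key-frame features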
key_frames_feature.append(frame_feature[::max(1,num_frames//5)][:6]) # v1.5 576 patch
new_input_embeds = []
new_labels = [] if labels is not None else None
cur_video_idx = 0
for batch_idx, cur_input_ids in enumerate(input_ids): # torch.Size([4, 375])
if (cur_input_ids == IMAGE_TOKEN_INDEX).sum() == 0: # 1 False
# multimodal LLM, but the current sample is not multimodal
# FIXME: this is a hacky fix, for deepspeed zero3 to work
half_len = cur_input_ids.shape[0] // 2
cur_frames_features = key_frames_feature[cur_video_idx]
cur_input_embeds_1 = self.get_model().embed_tokens(cur_input_ids[:half_len])
cur_input_embeds_2 = self.get_model().embed_tokens(cur_input_ids[half_len:])
# import pdb;pdb.set_trace()
# cur_input_embeds = torch.cat([cur_input_embeds_1, cur_frames_features[0:0], cur_input_embeds_2], dim=0)
cur_input_embeds = torch.cat([cur_input_embeds_1, cur_frames_features[0], cur_input_embeds_2], dim=0)
# cur_input_embeds = torch.cat([cur_input_embeds_1, cur_input_embeds_2], dim=0)
new_input_embeds.append(cur_input_embeds)
if labels is not None:
new_labels.append(labels[batch_idx])
cur_video_idx += 1
# import pdb;pdb.set_trace()
# never enter it
continue
image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0] # (tensor([35], device='cuda:0'),)
cur_new_input_embeds = []
if labels is not None: # torch.Size([4, 375])
cur_labels = labels[batch_idx] # torch.Size([375]): -100...labels...-100
cur_new_labels = []
assert cur_labels.shape == cur_input_ids.shape
            while image_token_indices.numel() > 0: # counts the number of elements; 1 here
# import pdb;pdb.set_trace()
# if cur_video_idx > len(key_frames_feature)-1:
# cur_frames_features = key_frames_feature[-1] # for gradio demo
# else:
cur_frames_features = key_frames_feature[cur_video_idx] # torch.Size([4, 256, 4096])
cur_frames_features = cur_frames_features.reshape(-1,4096) # torch.Size([1024, 4096])
image_token_start = image_token_indices[0] # tensor(35, device='cuda:0')
if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False): # False
cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start-1]).detach())
cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start-1:image_token_start]))
cur_new_input_embeds.append(cur_frames_features)
cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start+1:image_token_start+2]))
if labels is not None:
cur_new_labels.append(cur_labels[:image_token_start])
| # Copyright 2023 Haotian Liu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class OmniMetaModel:
def __init__(self, config):
super(OmniMetaModel, self).__init__(config)
# import pdb;pdb.set_trace()
if hasattr(config, "mm_vision_tower"): # train False, v1.5 continue finetune True
self.vision_tower = build_vision_tower(config, delay_load=True)
self.mm_projector = build_vision_projector(config)
# import pdb;pdb.set_trace()
if hasattr(config, "mm_video_fuser"):
# self.frames_conv = nn.Conv2d(256, 256, kernel_size=(12,1), stride=(10,1)) # b 256 51 4096
self.frames_conv = nn.Conv2d(576, 576, kernel_size=(12,1), stride=(10,1))
# self.frames_conv = nn.Conv2d(256, 256, kernel_size=(12,1), stride=(10,1)) # b 256 51 4096 for exp1 test uncomment it
def get_vision_tower(self):
vision_tower = getattr(self, 'vision_tower', None)
if type(vision_tower) is list:
vision_tower = vision_tower[0]
return vision_tower
def initialize_vision_modules(self, model_args, fsdp=None): # Train
vision_tower = model_args.vision_tower # 'openai/clip-vit-large-patch14'
mm_vision_select_layer = model_args.mm_vision_select_layer # -2
mm_vision_select_feature = model_args.mm_vision_select_feature # patch
pretrain_mm_mlp_adapter = model_args.pretrain_mm_mlp_adapter # '/home/wanghao/weights/llava/llava-pretrain-vicuna-7b-v1.3/mm_projector.bin'
self.config.mm_vision_tower = vision_tower
# import pdb;pdb.set_trace()
# vision_tower = build_vision_tower(model_args)
        if self.get_vision_tower() is None: ## the first finetune takes this branch, with require_grad=True; when continuing training it already exists from the pretrained model
vision_tower = build_vision_tower(model_args)
if fsdp is not None and len(fsdp) > 0:
self.vision_tower = [vision_tower]
else:
self.vision_tower = vision_tower
else: ## Implement continue finetuning.
if fsdp is not None and len(fsdp) > 0:
vision_tower = self.vision_tower[0]
else:
vision_tower = self.vision_tower
vision_tower.load_model()
self.config.use_mm_proj = True
self.config.mm_projector_type = getattr(model_args, 'mm_projector_type', 'linear')
self.config.mm_hidden_size = vision_tower.hidden_size # 1024
self.config.mm_vision_select_layer = mm_vision_select_layer # -2
self.config.mm_vision_select_feature = mm_vision_select_feature # patch
# self.mm_projector = build_vision_projector(self.config) # 1024->4096
        if getattr(self, 'mm_projector', None) is None: ## the first finetune takes this branch, with require_grad=True; when continuing training it already exists from the pretrained model
self.mm_projector = build_vision_projector(self.config)
else:
# In case it is frozen by LoRA
for p in self.mm_projector.parameters():
p.requires_grad = True
# import pdb;pdb.set_trace()
if pretrain_mm_mlp_adapter is not None:
mm_projector_weights = torch.load(pretrain_mm_mlp_adapter, map_location='cpu')
def get_w(weights, keyword):
return {k.split(keyword + '.')[1]: v for k, v in weights.items() if keyword in k} # weight:torch.Size([4096, 1024]) bias:torch.Size([4096])
# import pdb;pdb.set_trace()
self.mm_projector.load_state_dict(get_w(mm_projector_weights, 'mm_projector'))
# v1.5: mm_projector_weights['model.mm_projector.0.weight'].shape: torch.Size([4096, 1024])
# model.mm_projector.0.bias: torch.Size([4096]); model.mm_projector.2.weight: torch.Size([4096, 4096]); model.mm_projector.2.bias: torch.Size([4096])
if getattr(self, 'frames_conv', None) is None: ## Implement continue finetuning.
# self.frames_attn = MultiheadAttention(256*4096, num_heads)
# self.frames_conv = nn.Conv2d(4096, 4096, kernel_size=(12,1), stride=(10,1)) # b 4096 51 256
# self.frames_conv = nn.Conv2d(256, 256, kernel_size=(12,1), stride=(10,1)) # b 256 51 4096
self.frames_conv = nn.Conv2d(576, 576, kernel_size=(12,1), stride=(10,1)) # b 256 51 4096
# self.keyframes_attn = MultiheadAttention(256*4096, num_heads)
# import pdb;pdb.set_trace()
self.config.mm_video_fuser = 'frames_conv'
class OmniMetaForCausalLM(ABC):
@abstractmethod
def get_model(self):
pass
def get_vision_tower(self):
return self.get_model().get_vision_tower()
def encode_frames(self, frames):
frames_features = self.get_model().get_vision_tower()(frames) # torch.Size([276, 256, 1024])
frames_features = self.get_model().mm_projector(frames_features) # torch.Size([276, 256, 4096]) torch.float16
return frames_features
def prepare_inputs_labels_for_multimodal(
self, input_ids, attention_mask, past_key_values, labels, videos
):
vision_tower = self.get_vision_tower()
# import pdb;pdb.set_trace()
# frames_attn = self.get_model().frames_attn
frames_conv = self.get_model().frames_conv
# keyframes_attn = self.get_model().keyframes_attn
# import pdb;pdb.set_trace()
if vision_tower is None or videos is None or input_ids.shape[1] == 1: # False
if past_key_values is not None and vision_tower is not None and videos is not None and input_ids.shape[1] == 1:
attention_mask = torch.ones((attention_mask.shape[0], past_key_values[-1][-1].shape[-2] + 1), dtype=attention_mask.dtype, device=attention_mask.device)
return input_ids, attention_mask, past_key_values, None, labels
# videos = [torch.Size([51, 3, 224, 224]), torch.Size([79, 3, 224, 224]), torch.Size([60, 3, 224, 224]), torch.Size([86, 3, 224, 224])]
assert type(videos) is list or videos.ndim == 5 # True
concat_frames = torch.cat([video for video in videos], dim=0) # torch.Size([79, 3, 336, 336])
# import pdb;pdb.set_trace()
frames_features = self.encode_frames(concat_frames) # torch.Size([276, 256, 4096]) torch.Size([79, 576, 4096])
split_sizes = [video.shape[0] for video in videos] # [51, 79, 60, 86]
frames_features = torch.split(frames_features, split_sizes, dim=0) # (torch.Size([51, 256, 4096]), torch.Size([79, 256, 4096]), torch.Size([60, 256, 4096]), torch.Size([86, 256, 4096]))
# import pdb;pdb.set_trace()
# frames_features = [x.flatten(0, 1) for x in frames_features]
key_frames_feature = []
for frame_feature in frames_features:
# import pdb;pdb.set_trace()
frame_feature = frame_feature.unsqueeze(0) # b 51 256 4096
frame_feature = frame_feature.permute(0,2,1,3) # b 256 51 4096
# short video
if frame_feature.shape[2] >= 12:
frame_feature = frames_conv(frame_feature) # torch.Size([1, 256, 4, 4096])
frame_feature = frame_feature.squeeze(0).permute(1,0,2) # torch.Size([4, 256, 4096])
# key_frames_feature.append(frame_feature[:6])
# import pdb;pdb.set_trace()
num_frames = frame_feature.shape[0]
key_frames_feature.append(frame_feature[::max(1,num_frames//5)][:6]) # v1.5 576 patch
new_input_embeds = []
new_labels = [] if labels is not None else None
cur_video_idx = 0
for batch_idx, cur_input_ids in enumerate(input_ids): # torch.Size([4, 375])
if (cur_input_ids == IMAGE_TOKEN_INDEX).sum() == 0: # 1 False
# multimodal LLM, but the current sample is not multimodal
# FIXME: this is a hacky fix, for deepspeed zero3 to work
half_len = cur_input_ids.shape[0] // 2
cur_frames_features = key_frames_feature[cur_video_idx]
cur_input_embeds_1 = self.get_model().embed_tokens(cur_input_ids[:half_len])
cur_input_embeds_2 = self.get_model().embed_tokens(cur_input_ids[half_len:])
# import pdb;pdb.set_trace()
# cur_input_embeds = torch.cat([cur_input_embeds_1, cur_frames_features[0:0], cur_input_embeds_2], dim=0)
cur_input_embeds = torch.cat([cur_input_embeds_1, cur_frames_features[0], cur_input_embeds_2], dim=0)
# cur_input_embeds = torch.cat([cur_input_embeds_1, cur_input_embeds_2], dim=0)
new_input_embeds.append(cur_input_embeds)
if labels is not None:
new_labels.append(labels[batch_idx])
cur_video_idx += 1
# import pdb;pdb.set_trace()
# never enter it
continue
image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0] # (tensor([35], device='cuda:0'),)
cur_new_input_embeds = []
if labels is not None: # torch.Size([4, 375])
cur_labels = labels[batch_idx] # torch.Size([375]): -100...labels...-100
cur_new_labels = []
assert cur_labels.shape == cur_input_ids.shape
while image_token_indices.numel() > 0: # number of elements; 1 here
# import pdb;pdb.set_trace()
# if cur_video_idx > len(key_frames_feature)-1:
# cur_frames_features = key_frames_feature[-1] # for gradio demo
# else:
cur_frames_features = key_frames_feature[cur_video_idx] # torch.Size([4, 256, 4096])
cur_frames_features = cur_frames_features.reshape(-1,4096) # torch.Size([1024, 4096])
image_token_start = image_token_indices[0] # tensor(35, device='cuda:0')
if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False): # False
cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start-1]).detach())
cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start-1:image_token_start]))
cur_new_input_embeds.append(cur_frames_features)
cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start+1:image_token_start+2]))
if labels is not None:
cur_new_labels.append(cur_labels[:image_token_start]) | cur_new_labels.append(torch.full((cur_frames_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype)) | 2 | 2023-12-05 08:02:17+00:00 | 4k |
RobertCsordas/moe_attention | layers/transformer/relative_transformer.py | [
{
"identifier": "ActivationFunction",
"path": "layers/transformer/transformer.py",
"snippet": "class TransformerEncoderLayer(torch.nn.Module):\nclass TransformerDecoderLayer(torch.nn.Module):\nclass TransformerDecoderBase(torch.nn.Module):\n class State:\nclass TransformerEncoder(torch.nn.Module):\nclass TransformerDecoder(TransformerDecoderBase):\nclass TransformerBase(torch.nn.Module):\nclass Transformer(TransformerBase):\n def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation: ActivationFunction = F.relu,\n attention_dropout=0):\n def forward(self, src: torch.Tensor, mask: Optional[AttentionMask] = None) -> torch.Tensor:\n def reset_parameters(self):\n def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation: ActivationFunction = F.relu,\n attention_dropout=0):\n def forward(self, tgt: torch.Tensor, memory: torch.Tensor, tgt_mask: Optional[AttentionMask] = None,\n memory_key_padding_mask: Optional[torch.Tensor] = None,\n full_target: Optional[torch.Tensor] = None, pos_offset: int = 0) -> torch.Tensor:\n def reset_parameters(self):\n def __init__(self, d_model: int):\n def create_state(self, batch_size: int, max_length: int, device: torch.device) -> State:\n def one_step_forward(self, state: State, data: torch.Tensor, *args, **kwargs):\n def __init__(self, layer, n_layers: int, d_model, *args, **kwargs):\n def forward(self, data: torch.Tensor, *args, **kwargs):\n def __init__(self, layer, n_layers: int, d_model: int, *args, **kwargs):\n def forward(self, data: torch.Tensor, *args, **kwargs):\ndef TransformerEncoderWithLayer(layer: Type[torch.nn.Module] = TransformerEncoderLayer):\ndef TransformerDecoderWithLayer(layer: Type[torch.nn.Module] = TransformerDecoderLayer):\n def __init__(self, encoder: torch.nn.Module, decoder: torch.nn.Module):\n def forward(self, src: torch.Tensor, tgt: torch.Tensor, tgt_mask: Optional[torch.Tensor] = None,\n src_mask: Optional[AttentionMask] = None):\n def generate_square_subsequent_mask(sz: int, device: torch.device) -> torch.Tensor:\n def __init__(self, d_model: int = 512, nhead: int = 8, num_encoder_layers: int = 6, num_decoder_layers: int = 6,\n dim_feedforward: int = 2048, dropout: float = 0.1, activation: ActivationFunction = F.relu,\n encoder_layer=TransformerEncoderWithLayer(), decoder_layer=TransformerDecoderWithLayer(),\n attention_dropout: float = 0):"
},
{
"identifier": "FixedRelativeMultiheadAttention",
"path": "layers/transformer/multi_head_relative_pos_attention.py",
"snippet": "def shift(posmat: torch.Tensor) -> torch.Tensor:\n def __init__(self, state_size: int, n_heads: int, dropout: float, projection_size: Optional[int] = None):\n def get_attention_scores(self, mask: Optional[torch.Tensor],\n q_content: torch.Tensor, k_content: torch.Tensor,\n q_pos: torch.Tensor, k_pos: torch.Tensor,\n pos_offset: int, ar_gate: Optional[torch.Tensor] = None) -> torch.Tensor:\n def _attention(self, mask: Optional[torch.Tensor],\n q_content: torch.Tensor, k_content: torch.Tensor,\n q_pos: torch.Tensor, k_pos: torch.Tensor,\n v: torch.Tensor, pos_offset: int,\n ar_gate: Optional[torch.Tensor] = None) -> [torch.Tensor, torch.Tensor]:\n def _get_pos_subset(self, pos_encoding: torch.Tensor, length: int, offset: int) -> torch.Tensor:\n def plot(self, options: Dict[str, Any]) -> Dict[str, Any]:\n def __init__(self, state_size: int, n_heads: int, dropout: float = 0.0, input_size: Optional[int] = None,\n projection_size: Optional[int] = None, pos_clamp: Optional[int] = None,\n test_pos_clamp: Optional[int] = None):\n def _create_buffer(self, max_len: int, clamp: Optional[int] = None):\n def get_pos(self, l: int, offset: int) -> torch.Tensor:\n def __init__(self, state_size: int, n_heads: int, dropout: float = 0.0, global_pos_bias: bool = True,\n global_content_bias: bool = True, input_size: Optional[int] = None, absolute_gate: bool = False,\n projection_size: Optional[int] = None, output_size: Optional[int] = None, pos_clamp: Optional[int] = None,\n test_pos_clamp: Optional[int] = None):\n def add_head_specific_bias(self, data: torch.Tensor, bias: Optional[torch.Tensor]) -> torch.Tensor:\n def forward(self, curr_state: torch.Tensor, attend_to: torch.Tensor, mask: Optional[AttentionMask],\n pos_offset: Optional[int] = None, need_weights: bool = False):\n def reset_parameters(self):\nclass RelativeAttentionBase(MultiHeadAttentionBase):\nclass FixedRelativeMultiheadAttentionBase(RelativeAttentionBase):\nclass FixedRelativeMultiheadAttention(AttentionMergeMixin, FixedRelativeMultiheadAttentionBase):"
},
{
"identifier": "MultiHeadAttention",
"path": "layers/transformer/multi_head_attention.py",
"snippet": "class MultiHeadAttention(AttentionMergeMixin, AbsPosAttentionBase):\n def __init__(self, state_size: int, n_heads: int, dropout: float = 0.1, input_size: Optional[int] = None,\n out_size: Optional[int] = None):\n super(AbsPosAttentionBase, self).__init__(state_size, n_heads, dropout)\n\n self.data_to_kv = torch.nn.Linear(state_size, 2 * n_heads * self.projection_size, bias=False)\n self.data_to_q = torch.nn.Linear(input_size or state_size, n_heads * self.projection_size, bias=False)\n\n super(MultiHeadAttention, self).__init__(out_size)\n self.reset_parameters()\n\n def forward(self, curr_state: torch.Tensor, attend_to: torch.Tensor, mask: Optional[AttentionMask],\n need_weights: bool = False):\n # Input and output shape: [n_batch, n_steps, data_size]\n k, v = self.transform_data(attend_to, self.data_to_kv, 2)\n q, = self.transform_data(curr_state, self.data_to_q, 1)\n\n data, scores = self.merged_attention(curr_state.shape[0], q.shape[1], mask, q, k, v)\n if need_weights:\n return data, scores\n else:\n return data\n\n def reset_parameters(self):\n # super().reset_parameters()\n\n torch.nn.init.xavier_uniform_(self.data_to_q.weight)\n torch.nn.init.xavier_uniform_(self.data_to_kv.weight)\n torch.nn.init.xavier_uniform_(self.data_to_kv.weight)"
},
{
"identifier": "Transformer",
"path": "layers/transformer/transformer.py",
"snippet": "class Transformer(TransformerBase):\n def __init__(self, d_model: int = 512, nhead: int = 8, num_encoder_layers: int = 6, num_decoder_layers: int = 6,\n dim_feedforward: int = 2048, dropout: float = 0.1, activation: ActivationFunction = F.relu,\n encoder_layer=TransformerEncoderWithLayer(), decoder_layer=TransformerDecoderWithLayer(),\n attention_dropout: float = 0):\n\n super().__init__(\n encoder_layer(num_encoder_layers, d_model, nhead, dim_feedforward, dropout, activation, attention_dropout),\n decoder_layer(num_decoder_layers, d_model, nhead, dim_feedforward, dropout, activation, attention_dropout))"
},
{
"identifier": "TransformerEncoderWithLayer",
"path": "layers/transformer/transformer.py",
"snippet": "def TransformerEncoderWithLayer(layer: Type[torch.nn.Module] = TransformerEncoderLayer):\n return lambda *args, **kwargs: TransformerEncoder(layer, *args, **kwargs)"
},
{
"identifier": "TransformerDecoderWithLayer",
"path": "layers/transformer/transformer.py",
"snippet": "def TransformerDecoderWithLayer(layer: Type[torch.nn.Module] = TransformerDecoderLayer):\n return lambda *args, **kwargs: TransformerDecoder(layer, *args, **kwargs)"
}
] | from typing import Optional
from .transformer import ActivationFunction
from .multi_head_relative_pos_attention import FixedRelativeMultiheadAttention, AttentionMask
from .multi_head_attention import MultiHeadAttention
from .transformer import Transformer, TransformerEncoderWithLayer, TransformerDecoderWithLayer
import torch
import torch.nn
import torch.nn.functional as F | 3,039 |
class RelativeTransformerEncoderLayer(torch.nn.Module):
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation: ActivationFunction = F.relu,
attention_dropout=0, test_pos_clamp: Optional[int] = None, drop_expand: bool = True,
head_projection_size: Optional[int] = None, ln_after_attention: bool = True):
super().__init__()
self.ln_after_attention = ln_after_attention
self.self_attn = FixedRelativeMultiheadAttention(
d_model, nhead, dropout=attention_dropout, test_pos_clamp=test_pos_clamp,
projection_size=head_projection_size)
self.linear1 = torch.nn.Linear(d_model, dim_feedforward)
self.dropout = torch.nn.Dropout(dropout) if drop_expand else lambda x: x
self.linear2 = torch.nn.Linear(dim_feedforward, d_model)
if ln_after_attention:
self.norm1 = torch.nn.LayerNorm(d_model)
self.norm2 = torch.nn.LayerNorm(d_model)
self.dropout1 = torch.nn.Dropout(dropout)
self.dropout2 = torch.nn.Dropout(dropout)
self.activation = activation
self.reset_parameters()
def forward(self, src: torch.Tensor, mask: Optional[AttentionMask] = None, attend_to: Optional[torch.Tensor] = None,
pos_offset: Optional[int] = None) -> torch.Tensor:
src2 = self.self_attn(src, attend_to if attend_to is not None else src, mask, pos_offset=pos_offset)
src = src + self.dropout1(src2)
src = self.norm1(src) if self.ln_after_attention else src
src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
src = src + self.dropout2(src2)
src = self.norm2(src)
return src
def reset_parameters(self):
torch.nn.init.xavier_normal_(self.linear1.weight, gain=torch.nn.init.calculate_gain('relu')
if self.activation is F.relu else 1.0)
torch.nn.init.xavier_uniform_(self.linear2.weight)
class RelativeTransformerDecoderLayer(torch.nn.Module):
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation: ActivationFunction = F.relu,
attention_dropout=0, drop_expand: bool = True):
super().__init__()
self.self_attn = FixedRelativeMultiheadAttention(d_model, nhead, dropout=attention_dropout)
self.multihead_attn = MultiHeadAttention(d_model, nhead, dropout=attention_dropout)
# Implementation of Feedforward model
self.linear1 = torch.nn.Linear(d_model, dim_feedforward)
self.dropout = torch.nn.Dropout(dropout) if drop_expand else lambda x: x
self.linear2 = torch.nn.Linear(dim_feedforward, d_model)
self.norm1 = torch.nn.LayerNorm(d_model)
self.norm2 = torch.nn.LayerNorm(d_model)
self.norm3 = torch.nn.LayerNorm(d_model)
self.dropout1 = torch.nn.Dropout(dropout)
self.dropout2 = torch.nn.Dropout(dropout)
self.dropout3 = torch.nn.Dropout(dropout)
self.activation = activation
self.reset_parameters()
def forward(self, tgt: torch.Tensor, memory: torch.Tensor, tgt_mask: Optional[AttentionMask] = None,
memory_key_padding_mask: Optional[torch.Tensor] = None,
full_target: Optional[torch.Tensor] = None, pos_offset: int = 0) -> torch.Tensor:
assert pos_offset == 0 or tgt_mask is None
tgt2 = self.self_attn(tgt, tgt if full_target is None else full_target, mask=tgt_mask,
pos_offset=pos_offset)
tgt = tgt + self.dropout1(tgt2)
tgt = self.norm1(tgt)
tgt2 = self.multihead_attn(tgt, memory, mask=AttentionMask(memory_key_padding_mask, None))
tgt = tgt + self.dropout2(tgt2)
tgt = self.norm2(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
tgt = tgt + self.dropout3(tgt2)
tgt = self.norm3(tgt)
return tgt
def reset_parameters(self):
torch.nn.init.xavier_uniform_(self.linear1.weight, gain=torch.nn.init.calculate_gain('relu')
if self.activation is F.relu else 1.0)
torch.nn.init.xavier_uniform_(self.linear2.weight)
class RelativeTransformer(Transformer):
def __init__(self, d_model: int = 512, nhead: int = 8, num_encoder_layers: int = 6,
num_decoder_layers: int = 6, dim_feedforward: int = 2048, dropout: float = 0.1,
activation: ActivationFunction = F.relu, attention_dropout: float = 0):
super().__init__(d_model, nhead, num_encoder_layers, num_decoder_layers, dim_feedforward, dropout, activation,
TransformerEncoderWithLayer(RelativeTransformerEncoderLayer),
|
class RelativeTransformerEncoderLayer(torch.nn.Module):
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation: ActivationFunction = F.relu,
attention_dropout=0, test_pos_clamp: Optional[int] = None, drop_expand: bool = True,
head_projection_size: Optional[int] = None, ln_after_attention: bool = True):
super().__init__()
self.ln_after_attention = ln_after_attention
self.self_attn = FixedRelativeMultiheadAttention(
d_model, nhead, dropout=attention_dropout, test_pos_clamp=test_pos_clamp,
projection_size=head_projection_size)
self.linear1 = torch.nn.Linear(d_model, dim_feedforward)
self.dropout = torch.nn.Dropout(dropout) if drop_expand else lambda x: x
self.linear2 = torch.nn.Linear(dim_feedforward, d_model)
if ln_after_attention:
self.norm1 = torch.nn.LayerNorm(d_model)
self.norm2 = torch.nn.LayerNorm(d_model)
self.dropout1 = torch.nn.Dropout(dropout)
self.dropout2 = torch.nn.Dropout(dropout)
self.activation = activation
self.reset_parameters()
def forward(self, src: torch.Tensor, mask: Optional[AttentionMask] = None, attend_to: Optional[torch.Tensor] = None,
pos_offset: Optional[int] = None) -> torch.Tensor:
src2 = self.self_attn(src, attend_to if attend_to is not None else src, mask, pos_offset=pos_offset)
src = src + self.dropout1(src2)
src = self.norm1(src) if self.ln_after_attention else src
src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
src = src + self.dropout2(src2)
src = self.norm2(src)
return src
def reset_parameters(self):
torch.nn.init.xavier_normal_(self.linear1.weight, gain=torch.nn.init.calculate_gain('relu')
if self.activation is F.relu else 1.0)
torch.nn.init.xavier_uniform_(self.linear2.weight)
class RelativeTransformerDecoderLayer(torch.nn.Module):
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation: ActivationFunction = F.relu,
attention_dropout=0, drop_expand: bool = True):
super().__init__()
self.self_attn = FixedRelativeMultiheadAttention(d_model, nhead, dropout=attention_dropout)
self.multihead_attn = MultiHeadAttention(d_model, nhead, dropout=attention_dropout)
# Implementation of Feedforward model
self.linear1 = torch.nn.Linear(d_model, dim_feedforward)
self.dropout = torch.nn.Dropout(dropout) if drop_expand else lambda x: x
self.linear2 = torch.nn.Linear(dim_feedforward, d_model)
self.norm1 = torch.nn.LayerNorm(d_model)
self.norm2 = torch.nn.LayerNorm(d_model)
self.norm3 = torch.nn.LayerNorm(d_model)
self.dropout1 = torch.nn.Dropout(dropout)
self.dropout2 = torch.nn.Dropout(dropout)
self.dropout3 = torch.nn.Dropout(dropout)
self.activation = activation
self.reset_parameters()
def forward(self, tgt: torch.Tensor, memory: torch.Tensor, tgt_mask: Optional[AttentionMask] = None,
memory_key_padding_mask: Optional[torch.Tensor] = None,
full_target: Optional[torch.Tensor] = None, pos_offset: int = 0) -> torch.Tensor:
assert pos_offset == 0 or tgt_mask is None
tgt2 = self.self_attn(tgt, tgt if full_target is None else full_target, mask=tgt_mask,
pos_offset=pos_offset)
tgt = tgt + self.dropout1(tgt2)
tgt = self.norm1(tgt)
tgt2 = self.multihead_attn(tgt, memory, mask=AttentionMask(memory_key_padding_mask, None))
tgt = tgt + self.dropout2(tgt2)
tgt = self.norm2(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
tgt = tgt + self.dropout3(tgt2)
tgt = self.norm3(tgt)
return tgt
def reset_parameters(self):
torch.nn.init.xavier_uniform_(self.linear1.weight, gain=torch.nn.init.calculate_gain('relu')
if self.activation is F.relu else 1.0)
torch.nn.init.xavier_uniform_(self.linear2.weight)
class RelativeTransformer(Transformer):
def __init__(self, d_model: int = 512, nhead: int = 8, num_encoder_layers: int = 6,
num_decoder_layers: int = 6, dim_feedforward: int = 2048, dropout: float = 0.1,
activation: ActivationFunction = F.relu, attention_dropout: float = 0):
super().__init__(d_model, nhead, num_encoder_layers, num_decoder_layers, dim_feedforward, dropout, activation,
TransformerEncoderWithLayer(RelativeTransformerEncoderLayer), | TransformerDecoderWithLayer(RelativeTransformerDecoderLayer), attention_dropout) | 5 | 2023-12-13 08:45:02+00:00 | 4k |
riccardomusmeci/mlx-llm | src/mlx_llm/model/_factory.py | [
{
"identifier": "MODEL_ENTRYPOINTS",
"path": "src/mlx_llm/model/_registry.py",
"snippet": "MODEL_ENTRYPOINTS = {\n \"Phi2\": phi2,\n \"LLaMA-2-7B-chat\": llama_2_7B_chat,\n \"TinyLlama-1.1B-Chat-v0.6\": tiny_llama_chat_v06,\n # \"Mistral-7B-Instruct-v0.1\": mistral_7B_instruct_v01,\n \"Mistral-7B-Instruct-v0.2\": mistral_7B_instruct_v02,\n \"OpenHermes-2.5-Mistral-7B\": openhermes_25_mistral_7B,\n \"e5-mistral-7b-instruct\": e5_mistral_7b_instruct\n}"
},
{
"identifier": "load_weights",
"path": "src/mlx_llm/model/_utils.py",
"snippet": "def load_weights(\n model: nn.Module,\n weights: str,\n strict: bool = True,\n verbose: bool = False\n) -> nn.Module:\n \"\"\"Load weights from a given path.\n\n Args:\n model (nn.Module): a LLM model\n weights (str): path to weights\n strict (bool, optional): whether to strictly enforce that the keys in weights match the keys of the model. Defaults to True.\n verbose (bool, optional): whether to print information during loading. Defaults to False.\n\n Returns:\n nn.Module: an nn.Module with loaded weights\n \"\"\"\n \n assert os.path.exists(weights), f\"Weights path {weights} does not exist.\"\n \n if verbose: print(f\"> Loading weights from {weights}\")\n \n weights = list(mx.load(weights).items())\n \n new_state = dict(weights)\n # create a torch-like state dict { layer_name: weights }\n model_state = dict(tree_flatten(model.parameters()))\n \n # check if new_state does not have more keys\n extras = set(new_state.keys()) - set(model_state.keys())\n if extras:\n extras = \" \".join(list(extras))\n if strict:\n raise ValueError(f\"Found extra keys in weights file: {extras}\")\n else:\n if verbose: print(f\"\\t- [WARNING] Found extra keys in weights file: {extras}\")\n \n # check if new_state does not have less keys\n missing = set(model_state.keys()) - set(new_state.keys())\n if missing:\n missing = \" \".join(list(missing))\n if strict:\n raise ValueError(f\"Missing keys in weights file: {missing}\")\n else:\n if verbose: print(f\"\\t- [WARNING] Missing keys in weights file: {missing}\")\n \n for k, w in model_state.items():\n try:\n new_w = new_state[k]\n except KeyError:\n if strict:\n raise ValueError(f\"Missing key {k} in weights file\")\n else:\n if verbose: print(f\"\\t- [WARNING] Missing key {k} in weights file\")\n continue\n \n # checking if new_w is an mx.array first\n if not isinstance(new_w, mx.array):\n if strict:\n raise ValueError(f\"Expected mx.array for key {k}, got {type(new_w)}\")\n else:\n if verbose: print(f\"\\t- [WARNING] Expected mx.array for key {k}, got {type(new_w)}\")\n # checking if new_w has the same shape as w\n if new_w.shape != w.shape:\n if strict:\n raise ValueError(f\"Expected shape {w.shape} for key {k}, got {new_w.shape}\")\n else:\n if verbose: print(f\"\\t- [WARNING] Expected shape {w.shape} for key {k}, got {new_w.shape}\")\n \n model.update(tree_unflatten(weights))\n \n return model"
},
{
"identifier": "load_weights_from_hf",
"path": "src/mlx_llm/model/_utils.py",
"snippet": "def load_weights_from_hf(\n model: nn.Module,\n model_name: str,\n strict: bool = True,\n verbose: bool = False\n) -> nn.Module:\n \"\"\"Load weights from HuggingFace Hub.\n\n Args:\n model (nn.Module): an LLM model\n model_name (str): model namw\n strict (bool, optional): whether to strictly enforce that the keys in weights match the keys of the model. Defaults to True.\n verbose (bool, optional): whether to print information during loading. Defaults to False.\n\n Returns:\n nn.Module: an LLM with loaded weights\n \"\"\"\n try:\n repo_id = MODEL_WEIGHTS[model_name][\"repo_id\"]\n filename = MODEL_WEIGHTS[model_name][\"filename\"]\n weights_path = hf_hub_download(repo_id=repo_id, repo_type=\"model\", filename=filename)\n except Exception as e:\n print(f\"Error while downloading weights from HuggingFace Hub: {e}. Weights won't be loaded.\")\n weights_path = None\n \n if weights_path is not None:\n model = load_weights(\n model=model,\n weights=weights_path,\n strict=strict,\n verbose=verbose\n )\n return model "
}
] | from ._registry import MODEL_ENTRYPOINTS
from typing import Optional, Tuple, Union
from ._utils import load_weights, load_weights_from_hf
import mlx.nn as nn | 1,641 |
__all__ = ["list_models", "create_model"]
def list_models() -> None:
"""List all available LLM models.
"""
print("Available models:")
for model_name in list(MODEL_ENTRYPOINTS.keys()):
print(f"\t- {model_name}")
def create_model(model_name: str, weights: Union[str, bool] = True, strict: bool = False, verbose: bool = False) -> nn.Module:
"""Create a LLM model.
Example:
```
>>> from mlx_llm.model import create_model
>>> # Create a Phi2 model with no pretrained weights.
>>> model = create_model('Phi2')
>>> # Create a Phi2 model with pretrained weights from HF.
>>> model = create_model('Phi2', weights=True)
>>> # Create a Phi2 model with custom weights.
>>> model = create_model('Phi2', weights="path/to/weights.npz")
```
Args:
model_name (str): model name
weights (Union[str, bool]): if True, load pretrained weights from HF. If str, load weights from the given path. Defaults to True.
strict (bool, optional): whether to strictly enforce that the keys in weights match the keys of the model. Defaults to False.
verbose (bool, optional): whether to print the model summary. Defaults to False.
Returns:
nn.Module: a LLM model
Raises:
ValueError: Unknown model name
"""
if model_name not in MODEL_ENTRYPOINTS:
raise ValueError(f"Unknown model name: {model_name}.")
model = MODEL_ENTRYPOINTS[model_name]()
if weights and isinstance(weights, bool):
model = load_weights_from_hf(
model=model,
model_name=model_name,
strict=strict,
verbose=verbose
)
elif isinstance(weights, str):
|
__all__ = ["list_models", "create_model"]
def list_models() -> None:
"""List all available LLM models.
"""
print("Available models:")
for model_name in list(MODEL_ENTRYPOINTS.keys()):
print(f"\t- {model_name}")
def create_model(model_name: str, weights: Union[str, bool] = True, strict: bool = False, verbose: bool = False) -> nn.Module:
"""Create a LLM model.
Example:
```
>>> from mlx_llm.model import create_model
>>> # Create a Phi2 model with no pretrained weights.
>>> model = create_model('Phi2')
>>> # Create a Phi2 model with pretrained weights from HF.
>>> model = create_model('Phi2', weights=True)
>>> # Create a Phi2 model with custom weights.
>>> model = create_model('Phi2', weights="path/to/weights.npz")
```
Args:
model_name (str): model name
weights (Union[str, bool]): if True, load pretrained weights from HF. If str, load weights from the given path. Defaults to True.
strict (bool, optional): whether to strictly enforce that the keys in weights match the keys of the model. Defaults to False.
verbose (bool, optional): whether to print the model summary. Defaults to False.
Returns:
nn.Module: a LLM model
Raises:
ValueError: Unknown model name
"""
if model_name not in MODEL_ENTRYPOINTS:
raise ValueError(f"Unknown model name: {model_name}.")
model = MODEL_ENTRYPOINTS[model_name]()
if weights and isinstance(weights, bool):
model = load_weights_from_hf(
model=model,
model_name=model_name,
strict=strict,
verbose=verbose
)
elif isinstance(weights, str): | model = load_weights( | 1 | 2023-12-07 16:19:47+00:00 | 4k |
xetdata/xetcache | xetcache/xetmemo_kernel_extension.py | [
{
"identifier": "hash_anything",
"path": "xetcache/util.py",
"snippet": "def hash_anything(x):\n return hashlib.sha256(pickle.dumps(x)).hexdigest()"
},
{
"identifier": "probe_memo",
"path": "xetcache/util.py",
"snippet": "def probe_memo(memopath, inputhashstr, key=None):\n \"\"\"\n Locate the memo from the provided input.\n \"\"\"\n memo_file = inputhashstr + '.pickle'\n if key is None:\n full_memo_file = os.path.join(memopath, inputhashstr + '.pickle')\n else:\n key = str(key)\n full_memo_file = os.path.join(memopath, key, inputhashstr + '.pickle')\n if full_memo_file.startswith(\"xet://\"):\n try:\n openfile = fsspec.open(full_memo_file, 'rb')\n fbytestr = None\n with openfile as f:\n print(f\"Loading from {memo_file}\")\n # reading from a string first will avoid potential tiny\n # reads that are extraordinarily slow\n fbytestr = f.read()\n result = pickle.loads(fbytestr)\n return result\n except Exception as e:\n if str(\"404 Not Found\") in str(e):\n return None\n print(f'Failed to load: {e}')\n return None\n elif os.path.exists(full_memo_file):\n if file_is_pointer_file(full_memo_file):\n materialized = materialize_pointer_file(full_memo_file)\n else:\n materialized = True\n if materialized:\n with open(full_memo_file, 'rb') as f:\n print(f\"Loading from {memo_file}\")\n result = pickle.load(f)\n return result\n return None"
},
{
"identifier": "store_memo",
"path": "xetcache/util.py",
"snippet": "def store_memo(memopath, inputhashstr, store, key):\n \"\"\"\n Locate the memo from the provided input.\n \"\"\"\n memo_file = inputhashstr + '.pickle'\n if key is None:\n full_memo_file = os.path.join(memopath, inputhashstr + '.pickle')\n else:\n key = str(key)\n full_memo_file = os.path.join(memopath, key, inputhashstr + '.pickle')\n memopath = os.path.join(memopath, key)\n if full_memo_file.startswith(\"xet://\"):\n fs = fsspec.filesystem(\"xet\")\n with fs.transaction:\n openfile = fsspec.open(full_memo_file, 'wb')\n with openfile as f:\n print(f\"Writing to {memo_file}\")\n pickle.dump(store, f)\n else:\n os.makedirs(memopath, exist_ok=True)\n with open(full_memo_file, 'wb') as f:\n print(f\"Writing to {memo_file}\")\n pickle.dump(store, f)\n return None"
},
{
"identifier": "get_memo_path",
"path": "xetcache/config.py",
"snippet": "def get_memo_path():\n \"\"\"\n Reads the current memo path\n \"\"\"\n return _MEMOPATH"
},
{
"identifier": "get_runtime_threshold",
"path": "xetcache/config.py",
"snippet": "def get_runtime_threshold():\n \"\"\"\n Reads the current runtime threshold in seconds. \n Only functions or cells which run longer than this will be cached.\n \"\"\"\n return _RUNTIME_THRESHOLD_SEC"
}
] | import os
import time
from .util import hash_anything, probe_memo, store_memo
from .config import get_memo_path, get_runtime_threshold
from IPython.core.magic import Magics, magics_class, cell_magic | 1,730 |
@magics_class
class XMemoMagics(Magics):
"""Memoization for data science tasks
%load_ext xetcache
to load the extension
"""
def __init__(self, *args, **kwargs):
print(self.xetmemo.__doc__)
memopath = get_memo_path()
print(f"Memoizing to {memopath}")
super().__init__(*args, **kwargs)
@cell_magic
def xetmemo(self, line, cell):
'''
Usage:
%%xetmemo input=v1,v2 output=v3,v4
Caches the specified output variables each time it is called.
If called later with the same inputs, the cached value is returned
and not reevaluated. This is persistent across Python runs.
Any content changes to the input variables or cell code will
force reevaluation of the cell. Otherwise the outputs will simply be
retrieved from the memo.
This memo is persistent across Python processes and, if XetHub is used
(see `xetcache.set_xet_project`), can be shared with others.
For performance reasons, only functions which take more than 3
seconds (configurable from config.set_runtime_threshold) will be
cached. "always=True" can be added to the xetmemo arguments to
ignore the runtime and to always cache
%%xetmemo input=v1,v2 output=v3,v4 always=True
Note that inputs can be anything picklable including functions.
A key parameter can be added to group the stored objects together.
Objects stored with one key will not be retrievable with a different
key
%%xetmemo input=v1,v2 output=v3,v4 always=True key=experiment1
Also see the `xetcache.xetmemo` decorator for a version that can be
used as a function decorator
'''
# parse the argument list
args = line.strip().split(' ')
inputvars = []
outputvars = []
ip = self.shell
always = False
key = None
for arg in args:
k, v = arg.split('=')
if k == 'input':
inputvars = [x.strip() for x in v.split(',')]
elif k == 'output':
outputvars = [x.strip() for x in v.split(',')]
elif k == 'always':
always = (v.strip() == 'True')
elif k == 'key':
key = v.strip()
else:
raise RuntimeError(f'Unexpected xmemo key type {k}')
# we hash the xetmemo line, and the contents of the cell
# and all the variables in the input line
inputhashes = [hash_anything(line), hash_anything(cell)]
for i in inputvars:
try:
var = ip.ev(i)
except Exception as e:
print(f"Unable to read variable {i}. Error {e}")
return
try:
h = hash_anything(var)
except Exception as e:
print(f"Unable to hash variable {i}. Error {e}")
return
inputhashes.append(h)
# Then we hash the list of hashes and use that as the filename
inputhashstr = hash_anything(inputhashes)
memopath = get_memo_path()
runtime_threshold = get_runtime_threshold()
try:
retrieved_vals = probe_memo(memopath, inputhashstr, key)
if retrieved_vals is not None:
keys = retrieved_vals.keys()
print(f"Retrieving variables {list(keys)}")
for k, v in retrieved_vals.items():
ip.user_ns[k] = v
return
except Exception as e:
print(f"Unable to load from memo from {memopath}: {e}")
print("Executing the cell normally")
start_time = time.time()
ret = ip.run_cell(cell)
elapsed_time = time.time() - start_time
if ret.success and (always or elapsed_time > runtime_threshold):
try:
storedict = {}
for v in outputvars:
if v not in ip.user_ns:
print(f"{v} not found in scope. Error in specification. Not memoizing.")
return
storedict[v] = ip.user_ns[v]
|
@magics_class
class XMemoMagics(Magics):
"""Memoization for data science tasks
%load_ext xetcache
to load the extension
"""
def __init__(self, *args, **kwargs):
print(self.xetmemo.__doc__)
memopath = get_memo_path()
print(f"Memoizing to {memopath}")
super().__init__(*args, **kwargs)
@cell_magic
def xetmemo(self, line, cell):
'''
Usage:
%%xetmemo input=v1,v2 output=v3,v4
Caches the specified output variables each time it is called.
If called later with the same inputs, the cached value is returned
and not reevaluated. This is persistent across Python runs.
Any content changes to the input variables or cell code will
force reevaluation of the cell. Otherwise the outputs will simply be
retrieved from the memo.
This memo is persistent across Python processes and, if XetHub is used
(see `xetcache.set_xet_project`), can be shared with others.
For performance reasons, only functions which take more than 3
seconds (configurable from config.set_runtime_threshold) will be
cached. "always=True" can be added to the xetmemo arguments to
ignore the runtime and to always cache
%%xetmemo input=v1,v2 output=v3,v4 always=True
Note that inputs can be anything picklable including functions.
A key parameter can be added to group the stored objects together.
Objects stored with one key will not be retrievable with a different
key
%%xetmemo input=v1,v2 output=v3,v4 always=True key=experiment1
Also see the `xetcache.xetmemo` decorator for a version that can be
used as a function decorator
'''
# parse the argument list
args = line.strip().split(' ')
inputvars = []
outputvars = []
ip = self.shell
always = False
key = None
for arg in args:
k, v = arg.split('=')
if k == 'input':
inputvars = [x.strip() for x in v.split(',')]
elif k == 'output':
outputvars = [x.strip() for x in v.split(',')]
elif k == 'always':
always = (v.strip() == 'True')
elif k == 'key':
key = v.strip()
else:
raise RuntimeError(f'Unexpected xmemo key type {k}')
# we hash the xetmemo line, and the contents of the cell
# and all the variables in the input line
inputhashes = [hash_anything(line), hash_anything(cell)]
for i in inputvars:
try:
var = ip.ev(i)
except Exception as e:
print(f"Unable to read variable {i}. Error {e}")
return
try:
h = hash_anything(var)
except Exception as e:
print(f"Unable to hash variable {i}. Error {e}")
return
inputhashes.append(h)
# Then we hash the list of hashes and use that as the filename
inputhashstr = hash_anything(inputhashes)
memopath = get_memo_path()
runtime_threshold = get_runtime_threshold()
try:
retrieved_vals = probe_memo(memopath, inputhashstr, key)
if retrieved_vals is not None:
keys = retrieved_vals.keys()
print(f"Retrieving variables {list(keys)}")
for k, v in retrieved_vals.items():
ip.user_ns[k] = v
return
except Exception as e:
print(f"Unable to load from memo from {memopath}: {e}")
print("Executing the cell normally")
start_time = time.time()
ret = ip.run_cell(cell)
elapsed_time = time.time() - start_time
if ret.success and (always or elapsed_time > runtime_threshold):
try:
storedict = {}
for v in outputvars:
if v not in ip.user_ns:
print(f"{v} not found in scope. Error in specification. Not memoizing.")
return
storedict[v] = ip.user_ns[v] | store_memo(memopath, inputhashstr, storedict, key) | 2 | 2023-12-05 21:59:08+00:00 | 4k |