Dataset schema (column, type, value range):

  repo_name            string   length 7 to 71
  file_path            string   length 5 to 118
  context              list     -
  import_statement     string   length 45 to 12.5k
  token_num            int64    641 to 99.4k
  cropped_code         string   length 44 to 17k
  all_code             string   length 43 to 754k
  next_line            string   length 2 to 330
  gold_snippet_index   int64    0 to 68
  created_at           string   length 25 (fixed)
  level                string   categorical, 9 classes
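Each record pairs a source file from a real repository with retrieved in-repo context snippets and a next line to complete; the long unlabeled blocks in the records below are the context list, the cropped_code, and the all_code fields. As a minimal sketch, a record could be typed in Python as follows, assuming the context entries keep the identifier/path/snippet keys that appear in the records (the type names ContextSnippet and Record are hypothetical):

```python
from typing import List, TypedDict

class ContextSnippet(TypedDict):
    # One snippet retrieved from elsewhere in the same repository.
    identifier: str  # e.g. "make_pic"
    path: str        # file the snippet was taken from
    snippet: str     # source code of the snippet

class Record(TypedDict):
    # Field names and types mirror the schema above.
    repo_name: str
    file_path: str
    context: List[ContextSnippet]
    import_statement: str
    token_num: int
    cropped_code: str
    all_code: str
    next_line: str            # line to be predicted
    gold_snippet_index: int   # appears to index into `context`
    created_at: str           # timestamp, fixed length 25
    level: str                # one of 9 classes, e.g. "4k"
```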
repo_name: kkchara/get-beastsaber-score
file_path: plug/plugins/beatsaber/fun/main.py
[ { "identifier": "make_pic", "path": "plug/plugins/beatsaber/fun/make_pic.py", "snippet": "def make_pic(account, up=\"get-beastsaber-score/plugins/beatsaber/\"):\n # 账户\n acc = str(account)\n\n # 获取用户信息\n with open(f\"{up}data/info/user.json\", \"r\", encoding=\"utf-8\") as f:\n user_dict = json.load(f)\n\n # 获取hash-key对应表\n with open(f\"{up}data/info/hash_key.json\", \"r\", encoding=\"utf-8\") as f:\n hash_key_dict = json.load(f)\n\n # 获取歌曲信息\n with open(f\"{up}data/info/page1.json\", \"r\", encoding=\"utf-8\") as f:\n page1_dict = json.load(f)\n\n with open(f\"{up}data/info/page2.json\", \"r\", encoding=\"utf-8\") as f:\n page2_dict = json.load(f)\n\n with open(f\"{up}data/info/page3.json\", \"r\", encoding=\"utf-8\") as f:\n page3_dict = json.load(f)\n\n with open(f\"{up}data/info/page4.json\", \"r\", encoding=\"utf-8\") as f:\n page4_dict = json.load(f)\n\n # 页面列表\n page_list = [page1_dict, page2_dict, page3_dict, page4_dict]\n\n # 字体\n font_name = ImageFont.truetype(font=f'{up}data/font/STHUPO.TTF', size=165)\n font_song = ImageFont.truetype(font=f'{up}data/font/impact.ttf', size=34)\n font_pp = ImageFont.truetype(font=f'{up}data/font/impact.ttf', size=145)\n font_rank = ImageFont.truetype(font=f'{up}data/font/FZYTK.TTF', size=28)\n\n # 创建一个空白图像\n image = Image.new('RGBA', (2560, 1700), color=(160, 160, 160, 0))\n\n # 整体圆角蒙版效果\n # image.paste(Image.open(f\"{up}data/img/upper_musk.png\"), (0, 0))\n # 绘图对象\n draw = ImageDraw.Draw(image)\n\n # 数据块规格\n y_start_height = 560\n x_start_weight = 120\n y_end_height = 100\n x_end_weight = 120\n block_height = 140\n block_weight = 420\n between_h = 40\n between_w = 62\n head_height = 360\n head_weight = 1400\n y_head_start_height = (y_start_height - head_height) // 2\n x_head_start_weight = x_start_weight\n\n # 头部色块\n image_head = Image.new('RGBA', (head_weight, head_height), color=(0, 0, 0, 160))\n # 歌曲块色块\n image_fill = Image.new('RGBA', (block_weight, block_height), color=(0, 0, 0, 225))\n\n # 构建背景\n if os.path.exists(f\"{up}data/img/back/{acc}.jpg\"):\n image.paste(Image.open(f\"{up}data/img/back/{acc}.jpg\"), (0, 0))\n else:\n image.paste(Image.open(f\"{up}data/img/back/default.jpg\"), (0, 0))\n\n # 构建头部\n image.paste(image_head, (\n x_head_start_weight, y_head_start_height, x_head_start_weight + head_weight, y_head_start_height + head_height),\n Image.open(f\"{up}data/img/musk/head_musk.png\"))\n # 构建头像\n image.paste(Image.open(f\"{up}data/img/avatar.jpg\"), (\n x_head_start_weight, y_head_start_height, x_head_start_weight + head_height, y_head_start_height + head_height),\n Image.open(f\"{up}data/img/musk/avatar_musk.png\"))\n\n # 写名字\n draw.text(xy=(x_head_start_weight + head_height + 30, y_head_start_height + 25), text=user_dict[\"name\"],\n fill=(200, 255, 255),\n font=font_name)\n\n # 写pp\n user_pp = user_dict[\"pp\"]\n draw.text(xy=(x_head_start_weight + head_height + 30, y_head_start_height + 40 + 140),\n text=str(\"%.2f\" % user_pp) + \" PP\",\n fill=(0, 50, 80),\n font=font_pp)\n\n # 写国家和排名\n\n # 装饰\n # image.paste(Image.open(f\"data/img/DLX1.png\"), (1260, 150), Image.open(f\"data/img/DLX1.png\"))\n\n # 循环构建歌曲块\n page = 0\n count = 0\n\n # 注意先竖后横\n while x_start_weight < 2560 - x_end_weight:\n y_start_height = 560\n while y_start_height < 1700 - y_end_height:\n\n # 当前歌曲json\n if count <= 7:\n current_page = page_list[page]\n else:\n page += 1\n count = 0\n current_page = page_list[page]\n current_song = current_page[\"playerScores\"][count]\n count += 1\n # 构建圆角半透明底板\n image.paste(image_fill,\n (x_start_weight, y_start_height, 
x_start_weight + block_weight, y_start_height + block_height),\n Image.open(f\"{up}data/img/musk/song_musk.png\"))\n # 构建封面\n path = current_song[\"leaderboard\"][\"songHash\"]\n image.paste(Image.open(f\"{up}data/info/cover//{path}.png\"),\n (x_start_weight, y_start_height, x_start_weight + block_height, y_start_height + block_height),\n Image.open(f\"{up}data/img/musk/cover_musk.png\"))\n # 等级\n # if current_song[\"score\"][\"level\"] == 0:\n # image.paste(Image.open(f\"{up}data/img/Rank_SS.png\"),\n # (x_start_weight - 40, y_start_height - 40),\n # Image.open(f\"{up}data/img/Rank_SS.png\"))\n\n # 难度\n\n # 是否fc\n if current_song[\"score\"][\"fullCombo\"]:\n image.paste(Image.open(f\"{up}data/img/FC.png\"),\n (x_start_weight - 120 + block_weight - 10, y_start_height - 52 + block_height),\n Image.open(f\"{up}data/img/FC.png\"))\n\n # 文字部分\n # 歌名,超出17字符长度的部分要截去\n song_name = current_song[\"leaderboard\"][\"songName\"]\n re_song_name = r\"(.+)\\s[^\\s]+\"\n while len(song_name) > 17:\n song_name = re.search(re_song_name, song_name).group(1)\n # print(song_name)\n continue\n draw.text(xy=(x_start_weight + block_height + 25, y_start_height + 10),\n text=song_name,\n fill=(220, 220, 255), font=font_song)\n # rank\n draw.text(xy=(x_start_weight + block_height + 25, y_start_height + 52),\n text=\"RANK \" + str(current_song[\"leaderboard\"][\"stars\"]),\n fill=(10, 90, 210),\n font=font_rank)\n # 准度\n # draw.text(xy=(x_start_weight + block_height + 170, y_start_height + 55), text=info_list[4][4] + \"%\", fill=(0, 160, 160),\n # font=font2)\n\n # key\n draw.text(xy=(x_start_weight + block_height + 170, y_start_height + 52), text=\"! \" + hash_key_dict[current_song[\"leaderboard\"][\"songHash\"]],\n fill=(46, 210, 231),\n font=font_rank)\n\n # 歌曲pp\n pp = current_song[\"score\"][\"pp\"]\n draw.text(xy=(x_start_weight + block_height + 25, y_start_height + 85),\n text=str(\"%.2f\" % pp) + \"PP\", fill=(280, 160, 280),\n font=font_song)\n\n y_start_height += block_height + between_h\n x_start_weight += block_weight + between_w\n\n image.save(rf'{up}data/img/score.png')\n image.show()" }, { "identifier": "get_info", "path": "plug/plugins/beatsaber/fun/get_info.py", "snippet": "def get_json(acc, up=\"get-beastsaber-score/plugins/beatsaber/\"):\ndef get_avatar(scc, up=\"get-beastsaber-score/plugins/beatsaber/\"):\ndef get_hash(up=\"get-beastsaber-score/plugins/beatsaber/\"):\ndef get_cover(hash_list, up=\"get-beastsaber-score/plugins/beatsaber/\"):\ndef get_key(hash_list: list, up=\"get-beastsaber-score/plugins/beatsaber/\"):" } ]
import_statement: from plug.plugins.beatsaber.fun import make_pic from plug.plugins.beatsaber.fun import get_info
token_num: 2,348
acc = input("your SS account:") get_info.get_json(acc) # 提取hash列表 hash_list = get_info.get_hash() # 获取key get_info.get_key(hash_list) # 处理用户头像 get_info.get_avatar(acc) # 处理歌曲封面 get_info.get_cover(hash_list) # 生成图片
all_code: identical to the cropped_code block above for this record.
next_line: make_pic.make_pic(acc)
gold_snippet_index: 0
created_at: 2023-12-05 09:36:30+00:00
level: 4k
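The first record also hints at the meaning of gold_snippet_index: index 0 points at the make_pic context snippet, and the target next_line indeed calls make_pic.make_pic(acc). A small helper sketch under that assumption (the function name is hypothetical and reuses the Record type from the sketch above):

```python
def gold_snippet(record: Record) -> ContextSnippet:
    # Assumption: gold_snippet_index selects the context snippet that the
    # ground-truth next_line actually relies on.
    return record["context"][record["gold_snippet_index"]]

# For the record above, index 0 returns the "make_pic" entry, which matches
# next_line = "make_pic.make_pic(acc)".
```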
repo_name: lbcb-sci/GNNome
file_path: graph_dataset.py
[ { "identifier": "get_config", "path": "config.py", "snippet": "def get_config():\n return {\n 'checkpoints_path': 'checkpoints',\n 'models_path': 'models',\n \n 'tool_dir': 'vendor',\n 'raven_dir': 'vendor/raven-1.8.1',\n 'hifiasm_dir': 'vendor/hifiasm-0.18.8',\n 'pbsim3_dir': 'vendor/pbsim3',\n \n 'sample_profile_id': '',\n 'sample_file': '',\n 'sequencing_depth': 60,\n }" }, { "identifier": "preprocess_graph", "path": "utils.py", "snippet": "def preprocess_graph(g, data_path, idx):\n g = g.int()\n g.ndata['x'] = torch.ones(g.num_nodes(), 1)\n ol_len = g.edata['overlap_length'].float()\n ol_sim = g.edata['overlap_similarity']\n ol_len = (ol_len - ol_len.mean()) / ol_len.std()\n if get_hyperparameters()['use_similarities']:\n g.edata['e'] = torch.cat((ol_len.unsqueeze(-1), ol_sim.unsqueeze(-1)), dim=1)\n else:\n g.edata['e'] = ol_len.unsqueeze(-1)\n return g" }, { "identifier": "add_positional_encoding", "path": "utils.py", "snippet": "def add_positional_encoding(g):\n \"\"\"\n Initializing positional encoding with k-RW-PE\n \"\"\"\n\n g.ndata['in_deg'] = g.in_degrees().float()\n g.ndata['out_deg'] = g.out_degrees().float()\n \n pe_dim = get_hyperparameters()['nb_pos_enc']\n pe_type = get_hyperparameters()['type_pos_enc']\n \n if pe_dim == 0:\n return g\n\n if pe_type == 'RW':\n # Geometric diffusion features with Random Walk\n A = g.adjacency_matrix(scipy_fmt=\"csr\")\n Dinv = sp.diags(dgl.backend.asnumpy(g.in_degrees()).clip(1) ** -1.0, dtype=float) # D^-1\n RW = A @ Dinv \n M = RW\n # Iterate\n PE = [torch.from_numpy(M.diagonal()).float()]\n M_power = M\n for _ in range(pe_dim-1):\n M_power = M_power @ M\n PE.append(torch.from_numpy(M_power.diagonal()).float())\n PE = torch.stack(PE,dim=-1)\n g.ndata['pe'] = PE \n\n if pe_type == 'PR':\n # k-step PageRank features\n A = g.adjacency_matrix(scipy_fmt=\"csr\")\n D = A.sum(axis=1) # out degree\n Dinv = 1./ (D+1e-9); Dinv[D<1e-9] = 0 # take care of nodes without outgoing edges\n Dinv = sp.diags(np.squeeze(np.asarray(Dinv)), dtype=float) # D^-1 \n P = (Dinv @ A).T \n n = A.shape[0]\n One = np.ones([n])\n x = One/ n\n PE = [] \n alpha = 0.95 \n for _ in range(pe_dim): \n x = alpha* P.dot(x) + (1.0-alpha)/n* One \n PE.append(torch.from_numpy(x).float())\n PE = torch.stack(PE,dim=-1)\n g.ndata['pe'] = PE \n\n return g" }, { "identifier": "extract_contigs", "path": "utils.py", "snippet": "def extract_contigs(path, idx):\n gfa_path = os.path.join(path, f'{idx}_asm.bp.p_ctg.gfa')\n asm_path = os.path.join(path, f'{idx}_assembly.fasta')\n contigs = []\n with open(gfa_path) as f:\n n = 0\n for line in f.readlines():\n line = line.strip()\n if line[0] != 'S':\n continue\n seq=Seq.Seq(line.split()[2])\n ctg = SeqIO.SeqRecord(seq, description=f'contig_{n}', id=f'contig_{n}')\n contigs.append(ctg)\n n += 1\n SeqIO.write(contigs, asm_path, 'fasta')\n subprocess.run(f'rm {path}/{idx}_asm*', shell=True)\n # subprocess.run(f'rm {path}/output.csv', shell=True)" } ]
import_statement: import re import os import pickle import subprocess import dgl import graph_parser from dgl.data import DGLDataset from config import get_config from utils import preprocess_graph, add_positional_encoding, extract_contigs
token_num: 2,259
class AssemblyGraphDataset(DGLDataset): def __init__(self, root, assembler, threads=32, generate=False): self.root = os.path.abspath(root) self.assembler = assembler self.threads = threads self.assembly_dir = os.path.join(self.root, self.assembler) # print(self.assembly_dir) if 'raw' not in os.listdir(self.root): subprocess.run(f"mkdir 'raw'", shell=True, cwd=self.root) if 'output' not in os.listdir(self.assembly_dir): subprocess.run(f"mkdir 'output'", shell=True, cwd=self.assembly_dir) if f'processed' not in os.listdir(self.assembly_dir): subprocess.run(f"mkdir 'processed'", shell=True, cwd=self.assembly_dir) if f'info' not in os.listdir(self.assembly_dir): subprocess.run(f"mkdir 'info'", shell=True, cwd=self.assembly_dir) raw_dir = os.path.join(self.root, 'raw') save_dir = os.path.join(self.assembly_dir, f'processed') self.output_dir = os.path.join(self.assembly_dir, f'output') self.info_dir = os.path.join(self.assembly_dir, f'info') config = get_config() raven_dir = config['raven_dir'] self.raven_path = os.path.join(raven_dir, f'build/bin/raven') self.raven_path = os.path.abspath(self.raven_path) hifiasm_dir = config['hifiasm_dir'] self.hifiasm_path = os.path.join(hifiasm_dir, f'hifiasm') self.hifiasm_path = os.path.abspath(self.hifiasm_path) super().__init__(name='assembly_graphs', raw_dir=raw_dir, save_dir=save_dir) self.graph_list = [] if not generate: for file in os.listdir(self.save_dir): idx = int(file[:-4]) graph = dgl.load_graphs(os.path.join(self.save_dir, file))[0][0] graph = preprocess_graph(graph, self.root, idx) graph = add_positional_encoding(graph) print(f'DGL graph idx={idx} info:\n',graph) self.graph_list.append((idx, graph)) self.graph_list.sort(key=lambda x: x[0]) def has_cache(self): """Check if the raw data is already processed and stored.""" raw_files = {int(re.findall(r'(\d+).fast*', raw)[0]) for raw in os.listdir(self.raw_dir)} prc_files = {int(re.findall(r'(\d+).dgl', prc)[0]) for prc in os.listdir(self.save_dir)} return len(raw_files - prc_files) == 0 # set difference def __len__(self): return len(os.listdir(self.save_dir)) def __getitem__(self, idx): i, graph = self.graph_list[idx] return i, graph def process(self): pass class AssemblyGraphDataset_HiFi(AssemblyGraphDataset): def __init__(self, root, assembler='hifiasm', threads=32, generate=False): super().__init__(root=root, assembler=assembler, threads=threads, generate=generate) def process(self): """Process the raw data and save it on the disk.""" assembler = 'hifiasm' assert assembler in ('raven', 'hifiasm'), 'Choose either "raven" or "hifiasm" assembler' graphia_dir = os.path.join(self.assembly_dir, 'graphia') if not os.path.isdir(graphia_dir): os.mkdir(graphia_dir) raw_files = {int(re.findall(r'(\d+).fast*', raw)[0]) for raw in os.listdir(self.raw_dir)} prc_files = {int(re.findall(r'(\d+).dgl', prc)[0]) for prc in os.listdir(self.save_dir)} diff = raw_files - prc_files for cnt, idx in enumerate(diff): fastq = f'{idx}.fasta' if fastq not in os.listdir(self.raw_dir): fastq = f'{idx}.fastq' print(f'\nStep {cnt}: generating graphs for reads in {fastq}') reads_path = os.path.abspath(os.path.join(self.raw_dir, fastq)) print(f'Path to the reads: {reads_path}') print(f'Using assembler: {assembler}\n') # Raven if assembler == 'raven': subprocess.run(f'{self.raven_path} --disable-checkpoints --identity 0.99 -k29 -w9 -t{self.threads} -p0 {reads_path} > {idx}_assembly.fasta', shell=True, cwd=self.output_dir) subprocess.run(f'mv graph_1.gfa {idx}_graph_1.gfa', shell=True, cwd=self.output_dir) gfa_path = 
os.path.join(self.output_dir, f'{idx}_graph_1.gfa') # Hifiasm elif assembler == 'hifiasm': subprocess.run(f'{self.hifiasm_path} --prt-raw -o {idx}_asm -t{self.threads} -l0 {reads_path}', shell=True, cwd=self.output_dir) subprocess.run(f'mv {idx}_asm.bp.raw.r_utg.gfa {idx}_graph_1.gfa', shell=True, cwd=self.output_dir) gfa_path = os.path.join(self.output_dir, f'{idx}_graph_1.gfa')
all_code: identical to the cropped_code block above for this record.
next_line: extract_contigs(self.output_dir, idx)
gold_snippet_index: 3
created_at: 2023-12-08 04:45:45+00:00
level: 4k
repo_name: cubesat-lab/cc1101-spi-analyzer
file_path: HighLevelAnalyzer.py
[ { "identifier": "CC1101SpiProtocol", "path": "CC1101SpiProtocol.py", "snippet": "class CC1101SpiProtocol:\n PROTOCOL_MSG = {\n \"request\": None,\n \"response\": None,\n }\n REQUEST = {\n \"type\": None,\n \"access\": None,\n \"burst\": None,\n \"register\": None,\n \"data\": None,\n \"description\": None,\n \"error\": None,\n }\n RESPONSE = {\n \"status\": None,\n \"data\": None,\n \"error\": None,\n }\n STATUS = {\n \"chip_rdy\": None,\n \"state\": None,\n \"fifo_bytes_available\": None,\n }\n\n def __init__(self):\n pass\n\n def process_frame(self, protocol_frame):\n protocol_msg = deepcopy(self.PROTOCOL_MSG)\n\n if len(protocol_frame) > 0:\n # Interpret Request\n protocol_msg[\"request\"] = self.interpret_request(self.get_mosi_data(protocol_frame))\n\n if self.is_read_access(protocol_frame):\n # Interpret Response\n protocol_msg[\"response\"] = self.interpret_response(self.get_miso_data(protocol_frame))\n return protocol_msg\n\n def is_read_access(self, protocol_frame):\n return (protocol_frame[0][\"mosi\"] & 0x80) != 0\n\n def get_mosi_data(self, protocol_frame):\n return [x[\"mosi\"] for x in protocol_frame]\n\n def get_miso_data(self, protocol_frame):\n return [x[\"miso\"] for x in protocol_frame]\n\n def is_read(self, data_byte):\n return True if (data_byte & 0x80) != 0 else False\n\n def is_write(self, data_byte):\n return True if (data_byte & 0x80) == 0 else False\n\n def is_burst(self, data_byte):\n return True if (data_byte & 0x40) != 0 else False\n\n def interpret_register(self, data_byte):\n register = None\n address = data_byte & 0x3F\n frame_type = None\n error = None\n\n if address < 0x30:\n frame_type = ProtocolFrameType.REGISTER\n register = CONFIG_REGISTERS[address]\n elif address == 0x3E:\n frame_type = ProtocolFrameType.PA_TABLE\n register = MULTI_BYTE_REGISTERS[address]\n elif address == 0x3F:\n frame_type = ProtocolFrameType.FIFO\n register = MULTI_BYTE_REGISTERS[address]\n elif self.is_read(data_byte) and self.is_burst(data_byte):\n frame_type = ProtocolFrameType.STATUS\n register = STATUS_REGISTERS[address]\n elif address <= 0x3D:\n if address != 0x37:\n frame_type = ProtocolFrameType.COMMAND\n register = COMMAND_REGISTERS[address]\n else:\n frame_type = ProtocolFrameType.ERROR\n error = \"Invalid COMMAND\"\n elif address > 0x3D:\n frame_type = ProtocolFrameType.ERROR\n error = \"Invalid ADDRESS\"\n\n return frame_type, register[\"register\"], register[\"description\"], error\n\n def interpret_request(self, data):\n request = deepcopy(self.REQUEST)\n\n # Access mode\n request[\"access\"] = \"W\" if self.is_write(data[0]) else \"R\"\n request[\"burst\"] = \"B\" if self.is_burst(data[0]) else \"\"\n\n # Register address\n request[\"type\"], request[\"register\"], request[\"description\"], request[\"error\"] = self.interpret_register(data[0])\n\n # Data Byte\n if len(data) > 1:\n request[\"data\"] = data[1:]\n\n return request\n\n def interpret_status(self, status_byte):\n status = deepcopy(self.STATUS)\n status[\"chip_rdy\"] = False if (status_byte & 0x80) != 0 else True\n status[\"state\"] = STATE_BITS[(status_byte & 0x70) >> 4][\"state\"]\n status[\"fifo_bytes_available\"] = (status_byte & 0x0F)\n return status\n\n def interpret_response(self, data):\n response = deepcopy(self.RESPONSE)\n\n # Status Byte\n response[\"status\"] = self.interpret_status(data[0])\n\n # Data byte\n response[\"data\"] = data[1:]\n\n return response" }, { "identifier": "ProtocolFrameType", "path": "CC1101SpiProtocol.py", "snippet": "class ProtocolFrameType:\n REGISTER = \"register\"\n 
COMMAND = \"cmd\"\n STATUS = \"status\"\n PA_TABLE = \"pa table\"\n FIFO = \"fifo\"\n ERROR = \"protocol error\"" }, { "identifier": "MARC_STATE", "path": "CC1101SpiProtocol.py", "snippet": "MARC_STATE = {\n # Value: {State name, State (Figure 25, page 50) }\n 0x00: {\"state_name\": \"SLEEP\", \"state\": \"SLEEP\"},\n 0x01: {\"state_name\": \"IDLE\", \"state\": \"IDLE\"},\n 0x02: {\"state_name\": \"XOFF\", \"state\": \"XOFF\"},\n 0x03: {\"state_name\": \"VCOON_MC\", \"state\": \"MANCAL\"},\n 0x04: {\"state_name\": \"REGON_MC\", \"state\": \"MANCAL\"},\n 0x05: {\"state_name\": \"MANCAL\", \"state\": \"MANCAL\"},\n 0x06: {\"state_name\": \"VCOON\", \"state\": \"FS_WAKEUP\"},\n 0x07: {\"state_name\": \"REGON\", \"state\": \"FS_WAKEUP\"},\n 0x08: {\"state_name\": \"STARTCAL\", \"state\": \"CALIBRATE\"},\n 0x09: {\"state_name\": \"BWBOOST\", \"state\": \"SETTLING\"},\n 0x0A: {\"state_name\": \"FS_LOCK\", \"state\": \"SETTLING\"},\n 0x0B: {\"state_name\": \"IFADCON\", \"state\": \"SETTLING\"},\n 0x0C: {\"state_name\": \"ENDCAL\", \"state\": \"CALIBRATE\"},\n 0x0D: {\"state_name\": \"RX\", \"state\": \"RX\"},\n 0x0E: {\"state_name\": \"RX_END\", \"state\": \"RX\"},\n 0x0F: {\"state_name\": \"RX_RST\", \"state\": \"RX\"},\n 0x10: {\"state_name\": \"TXRX_SWITCH\", \"state\": \"TXRX_SETTLING\"},\n 0x11: {\"state_name\": \"RXFIFO_OVERFLOW\", \"state\": \"RXFIFO_OVERFLOW\"},\n 0x12: {\"state_name\": \"FSTXON\", \"state\": \"FSTXON\"},\n 0x13: {\"state_name\": \"TX\", \"state\": \"TX\"},\n 0x14: {\"state_name\": \"TX_END\", \"state\": \"TX\"},\n 0x15: {\"state_name\": \"RXTX_SWITCH\", \"state\": \"RXTX_SETTLING\"},\n 0x16: {\"state_name\": \"TXFIFO_UNDERFLOW\", \"state\": \"TXFIFO_UNDERFLOW\"},\n}" } ]
import_statement: from saleae.analyzers import HighLevelAnalyzer, AnalyzerFrame, StringSetting, NumberSetting, ChoicesSetting from copy import deepcopy from CC1101SpiProtocol import CC1101SpiProtocol, ProtocolFrameType, MARC_STATE
token_num: 2,259
# High Level Analyzer # For more information and documentation, please go to https://support.saleae.com/extensions/high-level-analyzer-extensions SPI_DATA_FRAME = {"mosi": 0, "miso": 0} class SpiFrameType: error = "error" enable = "enable" disable = "disable" result = "result" class SpiFrameState: idle = 0 start = 1 active = 2 end = 3 error = 4 # High level analyzers must subclass the HighLevelAnalyzer class. class Hla(HighLevelAnalyzer): # List of settings that a user can set for this High Level Analyzer. # TODO Check the String/Number/Choice settings # my_string_setting = StringSetting() # my_number_setting = NumberSetting(min_value=0, max_value=100) # my_choices_setting = ChoicesSetting(choices=('A', 'B')) # An optional list of types this analyzer produces, providing a way to customize the way frames are displayed in Logic 2. result_types = { 'spi error': { 'format': 'Error: {{type}}' }, ProtocolFrameType.ERROR: { 'format': 'Error: {{type}} | {{data.error_details}}' }, ProtocolFrameType.REGISTER: { 'format': 'Register: {{data.access}} | {{data.register}} = {{data.focus_data}}' }, ProtocolFrameType.COMMAND: { 'format': 'Command: {{data.register}}' }, ProtocolFrameType.STATUS: { 'format': 'Status: {{data.register}} = {{data.focus_data}}' }, ProtocolFrameType.PA_TABLE: { 'format': 'PA Table: {{data.access}} = {{data.focus_data}}' }, ProtocolFrameType.FIFO: { 'format': 'FIFO: {{data.access}} = {{data.focus_data}}' }, } def __init__(self): ''' Initialize HLA. Settings can be accessed using the same name used above. ''' self.state = SpiFrameState.idle self.spi_frame_queue = []
all_code: identical to the cropped_code block above for this record.
next_line: self.protocol = CC1101SpiProtocol()
gold_snippet_index: 0
created_at: 2023-12-10 22:55:07+00:00
level: 4k
repo_name: Deltares/imod-python
file_path: imod/tests/test_mf6/test_utilities/test_schemata_utilities.py
[ { "identifier": "River", "path": "imod/mf6/riv.py", "snippet": "class River(BoundaryCondition):\n \"\"\"\n River package.\n Any number of RIV Packages can be specified for a single groundwater flow\n model.\n https://water.usgs.gov/water-resources/software/MODFLOW-6/mf6io_6.0.4.pdf#page=71\n\n Parameters\n ----------\n stage: array of floats (xr.DataArray)\n is the head in the river.\n conductance: array of floats (xr.DataArray)\n is the riverbed hydraulic conductance.\n bottom_elevation: array of floats (xr.DataArray)\n is the elevation of the bottom of the riverbed.\n concentration: array of floats (xr.DataArray, optional)\n if this flow package is used in simulations also involving transport, then this array is used\n as the concentration for inflow over this boundary.\n concentration_boundary_type: ({\"AUX\", \"AUXMIXED\"}, optional)\n if this flow package is used in simulations also involving transport, then this keyword specifies\n how outflow over this boundary is computed.\n print_input: ({True, False}, optional)\n keyword to indicate that the list of river information will be written\n to the listing file immediately after it is read. Default is False.\n print_flows: ({True, False}, optional)\n Indicates that the list of river flow rates will be printed to the\n listing file for every stress period time step in which \"BUDGET PRINT\"\n is specified in Output Control. If there is no Output Control option and\n PRINT FLOWS is specified, then flow rates are printed for the last time\n step of each stress period. Default is False.\n save_flows: ({True, False}, optional)\n Indicates that river flow terms will be written to the file specified\n with \"BUDGET FILEOUT\" in Output Control. Default is False.\n observations: [Not yet supported.]\n Default is None.\n validate: {True, False}\n Flag to indicate whether the package should be validated upon\n initialization. This raises a ValidationError if package input is\n provided in the wrong manner. Defaults to True.\n repeat_stress: Optional[xr.DataArray] of datetimes\n Used to repeat data for e.g. repeating stress periods such as\n seasonality without duplicating the values. The DataArray should have\n dimensions ``(\"repeat\", \"repeat_items\")``. The ``repeat_items``\n dimension should have size 2: the first value is the \"key\", the second\n value is the \"value\". For the \"key\" datetime, the data of the \"value\"\n datetime will be used. 
Can also be set with a dictionary using the\n ``set_repeat_stress`` method.\n \"\"\"\n\n _pkg_id = \"riv\"\n _period_data = (\"stage\", \"conductance\", \"bottom_elevation\")\n _keyword_map = {}\n\n _init_schemata = {\n \"stage\": [\n DTypeSchema(np.floating),\n IndexesSchema(),\n CoordsSchema((\"layer\",)),\n BOUNDARY_DIMS_SCHEMA,\n ],\n \"conductance\": [\n DTypeSchema(np.floating),\n IndexesSchema(),\n CoordsSchema((\"layer\",)),\n BOUNDARY_DIMS_SCHEMA,\n ],\n \"bottom_elevation\": [\n DTypeSchema(np.floating),\n IndexesSchema(),\n CoordsSchema((\"layer\",)),\n BOUNDARY_DIMS_SCHEMA,\n ],\n \"concentration\": [\n DTypeSchema(np.floating),\n IndexesSchema(),\n CoordsSchema(\n (\n \"species\",\n \"layer\",\n )\n ),\n CONC_DIMS_SCHEMA,\n ],\n \"print_input\": [DTypeSchema(np.bool_), DimsSchema()],\n \"print_flows\": [DTypeSchema(np.bool_), DimsSchema()],\n \"save_flows\": [DTypeSchema(np.bool_), DimsSchema()],\n }\n _write_schemata = {\n \"stage\": [\n AllValueSchema(\">=\", \"bottom_elevation\"),\n OtherCoordsSchema(\"idomain\"),\n AllNoDataSchema(), # Check for all nan, can occur while clipping\n AllInsideNoDataSchema(other=\"idomain\", is_other_notnull=(\">\", 0)),\n ],\n \"conductance\": [IdentityNoDataSchema(\"stage\"), AllValueSchema(\">\", 0.0)],\n \"bottom_elevation\": [\n IdentityNoDataSchema(\"stage\"),\n # Check river bottom above layer bottom, else Modflow throws error.\n AllValueSchema(\">\", \"bottom\"),\n ],\n \"concentration\": [IdentityNoDataSchema(\"stage\"), AllValueSchema(\">=\", 0.0)],\n }\n\n _template = BoundaryCondition._initialize_template(_pkg_id)\n _auxiliary_data = {\"concentration\": \"species\"}\n\n _regrid_method = {\n \"stage\": (RegridderType.OVERLAP, \"mean\"),\n \"conductance\": (RegridderType.RELATIVEOVERLAP, \"conductance\"),\n \"bottom_elevation\": (RegridderType.OVERLAP, \"mean\"),\n \"concentration\": (RegridderType.OVERLAP, \"mean\"),\n }\n\n def __init__(\n self,\n stage,\n conductance,\n bottom_elevation,\n concentration=None,\n concentration_boundary_type=\"aux\",\n print_input=False,\n print_flows=False,\n save_flows=False,\n observations=None,\n validate: bool = True,\n repeat_stress=None,\n ):\n super().__init__(locals())\n self.dataset[\"stage\"] = stage\n self.dataset[\"conductance\"] = conductance\n self.dataset[\"bottom_elevation\"] = bottom_elevation\n if concentration is not None:\n self.dataset[\"concentration\"] = concentration\n self.dataset[\"concentration_boundary_type\"] = concentration_boundary_type\n add_periodic_auxiliary_variable(self)\n self.dataset[\"print_input\"] = print_input\n self.dataset[\"print_flows\"] = print_flows\n self.dataset[\"save_flows\"] = save_flows\n self.dataset[\"observations\"] = observations\n self.dataset[\"repeat_stress\"] = repeat_stress\n self._validate_init_schemata(validate)\n\n def _validate(self, schemata, **kwargs):\n # Insert additional kwargs\n kwargs[\"stage\"] = self[\"stage\"]\n kwargs[\"bottom_elevation\"] = self[\"bottom_elevation\"]\n errors = super()._validate(schemata, **kwargs)\n\n return errors" }, { "identifier": "filter_schemata_dict", "path": "imod/mf6/utilities/schemata.py", "snippet": "def filter_schemata_dict(\n schemata_dict: dict[str, list[BaseSchema]], schema_types: tuple[type[BaseSchema]]\n) -> dict[str, list[BaseSchema]]:\n \"\"\"\n Filter schemata dict with a tuple of schema types. Keys which do not have\n provided types in their corresponding schema list are dropped. 
The schema\n list in the values is reduced to contain the schema_types only.\n\n Example\n -------\n >>> _write_schemata = {\n \"stage\": [\n AllValueSchema(\">=\", \"bottom_elevation\"),\n OtherCoordsSchema(\"idomain\"),\n AllNoDataSchema(), # Check for all nan, can occur while clipping\n AllInsideNoDataSchema(other=\"idomain\", is_other_notnull=(\">\", 0)),\n ],\n \"conductance\": [IdentityNoDataSchema(\"stage\"), AllValueSchema(\">\", 0.0)],\n }\n\n >>> print(filter_schemata_dict(write_schemata, (AllNoDataSchema())))\n\n Prints ``{'stage': [<imod.schemata.AllNoDataSchema at 0x1b152b12aa0>]}``\n \"\"\"\n\n dict = {}\n for key, schema_ls in schemata_dict.items():\n schema_match = [\n schema for schema in schema_ls if isinstance(schema, schema_types)\n ]\n if schema_match:\n dict[key] = schema_match\n return dict" }, { "identifier": "AllNoDataSchema", "path": "imod/schemata.py", "snippet": "class AllNoDataSchema(NoDataSchema):\n \"\"\"\n Fails when all data is NoData.\n \"\"\"\n\n def validate(self, obj: Union[xr.DataArray, xu.UgridDataArray], **kwargs):\n valid = self.is_notnull(obj)\n if ~valid.any():\n raise ValidationError(\"all nodata\")" }, { "identifier": "IdentityNoDataSchema", "path": "imod/schemata.py", "snippet": "class IdentityNoDataSchema(NoDataComparisonSchema):\n \"\"\"\n Checks that the NoData values are located at exactly the same locations.\n\n Tests only if if all dimensions of the other object are present in the\n object. So tests if \"stage\" with `{time, layer, y, x}` compared to \"idomain\"\n `{layer, y, x}` but doesn't test if \"k\" with `{layer}` is comperated to\n \"idomain\" `{layer, y, x}`\n \"\"\"\n\n def validate(self, obj: Union[xr.DataArray, xu.UgridDataArray], **kwargs):\n other_obj = kwargs[self.other]\n\n # Only test if object has all dimensions in other object.\n missing_dims = set(other_obj.dims) - set(obj.dims)\n\n if len(missing_dims) == 0:\n valid = self.is_notnull(obj)\n other_valid = self.is_other_notnull(other_obj)\n if (valid ^ other_valid).any():\n raise ValidationError(f\"nodata is not aligned with {self.other}\")" }, { "identifier": "IndexesSchema", "path": "imod/schemata.py", "snippet": "class IndexesSchema(EmptyIndexesSchema):\n \"\"\"\n Verify indexes, check if no dims with zero size are included and that\n indexes are monotonic. Skips unstructured grid dimensions.\n \"\"\"\n\n def __init__(self) -> None:\n pass\n\n def validate(self, obj: Union[xr.DataArray, xu.UgridDataArray], **kwargs) -> None:\n # Test if indexes all empty\n super().validate(obj)\n\n dims_to_validate = self.get_dims_to_validate(obj)\n\n for dim in dims_to_validate:\n if dim == \"y\":\n if not obj.indexes[dim].is_monotonic_decreasing:\n raise ValidationError(\n f\"coord {dim} which is not monotonically decreasing\"\n )\n\n else:\n if not obj.indexes[dim].is_monotonic_increasing:\n raise ValidationError(\n f\"coord {dim} which is not monotonically increasing\"\n )" } ]
import_statement: from pytest_cases import parametrize_with_cases from imod.mf6.riv import River from imod.mf6.utilities.schemata import filter_schemata_dict from imod.schemata import AllNoDataSchema, IdentityNoDataSchema, IndexesSchema
token_num: 2,864
class CasesFilteredSchemata: def case_empty(self): schemata = {} arg = (AllNoDataSchema,) expected = {} return schemata, arg, expected def case_river_allnodata(self): schemata = River._write_schemata arg = (AllNoDataSchema,) expected = {"stage": [AllNoDataSchema()]} return schemata, arg, expected def case_river_allnodata_identitynodataschema(self): schemata = River._write_schemata arg = (AllNoDataSchema, IdentityNoDataSchema) expected = { "stage": [AllNoDataSchema()], "conductance": [IdentityNoDataSchema("stage")], "bottom_elevation": [IdentityNoDataSchema("stage")], "concentration": [IdentityNoDataSchema("stage")], } return schemata, arg, expected def case_river_not_found(self): # IndexesSchema part of _init_schemata, so should not be in # _write_schemata. schemata = River._write_schemata arg = (IndexesSchema,) expected = {} return schemata, arg, expected @parametrize_with_cases(("schemata", "arg", "expected"), cases=CasesFilteredSchemata) def test_filter_schemata_dict(schemata, arg, expected): # Act
all_code: identical to the cropped_code block above for this record.
next_line: filtered_dict = filter_schemata_dict(schemata, arg)
gold_snippet_index: 1
created_at: 2023-12-08 13:57:59+00:00
level: 4k
repo_name: YoungJeansKR/Llama2-Ko-Chatbot
file_path: llama/generation.py
[ { "identifier": "ModelArgs", "path": "llama/model.py", "snippet": "class ModelArgs:\n dim: int = 4096\n n_layers: int = 32\n n_heads: int = 32\n n_kv_heads: Optional[int] = None\n vocab_size: int = -1 # defined later by tokenizer\n multiple_of: int = 256 # make SwiGLU hidden layer size multiple of large power of 2\n ffn_dim_multiplier: Optional[float] = None\n norm_eps: float = 1e-5\n\n max_batch_size: int = 32\n max_seq_len: int = 2048" }, { "identifier": "Transformer", "path": "llama/model.py", "snippet": "class Transformer(nn.Module):\n def __init__(self, params: ModelArgs):\n \"\"\"\n Initialize a Transformer model.\n\n Args:\n params (ModelArgs): Model configuration parameters.\n\n Attributes:\n params (ModelArgs): Model configuration parameters.\n vocab_size (int): Vocabulary size.\n n_layers (int): Number of layers in the model.\n tok_embeddings (ParallelEmbedding): Token embeddings.\n layers (torch.nn.ModuleList): List of Transformer blocks.\n norm (RMSNorm): Layer normalization for the model output.\n output (ColumnParallelLinear): Linear layer for final output.\n freqs_cis (torch.Tensor): Precomputed cosine and sine frequencies.\n\n \"\"\"\n super().__init__()\n self.params = params\n self.vocab_size = params.vocab_size\n self.n_layers = params.n_layers\n\n self.tok_embeddings = ParallelEmbedding(\n params.vocab_size, params.dim, init_method=lambda x: x\n )\n\n self.layers = torch.nn.ModuleList()\n for layer_id in range(params.n_layers):\n self.layers.append(TransformerBlock(layer_id, params))\n\n self.norm = RMSNorm(params.dim, eps=params.norm_eps)\n self.output = ColumnParallelLinear(\n params.dim, params.vocab_size, bias=False, init_method=lambda x: x\n )\n\n self.freqs_cis = precompute_freqs_cis(\n # Note that self.params.max_seq_len is multiplied by 2 because the token limit for the Llama 2 generation of models is 4096. \n # Adding this multiplier instead of using 4096 directly allows for dynamism of token lengths while training or fine-tuning.\n self.params.dim // self.params.n_heads, self.params.max_seq_len * 2\n )\n\n @torch.inference_mode()\n def forward(self, tokens: torch.Tensor, start_pos: int):\n \"\"\"\n Perform a forward pass through the Transformer model.\n\n Args:\n tokens (torch.Tensor): Input token indices.\n start_pos (int): Starting position for attention caching.\n\n Returns:\n torch.Tensor: Output logits after applying the Transformer model.\n\n \"\"\"\n _bsz, seqlen = tokens.shape\n h = self.tok_embeddings(tokens)\n self.freqs_cis = self.freqs_cis.to(h.device)\n freqs_cis = self.freqs_cis[start_pos : start_pos + seqlen]\n\n mask = None\n if seqlen > 1:\n mask = torch.full(\n (seqlen, seqlen), float(\"-inf\"), device=tokens.device\n )\n\n mask = torch.triu(mask, diagonal=1)\n\n # When performing key-value caching, we compute the attention scores\n # only for the new sequence. 
Thus, the matrix of scores is of size\n # (seqlen, cache_len + seqlen), and the only masked entries are (i, j) for\n # j > cache_len + i, since row i corresponds to token cache_len + i.\n mask = torch.hstack([\n torch.zeros((seqlen, start_pos), device=tokens.device),\n mask\n ]).type_as(h)\n\n for layer in self.layers:\n h = layer(h, start_pos, freqs_cis, mask)\n h = self.norm(h)\n output = self.output(h).float()\n return output" }, { "identifier": "Tokenizer", "path": "llama/tokenizer.py", "snippet": "class Tokenizer:\n \"\"\"tokenizing and encoding/decoding text using SentencePiece.\"\"\"\n def __init__(self, model_path: str):\n \"\"\"\n Initializes the Tokenizer with a SentencePiece model.\n\n Args:\n model_path (str): The path to the SentencePiece model file.\n \"\"\"\n # reload tokenizer\n assert os.path.isfile(model_path), model_path\n self.sp_model = SentencePieceProcessor(model_file=model_path)\n logger.info(f\"Reloaded SentencePiece model from {model_path}\")\n\n # BOS / EOS token IDs\n self.n_words: int = self.sp_model.vocab_size()\n self.bos_id: int = self.sp_model.bos_id()\n self.eos_id: int = self.sp_model.eos_id()\n self.pad_id: int = self.sp_model.pad_id()\n logger.info(\n f\"#words: {self.n_words} - BOS ID: {self.bos_id} - EOS ID: {self.eos_id}\"\n )\n assert self.sp_model.vocab_size() == self.sp_model.get_piece_size()\n\n def encode(self, s: str, bos: bool, eos: bool) -> List[int]:\n \"\"\"\n Encodes a string into a list of token IDs.\n\n Args:\n s (str): The input string to be encoded.\n bos (bool): Whether to prepend the beginning-of-sequence token.\n eos (bool): Whether to append the end-of-sequence token.\n\n Returns:\n List[int]: A list of token IDs.\n \"\"\"\n assert type(s) is str\n t = self.sp_model.encode(s)\n if bos:\n t = [self.bos_id] + t\n if eos:\n t = t + [self.eos_id]\n return t\n\n def decode(self, t: List[int]) -> str:\n \"\"\"\n Decodes a list of token IDs into a string.\n\n Args:\n t (List[int]): The list of token IDs to be decoded.\n\n Returns:\n str: The decoded string.\n \"\"\"\n return self.sp_model.decode(t)" } ]
import_statement: import json import os import sys import time import torch import torch.nn.functional as F from pathlib import Path from typing import List, Literal, Optional, Tuple, TypedDict from fairscale.nn.model_parallel.initialize import ( get_model_parallel_rank, initialize_model_parallel, model_parallel_is_initialized, ) from llama.model import ModelArgs, Transformer from llama.tokenizer import Tokenizer
token_num: 2,317
# Copyright (c) Meta Platforms, Inc. and affiliates. # This software may be used and distributed according to the terms of the Llama 2 Community License Agreement. Role = Literal["system", "user", "assistant"] class Message(TypedDict): role: Role content: str class CompletionPrediction(TypedDict, total=False): generation: str tokens: List[str] # not required logprobs: List[float] # not required class ChatPrediction(TypedDict, total=False): generation: Message tokens: List[str] # not required logprobs: List[float] # not required Dialog = List[Message] B_INST, E_INST = "[INST]", "[/INST]" B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n" SPECIAL_TAGS = [B_INST, E_INST, "<<SYS>>", "<</SYS>>"] UNSAFE_ERROR = "Error: special tags are not allowed as part of the prompt." class Llama: @staticmethod def build( ckpt_dir: str, tokenizer_path: str, max_seq_len: int, max_batch_size: int, model_parallel_size: Optional[int] = None, seed: int = 1, ) -> "Llama": """ Build a Llama instance by initializing and loading a pre-trained model. Args: ckpt_dir (str): Path to the directory containing checkpoint files. tokenizer_path (str): Path to the tokenizer file. max_seq_len (int): Maximum sequence length for input text. max_batch_size (int): Maximum batch size for inference. model_parallel_size (Optional[int], optional): Number of model parallel processes. If not provided, it's determined from the environment. Defaults to None. Returns: Llama: An instance of the Llama class with the loaded model and tokenizer. Raises: AssertionError: If there are no checkpoint files in the specified directory, or if the model parallel size does not match the number of checkpoint files. Note: This method initializes the distributed process group, sets the device to CUDA, and loads the pre-trained model and tokenizer. """ if not torch.distributed.is_initialized(): torch.distributed.init_process_group("nccl") if not model_parallel_is_initialized(): if model_parallel_size is None: model_parallel_size = int(os.environ.get("WORLD_SIZE", 1)) initialize_model_parallel(model_parallel_size) local_rank = int(os.environ.get("LOCAL_RANK", 0)) torch.cuda.set_device(local_rank) # seed must be the same in all processes torch.manual_seed(seed) if local_rank > 0: sys.stdout = open(os.devnull, "w") start_time = time.time() checkpoints = sorted(Path(ckpt_dir).glob("*.pth")) assert len(checkpoints) > 0, f"no checkpoint files found in {ckpt_dir}" assert model_parallel_size == len( checkpoints ), f"Loading a checkpoint for MP={len(checkpoints)} but world size is {model_parallel_size}" ckpt_path = checkpoints[get_model_parallel_rank()] checkpoint = torch.load(ckpt_path, map_location="cpu") with open(Path(ckpt_dir) / "params.json", "r") as f: params = json.loads(f.read()) model_args: ModelArgs = ModelArgs( max_seq_len=max_seq_len, max_batch_size=max_batch_size, **params, )
all_code: identical to the cropped_code block above for this record.
next_line: tokenizer = Tokenizer(model_path=tokenizer_path)
gold_snippet_index: 2
created_at: 2023-12-08 09:37:42+00:00
level: 4k
repo_name: Dong142857/Live3DPortrait
file_path: models/eg3d/networks_stylegan2.py
[ { "identifier": "misc", "path": "torch_utils/misc.py", "snippet": "def constant(value, shape=None, dtype=None, device=None, memory_format=None):\n def nan_to_num(input, nan=0.0, posinf=None, neginf=None, *, out=None): # pylint: disable=redefined-builtin\ndef suppress_tracer_warnings():\ndef assert_shape(tensor, ref_shape):\ndef profiled_function(fn):\n def decorator(*args, **kwargs):\n def __init__(self, dataset, rank=0, num_replicas=1, shuffle=True, seed=0, window_size=0.5):\n def __iter__(self):\ndef params_and_buffers(module):\ndef named_params_and_buffers(module):\ndef copy_params_and_buffers(src_module, dst_module, require_all=False):\ndef ddp_sync(module, sync):\ndef check_ddp_consistency(module, ignore_regex=None):\ndef print_module_summary(module, inputs, max_nesting=3, skip_redundant=True):\n def pre_hook(_mod, _inputs):\n def post_hook(mod, _inputs, outputs):\nclass InfiniteSampler(torch.utils.data.Sampler):" }, { "identifier": "persistence", "path": "torch_utils/persistence.py", "snippet": "def persistent_class(orig_class):\n def __init__(self, *args, **kwargs):\n def init_args(self):\n def init_kwargs(self):\n def __reduce__(self):\ndef is_persistent(obj):\ndef import_hook(hook):\ndef _reconstruct_persistent_obj(meta):\ndef _module_to_src(module):\ndef _src_to_module(src):\ndef _check_pickleable(obj):\n def recurse(obj):\n class Decorator(orig_class):" }, { "identifier": "conv2d_resample", "path": "torch_utils/ops/conv2d_resample.py", "snippet": "@misc.profiled_function\ndef conv2d_resample(x, w, f=None, up=1, down=1, padding=0, groups=1, flip_weight=True, flip_filter=False):\n r\"\"\"2D convolution with optional up/downsampling.\n\n Padding is performed only once at the beginning, not between the operations.\n\n Args:\n x: Input tensor of shape\n `[batch_size, in_channels, in_height, in_width]`.\n w: Weight tensor of shape\n `[out_channels, in_channels//groups, kernel_height, kernel_width]`.\n f: Low-pass filter for up/downsampling. Must be prepared beforehand by\n calling upfirdn2d.setup_filter(). None = identity (default).\n up: Integer upsampling factor (default: 1).\n down: Integer downsampling factor (default: 1).\n padding: Padding with respect to the upsampled image. 
Can be a single number\n or a list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]`\n (default: 0).\n groups: Split input channels into N groups (default: 1).\n flip_weight: False = convolution, True = correlation (default: True).\n flip_filter: False = convolution, True = correlation (default: False).\n\n Returns:\n Tensor of the shape `[batch_size, num_channels, out_height, out_width]`.\n \"\"\"\n # Validate arguments.\n assert isinstance(x, torch.Tensor) and (x.ndim == 4)\n assert isinstance(w, torch.Tensor) and (w.ndim == 4) and (w.dtype == x.dtype)\n assert f is None or (isinstance(f, torch.Tensor) and f.ndim in [1, 2] and f.dtype == torch.float32)\n assert isinstance(up, int) and (up >= 1)\n assert isinstance(down, int) and (down >= 1)\n assert isinstance(groups, int) and (groups >= 1)\n out_channels, in_channels_per_group, kh, kw = _get_weight_shape(w)\n fw, fh = _get_filter_size(f)\n px0, px1, py0, py1 = _parse_padding(padding)\n\n # Adjust padding to account for up/downsampling.\n if up > 1:\n px0 += (fw + up - 1) // 2\n px1 += (fw - up) // 2\n py0 += (fh + up - 1) // 2\n py1 += (fh - up) // 2\n if down > 1:\n px0 += (fw - down + 1) // 2\n px1 += (fw - down) // 2\n py0 += (fh - down + 1) // 2\n py1 += (fh - down) // 2\n\n # Fast path: 1x1 convolution with downsampling only => downsample first, then convolve.\n if kw == 1 and kh == 1 and (down > 1 and up == 1):\n x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, padding=[px0,px1,py0,py1], flip_filter=flip_filter)\n x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight)\n return x\n\n # Fast path: 1x1 convolution with upsampling only => convolve first, then upsample.\n if kw == 1 and kh == 1 and (up > 1 and down == 1):\n x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight)\n x = upfirdn2d.upfirdn2d(x=x, f=f, up=up, padding=[px0,px1,py0,py1], gain=up**2, flip_filter=flip_filter)\n return x\n\n # Fast path: downsampling only => use strided convolution.\n if down > 1 and up == 1:\n x = upfirdn2d.upfirdn2d(x=x, f=f, padding=[px0,px1,py0,py1], flip_filter=flip_filter)\n x = _conv2d_wrapper(x=x, w=w, stride=down, groups=groups, flip_weight=flip_weight)\n return x\n\n # Fast path: upsampling with optional downsampling => use transpose strided convolution.\n if up > 1:\n if groups == 1:\n w = w.transpose(0, 1)\n else:\n w = w.reshape(groups, out_channels // groups, in_channels_per_group, kh, kw)\n w = w.transpose(1, 2)\n w = w.reshape(groups * in_channels_per_group, out_channels // groups, kh, kw)\n px0 -= kw - 1\n px1 -= kw - up\n py0 -= kh - 1\n py1 -= kh - up\n pxt = max(min(-px0, -px1), 0)\n pyt = max(min(-py0, -py1), 0)\n x = _conv2d_wrapper(x=x, w=w, stride=up, padding=[pyt,pxt], groups=groups, transpose=True, flip_weight=(not flip_weight))\n x = upfirdn2d.upfirdn2d(x=x, f=f, padding=[px0+pxt,px1+pxt,py0+pyt,py1+pyt], gain=up**2, flip_filter=flip_filter)\n if down > 1:\n x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, flip_filter=flip_filter)\n return x\n\n # Fast path: no up/downsampling, padding supported by the underlying implementation => use plain conv2d.\n if up == 1 and down == 1:\n if px0 == px1 and py0 == py1 and px0 >= 0 and py0 >= 0:\n return _conv2d_wrapper(x=x, w=w, padding=[py0,px0], groups=groups, flip_weight=flip_weight)\n\n # Fallback: Generic reference implementation.\n x = upfirdn2d.upfirdn2d(x=x, f=(f if up > 1 else None), up=up, padding=[px0,px1,py0,py1], gain=up**2, flip_filter=flip_filter)\n x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight)\n if down > 1:\n x 
= upfirdn2d.upfirdn2d(x=x, f=f, down=down, flip_filter=flip_filter)\n return x" }, { "identifier": "upfirdn2d", "path": "torch_utils/ops/upfirdn2d.py", "snippet": "def upfirdn2d(x, f, up=1, down=1, padding=0, flip_filter=False, gain=1, impl='cuda'):\n r\"\"\"Pad, upsample, filter, and downsample a batch of 2D images.\n\n Performs the following sequence of operations for each channel:\n\n 1. Upsample the image by inserting N-1 zeros after each pixel (`up`).\n\n 2. Pad the image with the specified number of zeros on each side (`padding`).\n Negative padding corresponds to cropping the image.\n\n 3. Convolve the image with the specified 2D FIR filter (`f`), shrinking it\n so that the footprint of all output pixels lies within the input image.\n\n 4. Downsample the image by keeping every Nth pixel (`down`).\n\n This sequence of operations bears close resemblance to scipy.signal.upfirdn().\n The fused op is considerably more efficient than performing the same calculation\n using standard PyTorch ops. It supports gradients of arbitrary order.\n\n Args:\n x: Float32/float64/float16 input tensor of the shape\n `[batch_size, num_channels, in_height, in_width]`.\n f: Float32 FIR filter of the shape\n `[filter_height, filter_width]` (non-separable),\n `[filter_taps]` (separable), or\n `None` (identity).\n up: Integer upsampling factor. Can be a single int or a list/tuple\n `[x, y]` (default: 1).\n down: Integer downsampling factor. Can be a single int or a list/tuple\n `[x, y]` (default: 1).\n padding: Padding with respect to the upsampled image. Can be a single number\n or a list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]`\n (default: 0).\n flip_filter: False = convolution, True = correlation (default: False).\n gain: Overall scaling factor for signal magnitude (default: 1).\n impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`).\n\n Returns:\n Tensor of the shape `[batch_size, num_channels, out_height, out_width]`.\n \"\"\"\n assert isinstance(x, torch.Tensor)\n assert impl in ['ref', 'cuda']\n if impl == 'cuda' and x.device.type == 'cuda' and _init():\n return _upfirdn2d_cuda(up=up, down=down, padding=padding, flip_filter=flip_filter, gain=gain).apply(x, f)\n return _upfirdn2d_ref(x, f, up=up, down=down, padding=padding, flip_filter=flip_filter, gain=gain)" }, { "identifier": "bias_act", "path": "torch_utils/ops/bias_act.py", "snippet": "def bias_act(x, b=None, dim=1, act='linear', alpha=None, gain=None, clamp=None, impl='cuda'):\n r\"\"\"Fused bias and activation function.\n\n Adds bias `b` to activation tensor `x`, evaluates activation function `act`,\n and scales the result by `gain`. Each of the steps is optional. In most cases,\n the fused op is considerably more efficient than performing the same calculation\n using standard PyTorch ops. It supports first and second order gradients,\n but not third order gradients.\n\n Args:\n x: Input activation tensor. Can be of any shape.\n b: Bias vector, or `None` to disable. Must be a 1D tensor of the same type\n as `x`. The shape must be known, and it must match the dimension of `x`\n corresponding to `dim`.\n dim: The dimension in `x` corresponding to the elements of `b`.\n The value of `dim` is ignored if `b` is not specified.\n act: Name of the activation function to evaluate, or `\"linear\"` to disable.\n Can be e.g. `\"relu\"`, `\"lrelu\"`, `\"tanh\"`, `\"sigmoid\"`, `\"swish\"`, etc.\n See `activation_funcs` for a full list. 
`None` is not allowed.\n alpha: Shape parameter for the activation function, or `None` to use the default.\n gain: Scaling factor for the output tensor, or `None` to use default.\n See `activation_funcs` for the default scaling of each activation function.\n If unsure, consider specifying 1.\n clamp: Clamp the output values to `[-clamp, +clamp]`, or `None` to disable\n the clamping (default).\n impl: Name of the implementation to use. Can be `\"ref\"` or `\"cuda\"` (default).\n\n Returns:\n Tensor of the same shape and datatype as `x`.\n \"\"\"\n assert isinstance(x, torch.Tensor)\n assert impl in ['ref', 'cuda']\n if impl == 'cuda' and x.device.type == 'cuda' and _init():\n return _bias_act_cuda(dim=dim, act=act, alpha=alpha, gain=gain, clamp=clamp).apply(x, b)\n return _bias_act_ref(x=x, b=b, dim=dim, act=act, alpha=alpha, gain=gain, clamp=clamp)" }, { "identifier": "fma", "path": "torch_utils/ops/fma.py", "snippet": "def fma(a, b, c): # => a * b + c\n return _FusedMultiplyAdd.apply(a, b, c)" } ]
import numpy as np import torch from torch_utils import misc from torch_utils import persistence from torch_utils.ops import conv2d_resample from torch_utils.ops import upfirdn2d from torch_utils.ops import bias_act from torch_utils.ops import fma
3,518
# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: LicenseRef-NvidiaProprietary # # NVIDIA CORPORATION, its affiliates and licensors retain all intellectual # property and proprietary rights in and to this material, related # documentation and any modifications thereto. Any use, reproduction, # disclosure or distribution of this material and related documentation # without an express license agreement from NVIDIA CORPORATION or # its affiliates is strictly prohibited. """Network architectures from the paper "Analyzing and Improving the Image Quality of StyleGAN". Matches the original implementation of configs E-F by Karras et al. at https://github.com/NVlabs/stylegan2/blob/master/training/networks_stylegan2.py""" #----------------------------------------------------------------------------
# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: LicenseRef-NvidiaProprietary # # NVIDIA CORPORATION, its affiliates and licensors retain all intellectual # property and proprietary rights in and to this material, related # documentation and any modifications thereto. Any use, reproduction, # disclosure or distribution of this material and related documentation # without an express license agreement from NVIDIA CORPORATION or # its affiliates is strictly prohibited. """Network architectures from the paper "Analyzing and Improving the Image Quality of StyleGAN". Matches the original implementation of configs E-F by Karras et al. at https://github.com/NVlabs/stylegan2/blob/master/training/networks_stylegan2.py""" #----------------------------------------------------------------------------
@misc.profiled_function
0
2023-12-09 15:18:53+00:00
4k
blaise-tk/RVC_CLI
rvc/train/preprocess/preprocess.py
[ { "identifier": "load_audio", "path": "rvc/lib/utils.py", "snippet": "def load_audio(file, sampling_rate):\n try:\n file = file.strip(\" \").strip('\"').strip(\"\\n\").strip('\"').strip(\" \")\n out, _ = (\n ffmpeg.input(file, threads=0)\n .output(\"-\", format=\"f32le\", acodec=\"pcm_f32le\", ac=1, ar=sampling_rate)\n .run(cmd=[\"ffmpeg\", \"-nostdin\"], capture_stdout=True, capture_stderr=True)\n )\n except Exception as error:\n raise RuntimeError(f\"Failed to load audio: {error}\")\n\n return np.frombuffer(out, np.float32).flatten()" }, { "identifier": "Slicer", "path": "rvc/train/slicer.py", "snippet": "class Slicer:\n def __init__(\n self,\n sr: int,\n threshold: float = -40.0,\n min_length: int = 5000,\n min_interval: int = 300,\n hop_size: int = 20,\n max_sil_kept: int = 5000,\n ):\n if not min_length >= min_interval >= hop_size:\n raise ValueError(\"min_length >= min_interval >= hop_size is required\")\n if not max_sil_kept >= hop_size:\n raise ValueError(\"max_sil_kept >= hop_size is required\")\n\n min_interval = sr * min_interval / 1000\n self.threshold = 10 ** (threshold / 20.0)\n self.hop_size = round(sr * hop_size / 1000)\n self.win_size = min(round(min_interval), 4 * self.hop_size)\n self.min_length = round(sr * min_length / 1000 / self.hop_size)\n self.min_interval = round(min_interval / self.hop_size)\n self.max_sil_kept = round(sr * max_sil_kept / 1000 / self.hop_size)\n\n def _apply_slice(self, waveform, begin, end):\n start_idx = begin * self.hop_size\n if len(waveform.shape) > 1:\n end_idx = min(waveform.shape[1], end * self.hop_size)\n return waveform[:, start_idx:end_idx]\n else:\n end_idx = min(waveform.shape[0], end * self.hop_size)\n return waveform[start_idx:end_idx]\n\n def slice(self, waveform):\n samples = waveform.mean(axis=0) if len(waveform.shape) > 1 else waveform\n if samples.shape[0] <= self.min_length:\n return [waveform]\n\n rms_list = get_rms(\n y=samples, frame_length=self.win_size, hop_length=self.hop_size\n ).squeeze(0)\n sil_tags = []\n silence_start, clip_start = None, 0\n\n for i, rms in enumerate(rms_list):\n if rms < self.threshold:\n if silence_start is None:\n silence_start = i\n continue\n\n if silence_start is None:\n continue\n\n is_leading_silence = silence_start == 0 and i > self.max_sil_kept\n need_slice_middle = (\n i - silence_start >= self.min_interval\n and i - clip_start >= self.min_length\n )\n\n if not is_leading_silence and not need_slice_middle:\n silence_start = None\n continue\n\n if i - silence_start <= self.max_sil_kept:\n pos = rms_list[silence_start : i + 1].argmin() + silence_start\n if silence_start == 0:\n sil_tags.append((0, pos))\n else:\n sil_tags.append((pos, pos))\n clip_start = pos\n elif i - silence_start <= self.max_sil_kept * 2:\n pos = rms_list[\n i - self.max_sil_kept : silence_start + self.max_sil_kept + 1\n ].argmin()\n pos += i - self.max_sil_kept\n pos_l = (\n rms_list[\n silence_start : silence_start + self.max_sil_kept + 1\n ].argmin()\n + silence_start\n )\n pos_r = (\n rms_list[i - self.max_sil_kept : i + 1].argmin()\n + i\n - self.max_sil_kept\n )\n if silence_start == 0:\n sil_tags.append((0, pos_r))\n clip_start = pos_r\n else:\n sil_tags.append((min(pos_l, pos), max(pos_r, pos)))\n clip_start = max(pos_r, pos)\n else:\n pos_l = (\n rms_list[\n silence_start : silence_start + self.max_sil_kept + 1\n ].argmin()\n + silence_start\n )\n pos_r = (\n rms_list[i - self.max_sil_kept : i + 1].argmin()\n + i\n - self.max_sil_kept\n )\n if silence_start == 0:\n sil_tags.append((0, pos_r))\n else:\n 
sil_tags.append((pos_l, pos_r))\n clip_start = pos_r\n silence_start = None\n\n total_frames = rms_list.shape[0]\n\n if (\n silence_start is not None\n and total_frames - silence_start >= self.min_interval\n ):\n silence_end = min(total_frames, silence_start + self.max_sil_kept)\n pos = rms_list[silence_start : silence_end + 1].argmin() + silence_start\n sil_tags.append((pos, total_frames + 1))\n\n if not sil_tags:\n return [waveform]\n else:\n chunks = []\n if sil_tags[0][0] > 0:\n chunks.append(self._apply_slice(waveform, 0, sil_tags[0][0]))\n\n for i in range(len(sil_tags) - 1):\n chunks.append(\n self._apply_slice(waveform, sil_tags[i][1], sil_tags[i + 1][0])\n )\n\n if sil_tags[-1][1] < total_frames:\n chunks.append(\n self._apply_slice(waveform, sil_tags[-1][1], total_frames)\n )\n\n return chunks" } ]
from multiprocessing import cpu_count from scipy import signal from scipy.io import wavfile from rvc.lib.utils import load_audio from rvc.train.slicer import Slicer import os import sys import librosa import numpy as np import multiprocessing
1,634
now_directory = os.getcwd() sys.path.append(now_directory) experiment_directory = sys.argv[1] input_root = sys.argv[2] sampling_rate = int(sys.argv[3]) percentage = float(sys.argv[4]) num_processes = cpu_count() no_parallel = "True" class PreProcess: def __init__(self, sr, exp_dir, per=3.0):
now_directory = os.getcwd() sys.path.append(now_directory) experiment_directory = sys.argv[1] input_root = sys.argv[2] sampling_rate = int(sys.argv[3]) percentage = float(sys.argv[4]) num_processes = cpu_count() no_parallel = "True" class PreProcess: def __init__(self, sr, exp_dir, per=3.0):
self.slicer = Slicer(
1
2023-12-10 21:09:41+00:00
4k
lumi-ua/goit-project2-django-assistant
personal_assistant/app_contacts/views.py
[ { "identifier": "ContactForm", "path": "personal_assistant/app_contacts/forms.py", "snippet": "class ContactForm(ModelForm):\n fullname = CharField(max_length=255, \n widget=forms.TextInput(attrs={'placeholder': 'Name Lastname', \"class\": \"form-control\"}))\n address = CharField(max_length=255, required=False, \n widget=forms.TextInput(attrs={'placeholder': 'City, Street, House number', \"class\": \"form-control\"}))\n birthday = DateField(required=False, input_formats=[\"%d.%m.%Y\"], \n widget=forms.DateInput(attrs={'placeholder': 'DD.MM.YYYY', 'class': 'form-control'}))\n class Meta:\n model = Contact\n fields = [\"fullname\", \"address\", \"birthday\"]\n exclude = [\"user\"]" }, { "identifier": "PhoneNumberForm", "path": "personal_assistant/app_contacts/forms.py", "snippet": "class PhoneNumberForm(forms.ModelForm):\n phone_number = PhoneNumberField(\n widget=PhoneNumberPrefixWidget(attrs={'placeholder': '+380', 'class': 'form-control'})\n )\n class Meta:\n model = PhoneNumber\n fields = [\"phone_number\"]\n exclude = [\"contact\"]" }, { "identifier": "EmailAddressForm", "path": "personal_assistant/app_contacts/forms.py", "snippet": "class EmailAddressForm(forms.ModelForm):\n email = EmailField(max_length=100, required=False, widget=forms.EmailInput(attrs={'placeholder': '[email protected]', 'class': 'form-control'}))\n\n class Meta:\n model = EmailAddress\n fields = [\"email\"]\n exclude = [\"contact\"]" }, { "identifier": "Contact", "path": "personal_assistant/app_contacts/models.py", "snippet": "class Contact(models.Model):\n fullname = models.CharField(max_length=255)\n address = models.CharField(max_length=255, blank=True, null=True)\n birthday = models.DateField(blank=True, null=True)\n user = models.ForeignKey(User, on_delete=models.CASCADE, null=True)" }, { "identifier": "PhoneNumber", "path": "personal_assistant/app_contacts/models.py", "snippet": "class PhoneNumber(models.Model):\n phone_number = PhoneNumberField(null=True,)\n contact = models.ForeignKey(\n Contact, on_delete=models.CASCADE, default=None, null=True, related_name='phone_numbers'\n )" }, { "identifier": "EmailAddress", "path": "personal_assistant/app_contacts/models.py", "snippet": "class EmailAddress(models.Model):\n email = models.EmailField(max_length=100, null=True)\n contact = models.ForeignKey(\n Contact, on_delete=models.CASCADE, default=None, null=True, related_name='email_addresses'\n )" } ]
from datetime import date from django.shortcuts import render, redirect, get_object_or_404 from django.contrib.auth.decorators import login_required from django.contrib import messages from django.db.models import Q from django.urls import reverse_lazy from django.core.exceptions import ObjectDoesNotExist from django.core.paginator import Paginator from datetime import date, timedelta from .forms import ContactForm, PhoneNumberForm, EmailAddressForm from .models import Contact, PhoneNumber, EmailAddress
2,058
email_address_form = EmailAddressForm() return render(request, 'app_contacts/add_email_address.html', { 'title': 'Email address', 'email_address_form': email_address_form, 'email_adress_add_url': email_adress_add_url } ) def upcoming_birthdays(request): max_days = 10 today = date.today() days_in_future = int(request.GET.get("days", 7)) if days_in_future > max_days : days_in_future=max_days future_date = today + timedelta(days=days_in_future) print(future_date) contacts = Contact.objects.filter( Q(birthday__month__gte=today.month, birthday__day__gte=today.day, birthday__year__lte=today.year) | Q(birthday__month__gte=future_date.month, birthday__day__lte=future_date.day, birthday__year__lte=future_date.year), user=request.user ) if not contacts.exists(): return render(request, "app_contacts/upcoming_birthdays.html", { "title": "Upcoming birthdays list", "message": "No upcoming birthdays.", "max_days": str(max_days), "days_in_future": str(days_in_future) } ) return render(request, "app_contacts/upcoming_birthdays.html", { "title": "Upcoming birthdays list", "contacts": contacts, "max_days": str(max_days), "days_in_future": str(days_in_future) } ) @login_required def search_contacts(request): query = request.GET.get("query", "") error_message = "" contacts = None try: user_contacts = Contact.objects.filter(user=request.user) contacts = user_contacts.filter( Q(fullname__icontains=query) | Q(phone_numbers__phone_number__icontains=query) | Q(email_addresses__email__icontains=query) ).distinct() except Contact.DoesNotExist: contact=[] error_message = "Contact not found" return render(request, "app_contacts/search_contacts.html", { "title": "Searching contacts", "contacts": contacts, "error_message": error_message }) @login_required def edit_contact(request, pk): contact = get_object_or_404(Contact, pk=pk) if request.method == "POST": form = ContactForm(request.POST, instance=contact) if form.is_valid(): form.save() return redirect(to="app_contacts:detail", pk=pk) else: form = ContactForm(instance=contact) return render(request, "app_contacts/edit_contact.html", { "title": "Editing contact", "form": form, "contact": contact }) @login_required def delete_contact(request, pk): contact = get_object_or_404(Contact, pk=pk) if request.method == "POST": contact.delete() messages.success(request, "Contact successfully deleted") return redirect(to="app_contacts:dashboard") else: return render(request, "app_contacts/delete_contact.html", { "title": "Deleting contact", "contact": contact, "user": request.user } ) def delete_email(request, pk): try: email = EmailAddress.objects.get(pk=pk) email.delete() except ObjectDoesNotExist: email = None return detail(request, pk) def delete_phone(request, pk): try:
# from django.db.models import Q # Create your views here. @login_required def dashboard(request): return render(request, 'app_contacts/dashboard.html', {"title": "Dashboard contact operations"}) @login_required def contact(request): contact_form = ContactForm() phone_number_form = PhoneNumberForm() email_address_form = EmailAddressForm() if request.method == "POST": contact_form = ContactForm(request.POST) phone_number_form = PhoneNumberForm(request.POST) email_address_form = EmailAddressForm(request.POST) if contact_form.is_valid() and phone_number_form.is_valid() and email_address_form.is_valid(): new_contact = contact_form.save(commit=False) new_contact.user = request.user new_contact.save() phone_number = phone_number_form.save(commit=False) phone_number.contact = new_contact phone_number.save() email_address_data = email_address_form.cleaned_data if email_address_data.get("email"): email_address = email_address_form.save(commit=False) email_address.contact = new_contact email_address.save() return redirect(to="app_contacts:dashboard") return render( request, "app_contacts/contact.html", { "title": "Creation new contact", "contact_form": contact_form, "phone_number_form": phone_number_form, "email_address_form": email_address_form, } ) @login_required def contacts(request, page=1): per_page = 10 contacts = Contact.objects.filter(user=request.user) paginator = Paginator(list(contacts), per_page) contacts_on_page = paginator.page(page) return render(request, "app_contacts/all_contacts.html", { "title": "Contacts list", "contacts": contacts_on_page } ) @login_required def detail(request, pk): contact = get_object_or_404(Contact, pk=pk) return render(request, 'app_contacts/detail.html', { "title": "Contact details", "contact": contact } ) @login_required def add_phone_number(request, pk): contact = Contact.objects.get(pk=pk) phone_number_add_url = reverse_lazy('app_contacts:add_phone_number', kwargs={'pk': pk}) if request.method == 'POST': phone_number_form = PhoneNumberForm(request.POST) if phone_number_form.is_valid(): new_phone_number = phone_number_form.save() new_phone_number.contact = contact new_phone_number.save() return redirect(to="app_contacts:detail", pk=pk) else: phone_number_form = PhoneNumberForm() return render(request, 'app_contacts/add_phone_number.html', { 'title': "Adding Phone-number", 'phone_number_form': phone_number_form, 'phone_number_add_url': phone_number_add_url, } ) @login_required def add_email_address(request, pk): contact = Contact.objects.get(pk=pk) email_adress_add_url = reverse_lazy('app_contacts:add_email_address', kwargs={'pk': pk}) if request.method == 'POST': email_address_form = EmailAddressForm(request.POST) if email_address_form.is_valid(): new_email_address = email_address_form.save() new_email_address.contact = contact new_email_address.save() return redirect(to="app_contacts:detail", pk=pk) else: email_address_form = EmailAddressForm() return render(request, 'app_contacts/add_email_address.html', { 'title': 'Email address', 'email_address_form': email_address_form, 'email_adress_add_url': email_adress_add_url } ) def upcoming_birthdays(request): max_days = 10 today = date.today() days_in_future = int(request.GET.get("days", 7)) if days_in_future > max_days : days_in_future=max_days future_date = today + timedelta(days=days_in_future) print(future_date) contacts = Contact.objects.filter( Q(birthday__month__gte=today.month, birthday__day__gte=today.day, birthday__year__lte=today.year) | Q(birthday__month__gte=future_date.month, 
birthday__day__lte=future_date.day, birthday__year__lte=future_date.year), user=request.user ) if not contacts.exists(): return render(request, "app_contacts/upcoming_birthdays.html", { "title": "Upcoming birthdays list", "message": "No upcoming birthdays.", "max_days": str(max_days), "days_in_future": str(days_in_future) } ) return render(request, "app_contacts/upcoming_birthdays.html", { "title": "Upcoming birthdays list", "contacts": contacts, "max_days": str(max_days), "days_in_future": str(days_in_future) } ) @login_required def search_contacts(request): query = request.GET.get("query", "") error_message = "" contacts = None try: user_contacts = Contact.objects.filter(user=request.user) contacts = user_contacts.filter( Q(fullname__icontains=query) | Q(phone_numbers__phone_number__icontains=query) | Q(email_addresses__email__icontains=query) ).distinct() except Contact.DoesNotExist: contact=[] error_message = "Contact not found" return render(request, "app_contacts/search_contacts.html", { "title": "Searching contacts", "contacts": contacts, "error_message": error_message }) @login_required def edit_contact(request, pk): contact = get_object_or_404(Contact, pk=pk) if request.method == "POST": form = ContactForm(request.POST, instance=contact) if form.is_valid(): form.save() return redirect(to="app_contacts:detail", pk=pk) else: form = ContactForm(instance=contact) return render(request, "app_contacts/edit_contact.html", { "title": "Editing contact", "form": form, "contact": contact }) @login_required def delete_contact(request, pk): contact = get_object_or_404(Contact, pk=pk) if request.method == "POST": contact.delete() messages.success(request, "Contact successfully deleted") return redirect(to="app_contacts:dashboard") else: return render(request, "app_contacts/delete_contact.html", { "title": "Deleting contact", "contact": contact, "user": request.user } ) def delete_email(request, pk): try: email = EmailAddress.objects.get(pk=pk) email.delete() except ObjectDoesNotExist: email = None return detail(request, pk) def delete_phone(request, pk): try:
phone = PhoneNumber.objects.get(pk=pk)
4
2023-12-08 17:26:59+00:00
4k
SubConv/SubConv
modules/convert/converter.py
[ { "identifier": "RandUserAgent", "path": "modules/convert/util.py", "snippet": "def RandUserAgent() -> str:\n return userAgents[random.randint(0, len(userAgents) - 1)]" }, { "identifier": "get", "path": "modules/convert/util.py", "snippet": "def get(content):\n if content is None:\n return \"\"\n else:\n return content" }, { "identifier": "uniqueName", "path": "modules/convert/util.py", "snippet": "def uniqueName(names: dict, name):\n\tindex = names.get(name)\n\tif index is None:\n\t\tindex = 0\n\t\tnames[name] = index\n\telse:\n\t\tindex += 1\n\t\tnames[name] = index\n\t\tname = \"%s-%02d\" % (name, index)\n\treturn name" }, { "identifier": "urlSafe", "path": "modules/convert/util.py", "snippet": "def urlSafe(string):\n\treturn string.replace(\"+\", \"-\").replace(\"/\", \"_\")" }, { "identifier": "base64RawStdDecode", "path": "modules/convert/util.py", "snippet": "def base64RawStdDecode(encoded):\n\treturn base64.b64decode(\n encoded + \"=\"*(-len(encoded)%4)\n\t).decode(\"utf-8\")" }, { "identifier": "base64RawURLDecode", "path": "modules/convert/util.py", "snippet": "def base64RawURLDecode(encoded):\n\treturn base64.urlsafe_b64decode(\n\t\t\t encoded + \"=\"*(-len(encoded)%4)\n\t).decode(\"utf-8\")" }, { "identifier": "handleVShareLink", "path": "modules/convert/v.py", "snippet": "def handleVShareLink(names: dict, url: urlparse.ParseResult, scheme: str, proxy: dict):\n query = dict(urlparse.parse_qsl(url.query))\n proxy[\"name\"] = uniqueName(names, urlparse.unquote(url.fragment))\n if url.hostname == \"\":\n raise\n if url.port == \"\":\n raise\n proxy[\"type\"] = scheme\n proxy[\"server\"] = url.hostname\n proxy[\"port\"] = url.port\n proxy[\"uuid\"] = url.username\n proxy[\"udp\"] = True\n tls = get(query.get(\"security\")).lower()\n if tls.endswith(\"tls\") or tls == \"reality\":\n proxy[\"tls\"] = True\n fingerprint = get(query.get(\"fp\"))\n if fingerprint == \"\":\n proxy[\"client-fingerprint\"] = \"chrome\"\n else:\n proxy[\"client-fingerprint\"] = fingerprint\n alpn = get(query.get(\"alpn\"))\n if alpn != \"\":\n proxy[\"alpn\"] = alpn.split(\",\")\n sni = get(query.get(\"sni\"))\n if sni != \"\":\n proxy[\"servername\"] = sni\n realityPublicKey = get(query.get(\"pbk\"))\n if realityPublicKey != \"\":\n proxy[\"reality-opts\"] = {\n \"public-key\": realityPublicKey,\n \"short-id\": get(query.get(\"sid\"))\n }\n \n switch = get(query.get(\"packetEncoding\"))\n if switch == \"none\" or switch == \"\":\n pass\n elif switch == \"packet\":\n proxy[\"packet-addr\"] = True\n else:\n proxy[\"xudp\"] = True\n\n network = get(query.get(\"type\")).lower()\n if network == \"\":\n network = \"tcp\"\n fakeType = get(query.get(\"headerType\")).lower()\n if fakeType == \"http\":\n network = \"http\"\n elif network == \"http\":\n network = \"h2\"\n proxy[\"network\"] = network\n if network == \"tcp\":\n if fakeType != \"none\" and fakeType != \"\":\n headers = {}\n httpOpts = {}\n httpOpts[\"path\"] = \"/\"\n\n host = get(query.get(\"host\"))\n if host != \"\":\n headers[\"Host\"] = str(host)\n\n method = get(query.get(\"method\"))\n if method != \"\":\n httpOpts[\"method\"] = method\n\n path = get(query.get(\"path\"))\n if path != \"\":\n httpOpts[\"path\"] = str(path)\n \n httpOpts[\"headers\"] = headers\n proxy[\"http-opts\"] = httpOpts\n\n elif network == \"http\":\n headers = {}\n h2Opts = {}\n h2Opts[\"path\"] = \"/\"\n path = get(query.get(\"path\"))\n if path != \"\":\n h2Opts[\"path\"] = str(path)\n host = get(query.get(\"host\"))\n if host != \"\":\n h2Opts[\"host\"] = str(host)\n 
h2Opts[\"headers\"] = headers\n proxy[\"h2-opts\"] = h2Opts\n \n elif network == \"ws\":\n headers = {}\n wsOpts = {}\n headers[\"User-Agent\"] = RandUserAgent()\n headers[\"Host\"] = get(query.get(\"host\"))\n wsOpts[\"path\"] = get(query.get(\"path\"))\n wsOpts[\"headers\"] = headers\n\n earlyData = get(query.get(\"ed\"))\n if earlyData != \"\":\n try:\n med = int(earlyData)\n except:\n raise\n wsOpts[\"max-early-data\"] = med\n earlyDataHeader = get(query.get(\"edh\"))\n if earlyDataHeader != \"\":\n wsOpts[\"early-data-header-name\"] = earlyDataHeader\n\n proxy[\"ws-opts\"] = wsOpts\n\n elif network == \"grpc\":\n grpcOpts = {}\n grpcOpts[\"grpc-service-name\"] = get(query.get(\"serviceName\"))\n proxy[\"grpc-opts\"] = grpcOpts" } ]
from modules.convert.util import RandUserAgent from modules.convert.util import get from modules.convert.util import uniqueName from modules.convert.util import urlSafe from modules.convert.util import base64RawStdDecode from modules.convert.util import base64RawURLDecode from modules.convert.v import handleVShareLink import json import base64 import urllib.parse as urlparse import distutils.util
3,578
except: continue vmess["alterId"] = 0 vmess["cipher"] = "auto" encryption = get(query.get("encryption")) if encryption != "": vmess["cipher"] = encryption proxies.append(vmess) continue values = {} try: values = json.loads(dcBuf) except: continue try: tempName = values["ps"] except: continue name = uniqueName(names, tempName) vmess = {} vmess["name"] = name vmess["type"] = scheme vmess["server"] = values["add"] vmess["port"] = values["port"] vmess["uuid"] = values["id"] alterId = values.get("aid") if alterId is not None: vmess["alterId"] = alterId else: vmess["alterId"] = 0 vmess["udp"] = True vmess["xudp"] = True vmess["tls"] = False vmess["skip-cert-verify"] = False vmess["cipher"] = "auto" cipher = get(values.get("scy")) if cipher != "": vmess["cipher"] = cipher sni = get(values.get("sni")) if sni != "": vmess["servername"] = sni network = get(values.get("net")).lower() if values.get("type") == "http": network = "http" elif network == "http": network = "h2" vmess["network"] = network tls = values.get("tls") if tls is not None: tls = str(tls).lower() if tls.endswith("tls"): vmess["tls"] = True alpn = values.get("alpn") if alpn is not None and alpn != "": vmess["alpn"] = alpn.split(",") if network == "http": headers = {} httpOpts = {} host = get(values.get("host")) if host != "": headers["Host"] = host httpOpts["path"] = "/" path = get(values.get("path")) if path != "": httpOpts["path"] = path httpOpts["headers"] = headers vmess["http-opts"] = httpOpts elif network == "h2": headers = {} h2Opts = {} host = get(values.get("host")) if host != "": headers["Host"] = host h2Opts["path"] = get(values.get("path")) h2Opts["headers"] = headers vmess["h2-opts"] = h2Opts elif network == "ws": headers = {} wsOpts = {} wsOpts["path"] = "/" host = get(values.get("host")) if host != "": headers["Host"] = host path = get(values.get("path")) if path != "": wsOpts["path"] = path wsOpts["headers"] = headers vmess["ws-opts"] = wsOpts elif network == "grpc": grpcOpts = {} grpcOpts["grpc-service-name"] = get(values.get("path")) vmess["grpc-opts"] = grpcOpts proxies.append(vmess) # ss and ssr still WIP elif scheme == "ss": try: urlSS = urlparse.urlparse(line) except: continue name = uniqueName(names, urlparse.unquote(urlSS.fragment)) port = urlSS.port if port == "": try:
async def ConvertsV2Ray(buf): try: data = base64.b64decode(buf).decode("utf-8") except: try: data = buf.decode("utf-8") except: data = buf arr = data.splitlines() proxies = [] names = {} for line in arr: if line == "": continue if -1 == line.find("://"): continue else: scheme, body = line.split("://", 1) scheme = scheme.lower() if scheme == "hysteria": try: urlHysteria = urlparse.urlparse(line) except: continue query = dict(urlparse.parse_qsl(urlHysteria.query)) name = uniqueName(names, urlparse.unquote(urlHysteria.fragment)) hysteria = {} hysteria["name"] = name hysteria["type"] = scheme hysteria["server"] = urlHysteria.hostname hysteria["port"] = urlHysteria.port hysteria["sni"] = query.get("peer") hysteria["obfs"] = query.get("obfs") alpn = get(query.get("alpn")) if alpn != "": hysteria["alpn"] = alpn.split(",") hysteria["auth_str"] = query.get("auth") hysteria["protocol"] = query.get("protocol") up = get(query.get("up")) down = get(query.get("down")) if up == "": up = query.get("upmbps") if down == "": down = query.get("downmbps") hysteria["up"] = up hysteria["down"] = down hysteria["skip-cert-verify"] = bool( distutils.util.strtobool(query.get("insecure"))) proxies.append(hysteria) elif scheme == "hysteria2" or scheme == "hy2": # apply f6bf9c08577060bb199c2f746c7d91dd3c0ca7b9 from mihomo try: urlHysteria2 = urlparse.urlparse(line) except: continue query = dict(urlparse.parse_qsl(urlHysteria2.query)) name = uniqueName(names, urlparse.unquote(urlHysteria2.fragment)) hysteria2 = {} hysteria2["name"] = name hysteria2["type"] = scheme hysteria2["server"] = urlHysteria2.hostname port = get(urlHysteria2.port) if port != "": hysteria2["port"] = int(port) else: hysteria2["port"] = 443 obfs = get(query.get("obfs")) if obfs != "" and obfs not in ["none", "None"]: hysteria2["obfs"] = query.get("obfs") hysteria2["obfs-password"] = get(query.get("obfs-password")) sni = get(query.get("sni")) if sni == "": sni = get(query.get("peer")) if sni != "": hysteria2["sni"] = sni hysteria2["skip-cert-verify"] = bool( distutils.util.strtobool(query.get("insecure"))) alpn = get(query.get("alpn")) if alpn != "": hysteria2["alpn"] = alpn.split(",") auth = get(urlHysteria2.username) if auth != "": hysteria2["password"] = auth hysteria2["fingerprint"] = get(query.get("pinSHA256")) hysteria2["down"] = get(query.get("down")) hysteria2["up"] = get(query.get("up")) proxies.append(hysteria2) elif scheme == "tuic": # A temporary unofficial TUIC share link standard # Modified from https://github.com/daeuniverse/dae/discussions/182 # Changes: # 1. Support TUICv4, just replace uuid:password with token # 2. 
Remove `allow_insecure` field try: urlTUIC = urlparse.urlparse(line) except: continue query = dict(urlparse.parse_qsl(urlTUIC.query)) tuic = {} tuic["name"] = uniqueName( names, urlparse.unquote(urlTUIC.fragment)) tuic["type"] = scheme tuic["server"] = urlTUIC.hostname tuic["port"] = urlTUIC.port tuic["udp"] = True password = urlTUIC.password if password is not None: tuic["uuid"] = urlTUIC.username tuic["password"] = password else: tuic["token"] = urlTUIC.username cc = get(query.get("congestion_control")) if cc != "": tuic["congestion-control"] = cc alpn = get(query.get("alpn")) if alpn != "": tuic["alpn"] = alpn.split(",") sni = get(query.get("sni")) if sni != "": tuic["sni"] = sni if query.get("disable_sni") == "1": tuic["disable-sni"] = True udpRelayMode = get(query.get("udp_relay_mode")) if udpRelayMode != "": tuic["udp-relay-mode"] = udpRelayMode proxies.append(tuic) elif scheme == "trojan": try: urlTrojan = urlparse.urlparse(line) except: continue query = dict(urlparse.parse_qsl(urlTrojan.query)) name = uniqueName(names, urlparse.unquote(urlTrojan.fragment)) trojan = {} trojan["name"] = name trojan["type"] = scheme trojan["server"] = urlTrojan.hostname trojan["port"] = urlTrojan.port trojan["password"] = urlTrojan.password trojan["udp"] = True trojan["skip-cert-verify"] = bool( distutils.util.strtobool(query.get("allowInsecure"))) sni = get(query.get("sni")) if sni != "": trojan["sni"] = sni alpn = get(query.get("alpn")) if alpn != "": trojan["alpn"] = alpn.split(",") network = get(query.get("type")) if network != "": network = network.lower() trojan["network"] = network if network == "ws": headers = {} wsOpts = {} headers["User-Agent"] = RandUserAgent() wsOpts["path"] = query.get("path") wsOpts["headers"] = headers trojan["ws-opts"] = wsOpts elif network == "grpc": grpcOpts = {} grpcOpts["serviceName"] = query.get("serviceName") trojan["grpc-opts"] = grpcOpts fingerprint = get(query.get("fp")) if fingerprint == "": trojan["client-fingerprint"] = "chrome" else: trojan["client-fingerprint"] = fingerprint proxies.append(trojan) elif scheme == "vless": try: urlVless = urlparse.urlparse(line) except: continue query = dict(urlparse.parse_qsl(urlVless.query)) vless = {} try: handleVShareLink(names, urlVless, scheme, vless) except: continue flow = get(query.get("flow")) if flow != "": vless["flow"] = str(flow).lower() proxies.append(vless) elif scheme == "vmess": try: dcBuf = base64.b64decode(body) except: # Xray VMessAEAD share link try: urlVMess = urlparse.urlparse(line) except: continue query = dict(urlparse.parse_qsl(urlVMess.query)) vmess = {} try: handleVShareLink(names, urlVMess, scheme, vmess) except: continue vmess["alterId"] = 0 vmess["cipher"] = "auto" encryption = get(query.get("encryption")) if encryption != "": vmess["cipher"] = encryption proxies.append(vmess) continue values = {} try: values = json.loads(dcBuf) except: continue try: tempName = values["ps"] except: continue name = uniqueName(names, tempName) vmess = {} vmess["name"] = name vmess["type"] = scheme vmess["server"] = values["add"] vmess["port"] = values["port"] vmess["uuid"] = values["id"] alterId = values.get("aid") if alterId is not None: vmess["alterId"] = alterId else: vmess["alterId"] = 0 vmess["udp"] = True vmess["xudp"] = True vmess["tls"] = False vmess["skip-cert-verify"] = False vmess["cipher"] = "auto" cipher = get(values.get("scy")) if cipher != "": vmess["cipher"] = cipher sni = get(values.get("sni")) if sni != "": vmess["servername"] = sni network = get(values.get("net")).lower() if 
values.get("type") == "http": network = "http" elif network == "http": network = "h2" vmess["network"] = network tls = values.get("tls") if tls is not None: tls = str(tls).lower() if tls.endswith("tls"): vmess["tls"] = True alpn = values.get("alpn") if alpn is not None and alpn != "": vmess["alpn"] = alpn.split(",") if network == "http": headers = {} httpOpts = {} host = get(values.get("host")) if host != "": headers["Host"] = host httpOpts["path"] = "/" path = get(values.get("path")) if path != "": httpOpts["path"] = path httpOpts["headers"] = headers vmess["http-opts"] = httpOpts elif network == "h2": headers = {} h2Opts = {} host = get(values.get("host")) if host != "": headers["Host"] = host h2Opts["path"] = get(values.get("path")) h2Opts["headers"] = headers vmess["h2-opts"] = h2Opts elif network == "ws": headers = {} wsOpts = {} wsOpts["path"] = "/" host = get(values.get("host")) if host != "": headers["Host"] = host path = get(values.get("path")) if path != "": wsOpts["path"] = path wsOpts["headers"] = headers vmess["ws-opts"] = wsOpts elif network == "grpc": grpcOpts = {} grpcOpts["grpc-service-name"] = get(values.get("path")) vmess["grpc-opts"] = grpcOpts proxies.append(vmess) # ss and ssr still WIP elif scheme == "ss": try: urlSS = urlparse.urlparse(line) except: continue name = uniqueName(names, urlparse.unquote(urlSS.fragment)) port = urlSS.port if port == "": try:
dcBuf = base64RawStdDecode(urlSS.hostname)
4
2023-12-06 12:57:11+00:00
4k
Opt-Mucca/PySCIPOpt-ML
src/pyscipopt_ml/lightgbm/lgbgetter.py
[ { "identifier": "NoModel", "path": "src/pyscipopt_ml/exceptions.py", "snippet": "class NoModel(Exception):\n \"\"\"No model is known for some structure.\"\"\"\n\n def __init__(self, predictor, reason):\n if not isinstance(predictor, str):\n predictor = type(predictor).__name__\n super().__init__(f\"Can't do model for {predictor}: {reason}\")" }, { "identifier": "NoSolution", "path": "src/pyscipopt_ml/exceptions.py", "snippet": "class NoSolution(Exception):\n \"\"\"SCIP doesn't have a solution.\"\"\"\n\n def __init__(self):\n super().__init__(\"No solution available\")" }, { "identifier": "ParameterError", "path": "src/pyscipopt_ml/exceptions.py", "snippet": "class ParameterError(Exception):\n \"\"\"Wrong parameter to a function.\"\"\"\n\n def __init__(self, message):\n super().__init__(message)" }, { "identifier": "AbstractPredictorConstr", "path": "src/pyscipopt_ml/modelling/base_predictor_constraint.py", "snippet": "class AbstractPredictorConstr(ABC):\n \"\"\"Base class to store all information of embedded ML model by :py:func`pyscipopt_ml.add_predictor_constr`.\n\n This class is the base class to store everything that is added to\n a SCIP model when a trained predictor is inserted into it. Depending on\n the type of the predictor, a class derived from it will be returned\n by :py:func:`pyscipopt_ml.add_predictor_constr`.\n\n Warning\n -------\n\n Users should usually never construct objects of this class or one of its derived\n classes. They are returned by the :py:func:`pyscipopt_ml.add_predictor_constr` and\n other functions.\n \"\"\"\n\n def __init__(\n self, scip_model, input_vars, output_vars=None, unique_naming_prefix=\"\", **kwargs\n ):\n self.scip_model = scip_model\n self.unique_naming_prefix = unique_naming_prefix\n self._validate(input_vars, output_vars)\n self._created_vars = []\n self._created_cons = []\n self._build_predictor_model(**kwargs)\n\n def _validate(self, input_vars, output_vars=None):\n \"\"\"Validate input and output variables (check shapes, reshape if needed).\"\"\"\n\n # Ensure the correct type of input and output is given\n if type(input_vars) not in [list, np.ndarray]:\n raise ParameterError(\n f\"Input variables are not type list or np.ndarray. They are type {type(input_vars)}.\"\n )\n if output_vars is not None:\n if not isinstance(output_vars, list) and not isinstance(output_vars, np.ndarray):\n raise ParameterError(\n f\"Output variables are not type list or np.ndarray. They are type {type(output_vars)}.\"\n )\n\n # Transform the type list to type np.ndarray\n if isinstance(input_vars, list):\n input_vars = np.array(input_vars, dtype=object)\n if isinstance(output_vars, list):\n output_vars = np.array(output_vars, dtype=object)\n\n # Change the dimension of the input variables if needed. 
(Always want number of data points first)\n if input_vars.ndim == 1:\n input_vars = input_vars.reshape((1, -1))\n if input_vars.ndim >= 3:\n input_vars = input_vars.reshape((input_vars.shape[0], -1))\n\n # In the case of the output being None, create the appropriate output variables here\n if output_vars is None:\n output_vars = self._create_output_vars(input_vars)\n\n # Change the dimensions of the output variables if needed (Always want the number of data points first)\n if output_vars.ndim == 1:\n if input_vars.shape[0] == 1:\n output_vars = output_vars.reshape((1, -1))\n else:\n output_vars = output_vars.reshape((-1, 1))\n\n # Ensure that the variable dimensions match that of the predictor\n if hasattr(self, \"input_size\") and input_vars.shape[-1] != self.input_size:\n raise ParameterError(\n f\"Input variables dimension don't conform with predictor {type(self)} \"\n + f\"Input variable dimensions: {input_vars.shape[-1]} != {self.input_size}\"\n )\n\n if hasattr(self, \"output_size\") and output_vars.shape[-1] != self.output_size:\n raise ParameterError(\n f\"Output variable dimensions don't conform with predictor {type(self)} \"\n + f\"Output variable dimensions: {output_vars.shape[-1]} != {self.output_size}\"\n )\n\n if output_vars.shape[0] != input_vars.shape[0]:\n raise ParameterError(\n \"Non-conforming dimension between input variables and output variables: \"\n + f\"{output_vars.shape[0]} != {input_vars.shape[0]}\"\n )\n\n self._input = input_vars\n self._output = output_vars\n\n def _build_predictor_model(self, **kwargs):\n self._mip_model(**kwargs)\n\n def print_stats(self, file=None):\n \"\"\"Print statistics on model additions stored by this class.\n\n This function prints detailed statistics on the variables\n and constraints that were added to the model.\n\n Arguments\n ---------\n\n file: None, optional\n Text stream to which output should be redirected. 
By default, this is sys.stdout.\n \"\"\"\n\n n_indicator_cons = 0\n n_sos_cons = 0\n n_linear_cons = 0\n\n created_cons = self._created_cons\n created_vars = self._created_vars\n if hasattr(self, \"_estimators\"):\n for estimator in self._estimators:\n created_cons += estimator._created_cons\n created_vars += estimator._created_vars\n if hasattr(self, \"_layers\"):\n for layer in self._layers:\n created_cons += layer._created_cons\n created_vars += layer._created_vars\n for cons_set in created_cons:\n it = np.nditer(cons_set, flags=[\"multi_index\", \"refs_ok\"])\n for _ in it:\n if isinstance(cons_set[it.multi_index], Constraint):\n cons_type = cons_set[it.multi_index].getConshdlrName()\n if cons_type == \"indicator\":\n n_indicator_cons += 1\n elif cons_type == \"SOS1\":\n n_sos_cons += 1\n elif cons_type == \"linear\":\n n_linear_cons += 1\n else:\n raise TypeError(\n f\"Cons {cons_set[it.multi_index]} is of unknown type {cons_type}\"\n )\n\n n_bin_vars = 0\n n_cont_vars = 0\n\n for var_set in created_vars:\n it = np.nditer(var_set, flags=[\"multi_index\", \"refs_ok\"])\n for _ in it:\n if isinstance(var_set[it.multi_index], Variable):\n var_type = var_set[it.multi_index].vtype()\n if var_type == \"BINARY\":\n n_bin_vars += 1\n elif var_type == \"CONTINUOUS\":\n n_cont_vars += 1\n else:\n raise TypeError(\n f\"Var {var_set[it.multi_index]} is of unknown type {var_type}\"\n )\n\n print(\n f\"Constraints created:\\n Linear {n_linear_cons}\\n Indicator {n_indicator_cons}\\n \"\n f\"SOS1 {n_sos_cons}\\n\"\n f\"Created (internal) variables:\\n Binary {n_bin_vars}\\n Continuous {n_cont_vars}\\n\"\n f\"Input Shape: {self.input.shape}\\nOutput Shape: {self.output.shape}\",\n file=file,\n )\n\n def _create_output_vars(self, input_vars):\n \"\"\"May be defined in derived class to create the output variables of predictor.\"\"\"\n if (not hasattr(self, \"_output\") or self._output is None) and (\n not hasattr(self, \"output_size\") or self.output_size is None\n ):\n raise AttributeError\n\n if not hasattr(self, \"_output\") or self._output is None:\n if hasattr(self, \"classification\"):\n if self.classification:\n vtype = \"B\"\n else:\n vtype = \"C\"\n else:\n vtype = \"C\"\n output_vars = create_vars(\n self.scip_model,\n (input_vars.shape[0], self.output_size),\n vtype,\n lb=None,\n ub=None,\n name_prefix=\"out\",\n )\n return output_vars\n else:\n return self._output\n\n @property\n def _has_solution(self):\n \"\"\"Returns true if we have a solution.\"\"\"\n if self.scip_model.getNSols() > 0:\n return True\n return False\n\n @abstractmethod\n def get_error(self, eps):\n \"\"\"Returns error in SCIP's solution with respect to prediction from input.\n\n Returns\n -------\n error : ndarray of same shape as\n :py:attr:`pyscipopt_ml.modelling.base_predictor_constr.AbstractPredictorConstr.output`\n Assuming that we have a solution for the input and output variables\n `x, y`. Returns the absolute value of the differences between `predictor.predict(x)` and\n `y`. 
Where predictor is the regression / classification model represented by this object.\n\n Raises\n ------\n NoSolution\n If the SCIP model has no solution (either was not optimized or is infeasible).\n \"\"\"\n ...\n\n @abstractmethod\n def _mip_model(self, **kwargs):\n \"\"\"Makes MIP model for the predictor.\"\"\"\n ...\n\n @property\n def input(self):\n \"\"\"Returns the input variables of embedded predictor.\n\n Returns\n -------\n output : np.ndarray\n \"\"\"\n return self._input\n\n @property\n def output(self):\n \"\"\"Output variables of embedded predictor.\n\n Returns\n -------\n output : np.ndarray\n \"\"\"\n return self._output\n\n @property\n def input_values(self):\n \"\"\"Returns the values for the input variables if a solution is known.\n\n Returns\n -------\n input_vals : np.ndarray\n\n Raises\n ------\n NoSolution\n If SCIP has no solution (either was not optimized or is infeasible).\n \"\"\"\n if not self._has_solution:\n raise NoSolution\n\n input_vals = np.zeros(self.input.shape)\n for i in range(self.input.shape[0]):\n for j in range(self.input.shape[1]):\n input_vals[i][j] = self.scip_model.getVal(self.input[i][j])\n\n return input_vals\n\n @property\n def output_values(self):\n \"\"\"Returns the values for the output variables if a solution is known.\n\n Returns\n -------\n output_value : np.ndarray\n\n Raises\n ------\n NoSolution\n If SCIP has no solution (either was not optimized or is infeasible).\n \"\"\"\n if not self._has_solution:\n raise NoSolution\n\n output_vals = np.zeros(self.output.shape)\n for i in range(self.output.shape[0]):\n for j in range(self.output.shape[1]):\n output_vals[i][j] = self.scip_model.getVal(self.output[i][j])\n\n return output_vals\n\n def __str__(self):\n return self._name" }, { "identifier": "create_vars", "path": "src/pyscipopt_ml/modelling/var_utils.py", "snippet": "def create_vars(scip_model, shape, vtype, lb=None, ub=None, name_prefix=\"\"):\n \"\"\"\n Create PySCIPOpt variables in a numpy.ndarray of a given shape.\n\n Parameters\n ----------\n scip_model : PySCIPOpt Model\n The SCIP Model where the predictor should be inserted.\n shape : tuple\n The shape of the numpy array that will be constructed\n vtype : 'C' | 'B' | 'I'\n Whether the variables will be continuous, binary, or integer\n lb : float or int or None, optional\n The lower bound of the variables\n ub : float or int or None, optional\n The upper bound of the variables\n name_prefix : str, optional\n The naming prefix used for these variables\n\n Returns\n -------\n scip_vars : np.ndarray\n A np.ndarray with shape (shape) that contains uniquely names variables all of which are the specified type\n \"\"\"\n\n scip_vars = np.zeros(shape, dtype=object)\n it = np.nditer(scip_vars, flags=[\"multi_index\", \"refs_ok\"])\n for _ in it:\n idx_list = str(it.multi_index).strip(\")\").strip(\"(\").split(\",\")\n idx_string = \"\"\n for idx in idx_list:\n if idx == \"\":\n continue\n idx_string += f\"_{int(idx)}\"\n name = name_prefix + idx_string\n scip_vars[it.multi_index] = scip_model.addVar(vtype=vtype, lb=lb, ub=ub, name=name)\n return scip_vars" } ]
import numpy as np from sklearn.base import is_classifier from ..exceptions import NoModel, NoSolution, ParameterError from ..modelling import AbstractPredictorConstr from ..modelling.var_utils import create_vars
3,249
"""Implements some utility tools for all lightgbm objects.""" class LGBgetter(AbstractPredictorConstr): """Utility class for lightgbm models convertors. Implement some common functionalities: check predictor is fitted, output dimension, get error Attributes ---------- predictor Lightgbm predictor embedded into SCIP model. """ def __init__(self, predictor, input_vars, output_type="regular", **kwargs): if not hasattr(predictor, "booster_"):
"""Implements some utility tools for all lightgbm objects.""" class LGBgetter(AbstractPredictorConstr): """Utility class for lightgbm models convertors. Implement some common functionalities: check predictor is fitted, output dimension, get error Attributes ---------- predictor Lightgbm predictor embedded into SCIP model. """ def __init__(self, predictor, input_vars, output_type="regular", **kwargs): if not hasattr(predictor, "booster_"):
raise ParameterError(
2
2023-12-10 20:28:22+00:00
4k
DongqiShen/qwen-fast
eval.py
[ { "identifier": "LLaMA", "path": "model.py", "snippet": "def find_multiple(n: int, k: int) -> int:\n def __post_init__(self):\n def from_name(cls, name: str):\n def __init__(self, max_batch_size, max_seq_length, n_heads, head_dim, dtype=torch.bfloat16):\n def update(self, input_pos, k_val, v_val):\n def __init__(self, config: ModelArgs) -> None:\n def setup_caches(self, max_batch_size, max_seq_length):\n def forward(self, idx: Tensor, input_pos: Optional[Tensor] = None) -> Tensor:\n def from_name(cls, name: str):\n def __init__(self, config: ModelArgs) -> None:\n def forward(self, x: Tensor, input_pos: Tensor, freqs_cis: Tensor, mask: Tensor) -> Tensor:\n def __init__(self, config: ModelArgs):\n def load_hook(self, state_dict, prefix, *args):\n def forward(self, x: Tensor, freqs_cis: Tensor, mask: Tensor, input_pos: Optional[Tensor] = None) -> Tensor:\n def __init__(self, config: ModelArgs) -> None:\n def forward(self, x: Tensor) -> Tensor:\n def __init__(self, dim: int, eps: float = 1e-5):\n def _norm(self, x):\n def forward(self, x: Tensor) -> Tensor:\ndef precompute_freqs_cis(\n seq_len: int, n_elem: int, base: int = 10000\n) -> Tensor:\ndef apply_rotary_emb(x: Tensor, freqs_cis: Tensor) -> Tensor:\nclass ModelArgs:\nclass KVCache(nn.Module):\nclass Transformer(nn.Module):\nclass TransformerBlock(nn.Module):\nclass Attention(nn.Module):\nclass FeedForward(nn.Module):\nclass RMSNorm(nn.Module):" }, { "identifier": "_load_model", "path": "generate.py", "snippet": "def _load_model(checkpoint_path, device, precision, use_tp):\n with torch.device('meta'):\n model = Transformer.from_name(checkpoint_path.parent.name)\n\n if \"int8\" in str(checkpoint_path):\n print(\"Using int8 weight-only quantization!\")\n from quantize import WeightOnlyInt8QuantHandler\n simple_quantizer = WeightOnlyInt8QuantHandler(model)\n model = simple_quantizer.convert_for_runtime()\n\n if \"int4\" in str(checkpoint_path):\n print(\"Using int4 quantization!\")\n path_comps = checkpoint_path.name.split(\".\")\n assert path_comps[-2].startswith(\"g\")\n groupsize = int(path_comps[-2][1:])\n from quantize import WeightOnlyInt4QuantHandler\n simple_quantizer = WeightOnlyInt4QuantHandler(model, groupsize)\n model = simple_quantizer.convert_for_runtime()\n\n checkpoint = torch.load(str(checkpoint_path), mmap=True, weights_only=True)\n model.load_state_dict(checkpoint, assign=True)\n\n if use_tp:\n from tp import apply_tp\n print(\"Applying tensor parallel to model ...\")\n apply_tp(model)\n\n model = model.to(device=device, dtype=precision)\n return model.eval()" }, { "identifier": "encode_tokens", "path": "generate.py", "snippet": "def encode_tokens(tokenizer, string, bos=True, device='cuda'):\n tokens = tokenizer.encode(string)\n if bos:\n tokens = [tokenizer.bos_id()] + tokens\n return torch.tensor(tokens, dtype=torch.int, device=device)" }, { "identifier": "model_forward", "path": "generate.py", "snippet": "def model_forward(model, x, input_pos):\n return model(x, input_pos)" } ]
import sys import time import torch import torch._inductor.config import torch._dynamo.config import os import sys import main as lm_evaluation_harness_main import lm_eval import argparse from pathlib import Path from typing import Optional from model import LLaMA from sentencepiece import SentencePieceProcessor from generate import ( _load_model, encode_tokens, model_forward, )
1,860
torch._dynamo.config.automatic_dynamic_shapes = True torch._inductor.config.triton.unique_kernel_names = True torch._inductor.config.epilogue_fusion = False torch._inductor.config.triton.cudagraphs = True torch._dynamo.config.cache_size_limit = 100000 # support running without installing as a package wd = Path(__file__).parent.parent.resolve() sys.path.append(str(wd)) # hacky path setup for lm-evaluation-harness lm_evaluation_harness_path = '/'.join( os.getcwd().split('/')[:-1] + ['lm-evaluation-harness']) sys.path.insert(0, lm_evaluation_harness_path) def setup_cache_padded_seq_input_pos_max_seq_length_for_prefill( model: LLaMA, prompt: torch.Tensor, max_new_tokens: int, max_seq_length: Optional[int] = None, ): """ Sets up model cache and does some bookkeeping calculations for prompt, input_pos and max_seq_length that are needed for prefill or model_forward Args: model (LLaMA): The model whose cache gets set up prompt (torch.Tensor): Tensor of shape (T) with indices of the prompt sequence. max_new_tokens (int): The desired maximum number of new tokens that can be generated. max_seq_length (Optional[int], optional): The maximum sequence length allowed. Returns: seq (torch.Tensor): prompt but padded with zeros to size max_seq_length input_pos (torch.Tensor): tensor of integers in increasing order max_seq_length (int): The maximum sequence length allowed, updated based on other numbers """ T = prompt.size(0) T_new = T + max_new_tokens if max_seq_length is None: max_seq_length = min(T_new, model.config.block_size) device, dtype = prompt.device, prompt.dtype # create an empty tensor of the expected final shape and fill in the current tokens empty = torch.empty(T_new, dtype=dtype, device=device) empty[:T] = prompt seq = empty input_pos = torch.arange(0, T, device=device) with torch.device(device): model.setup_caches(max_batch_size=1, max_seq_length=max_seq_length) return seq, input_pos, max_seq_length class SimpleGPTEvalWrapper(lm_eval.base.BaseLM): """ A wrapper class for SimpleGPT, providing integration with the lm-evaluation-harness library. """ def __init__( self, model: LLaMA, tokenizer, max_seq_length: Optional[int]=None, ): super().__init__() self._model = model self._tokenizer = tokenizer self._device = torch.device('cuda') self._max_seq_length = 2048 if max_seq_length is None else max_seq_length @property def eot_token_id(self): return self._tokenizer.eos_id() @property def max_length(self): return self._max_seq_length @property def max_gen_toks(self): return 50 @property def batch_size(self): return 1 @property def device(self): return self._device def tok_encode(self, string: str): encoded = encode_tokens(self._tokenizer, string, bos=True, eos=False, device=self._device) # encoded is a pytorch tensor, but some internal logic in the # eval harness expects it to be a list instead # TODO: verify this for multi-batch as well encoded = encoded.tolist() return encoded def tok_decode(self, tokens): decoded = self._tokenizer.decode(tokens) return decoded def _model_call(self, inps): # TODO: make batches work inps = inps.squeeze(0) max_new_tokens = 1 seq, input_pos, max_seq_length = \ setup_cache_padded_seq_input_pos_max_seq_length_for_prefill( self._model, inps, max_new_tokens, self.max_length, ) x = seq.index_select(0, input_pos).view(1, -1)
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. torch._dynamo.config.automatic_dynamic_shapes = True torch._inductor.config.triton.unique_kernel_names = True torch._inductor.config.epilogue_fusion = False torch._inductor.config.triton.cudagraphs = True torch._dynamo.config.cache_size_limit = 100000 # support running without installing as a package wd = Path(__file__).parent.parent.resolve() sys.path.append(str(wd)) # hacky path setup for lm-evaluation-harness lm_evaluation_harness_path = '/'.join( os.getcwd().split('/')[:-1] + ['lm-evaluation-harness']) sys.path.insert(0, lm_evaluation_harness_path) def setup_cache_padded_seq_input_pos_max_seq_length_for_prefill( model: LLaMA, prompt: torch.Tensor, max_new_tokens: int, max_seq_length: Optional[int] = None, ): """ Sets up model cache and does some bookkeeping calculations for prompt, input_pos and max_seq_length that are needed for prefill or model_forward Args: model (LLaMA): The model whose cache gets set up prompt (torch.Tensor): Tensor of shape (T) with indices of the prompt sequence. max_new_tokens (int): The desired maximum number of new tokens that can be generated. max_seq_length (Optional[int], optional): The maximum sequence length allowed. Returns: seq (torch.Tensor): prompt but padded with zeros to size max_seq_length input_pos (torch.Tensor): tensor of integers in increasing order max_seq_length (int): The maximum sequence length allowed, updated based on other numbers """ T = prompt.size(0) T_new = T + max_new_tokens if max_seq_length is None: max_seq_length = min(T_new, model.config.block_size) device, dtype = prompt.device, prompt.dtype # create an empty tensor of the expected final shape and fill in the current tokens empty = torch.empty(T_new, dtype=dtype, device=device) empty[:T] = prompt seq = empty input_pos = torch.arange(0, T, device=device) with torch.device(device): model.setup_caches(max_batch_size=1, max_seq_length=max_seq_length) return seq, input_pos, max_seq_length class SimpleGPTEvalWrapper(lm_eval.base.BaseLM): """ A wrapper class for SimpleGPT, providing integration with the lm-evaluation-harness library. """ def __init__( self, model: LLaMA, tokenizer, max_seq_length: Optional[int]=None, ): super().__init__() self._model = model self._tokenizer = tokenizer self._device = torch.device('cuda') self._max_seq_length = 2048 if max_seq_length is None else max_seq_length @property def eot_token_id(self): return self._tokenizer.eos_id() @property def max_length(self): return self._max_seq_length @property def max_gen_toks(self): return 50 @property def batch_size(self): return 1 @property def device(self): return self._device def tok_encode(self, string: str): encoded = encode_tokens(self._tokenizer, string, bos=True, eos=False, device=self._device) # encoded is a pytorch tensor, but some internal logic in the # eval harness expects it to be a list instead # TODO: verify this for multi-batch as well encoded = encoded.tolist() return encoded def tok_decode(self, tokens): decoded = self._tokenizer.decode(tokens) return decoded def _model_call(self, inps): # TODO: make batches work inps = inps.squeeze(0) max_new_tokens = 1 seq, input_pos, max_seq_length = \ setup_cache_padded_seq_input_pos_max_seq_length_for_prefill( self._model, inps, max_new_tokens, self.max_length, ) x = seq.index_select(0, input_pos).view(1, -1)
logits = model_forward(self._model, x, input_pos)
3
2023-12-05 14:07:19+00:00
4k
Yanyutin753/CowAndPandoraNext
bot/linkai/link_ai_bot.py
[ { "identifier": "Bot", "path": "bot/bot.py", "snippet": "class Bot(object):\n def reply(self, query, context: Context = None) -> Reply:\n \"\"\"\n bot auto-reply content\n :param req: received message\n :return: reply content\n \"\"\"\n raise NotImplementedError" }, { "identifier": "ChatGPTSession", "path": "bot/chatgpt/chat_gpt_session.py", "snippet": "class ChatGPTSession(Session):\n def __init__(self, session_id, system_prompt=None, model=\"gpt-3.5-turbo\"):\n super().__init__(session_id, system_prompt)\n self.model = model\n self.reset()\n\n def discard_exceeding(self, max_tokens, cur_tokens=None):\n precise = True\n try:\n cur_tokens = self.calc_tokens()\n except Exception as e:\n precise = False\n if cur_tokens is None:\n raise e\n logger.debug(\"Exception when counting tokens precisely for query: {}\".format(e))\n while cur_tokens > max_tokens:\n if len(self.messages) > 2:\n self.messages.pop(1)\n elif len(self.messages) == 2 and self.messages[1][\"role\"] == \"assistant\":\n self.messages.pop(1)\n if precise:\n cur_tokens = self.calc_tokens()\n else:\n cur_tokens = cur_tokens - max_tokens\n break\n elif len(self.messages) == 2 and self.messages[1][\"role\"] == \"user\":\n logger.warn(\"user message exceed max_tokens. total_tokens={}\".format(cur_tokens))\n break\n else:\n logger.debug(\"max_tokens={}, total_tokens={}, len(messages)={}\".format(max_tokens, cur_tokens, len(self.messages)))\n break\n if precise:\n cur_tokens = self.calc_tokens()\n else:\n cur_tokens = cur_tokens - max_tokens\n return cur_tokens\n\n def calc_tokens(self):\n return num_tokens_from_messages(self.messages, self.model)" }, { "identifier": "OpenAIImage", "path": "bot/openai/open_ai_image.py", "snippet": "class OpenAIImage(object):\n def __init__(self):\n openai.api_key = conf().get(\"open_ai_api_key\")\n if conf().get(\"rate_limit_dalle\"):\n self.tb4dalle = TokenBucket(conf().get(\"rate_limit_dalle\", 50))\n\n def create_img(self, query, retry_count=0, api_key=None):\n try:\n if conf().get(\"rate_limit_dalle\") and not self.tb4dalle.get_token():\n return False, \"请求太快了,请休息一下再问我吧\"\n logger.info(\"[OPEN_AI] image_query={}\".format(query))\n response = openai.Image.create(\n api_key=api_key,\n prompt=query, # 图片描述\n n=1, # 每次生成图片的数量\n size=conf().get(\"image_create_size\", \"256x256\"), # 图片大小,可选有 256x256, 512x512, 1024x1024\n )\n image_url = response[\"data\"][0][\"url\"]\n logger.info(\"[OPEN_AI] image_url={}\".format(image_url))\n return True, image_url\n except openai.error.RateLimitError as e:\n logger.warn(e)\n if retry_count < 1:\n time.sleep(5)\n logger.warn(\"[OPEN_AI] ImgCreate RateLimit exceed, 第{}次重试\".format(retry_count + 1))\n return self.create_img(query, retry_count + 1)\n else:\n return False, \"提问太快啦,请休息一下再问我吧\"\n except Exception as e:\n logger.exception(e)\n return False, str(e)" }, { "identifier": "SessionManager", "path": "bot/session_manager.py", "snippet": "class SessionManager(object):\n def __init__(self, sessioncls, **session_args):\n if conf().get(\"expires_in_seconds\"):\n sessions = ExpiredDict(conf().get(\"expires_in_seconds\"))\n else:\n sessions = dict()\n self.sessions = sessions\n self.sessioncls = sessioncls\n self.session_args = session_args\n\n def build_session(self, session_id, system_prompt=None):\n \"\"\"\n 如果session_id不在sessions中,创建一个新的session并添加到sessions中\n 如果system_prompt不会空,会更新session的system_prompt并重置session\n \"\"\"\n if session_id is None:\n return self.sessioncls(session_id, system_prompt, **self.session_args)\n\n if session_id not in self.sessions:\n 
self.sessions[session_id] = self.sessioncls(session_id, system_prompt, **self.session_args)\n elif system_prompt is not None: # 如果有新的system_prompt,更新并重置session\n self.sessions[session_id].set_system_prompt(system_prompt)\n session = self.sessions[session_id]\n return session\n\n def session_query(self, query, session_id):\n session = self.build_session(session_id)\n session.add_query(query)\n try:\n max_tokens = conf().get(\"conversation_max_tokens\", 1000)\n total_tokens = session.discard_exceeding(max_tokens, None)\n logger.debug(\"prompt tokens used={}\".format(total_tokens))\n except Exception as e:\n logger.debug(\"Exception when counting tokens precisely for prompt: {}\".format(str(e)))\n return session\n\n def session_reply(self, reply, session_id, total_tokens=None):\n session = self.build_session(session_id)\n session.add_reply(reply)\n try:\n max_tokens = conf().get(\"conversation_max_tokens\", 1000)\n tokens_cnt = session.discard_exceeding(max_tokens, total_tokens)\n logger.debug(\"raw total_tokens={}, savesession tokens={}\".format(total_tokens, tokens_cnt))\n except Exception as e:\n logger.debug(\"Exception when counting tokens precisely for session: {}\".format(str(e)))\n return session\n\n def clear_session(self, session_id):\n if session_id in self.sessions:\n del self.sessions[session_id]\n\n def clear_all_session(self):\n self.sessions.clear()" }, { "identifier": "Context", "path": "bridge/context.py", "snippet": "class Context:\n def __init__(self, type: ContextType = None, content=None, kwargs=dict()):\n self.type = type\n self.content = content\n self.kwargs = kwargs\n\n def __contains__(self, key):\n if key == \"type\":\n return self.type is not None\n elif key == \"content\":\n return self.content is not None\n else:\n return key in self.kwargs\n\n def __getitem__(self, key):\n if key == \"type\":\n return self.type\n elif key == \"content\":\n return self.content\n else:\n return self.kwargs[key]\n\n def get(self, key, default=None):\n try:\n return self[key]\n except KeyError:\n return default\n\n def __setitem__(self, key, value):\n if key == \"type\":\n self.type = value\n elif key == \"content\":\n self.content = value\n else:\n self.kwargs[key] = value\n\n def __delitem__(self, key):\n if key == \"type\":\n self.type = None\n elif key == \"content\":\n self.content = None\n else:\n del self.kwargs[key]\n\n def __str__(self):\n return \"Context(type={}, content={}, kwargs={})\".format(self.type, self.content, self.kwargs)" }, { "identifier": "ContextType", "path": "bridge/context.py", "snippet": "class ContextType(Enum):\n TEXT = 1 # 文本消息\n VOICE = 2 # 音频消息\n IMAGE = 3 # 图片消息\n IMAGE_CREATE = 10 # 创建图片命令\n JOIN_GROUP = 20 # 加入群聊\n PATPAT = 21 # 拍了拍\n\n def __str__(self):\n return self.name" }, { "identifier": "Reply", "path": "bridge/reply.py", "snippet": "class Reply:\n def __init__(self, type: ReplyType = None, content=None):\n self.type = type\n self.content = content\n\n def __str__(self):\n return \"Reply(type={}, content={})\".format(self.type, self.content)" }, { "identifier": "ReplyType", "path": "bridge/reply.py", "snippet": "class ReplyType(Enum):\n TEXT = 1 # 文本\n VOICE = 2 # 音频文件\n IMAGE = 3 # 图片文件\n IMAGE_URL = 4 # 图片URL\n\n INFO = 9\n ERROR = 10\n\n def __str__(self):\n return self.name" }, { "identifier": "logger", "path": "common/log.py", "snippet": "def _reset_logger(log):\ndef _get_logger():" }, { "identifier": "conf", "path": "config.py", "snippet": "def conf():\n return config" } ]
import time
import requests
from bot.bot import Bot
from bot.chatgpt.chat_gpt_session import ChatGPTSession
from bot.openai.open_ai_image import OpenAIImage
from bot.session_manager import SessionManager
from bridge.context import Context, ContextType
from bridge.reply import Reply, ReplyType
from common.log import logger
from config import conf
2,223
# access LinkAI knowledge base platform
# docs: https://link-ai.tech/platform/link-app/wechat
class LinkAIBot(Bot, OpenAIImage):
    # authentication failed
    AUTH_FAILED_CODE = 401
    NO_QUOTA_CODE = 406

    def __init__(self):
        super().__init__()
# access LinkAI knowledge base platform
# docs: https://link-ai.tech/platform/link-app/wechat
class LinkAIBot(Bot, OpenAIImage):
    # authentication failed
    AUTH_FAILED_CODE = 401
    NO_QUOTA_CODE = 406

    def __init__(self):
        super().__init__()
self.sessions = SessionManager(ChatGPTSession, model=conf().get("model") or "gpt-3.5-turbo")
9
2023-12-14 15:21:17+00:00
4k
nerdslab/bams
fruit_flies.py
[ { "identifier": "KeypointsDataset", "path": "bams/data/dataset.py", "snippet": "class KeypointsDataset(Dataset):\n r\"\"\"Simplified dataset for cases where the data is keypoints only (usually obtained\n using vision-based pose estimation and tracking methods). \n \n The state is defined by the keypoints while the action will be computed as the \n difference between consecutive keypoints.\n\n Caching is possible if you need to avoid processing the data every time you run\n the script. The cache file will be saved in `cache_path` and will be loaded if\n `cache` is set to True. Be careful when using the cache, as it will not be updated\n if the data changes. Only use once the data processing pipeline is finalized. \n Deleteing the cache file will force the data to be processed again.\n\n\n Args:\n keypoints (np.ndarray): Array of shape (num_sequences, sequence_len,\n num_feats). Use np.nan for missing values or padding frames.\n hoa_bins (int): Number of bins for the histograms of actions.\n hoa_window (int): Window size for the histograms of actions.\n cache_path (str): Path to the cache file.\n cache (bool): Whether to use the cache file.\n \"\"\"\n\n def __init__(\n self,\n keypoints,\n **kwargs,\n ):\n self.keypoints = keypoints\n input_feats, target_feats, ignore_frames = self.keypoints_to_feats(keypoints)\n\n super().__init__(input_feats, target_feats, ignore_frames, **kwargs)\n\n def keypoints_to_feats(self, keypoints):\n # sometimes there are missing frames\n # find frames where any features might be missing\n ignore_frames = np.any(np.isnan(self.keypoints), axis=-1)\n # replace nan values with zeros\n keypoints = np.nan_to_num(self.keypoints)\n\n # define states and derive actions\n # action[t] = state[t] - state[t-1]\n states = keypoints\n actions = diff(states, axis=1, h=1, padding=\"edge\")\n\n input_feats = np.concatenate([states, actions], axis=-1)\n target_feats = actions\n return input_feats, target_feats, ignore_frames" }, { "identifier": "BAMS", "path": "bams/models/bams.py", "snippet": "class BAMS(nn.Module):\n r\"\"\"BAMS model.\n\n Args:\n input_size (int): Number of input features.\n predictor (dict): Parameters for the predictor MLP.\n encoders (dict[dict]): A dictionnary of encoders, where each key is the name of\n the encoder, and each value is a dictionnary of parameters for the encoder.\n Each encoder is a TemporalConvNet.\n \"\"\"\n\n def __init__(\n self,\n input_size,\n *,\n predictor=None,\n **encoder_kwargs,\n ):\n super().__init__()\n\n self.input_size = input_size\n self.representation_size = 0\n\n encoders = dict()\n for name, tcn_kwargs in encoder_kwargs.items():\n assert \"num_inputs\" not in tcn_kwargs\n encoders[name] = TemporalConvNet(num_inputs=input_size, **tcn_kwargs)\n self.representation_size += tcn_kwargs[\"num_channels\"][-1]\n\n self.encoders = torch.nn.ModuleDict(encoders)\n\n # hoa predictor (first layer is a lazy linear layer)\n self.predictor = MLP(**predictor)\n\n # byol predictors\n byol_predictors = dict()\n for name, tcn_kwargs in encoder_kwargs.items():\n emb_dim = tcn_kwargs[\"num_channels\"][-1]\n byol_predictors[name] = nn.Sequential(\n nn.Linear(emb_dim, emb_dim * 4, bias=False),\n nn.BatchNorm1d(emb_dim * 4, eps=1e-5, momentum=0.1),\n nn.ReLU(inplace=True),\n nn.Linear(emb_dim * 4, emb_dim, bias=True),\n )\n self.byol_predictors = torch.nn.ModuleDict(byol_predictors)\n\n def forward(self, x):\n # input shape: (B: batch_size, L:sequence_length, N: num_feats)\n # forward through TCNs\n embs = OrderedDict()\n byol_preds = 
OrderedDict()\n for name, encoder in self.encoders.items():\n embs[name] = encoder(x) # (B, L, N)\n flattened_emb = embs[name].flatten(0, 1) # (B*L, N)\n pred_emb = self.byol_predictors[name](flattened_emb)\n byol_preds[name] = pred_emb.reshape(embs[name].shape)\n\n # concatenate embeddings\n h = torch.cat(list(embs.values()), dim=2) # (B, L, N)\n\n # concatenate input and embeddings\n hx = torch.cat([h, x], dim=2)\n # prediction\n hoa_pred = self.predictor(hx)\n return embs, hoa_pred, byol_preds\n\n def __repr__(self) -> str:\n args = [\n f\" {name}: {encoder.__class__.__name__}\"\n f\" (receptive field: {encoder.receptive_field},\"\n f\" feature dim: {encoder.feat_dim})\"\n for name, encoder in self.encoders.items()\n ]\n args.append(\n f\" predictor: {self.predictor.__class__.__name__}\"\n f\" (input size: {self.input_size},\"\n f\" output size: {self.predictor.out_dim})\"\n )\n return \"{}([\\n{}\\n])\".format(self.__class__.__name__, \",\\n\".join(args))" }, { "identifier": "HoALoss", "path": "bams/hoa_loss.py", "snippet": "class HoALoss(nn.Module):\n def __init__(self, hoa_bins=32, skip_frames=60):\n super().__init__()\n\n self.hoa_bins = hoa_bins\n self.skip_frames = skip_frames\n\n def forward(self, target, pred, ignore_weights=None):\n r\"\"\"\n target: (B, L, N)\n pred: (B, L, N)\n ignore_weights: (B, L)\"\"\"\n n = target.size(2)\n\n # reshape\n target = target.reshape(-1, self.hoa_bins)\n pred = pred.reshape(-1, self.hoa_bins)\n \n # make each histogram sum to 1\n pred = torch.softmax(pred, dim=1)\n\n # compute EMD using Mallow's distance\n loss = earth_mover_distance(target, pred)\n\n # ignore first `self.skip_frames` frames\n ignore_weights[:, :self.skip_frames] = 1.0\n ignore_weights = ignore_weights.unsqueeze(2).repeat((1, 1, n, 1))\n weights = 1 - ignore_weights.view(-1)\n loss = torch.sum(loss * weights) / torch.sum(weights)\n return loss" } ]
import os
import numpy as np
import argparse
import torch
import torch.nn.functional as F
from datetime import datetime
from torch import optim
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm

from bams.data import KeypointsDataset
from bams.models import BAMS
from bams import HoALoss
2,900
os.path.join(path, "fly_group_train.npy"), allow_pickle=True ).item() sequence_ids_train, sequence_data_train = zip(*data_train["sequences"].items()) keypoints_train = np.stack([data["keypoints"] for data in sequence_data_train]) # load submission data (no annoations) data_submission = np.load( os.path.join(path, "fly_group_test.npy"), allow_pickle=True ).item() sequence_ids_submission, sequence_data_submission = zip( *data_submission["sequences"].items() ) keypoints_submission = np.stack( [data["keypoints"] for data in sequence_data_submission] ) # concatenate train and submission data sequence_ids = np.concatenate([sequence_ids_train, sequence_ids_submission], axis=0) keypoints = np.concatenate([keypoints_train, keypoints_submission], axis=0) split_mask = np.ones(len(sequence_ids), dtype=bool) split_mask[-len(sequence_ids_submission) :] = False # treat each fly independently, keep track of which video each fly came from num_samples, sequence_length, num_flies, num_keypoints, _ = keypoints.shape keypoints = keypoints.transpose((0, 2, 1, 3, 4)) keypoints = keypoints.reshape((-1, sequence_length, num_keypoints * 2)) batch = np.repeat(np.arange(num_samples), num_flies) return keypoints, split_mask, batch def train(model, device, loader, optimizer, criterion, writer, step, log_every_step): model.train() for data in tqdm(loader, position=1, leave=False): # todo convert to float input = data["input"].float().to(device) # (B, N, L) target = data["target_hist"].float().to(device) ignore_weights = data["ignore_weights"].to(device) # forward pass optimizer.zero_grad() embs, hoa_pred, byol_preds = model(input) # prediction task hoa_loss = criterion(target, hoa_pred, ignore_weights) # contrastive loss: short term batch_size, sequence_length, emb_dim = embs["short_term"].size() skip_frames, delta = 60, 5 view_1_id = ( torch.randint(sequence_length - skip_frames - delta, (batch_size,)) + skip_frames ) view_2_id = torch.randint(delta + 1, (batch_size,)) + view_1_id view_2_id = torch.clip(view_2_id, 0, sequence_length) view_1 = byol_preds["short_term"][torch.arange(batch_size), view_1_id] view_2 = embs["short_term"][torch.arange(batch_size), view_2_id] byol_loss_short_term = ( 1 - F.cosine_similarity(view_1, view_2.clone().detach(), dim=-1).mean() ) # contrastive loss: long term batch_size, sequence_length, emb_dim = embs["long_term"].size() skip_frames = 100 view_1_id = ( torch.randint(sequence_length - skip_frames, (batch_size,)) + skip_frames ) view_2_id = ( torch.randint(sequence_length - skip_frames, (batch_size,)) + skip_frames ) view_1 = byol_preds["long_term"][torch.arange(batch_size), view_1_id] view_2 = embs["long_term"][torch.arange(batch_size), view_2_id] byol_loss_long_term = ( 1 - F.cosine_similarity(view_1, view_2.clone().detach(), dim=-1).mean() ) # backprop loss = 5e2 * hoa_loss + 0.5 * byol_loss_short_term + 0.5 * byol_loss_long_term loss.backward() optimizer.step() step += 1 if step % log_every_step == 0: writer.add_scalar("train/hoa_loss", hoa_loss.item(), step) writer.add_scalar( "train/byol_loss_short_term", byol_loss_short_term.item(), step ) writer.add_scalar( "train/byol_loss_long_term", byol_loss_long_term.item(), step ) writer.add_scalar("train/total_loss", loss.item(), step) return step def main(): parser = argparse.ArgumentParser() parser.add_argument("--data_root", type=str, default="./data/mabe") parser.add_argument("--cache_path", type=str, default="./data/mabe/fruit_flies") parser.add_argument("--hoa_bins", type=int, default=32) parser.add_argument("--batch_size", 
type=int, default=32) parser.add_argument("--num_workers", type=int, default=16) parser.add_argument("--epochs", type=int, default=500) parser.add_argument("--lr", type=float, default=1e-3) parser.add_argument("--weight_decay", type=float, default=4e-5) parser.add_argument("--log_every_step", type=int, default=50) args = parser.parse_args() device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # dataset keypoints, split_mask, batch = load_fruit_flies(args.data_root)
def load_fruit_flies(path):
    # load raw train data (with annotations for 2 tasks)
    data_train = np.load(
        os.path.join(path, "fly_group_train.npy"), allow_pickle=True
    ).item()
    sequence_ids_train, sequence_data_train = zip(*data_train["sequences"].items())
    keypoints_train = np.stack([data["keypoints"] for data in sequence_data_train])

    # load submission data (no annoations)
    data_submission = np.load(
        os.path.join(path, "fly_group_test.npy"), allow_pickle=True
    ).item()
    sequence_ids_submission, sequence_data_submission = zip(
        *data_submission["sequences"].items()
    )
    keypoints_submission = np.stack(
        [data["keypoints"] for data in sequence_data_submission]
    )

    # concatenate train and submission data
    sequence_ids = np.concatenate([sequence_ids_train, sequence_ids_submission], axis=0)
    keypoints = np.concatenate([keypoints_train, keypoints_submission], axis=0)
    split_mask = np.ones(len(sequence_ids), dtype=bool)
    split_mask[-len(sequence_ids_submission) :] = False

    # treat each fly independently, keep track of which video each fly came from
    num_samples, sequence_length, num_flies, num_keypoints, _ = keypoints.shape
    keypoints = keypoints.transpose((0, 2, 1, 3, 4))
    keypoints = keypoints.reshape((-1, sequence_length, num_keypoints * 2))
    batch = np.repeat(np.arange(num_samples), num_flies)
    return keypoints, split_mask, batch


def train(model, device, loader, optimizer, criterion, writer, step, log_every_step):
    model.train()

    for data in tqdm(loader, position=1, leave=False):
        # todo convert to float
        input = data["input"].float().to(device)  # (B, N, L)
        target = data["target_hist"].float().to(device)
        ignore_weights = data["ignore_weights"].to(device)

        # forward pass
        optimizer.zero_grad()
        embs, hoa_pred, byol_preds = model(input)

        # prediction task
        hoa_loss = criterion(target, hoa_pred, ignore_weights)

        # contrastive loss: short term
        batch_size, sequence_length, emb_dim = embs["short_term"].size()
        skip_frames, delta = 60, 5
        view_1_id = (
            torch.randint(sequence_length - skip_frames - delta, (batch_size,))
            + skip_frames
        )
        view_2_id = torch.randint(delta + 1, (batch_size,)) + view_1_id
        view_2_id = torch.clip(view_2_id, 0, sequence_length)

        view_1 = byol_preds["short_term"][torch.arange(batch_size), view_1_id]
        view_2 = embs["short_term"][torch.arange(batch_size), view_2_id]

        byol_loss_short_term = (
            1 - F.cosine_similarity(view_1, view_2.clone().detach(), dim=-1).mean()
        )

        # contrastive loss: long term
        batch_size, sequence_length, emb_dim = embs["long_term"].size()
        skip_frames = 100
        view_1_id = (
            torch.randint(sequence_length - skip_frames, (batch_size,)) + skip_frames
        )
        view_2_id = (
            torch.randint(sequence_length - skip_frames, (batch_size,)) + skip_frames
        )

        view_1 = byol_preds["long_term"][torch.arange(batch_size), view_1_id]
        view_2 = embs["long_term"][torch.arange(batch_size), view_2_id]

        byol_loss_long_term = (
            1 - F.cosine_similarity(view_1, view_2.clone().detach(), dim=-1).mean()
        )

        # backprop
        loss = 5e2 * hoa_loss + 0.5 * byol_loss_short_term + 0.5 * byol_loss_long_term
        loss.backward()
        optimizer.step()

        step += 1
        if step % log_every_step == 0:
            writer.add_scalar("train/hoa_loss", hoa_loss.item(), step)
            writer.add_scalar(
                "train/byol_loss_short_term", byol_loss_short_term.item(), step
            )
            writer.add_scalar(
                "train/byol_loss_long_term", byol_loss_long_term.item(), step
            )
            writer.add_scalar("train/total_loss", loss.item(), step)

    return step


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--data_root", type=str, default="./data/mabe")
    parser.add_argument("--cache_path", type=str, default="./data/mabe/fruit_flies")
    parser.add_argument("--hoa_bins", type=int, default=32)
    parser.add_argument("--batch_size", type=int, default=32)
    parser.add_argument("--num_workers", type=int, default=16)
    parser.add_argument("--epochs", type=int, default=500)
    parser.add_argument("--lr", type=float, default=1e-3)
    parser.add_argument("--weight_decay", type=float, default=4e-5)
    parser.add_argument("--log_every_step", type=int, default=50)
    args = parser.parse_args()

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # dataset
    keypoints, split_mask, batch = load_fruit_flies(args.data_root)
dataset = KeypointsDataset(
0
2023-12-05 16:26:57+00:00
4k
FF14CN/Sarean-arsenal
Utility/sqMall/sqMallDoSign.py
[ { "identifier": "Daoyu", "path": "Utility/sdoLogin/Daoyu.py", "snippet": "def dykey_encrypt(self):\ndef config_handler():\ndef initialize():\ndef get_guid(device_id, manuid):\ndef get_flowid(manuid, deviceid, sessionid, show_username):\ndef get_account_id_list(flowid, deviceid, manuid, sessionid, show_username):\ndef make_confirm(account_id, flowid, deviceid, manuid, sessionid, show_username):\ndef get_sub_account_key(flowid, manuid, deviceid, sessionid, show_username):\ndef get_temp_sessionid(main_key):\ndef get_sub_account_session(sub_account_key, temp_account_sessionid):" }, { "identifier": "daoyumall_sign", "path": "Utility/sqMall/daoyuBuildinMallSign.py", "snippet": "def daoyumall_sign(sub_session_id, account_id):\n \"\"\"\n 仅适用于叨鱼内的盛趣商城签到操作 PC端不适用\n :param sub_session_id: 子账号的Daoyukey值\n :param account_id: 子账号的AccountID\n :return: 0: 签到成功 1: 重复签到 2: 签到失败\n \"\"\"\n sign_url = 'https://sqmallservice.u.sdo.com/api/us/integration/checkIn'\n sign_data = {'merchantId': 1}\n sign_header = {\n 'authority': 'sqmallservice.u.sdo.com',\n 'method': 'PUT',\n 'scheme': 'https',\n 'qu-web-host': 'https://m.qu.sdo.com',\n 'qu-hardware-platform': '1',\n 'qu-software-platform': '2',\n 'user-agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 17_0 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 DaoYu/9.3.3',\n 'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',\n 'accept': 'application/json, text/javascript, */*; q=0.01',\n 'qu-deploy-platform': '4',\n 'qu-merchant-id': '1',\n 'origin': 'https://m.qu.sdo.com',\n 'x-requested-with': 'com.sdo.sdaccountkey',\n 'sec-fetch-site': 'same-site',\n 'sec-fetch-mode': 'cors',\n 'sec-fetch-dest': 'empty',\n 'referer': 'https://m.qu.sdo.com/',\n }\n sign_cookies = {\n 'sessionId': sub_session_id,\n 'direbmemllam': account_id,\n }\n sign_response = requests.put(sign_url, headers=sign_header, cookies=sign_cookies, data=sign_data, verify=False)\n sign_json = sign_response.json()\n if sign_json['resultMsg'] == 'SUCCESS':\n return 0\n elif sign_json['resultMsg'] == '今日已签到,请勿重复签到':\n return 1\n else:\n return 2" }, { "identifier": "daoyu_mall_balance", "path": "Utility/sqMall/daoyuBuildinMallBalance.py", "snippet": "def daoyu_mall_balance(session_id):\n \"\"\"\n 仅适用于叨鱼内部商城的查询签到积分 PC端不适用\n :param session_id: 子账号的Daoyukey值\n :return: 返回签到积分余额\n \"\"\"\n get_balance_url = 'https://sqmallservice.u.sdo.com/api/rs/member/integral/balance?merchantId=1'\n get_balance_header = {\n 'authority': 'sqmallservice.u.sdo.com',\n 'method': 'GET',\n 'scheme': 'https',\n 'pragma': 'no-cache',\n 'cache-control': 'no-cache',\n 'qu-deploy-platform': '4',\n 'accept': 'application/json, text/javascript, */*; q=0.01',\n 'qu-merchant-id': '1',\n 'qu-hardware-platform': '1',\n 'qu-software-platform': '2',\n 'user-agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 17_0 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 DaoYu/9.3.3',\n 'qu-web-host': 'https://m.qu.sdo.com',\n 'origin': 'https://m.qu.sdo.com',\n 'x-requested-with': 'com.sdo.sdaccountkey',\n 'sec-fetch-site': 'same-site',\n 'sec-fetch-mode': 'cors',\n 'sec-fetch-dest': 'empty',\n 'referer': 'https://m.qu.sdo.com/',\n 'accept-encoding': 'gzip, deflate',\n 'accept-language': 'zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7',\n }\n get_balance_cookies = {\n 'sessionId': session_id\n }\n get_balance_response = requests.get(get_balance_url, headers=get_balance_header, cookies=get_balance_cookies,\n verify=False)\n get_balance_json = get_balance_response.json()\n balance = 
get_balance_json['data']['balance']\n return balance" } ]
from Utility.sdoLogin import Daoyu
from Utility.sqMall.daoyuBuildinMallSign import daoyumall_sign
from Utility.sqMall.daoyuBuildinMallBalance import daoyu_mall_balance
import Utility.Notifications.push as pusher
1,786
""" Author: KuliPoi Contact: [email protected] Created: 2023-12-21 File: sqMailDoSign.py Version: 2.5.0 Description: Do SQMALL AUTO SIGN, FUCK SQ BY THE WAY """ def main(): if Daoyu.initialize(): device_id, manuid, main_key, show_username = Daoyu.config_handler() Daoyu.logger_logs.info(f'Get Config File Success,' f'show_username: {show_username}' f'daoyu_key: {Daoyu.dykey_encrypt(main_key)}' f'device_id: {device_id}, ' f'manuid: {manuid}') if main_key != '' and show_username != '': Daoyu.logger_stream.info('读取到了你手动设置的DaoyuKey和ShowUserName') elif main_key == '' or show_username == '': Daoyu.logger_stream.info('DaoyuKey 或者 showUsername 为空 看Github上的教程 求求你辣') exit() else: Daoyu.logger_stream.info('config.ini可能存在问题,发个issue看看,注意不要直接将你的Config文件直接发在issue里') exit() flowid = Daoyu.get_flowid(manuid, device_id, main_key, show_username) account_id_list = Daoyu.get_account_id_list(flowid, device_id, manuid, main_key, show_username) temp_account_sessionid = Daoyu.get_temp_sessionid(main_key) if account_id_list is not None: results = [] for index, account_id in enumerate(account_id_list): if Daoyu.make_confirm(account_id["accountId"], flowid, device_id, manuid, main_key, show_username): sub_account_key = Daoyu.get_sub_account_key(flowid, manuid, device_id, main_key, show_username) sub_account_session = Daoyu.get_sub_account_session(sub_account_key, temp_account_sessionid) sign_msg = daoyumall_sign(sub_account_session, account_id["accountId"]) if sign_msg == 0: Daoyu.logger_stream.info(
""" Author: KuliPoi Contact: [email protected] Created: 2023-12-21 File: sqMailDoSign.py Version: 2.5.0 Description: Do SQMALL AUTO SIGN, FUCK SQ BY THE WAY """ def main(): if Daoyu.initialize(): device_id, manuid, main_key, show_username = Daoyu.config_handler() Daoyu.logger_logs.info(f'Get Config File Success,' f'show_username: {show_username}' f'daoyu_key: {Daoyu.dykey_encrypt(main_key)}' f'device_id: {device_id}, ' f'manuid: {manuid}') if main_key != '' and show_username != '': Daoyu.logger_stream.info('读取到了你手动设置的DaoyuKey和ShowUserName') elif main_key == '' or show_username == '': Daoyu.logger_stream.info('DaoyuKey 或者 showUsername 为空 看Github上的教程 求求你辣') exit() else: Daoyu.logger_stream.info('config.ini可能存在问题,发个issue看看,注意不要直接将你的Config文件直接发在issue里') exit() flowid = Daoyu.get_flowid(manuid, device_id, main_key, show_username) account_id_list = Daoyu.get_account_id_list(flowid, device_id, manuid, main_key, show_username) temp_account_sessionid = Daoyu.get_temp_sessionid(main_key) if account_id_list is not None: results = [] for index, account_id in enumerate(account_id_list): if Daoyu.make_confirm(account_id["accountId"], flowid, device_id, manuid, main_key, show_username): sub_account_key = Daoyu.get_sub_account_key(flowid, manuid, device_id, main_key, show_username) sub_account_session = Daoyu.get_sub_account_session(sub_account_key, temp_account_sessionid) sign_msg = daoyumall_sign(sub_account_session, account_id["accountId"]) if sign_msg == 0: Daoyu.logger_stream.info(
f'账号{account_id["displayName"]}签到成功,当前积分余额{daoyu_mall_balance(sub_account_session)}')
2
2023-12-06 08:48:02+00:00
4k
janmartchouk/vidgen
src/audio_generator.py
[ { "identifier": "tts", "path": "utils/tiktok_tts.py", "snippet": "def tts(text: str, voice: str = \"none\", filename: str = \"output.mp3\", play_sound: bool = False) -> None:\n # checking if the website is available\n global current_endpoint\n\n if get_api_response().status_code == 200:\n #print(\"Service available!\")\n pass\n else:\n current_endpoint = (current_endpoint + 1) % 2\n if get_api_response().status_code == 200:\n # print(\"Service available!\")\n pass\n else:\n #print(f\"Service not available and probably temporarily rate limited, try again later...\")\n return\n \n # checking if arguments are valid\n if voice == \"none\":\n #print(\"No voice has been selected\")\n pass\n return\n \n if not voice in VOICES:\n #print(\"Voice does not exist\")\n pass\n return\n\n if len(text) == 0:\n #print(\"Insert a valid text\")\n pass\n return\n\n # creating the audio file\n try:\n if len(text) < TEXT_BYTE_LIMIT:\n audio = generate_audio((text), voice)\n if current_endpoint == 0:\n audio_base64_data = str(audio).split('\"')[5]\n else:\n audio_base64_data = str(audio).split('\"')[3].split(\",\")[1]\n \n if audio_base64_data == \"error\":\n #print(\"This voice is unavailable right now\")\n pass\n return\n \n else:\n # Split longer text into smaller parts\n text_parts = split_string(text, 299)\n audio_base64_data = [None] * len(text_parts)\n \n # Define a thread function to generate audio for each text part\n def generate_audio_thread(text_part, index):\n audio = generate_audio(text_part, voice)\n if current_endpoint == 0:\n base64_data = str(audio).split('\"')[5]\n else:\n base64_data = str(audio).split('\"')[3].split(\",\")[1]\n\n if audio_base64_data == \"error\":\n #print(\"This voice is unavailable right now\")\n pass\n return \"error\"\n \n audio_base64_data[index] = base64_data\n\n threads = []\n for index, text_part in enumerate(text_parts):\n # Create and start a new thread for each text part\n thread = threading.Thread(target=generate_audio_thread, args=(text_part, index))\n thread.start()\n threads.append(thread)\n\n # Wait for all threads to complete\n for thread in threads:\n thread.join()\n\n # Concatenate the base64 data in the correct order\n audio_base64_data = \"\".join(audio_base64_data)\n \n save_audio_file(audio_base64_data, filename)\n #print(f\"Audio file saved successfully as '{filename}'\")\n pass\n\n except Exception as e:\n #print(\"Error occurred while generating audio:\", str(e))\n pass" }, { "identifier": "setup_logger", "path": "utils/logger.py", "snippet": "def setup_logger(name, level=logging.INFO, emoji='⚙️'):\n \"\"\"To setup as many loggers as you want\"\"\"\n\n # Create handlers\n c_handler = logging.StreamHandler()\n c_handler.setLevel(level)\n\n # Create formatters and add it to handlers\n c_format = ColoredFormatter(emoji + ' | %(name)s | %(message)s')\n c_handler.setFormatter(c_format)\n\n # Add handlers to the logger\n logger = logging.getLogger(name)\n logger.setLevel(level)\n logger.addHandler(c_handler)\n\n return logger" }, { "identifier": "Post", "path": "models/post.py", "snippet": "class Post:\n \"\"\"\n A class representing a Reddit post.\n \"\"\"\n def __init__(self, title, author, subreddit, content, crawl_date):\n \"\"\"\n Initialize a Post object.\n\n :param title: The title of the post.\n :type title: str\n :param author: The author of the post.\n :type author: str\n :param subreddit: The subreddit of the post.\n :type subreddit: str\n :param content: The content of the post.\n :type content: str\n :param crawl_date: The date the post was 
crawled.\n :type crawl_date: datetime.datetime\n \n \"\"\"\n # Simple data stores\n self.author = author\n self.subreddit = subreddit\n self.crawl_date = crawl_date\n\n # Replace Reddit slang in title and content\n self.title = replace_words(title, REDDIT_SLANG)\n self.content = replace_words(content, REDDIT_SLANG)\n\n # Remove Age/Gender Reddit-typical tuples\n self.title = re.sub(r\"\\(?\\d{1,3}[mfMF]\\)?\", '', self.title).strip()\n self.content = re.sub(r\"\\(?\\d{1,3}[mfMF]\\)?\", '', self.content).strip()\n\n # Clean up potentially spammy fields\n self.author = self.author.replace('\\n', ' ').replace('\\t', ' ')\n self.author = re.sub(' +', ' ', self.author).strip()\n self.title = self.title.replace('\\n', ' ').replace('\\t', ' ')\n self.title = re.sub(' +', ' ', self.title).strip()\n self.content = self.content.replace('\\n', ' ').replace('\\t', ' ')\n self.content = re.sub(' +', ' ', self.content).strip()\n\n # Calculate hash from title + author + post\n self.hash = hashlib.sha256(\n str.encode(self.title) + str.encode(self.author) +\n str.encode(self.subreddit)\n ).hexdigest()\n\n # Shorten title and hash\n self.short_title = shorten_string(self.title)\n self.short_hash = shorten_hash(self.hash)\n\n # By default, we don't have a generated audio, subtitles or video yet\n self.audio = False\n self.subtitles = False\n self.video = False\n self.uploaded_youtube = False\n\n # Used for storing which platforms the post has been uploaded to\n self.posted_to = []\n\n def __str__(self, short=True) -> str:\n return f\"\"\"{self.hash}\n├── title: {self.title},\n├── author: {self.author},\n├── subreddit: {self.subreddit},\n├── content: {shorten_string(self.content, max_length=50) if short else self.content},\n└── crawl_date: {self.crawl_date})\"\"\"" }, { "identifier": "AUDIO_DIR", "path": "config/structure.py", "snippet": "AUDIO_DIR = 'data/audio'" }, { "identifier": "TIKTOK_VOICES", "path": "config/dicts.py", "snippet": "TIKTOK_VOICES = [ \n 'en_uk_001', # English UK - Male 1\n 'en_uk_003', # English UK - Male 2\n 'en_us_001', # English US - Female (Int. 1)\n 'en_us_002', # English US - Female (Int. 2)\n 'en_us_006', # English US - Male 1\n 'en_us_007', # English US - Male 2\n 'en_us_009', # English US - Male 3\n 'en_us_010', # English US - Male 4\n ]" }, { "identifier": "split_text_into_chunks", "path": "utils/text.py", "snippet": "def split_text_into_chunks(text, max_chunk_length=300):\n \"\"\"\n Split a given text into chunks of a maximum character count.\n\n Args:\n text (str): The text to be split into chunks.\n max_chunk_length (int, optional): The maximum character count for each chunk. Defaults to 300.\n\n Returns:\n list: A list of chunks, where each chunk is a string.\n\n Example:\n >>> text = \"This is a sentence. This is another sentence. This is a third sentence.\"\n >>> split_text_into_chunks(text, max_chunk_length=20)\n ['This is a sentence.', ' This is another', ' sentence. This is a', ' third sentence.']\n \"\"\"\n \n # Use regex to find sentence boundaries\n sentence_pattern = re.compile(r\"(?<!\\w\\.\\w.)(?<![A-Z][a-z]\\.)(?<=\\.|\\?)\\s\")\n\n # Find sentence boundaries in the text\n sentence_boundaries = sentence_pattern.split(text)\n\n # Initialize chunks and current_chunk\n chunks = []\n current_chunk = \"\"\n\n for sentence in sentence_boundaries:\n sentence = sentence.replace('.', '. 
') # make sure there are no weird.dots inside the sentences\n # Check if adding the current sentence to the current_chunk exceeds the max_chunk_length\n if len(current_chunk) + len(sentence) <= max_chunk_length:\n current_chunk += ' ' + sentence # same as the replace above\n else:\n # If it exceeds, start a new chunk with the current sentence\n chunks.append(current_chunk)\n current_chunk = sentence\n\n # Add the last chunk\n if current_chunk:\n chunks.append(current_chunk)\n\n return chunks" }, { "identifier": "shorten_hash", "path": "utils/text.py", "snippet": "def shorten_hash(sha_string, prefix_length=6, suffix_length=6):\n \"\"\"\n Shortens a SHA string by truncating it and adding ellipsis in the middle.\n \n Args:\n sha_string (str): The SHA string to be shortened.\n prefix_length (int): The length of the prefix to keep. Default is 6.\n suffix_length (int): The length of the suffix to keep. Default is 6.\n \n Returns:\n str: The shortened SHA string.\n\n Example:\n >>> shorten_hash(\"1234567890abcdef\", prefix_length=4, suffix_length=4)\n '1234...cdef'\n \"\"\"\n if len(sha_string) <= prefix_length + suffix_length:\n return sha_string\n else:\n return f\"{sha_string[:prefix_length]}...{sha_string[-suffix_length:]}\"" }, { "identifier": "shorten_string", "path": "utils/text.py", "snippet": "def shorten_string(input_string, max_length=20):\n \"\"\"\n Shortens a given input string to a maximum length, appending '...' if necessary.\n\n Args:\n input_string (str): The input string to be shortened.\n max_length (int, optional): The maximum length of the shortened string. Defaults to 20.\n\n Returns:\n str: The shortened string.\n\n Example:\n >>> shorten_string(\"This is a sentence.\", max_length=10)\n 'This is a...'\n \"\"\"\n if len(input_string) <= max_length:\n return input_string\n else:\n return input_string[:max_length-3] + '...'" } ]
import random
import os
import sys
import logging
import tempfile
from pydub import AudioSegment
from tqdm import tqdm
from tqdm.contrib.logging import logging_redirect_tqdm
from utils.tiktok_tts import tts as tiktok_tts
from utils.logger import setup_logger
from models.post import Post
from config.structure import AUDIO_DIR
from config.dicts import TIKTOK_VOICES
from utils.text import split_text_into_chunks, shorten_hash, shorten_string
2,727
class AudioGenerator:
    def __init__(self, loglevel = logging.INFO):
        self.logger = setup_logger(__name__, loglevel, emoji='🎵')
        self.output_dir = AUDIO_DIR

    def from_post(self, post):
        """
        Generate audio from a post.

        Args:
            post (Post): The post content to generate audio from.

        Returns:
            bool: True if audio generation is successful, False otherwise.
        """
class AudioGenerator:
    def __init__(self, loglevel = logging.INFO):
        self.logger = setup_logger(__name__, loglevel, emoji='🎵')
        self.output_dir = AUDIO_DIR

    def from_post(self, post):
        """
        Generate audio from a post.

        Args:
            post (Post): The post content to generate audio from.

        Returns:
            bool: True if audio generation is successful, False otherwise.
        """
voice = random.choice(TIKTOK_VOICES)
4
2023-12-14 13:00:22+00:00
4k
asdfghjil/XMUCourseCheckin
app.py
[ { "identifier": "courseCheckin", "path": "checkin.py", "snippet": "def courseCheckin(session, http_header, userInfo):\n lesson = printCheckinList(session, http_header, userInfo, today=True)\n checkin(session, http_header, userInfo, lesson)" }, { "identifier": "autoCheckin", "path": "checkin.py", "snippet": "def autoCheckin(session, http_header, userInfo):\n print('自动签到检测已启动!')\n while True:\n try:\n print(time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time())))\n print('正在检测签到列表...')\n lessons = getCheckinList(session, http_header, userInfo, today=False)\n flag_fail = False\n flag_success = False\n for lesson in lessons:\n if (lesson['qdQk'] == '1' and lesson['xsQdQk'] == '0') or (lesson['qdQk'] != '0' and lesson['bqMode'] == '2' and lesson['qdNum'] == 1):\n print('正在自动签到:', lesson['kcMc'])\n if checkin(session, http_header, userInfo, lesson, tips=False):\n print('自动签到成功!')\n flag_success = True\n else:\n print('自动签到失败!')\n flag_fail = True\n # print('当前没有需要自动签到的课程,或者签到失败,10分钟后重新检测')\n if flag_fail:\n print('签到失败,1分钟后重新检测')\n time.sleep(60)\n elif not flag_success:\n print('当前没有需要自动签到的课程,10分钟后重新检测')\n time.sleep(600)\n except:\n print('自动签到检测出现异常,1分钟后重新检测')\n time.sleep(60)" }, { "identifier": "scanCheckin", "path": "checkinScanCode.py", "snippet": "def scanCheckin(session, http_header, userInfo):\n lesson = printCheckinList(session, http_header, userInfo, today=True)\n scanCodeCheckin(session, http_header, userInfo, lesson['qdId'])" }, { "identifier": "courseQuery", "path": "courseQuery.py", "snippet": "def courseQuery(session, http_header, userInfo):\n try:\n schools, weeks, levels = getCourseQueryDataInfo(session, http_header, userInfo)\n print('课程层次:')\n for id, level in enumerate(levels):\n print(id, level['dicName'])\n c_level = int(input('请输入课程层次:'))\n if c_level < 0 or c_level >= len(levels):\n raise Exception('Invalid level')\n print('学院:')\n for id, school in enumerate(schools):\n print(id, school['c1'], end='\\n' if id % 7 == 6 or id == len(schools) - 1 else '\\t')\n c_school = int(input('请输入学院:'))\n if c_school < 0 or c_school >= len(schools):\n raise Exception('Invalid school')\n print('周次:')\n for id, week in enumerate(weeks):\n print(id, week['c2'], end='\\n' if id % 5 == 4 or id == len(weeks) - 1 else '\\t')\n c_week = int(input('请输入周次:'))\n if c_week < 0 or c_week >= len(weeks):\n raise Exception('Invalid week')\n name = input('请输入教师姓名:')\n\n key = schools[c_school]['c1'] + \"|\" + weeks[c_week]['c1'] + \"|\" + name\n\n url = serverUrl + \"/getSearchKcKbList\"\n data = {\n 'sign': userInfo['sign'],\n 'userType': userInfo['userType'],\n 'userCode': userInfo['userCode'],\n 'unitCode': userInfo['unitCode'],\n 'inputValue': key,\n 'isToday': 0,\n 'type': 5,\n 'djj': '',\n 'skcd': '',\n 'xqj': '',\n 'curXq': '',\n 'curXqInt': '',\n 'kcCc': levels[c_level]['dicCode']\n }\n res = session.post(url, data=data, headers=http_header)\n if res.status_code != 200:\n print('course query failed')\n return\n res = json.loads(res.text)\n courses = res['Rows']\n print(\"查询结果:\")\n for id, course in enumerate(courses):\n print(id)\n displayCourse(course)\n \n except:\n print(json.dumps({\n \"status\": \"failed\",\n \"reason\": \"Invalid input\",\n }, indent=4))\n return" }, { "identifier": "attendanceQuery", "path": "attendanceQuery.py", "snippet": "def attendanceQuery(session, http_header, userInfo):\n try:\n name = input('请输入姓名或学号:')\n url = serverUrl + \"/searchXsCqList\"\n data = {\n 'sign': userInfo['sign'],\n 'userType': userInfo['userType'],\n 'userCode': 
userInfo['userCode'],\n 'unitCode': userInfo['unitCode'],\n 'inputValue': name,\n 'kcCc': '1'\n }\n res = session.post(url, data=data, headers=http_header)\n if res.status_code != 200:\n print('Get attendance query data info failed')\n return\n res = json.loads(res.text)\n if res['status'] != 1:\n print('Get attendance query data info failed')\n return\n info = res['Rows']\n print(name, '的出勤课程:')\n print('0 显示全部(刷屏警告)')\n for id, course in enumerate(info):\n print(id + 1, course['kcMc'], '\\t', course['jsXm'])\n c_course = int(input('请输入查询的课程序号:'))\n if c_course < 0 or c_course > len(info):\n raise Exception('Invalid course')\n if c_course == 0:\n for id, course in enumerate(info):\n display(course['kcMc'] + ' ' + course['jsXm'], course['qdLi'])\n else:\n display(info[c_course - 1]['kcMc'] + ' ' + info[c_course - 1]['jsXm'], info[c_course - 1]['qdLi'])\n except:\n print(json.dumps({\n \"status\": \"failed\",\n \"reason\": \"Get attendance query data info failed\"\n }, indent=4))\n return" }, { "identifier": "CourseReportQuery", "path": "courseReportQuery.py", "snippet": "def CourseReportQuery(session, http_header, userInfo):\n qdId = printCheckinList(session, http_header, userInfo, type=\"查询\")['qdId']\n data = getCourseReportData(session, http_header, userInfo, qdId)\n print('')\n if len(data) == 0:\n print('您的同学团结友爱,没有举报情况!')\n else:\n print('举报总数:', len(data), \":\")\n for id, stu in enumerate(data):\n print(str(id) + \".\", stu['xsXm'], stu['xsXh'], stu['xsQdSj'], stu['jbYyXm'])" } ]
import json
import requests
import sys
from checkin import courseCheckin, autoCheckin
from checkinScanCode import scanCheckin
from courseQuery import courseQuery
from attendanceQuery import attendanceQuery
from courseReportQuery import CourseReportQuery
2,210
serverUrl = "https://tingke.xmu.edu.cn/app" serverImg = "https://tingke.xmu.edu.cn/uploadFile" serverIcon = "https://tingke.xmu.edu.cn/images/icon" serverPhoto = "https://tingke.xmu.edu.cn/photo" serverPdf = "https://tingke.xmu.edu.cn/pdf/" userInfo = json.load(open("userInfo.json", "r", encoding="utf-8")) # print(userInfo) http_header = { "Host": "tingke.xmu.edu.cn", "Content-Type": "application/x-www-form-urlencoded", "Accept-Encoding": "gzip, deflate, br", "Connection": "keep-alive", "Accept": "*/*", "User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 11_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E217 MicroMessenger/6.8.0(0x16080000) NetType/WIFI Language/en Branch/Br_trunk MiniProgramEnv/Mac", "Content-Length": "126", "Accept-Language": "zh-CN,zh-Hans;q=0.9" } session = requests.Session() while True: print('') print('------------------ 小鸾的智慧教务 ------------------') print('1. 课程签到') print('2. 扫码签到') print('3. 课程自动签到') print('4. 课程查询') print('5. 学生出勤查询') print('6. 课程举报查询') print('0. 退出') try: choice = int(input('请选择:')) if choice < 0 or choice > 6: raise Exception except: print('输入错误,请重新输入') continue try: if choice == 0: break if choice == 1: courseCheckin(session, http_header, userInfo) elif choice == 2:
serverUrl = "https://tingke.xmu.edu.cn/app" serverImg = "https://tingke.xmu.edu.cn/uploadFile" serverIcon = "https://tingke.xmu.edu.cn/images/icon" serverPhoto = "https://tingke.xmu.edu.cn/photo" serverPdf = "https://tingke.xmu.edu.cn/pdf/" userInfo = json.load(open("userInfo.json", "r", encoding="utf-8")) # print(userInfo) http_header = { "Host": "tingke.xmu.edu.cn", "Content-Type": "application/x-www-form-urlencoded", "Accept-Encoding": "gzip, deflate, br", "Connection": "keep-alive", "Accept": "*/*", "User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 11_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E217 MicroMessenger/6.8.0(0x16080000) NetType/WIFI Language/en Branch/Br_trunk MiniProgramEnv/Mac", "Content-Length": "126", "Accept-Language": "zh-CN,zh-Hans;q=0.9" } session = requests.Session() while True: print('') print('------------------ 小鸾的智慧教务 ------------------') print('1. 课程签到') print('2. 扫码签到') print('3. 课程自动签到') print('4. 课程查询') print('5. 学生出勤查询') print('6. 课程举报查询') print('0. 退出') try: choice = int(input('请选择:')) if choice < 0 or choice > 6: raise Exception except: print('输入错误,请重新输入') continue try: if choice == 0: break if choice == 1: courseCheckin(session, http_header, userInfo) elif choice == 2:
scanCheckin(session, http_header, userInfo)
2
2023-12-13 10:42:20+00:00
4k
kurtnettle/wttrbarpy
wttrbarpy/__main__.py
[ { "identifier": "Config", "path": "wttrbarpy/config.py", "snippet": "class Config:\n data: dict\n unit: str\n ampm: bool\n main_indicator: str\n custom_indicator: str\n format_type: int\n hour_text_only: bool\n plain_text: bool\n hide_wind_details: bool\n hide_conditions: bool\n show_temp_unit: bool\n max_conditions: int\n vertical_view: bool\n date_format: str\n emoji: Emoji\n neutral_icon: bool" }, { "identifier": "build_config", "path": "wttrbarpy/config.py", "snippet": "def build_config(data: dict, args: Namespace) -> Config:\n return Config(\n data=data,\n unit=\"USCS\" if args.fahrenheit or (args.main_indicator == \"temp_F\") else \"SI\",\n ampm=args.ampm,\n main_indicator=args.main_indicator,\n custom_indicator=args.custom_indicator,\n format_type=args.format_type,\n hour_text_only=args.hour_text_only,\n plain_text=args.plain_text,\n hide_wind_details=args.hide_wind_details,\n hide_conditions=args.hide_conditions,\n show_temp_unit=args.show_temp_unit,\n max_conditions=args.max_conditions,\n vertical_view=args.vertical_view,\n date_format=args.date_format,\n emoji=Emoji(enabled=args.emoji),\n neutral_icon=args.neutral_icon,\n )" }, { "identifier": "format_text", "path": "wttrbarpy/formats.py", "snippet": "def format_text(config: Config):\n current_condition = config.data[\"current_condition\"][0]\n temp_keys = [\"FeelsLikeC\", \"FeelsLikeF\", \"temp_C\", \"temp_F\", \"tempF\", \"tempC\"]\n\n if config.neutral_icon:\n icon_type = \"neutral\"\n else:\n today_astronomy = config.data[\"weather\"][0][\"astronomy\"][0]\n if is_day(today_astronomy):\n icon_type = \"day\"\n else:\n icon_type = \"night\"\n\n weather_icon = get_weather_icon(\n code=current_condition[\"weatherCode\"],\n icon_type=icon_type,\n is_emoji=config.emoji.enabled,\n )\n\n text = \"N/A\"\n if config.custom_indicator:\n text = Template(config.custom_indicator)\n try:\n text = text.substitute(current_condition, icon=weather_icon)\n except Exception as e:\n raise KeyError(f\"Invalid placeholder: {e}\") from e\n\n else:\n if config.main_indicator in temp_keys:\n if config.main_indicator == \"temp_C\" and config.unit == \"USCS\":\n config.main_indicator = \"temp_F\"\n\n text = format_temp_txt(\n current_condition[config.main_indicator],\n unit=config.unit,\n show_temp_unit=config.show_temp_unit,\n )\n else:\n text = current_condition[config.main_indicator]\n\n if config.custom_indicator:\n return text\n\n if config.vertical_view:\n return f\"{weather_icon}\\n{text}\"\n else:\n return f\"{weather_icon} {text}\"" }, { "identifier": "format_tooltip", "path": "wttrbarpy/formats.py", "snippet": "def format_tooltip(config: Config) -> str:\n nearest_area = config.data[\"nearest_area\"][0]\n current_condition = config.data[\"current_condition\"][0]\n report = gen_brief_report(\n data=current_condition,\n config=config,\n hr_txt=config.data[\"weather\"][0][\"astronomy\"][0],\n )\n\n txt = \"\"\n if not config.plain_text:\n txt += f\"<b>{report['desc']}</b> \"\n else:\n txt += f\"{report['desc']}, \"\n\n txt += f\"{format_temp_txt(temp=report['temp'],unit=config.unit,show_temp_unit=config.show_temp_unit)}\\n\"\n txt += f\"Feels Like: {format_temp_txt(temp=report['feels_like'],unit=config.unit,show_temp_unit=config.show_temp_unit)}\\n\"\n txt += f\"Humidity: {current_condition['humidity']}%\\n\"\n txt += f\"Wind: {format_wind_txt(data=current_condition,config=config)}\\n\"\n \n today_astronomy = config.data[\"weather\"][0][\"astronomy\"][0]\n if not is_day(today_astronomy):\n 
moon_phase_icon=get_moon_phase_icon(phase=today_astronomy['moon_phase'],emoji=config.emoji.enabled)\n txt += f\"Moon Phase: {moon_phase_icon} ({today_astronomy['moon_phase']})\\n\"\n \n txt += f\"UV Index: {current_condition['uvIndex']} ({get_uv_index_lvl(current_condition['uvIndex'])}) \\n\"\n\n txt += format_location_txt(config)\n txt += \"\\n\\n\"\n txt += format_days_report(config)\n\n return txt.strip()" } ]
from argparse import ArgumentParser
from json import dumps, loads
from urllib.error import HTTPError
from urllib.parse import urlparse
from urllib.request import urlopen

from wttrbarpy.config import Config, build_config
from wttrbarpy.formats import format_text, format_tooltip
2,207
type=str, default="temp_C", help="decide which current_conditions key will be shown on waybar. defaults to temp_C", ) parser.add_argument( "--custom-indicator", dest="custom_indicator", type=str, default=None, help="customize the indicator. example: $temp_C", ) parser.add_argument( "--date-format", dest="date_format", type=str, default="%A %b %d", help="formats the date next to the days. defaults to %%A-%%b-%%d", ) parser.add_argument( "--hide-conditions", action="store_true", dest="hide_conditions", help='hide extra conditions next to each hour description. like "20° Cloudy" instead of "20° Cloudy, Overcast 81%%, Sunshine 13%%". defaults to False', ) parser.add_argument( "--hide-wind-details", action="store_true", dest="hide_wind_details", help="removes extra wind details (wind direction and degree). defaults to False", ) parser.add_argument( "--max-conditions", dest="max_conditions", type=int, default=0, help="limit the number of conditions to show next to each hour description. defaults to 0 (shows all available)", ) parser.add_argument( "--fahrenheit", "-f", action="store_true", dest="fahrenheit", help="use fahrenheit instead of celsius. defaults to False", ) parser.add_argument( "--vertical-view", action="store_true", dest="vertical_view", help="shows the icon on the first line and temperature in a new line (doesn't work for custom-indicator). defaults to False", ) parser.add_argument( "--format-type", dest="format_type", type=int, default=2, help="specify the global output format type (1 only text, 2 only icon/emoji, 3 text with icon/emoji). defaults to 2", ) parser.add_argument( "--hour-text-only", action="store_true", dest="hour_text_only", help="show hour as text only. defaults to False", ) parser.add_argument( "--emoji", action="store_true", dest="emoji", help="replace icons with emojis. defaults to False", ) parser.add_argument( "--neutral-icon", action="store_true", dest="neutral_icon", help="show neutral icon instead of daytime/nighttime icons. defaults to False", ) parser.add_argument( "--plain-text", action="store_true", dest="plain_text", help="shows the plain text removing all pango markup tags and json output. defaults to False", ) parser.add_argument( "--show-temp-unit", action="store_true", dest="show_temp_unit", help="show temperature value with unit like 20°C or 20°F. defaults to False", ) parser.add_argument( "--version", action="version", version="%(prog)s 1.0.0", help="show wttrbarpy version.", ) parser.add_argument( "--debug", action="store_true", dest="debug_mode", help="lets not spam wttr.in :)", ) args = parser.parse_args() api_url = "https://wttr.in/{}?format=j1".format(args.location) if args.debug_mode: api_url = "http://0.0.0.0:8000/{}.json?format=j1".format(args.location) try: with urlopen(api_url, timeout=60) as response: resp = response.read() data = loads(resp.decode()) except HTTPError as e: output = {"text": "⚠️", "tooltip": str(e)} print_json(output) return config = build_config(data, args) output = { "text": format_text(config=config),
def print_json(data: dict) -> None:
    print(dumps(data, ensure_ascii=False))


def main() -> None:
    parser = ArgumentParser(
        prog="wttrbarpy",
        description="a highly customizable weather module for Waybar",
        allow_abbrev=False,
    )
    parser.add_argument(
        "--ampm",
        action="store_true",
        dest="ampm",
        help="show time in AM/PM format. defaults to False",
    )
    parser.add_argument(
        "--location",
        "-l",
        dest="location",
        type=str,
        default="",
        help="specify a location. defaults to None (i.e your current location)",
    )
    parser.add_argument(
        "--main-indicator",
        dest="main_indicator",
        type=str,
        default="temp_C",
        help="decide which current_conditions key will be shown on waybar. defaults to temp_C",
    )
    parser.add_argument(
        "--custom-indicator",
        dest="custom_indicator",
        type=str,
        default=None,
        help="customize the indicator. example: $temp_C",
    )
    parser.add_argument(
        "--date-format",
        dest="date_format",
        type=str,
        default="%A %b %d",
        help="formats the date next to the days. defaults to %%A-%%b-%%d",
    )
    parser.add_argument(
        "--hide-conditions",
        action="store_true",
        dest="hide_conditions",
        help='hide extra conditions next to each hour description. like "20° Cloudy" instead of "20° Cloudy, Overcast 81%%, Sunshine 13%%". defaults to False',
    )
    parser.add_argument(
        "--hide-wind-details",
        action="store_true",
        dest="hide_wind_details",
        help="removes extra wind details (wind direction and degree). defaults to False",
    )
    parser.add_argument(
        "--max-conditions",
        dest="max_conditions",
        type=int,
        default=0,
        help="limit the number of conditions to show next to each hour description. defaults to 0 (shows all available)",
    )
    parser.add_argument(
        "--fahrenheit",
        "-f",
        action="store_true",
        dest="fahrenheit",
        help="use fahrenheit instead of celsius. defaults to False",
    )
    parser.add_argument(
        "--vertical-view",
        action="store_true",
        dest="vertical_view",
        help="shows the icon on the first line and temperature in a new line (doesn't work for custom-indicator). defaults to False",
    )
    parser.add_argument(
        "--format-type",
        dest="format_type",
        type=int,
        default=2,
        help="specify the global output format type (1 only text, 2 only icon/emoji, 3 text with icon/emoji). defaults to 2",
    )
    parser.add_argument(
        "--hour-text-only",
        action="store_true",
        dest="hour_text_only",
        help="show hour as text only. defaults to False",
    )
    parser.add_argument(
        "--emoji",
        action="store_true",
        dest="emoji",
        help="replace icons with emojis. defaults to False",
    )
    parser.add_argument(
        "--neutral-icon",
        action="store_true",
        dest="neutral_icon",
        help="show neutral icon instead of daytime/nighttime icons. defaults to False",
    )
    parser.add_argument(
        "--plain-text",
        action="store_true",
        dest="plain_text",
        help="shows the plain text removing all pango markup tags and json output. defaults to False",
    )
    parser.add_argument(
        "--show-temp-unit",
        action="store_true",
        dest="show_temp_unit",
        help="show temperature value with unit like 20°C or 20°F. defaults to False",
    )
    parser.add_argument(
        "--version",
        action="version",
        version="%(prog)s 1.0.0",
        help="show wttrbarpy version.",
    )
    parser.add_argument(
        "--debug",
        action="store_true",
        dest="debug_mode",
        help="lets not spam wttr.in :)",
    )
    args = parser.parse_args()

    api_url = "https://wttr.in/{}?format=j1".format(args.location)
    if args.debug_mode:
        api_url = "http://0.0.0.0:8000/{}.json?format=j1".format(args.location)

    try:
        with urlopen(api_url, timeout=60) as response:
            resp = response.read()
            data = loads(resp.decode())
    except HTTPError as e:
        output = {"text": "⚠️", "tooltip": str(e)}
        print_json(output)
        return

    config = build_config(data, args)

    output = {
        "text": format_text(config=config),
"tooltip": format_tooltip(config=config),
3
2023-12-08 19:59:06+00:00
4k
camenduru/MotionDirector-hf
MotionDirector_inference_batch.py
[ { "identifier": "export_to_video", "path": "MotionDirector_train.py", "snippet": "def export_to_video(video_frames, output_video_path, fps):\n video_writer = imageio.get_writer(output_video_path, fps=fps)\n for img in video_frames:\n video_writer.append_data(np.array(img))\n video_writer.close()" }, { "identifier": "handle_memory_attention", "path": "MotionDirector_train.py", "snippet": "def handle_memory_attention(enable_xformers_memory_efficient_attention, enable_torch_2_attn, unet):\n try:\n is_torch_2 = hasattr(F, 'scaled_dot_product_attention')\n enable_torch_2 = is_torch_2 and enable_torch_2_attn\n\n if enable_xformers_memory_efficient_attention and not enable_torch_2:\n if is_xformers_available():\n from xformers.ops import MemoryEfficientAttentionFlashAttentionOp\n unet.enable_xformers_memory_efficient_attention(attention_op=MemoryEfficientAttentionFlashAttentionOp)\n else:\n raise ValueError(\"xformers is not available. Make sure it is installed correctly\")\n\n if enable_torch_2:\n set_torch_2_attn(unet)\n\n except:\n print(\"Could not enable memory efficient attention for xformers or Torch 2.0.\")" }, { "identifier": "load_primary_models", "path": "MotionDirector_train.py", "snippet": "def load_primary_models(pretrained_model_path):\n noise_scheduler = DDIMScheduler.from_pretrained(pretrained_model_path, subfolder=\"scheduler\")\n tokenizer = CLIPTokenizer.from_pretrained(pretrained_model_path, subfolder=\"tokenizer\")\n text_encoder = CLIPTextModel.from_pretrained(pretrained_model_path, subfolder=\"text_encoder\")\n vae = AutoencoderKL.from_pretrained(pretrained_model_path, subfolder=\"vae\")\n unet = UNet3DConditionModel.from_pretrained(pretrained_model_path, subfolder=\"unet\")\n\n return noise_scheduler, tokenizer, text_encoder, vae, unet" }, { "identifier": "unet_and_text_g_c", "path": "MotionDirector_train.py", "snippet": "def unet_and_text_g_c(unet, text_encoder, unet_enable, text_enable):\n unet._set_gradient_checkpointing(value=unet_enable)\n text_encoder._set_gradient_checkpointing(CLIPEncoder, value=text_enable)" }, { "identifier": "freeze_models", "path": "MotionDirector_train.py", "snippet": "def freeze_models(models_to_freeze):\n for model in models_to_freeze:\n if model is not None: model.requires_grad_(False)" }, { "identifier": "LoraHandler", "path": "utils/lora_handler.py", "snippet": "class LoraHandler(object):\n def __init__(\n self, \n version: LORA_VERSIONS = LoraVersions.cloneofsimo, \n use_unet_lora: bool = False,\n use_text_lora: bool = False,\n save_for_webui: bool = False,\n only_for_webui: bool = False,\n lora_bias: str = 'none',\n unet_replace_modules: list = None,\n text_encoder_replace_modules: list = None\n ):\n self.version = version\n self.lora_loader = self.get_lora_func(func_type=LoraFuncTypes.loader)\n self.lora_injector = self.get_lora_func(func_type=LoraFuncTypes.injector)\n self.lora_bias = lora_bias\n self.use_unet_lora = use_unet_lora\n self.use_text_lora = use_text_lora\n self.save_for_webui = save_for_webui\n self.only_for_webui = only_for_webui\n self.unet_replace_modules = unet_replace_modules\n self.text_encoder_replace_modules = text_encoder_replace_modules\n self.use_lora = any([use_text_lora, use_unet_lora])\n\n def is_cloneofsimo_lora(self):\n return self.version == LoraVersions.cloneofsimo\n\n\n def get_lora_func(self, func_type: LORA_FUNC_TYPES = LoraFuncTypes.loader):\n\n if self.is_cloneofsimo_lora():\n\n if func_type == LoraFuncTypes.loader:\n return monkeypatch_or_replace_lora_extended\n\n if func_type == 
LoraFuncTypes.injector:\n return inject_trainable_lora_extended\n \n assert \"LoRA Version does not exist.\"\n\n def check_lora_ext(self, lora_file: str):\n return lora_file.endswith(tuple(LORA_FILE_TYPES))\n\n def get_lora_file_path(\n self, \n lora_path: str, \n model: Union[UNet3DConditionModel, CLIPTextModel]\n ):\n if os.path.exists(lora_path):\n lora_filenames = [fns for fns in os.listdir(lora_path)]\n is_lora = self.check_lora_ext(lora_path)\n\n is_unet = isinstance(model, UNet3DConditionModel)\n is_text = isinstance(model, CLIPTextModel)\n idx = 0 if is_unet else 1\n\n base_name = FILE_BASENAMES[idx]\n \n for lora_filename in lora_filenames:\n is_lora = self.check_lora_ext(lora_filename)\n if not is_lora:\n continue\n \n if base_name in lora_filename:\n return os.path.join(lora_path, lora_filename)\n\n return None\n\n def handle_lora_load(self, file_name:str, lora_loader_args: dict = None):\n self.lora_loader(**lora_loader_args)\n print(f\"Successfully loaded LoRA from: {file_name}\")\n \n def load_lora(self, model, lora_path: str = '', lora_loader_args: dict = None,):\n try:\n lora_file = self.get_lora_file_path(lora_path, model)\n\n if lora_file is not None:\n lora_loader_args.update({\"lora_path\": lora_file})\n self.handle_lora_load(lora_file, lora_loader_args)\n\n else:\n print(f\"Could not load LoRAs for {model.__class__.__name__}. Injecting new ones instead...\")\n\n except Exception as e:\n print(f\"An error occured while loading a LoRA file: {e}\")\n \n def get_lora_func_args(self, lora_path, use_lora, model, replace_modules, r, dropout, lora_bias, scale):\n return_dict = lora_args.copy()\n \n if self.is_cloneofsimo_lora():\n return_dict = filter_dict(return_dict, keys=CLONE_OF_SIMO_KEYS)\n return_dict.update({\n \"model\": model,\n \"loras\": self.get_lora_file_path(lora_path, model),\n \"target_replace_module\": replace_modules,\n \"r\": r,\n \"scale\": scale,\n \"dropout_p\": dropout,\n })\n\n return return_dict\n\n def do_lora_injection(\n self, \n model, \n replace_modules, \n bias='none',\n dropout=0,\n r=4,\n lora_loader_args=None,\n ): \n REPLACE_MODULES = replace_modules\n\n params = None\n negation = None\n is_injection_hybrid = False\n \n if self.is_cloneofsimo_lora():\n is_injection_hybrid = True\n injector_args = lora_loader_args\n\n params, negation = self.lora_injector(**injector_args) # inject_trainable_lora_extended\n for _up, _down in extract_lora_ups_down(\n model, \n target_replace_module=REPLACE_MODULES):\n\n if all(x is not None for x in [_up, _down]):\n print(f\"Lora successfully injected into {model.__class__.__name__}.\")\n\n break\n\n return params, negation, is_injection_hybrid\n\n return params, negation, is_injection_hybrid\n\n def add_lora_to_model(self, use_lora, model, replace_modules, dropout=0.0, lora_path='', r=16, scale=1.0):\n\n params = None\n negation = None\n\n lora_loader_args = self.get_lora_func_args(\n lora_path,\n use_lora,\n model,\n replace_modules,\n r,\n dropout,\n self.lora_bias,\n scale\n )\n\n if use_lora:\n params, negation, is_injection_hybrid = self.do_lora_injection(\n model, \n replace_modules, \n bias=self.lora_bias,\n lora_loader_args=lora_loader_args,\n dropout=dropout,\n r=r\n )\n\n if not is_injection_hybrid:\n self.load_lora(model, lora_path=lora_path, lora_loader_args=lora_loader_args)\n \n params = model if params is None else params\n return params, negation\n\n def save_cloneofsimo_lora(self, model, save_path, step, flag):\n \n def save_lora(model, name, condition, replace_modules, step, save_path, 
flag=None):\n if condition and replace_modules is not None:\n save_path = f\"{save_path}/{step}_{name}.pt\"\n save_lora_weight(model, save_path, replace_modules, flag)\n\n save_lora(\n model.unet, \n FILE_BASENAMES[0], \n self.use_unet_lora, \n self.unet_replace_modules, \n step,\n save_path,\n flag\n )\n save_lora(\n model.text_encoder, \n FILE_BASENAMES[1], \n self.use_text_lora, \n self.text_encoder_replace_modules, \n step, \n save_path,\n flag\n )\n\n # train_patch_pipe(model, self.use_unet_lora, self.use_text_lora)\n\n def save_lora_weights(self, model: None, save_path: str ='',step: str = '', flag=None):\n save_path = f\"{save_path}/lora\"\n os.makedirs(save_path, exist_ok=True)\n\n if self.is_cloneofsimo_lora():\n if any([self.save_for_webui, self.only_for_webui]):\n warnings.warn(\n \"\"\"\n You have 'save_for_webui' enabled, but are using cloneofsimo's LoRA implemention.\n Only 'stable_lora' is supported for saving to a compatible webui file.\n \"\"\"\n )\n self.save_cloneofsimo_lora(model, save_path, step, flag)" }, { "identifier": "ddim_inversion", "path": "utils/ddim_utils.py", "snippet": "@torch.no_grad()\ndef ddim_inversion(pipeline, ddim_scheduler, video_latent, num_inv_steps, prompt=\"\"):\n ddim_latents = ddim_loop(pipeline, ddim_scheduler, video_latent, num_inv_steps, prompt)\n return ddim_latents" } ]
import argparse
import os
import platform
import re
import warnings
import torch
import random
import imageio
import decord
from typing import Optional
from diffusers import DDIMScheduler, TextToVideoSDPipeline
from einops import rearrange
from torch import Tensor
from torch.nn.functional import interpolate
from tqdm import trange
from MotionDirector_train import export_to_video, handle_memory_attention, load_primary_models, unet_and_text_g_c, freeze_models
from utils.lora_handler import LoraHandler
from utils.ddim_utils import ddim_inversion
2,932
def initialize_pipeline(
    model: str,
    device: str = "cuda",
    xformers: bool = False,
    sdp: bool = False,
    lora_path: str = "",
    lora_rank: int = 64,
    lora_scale: float = 1.0,
):
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")

        scheduler, tokenizer, text_encoder, vae, unet = load_primary_models(model)

        # Freeze any necessary models
        freeze_models([vae, text_encoder, unet])

        # Enable xformers if available
        handle_memory_attention(xformers, sdp, unet)

        lora_manager_temporal = LoraHandler(
            version="cloneofsimo",
            use_unet_lora=True,
            use_text_lora=False,
            save_for_webui=False,
            only_for_webui=False,
            unet_replace_modules=["TransformerTemporalModel"],
            text_encoder_replace_modules=None,
            lora_bias=None
        )

        unet_lora_params, unet_negation = lora_manager_temporal.add_lora_to_model(
            True, unet, lora_manager_temporal.unet_replace_modules, 0, lora_path, r=lora_rank, scale=lora_scale)

    unet.eval()
    text_encoder.eval()
def initialize_pipeline(
    model: str,
    device: str = "cuda",
    xformers: bool = False,
    sdp: bool = False,
    lora_path: str = "",
    lora_rank: int = 64,
    lora_scale: float = 1.0,
):
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")

        scheduler, tokenizer, text_encoder, vae, unet = load_primary_models(model)

        # Freeze any necessary models
        freeze_models([vae, text_encoder, unet])

        # Enable xformers if available
        handle_memory_attention(xformers, sdp, unet)

        lora_manager_temporal = LoraHandler(
            version="cloneofsimo",
            use_unet_lora=True,
            use_text_lora=False,
            save_for_webui=False,
            only_for_webui=False,
            unet_replace_modules=["TransformerTemporalModel"],
            text_encoder_replace_modules=None,
            lora_bias=None
        )

        unet_lora_params, unet_negation = lora_manager_temporal.add_lora_to_model(
            True, unet, lora_manager_temporal.unet_replace_modules, 0, lora_path, r=lora_rank, scale=lora_scale)

    unet.eval()
    text_encoder.eval()
unet_and_text_g_c(unet, text_encoder, False, False)
3
2023-12-11 04:51:39+00:00
4k
Yingyue-L/Mamba-LLaVA
llava/model/llava_arch.py
[ { "identifier": "build_vision_tower", "path": "llava/model/multimodal_encoder/builder.py", "snippet": "def build_vision_tower(vision_tower_cfg, **kwargs):\n vision_tower = getattr(vision_tower_cfg, 'mm_vision_tower', getattr(vision_tower_cfg, 'vision_tower', None))\n is_absolute_path_exists = os.path.exists(vision_tower)\n if is_absolute_path_exists or vision_tower.startswith(\"openai\") or vision_tower.startswith(\"laion\"):\n return CLIPVisionTower(vision_tower, args=vision_tower_cfg, **kwargs)\n\n raise ValueError(f'Unknown vision tower: {vision_tower}')" }, { "identifier": "build_vision_projector", "path": "llava/model/multimodal_projector/builder.py", "snippet": "def build_vision_projector(config, delay_load=False, **kwargs):\n projector_type = getattr(config, 'mm_projector_type', 'linear')\n\n if projector_type == 'linear':\n return nn.Linear(config.mm_hidden_size, config.hidden_size)\n\n mlp_gelu_match = re.match(r'^mlp(\\d+)x_gelu$', projector_type)\n if mlp_gelu_match:\n mlp_depth = int(mlp_gelu_match.group(1))\n modules = [nn.Linear(config.mm_hidden_size, config.hidden_size)]\n for _ in range(1, mlp_depth):\n modules.append(nn.GELU())\n modules.append(nn.Linear(config.hidden_size, config.hidden_size))\n return nn.Sequential(*modules)\n\n if projector_type == 'identity':\n return IdentityMap()\n\n raise ValueError(f'Unknown projector type: {projector_type}')" }, { "identifier": "IGNORE_INDEX", "path": "llava/constants.py", "snippet": "IGNORE_INDEX = -100" }, { "identifier": "IMAGE_TOKEN_INDEX", "path": "llava/constants.py", "snippet": "IMAGE_TOKEN_INDEX = -200" }, { "identifier": "DEFAULT_IMAGE_PATCH_TOKEN", "path": "llava/constants.py", "snippet": "DEFAULT_IMAGE_PATCH_TOKEN = \"<im_patch>\"" }, { "identifier": "DEFAULT_IM_START_TOKEN", "path": "llava/constants.py", "snippet": "DEFAULT_IM_START_TOKEN = \"<im_start>\"" }, { "identifier": "DEFAULT_IM_END_TOKEN", "path": "llava/constants.py", "snippet": "DEFAULT_IM_END_TOKEN = \"<im_end>\"" } ]
from abc import ABC, abstractmethod
from .multimodal_encoder.builder import build_vision_tower
from .multimodal_projector.builder import build_vision_projector
from llava.constants import IGNORE_INDEX, IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_PATCH_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
import torch
import torch.nn as nn
3,064
# it is a headache to deal with None all the time. # But it is not ideal, and if you have a better idea, # please open an issue / submit a PR, thanks. _labels = labels _position_ids = position_ids _attention_mask = attention_mask if attention_mask is None: attention_mask = torch.ones_like(input_ids, dtype=torch.bool) else: attention_mask = attention_mask.bool() if position_ids is None: position_ids = torch.arange(0, input_ids.shape[1], dtype=torch.long, device=input_ids.device) if labels is None: labels = torch.full_like(input_ids, IGNORE_INDEX) # remove the padding using attention_mask -- TODO: double check input_ids = [cur_input_ids[cur_attention_mask] for cur_input_ids, cur_attention_mask in zip(input_ids, attention_mask)] labels = [cur_labels[cur_attention_mask] for cur_labels, cur_attention_mask in zip(labels, attention_mask)] new_input_embeds = [] new_labels = [] cur_image_idx = 0 for batch_idx, cur_input_ids in enumerate(input_ids): num_images = (cur_input_ids == IMAGE_TOKEN_INDEX).sum() if num_images == 0: cur_image_features = image_features[cur_image_idx] cur_input_embeds_1 = self.get_model().embed_tokens(cur_input_ids) cur_input_embeds = torch.cat([cur_input_embeds_1, cur_image_features[0:0]], dim=0) new_input_embeds.append(cur_input_embeds) new_labels.append(labels[batch_idx]) cur_image_idx += 1 continue image_token_indices = [-1] + torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0].tolist() + [cur_input_ids.shape[0]] cur_input_ids_noim = [] cur_labels = labels[batch_idx] cur_labels_noim = [] for i in range(len(image_token_indices) - 1): cur_input_ids_noim.append(cur_input_ids[image_token_indices[i]+1:image_token_indices[i+1]]) cur_labels_noim.append(cur_labels[image_token_indices[i]+1:image_token_indices[i+1]]) split_sizes = [x.shape[0] for x in cur_labels_noim] cur_input_embeds = self.get_model().embed_tokens(torch.cat(cur_input_ids_noim)) cur_input_embeds_no_im = torch.split(cur_input_embeds, split_sizes, dim=0) cur_new_input_embeds = [] cur_new_labels = [] for i in range(num_images + 1): cur_new_input_embeds.append(cur_input_embeds_no_im[i]) cur_new_labels.append(cur_labels_noim[i]) if i < num_images: cur_image_features = image_features[cur_image_idx] cur_image_idx += 1 cur_new_input_embeds.append(cur_image_features) cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=cur_labels.device, dtype=cur_labels.dtype)) cur_new_input_embeds = torch.cat(cur_new_input_embeds) cur_new_labels = torch.cat(cur_new_labels) new_input_embeds.append(cur_new_input_embeds) new_labels.append(cur_new_labels) # Truncate sequences to max length as image embeddings can make the sequence longer tokenizer_model_max_length = getattr(self.config, 'tokenizer_model_max_length', None) if tokenizer_model_max_length is not None: new_input_embeds = [x[:tokenizer_model_max_length] for x in new_input_embeds] new_labels = [x[:tokenizer_model_max_length] for x in new_labels] # Combine them max_len = max(x.shape[0] for x in new_input_embeds) batch_size = len(new_input_embeds) new_input_embeds_padded = [] new_labels_padded = torch.full((batch_size, max_len), IGNORE_INDEX, dtype=new_labels[0].dtype, device=new_labels[0].device) attention_mask = torch.zeros((batch_size, max_len), dtype=attention_mask.dtype, device=attention_mask.device) position_ids = torch.zeros((batch_size, max_len), dtype=position_ids.dtype, device=position_ids.device) for i, (cur_new_embed, cur_new_labels) in enumerate(zip(new_input_embeds, new_labels)): cur_len = cur_new_embed.shape[0] if getattr(self.config, 
'tokenizer_padding_side', 'right') == "left": new_input_embeds_padded.append(torch.cat(( torch.zeros((max_len - cur_len, cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device), cur_new_embed ), dim=0)) if cur_len > 0: new_labels_padded[i, -cur_len:] = cur_new_labels attention_mask[i, -cur_len:] = True position_ids[i, -cur_len:] = torch.arange(0, cur_len, dtype=position_ids.dtype, device=position_ids.device) else: new_input_embeds_padded.append(torch.cat(( cur_new_embed, torch.zeros((max_len - cur_len, cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device) ), dim=0)) if cur_len > 0: new_labels_padded[i, :cur_len] = cur_new_labels attention_mask[i, :cur_len] = True position_ids[i, :cur_len] = torch.arange(0, cur_len, dtype=position_ids.dtype, device=position_ids.device) new_input_embeds = torch.stack(new_input_embeds_padded, dim=0) if _labels is None: new_labels = None else: new_labels = new_labels_padded if _attention_mask is None: attention_mask = None else: attention_mask = attention_mask.to(dtype=_attention_mask.dtype) if _position_ids is None: position_ids = None return None, position_ids, attention_mask, past_key_values, new_input_embeds, new_labels def initialize_vision_tokenizer(self, model_args, tokenizer): if model_args.mm_use_im_patch_token: tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True) self.resize_token_embeddings(len(tokenizer)) if model_args.mm_use_im_start_end:
# Copyright 2023 Haotian Liu # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. class LlavaMetaModel: def __init__(self, config): super(LlavaMetaModel, self).__init__(config) if hasattr(config, "mm_vision_tower"): self.vision_tower = build_vision_tower(config, delay_load=True) self.mm_projector = build_vision_projector(config) def get_vision_tower(self): vision_tower = getattr(self, 'vision_tower', None) if type(vision_tower) is list: vision_tower = vision_tower[0] return vision_tower def initialize_vision_modules(self, model_args, fsdp=None): vision_tower = model_args.vision_tower mm_vision_select_layer = model_args.mm_vision_select_layer mm_vision_select_feature = model_args.mm_vision_select_feature pretrain_mm_mlp_adapter = model_args.pretrain_mm_mlp_adapter self.config.mm_vision_tower = vision_tower if self.get_vision_tower() is None: vision_tower = build_vision_tower(model_args) if fsdp is not None and len(fsdp) > 0: self.vision_tower = [vision_tower] else: self.vision_tower = vision_tower else: if fsdp is not None and len(fsdp) > 0: vision_tower = self.vision_tower[0] else: vision_tower = self.vision_tower vision_tower.load_model() self.config.use_mm_proj = True self.config.mm_projector_type = getattr(model_args, 'mm_projector_type', 'linear') self.config.mm_hidden_size = vision_tower.hidden_size self.config.mm_vision_select_layer = mm_vision_select_layer self.config.mm_vision_select_feature = mm_vision_select_feature if getattr(self, 'mm_projector', None) is None: self.mm_projector = build_vision_projector(self.config) else: # In case it is frozen by LoRA for p in self.mm_projector.parameters(): p.requires_grad = True if pretrain_mm_mlp_adapter is not None: mm_projector_weights = torch.load(pretrain_mm_mlp_adapter, map_location='cpu') def get_w(weights, keyword): return {k.split(keyword + '.')[1]: v for k, v in weights.items() if keyword in k} self.mm_projector.load_state_dict(get_w(mm_projector_weights, 'mm_projector')) class LlavaMetaForCausalLM(ABC): @abstractmethod def get_model(self): pass def get_vision_tower(self): return self.get_model().get_vision_tower() def encode_images(self, images): image_features = self.get_model().get_vision_tower()(images) image_features = self.get_model().mm_projector(image_features) return image_features def prepare_inputs_labels_for_multimodal( self, input_ids, position_ids, attention_mask, past_key_values, labels, images ): vision_tower = self.get_vision_tower() if vision_tower is None or images is None or input_ids.shape[1] == 1: if past_key_values is not None and vision_tower is not None and images is not None and input_ids.shape[1] == 1: target_shape = past_key_values[-1][-1].shape[-2] + 1 attention_mask = torch.cat((attention_mask, torch.ones( (attention_mask.shape[0], target_shape - attention_mask.shape[1]), dtype=attention_mask.dtype, device=attention_mask.device )), dim=1) position_ids = torch.sum(attention_mask, dim=1).unsqueeze(-1) - 1 return input_ids, position_ids, attention_mask, past_key_values, None, labels if type(images) is list or images.ndim == 
5: concat_images = torch.cat([image for image in images], dim=0) image_features = self.encode_images(concat_images) split_sizes = [image.shape[0] for image in images] image_features = torch.split(image_features, split_sizes, dim=0) image_features = [x.flatten(0, 1).to(self.device) for x in image_features] else: image_features = self.encode_images(images).to(self.device) # TODO: image start / end is not implemented here to support pretraining. if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False): raise NotImplementedError # Let's just add dummy tensors if they do not exist, # it is a headache to deal with None all the time. # But it is not ideal, and if you have a better idea, # please open an issue / submit a PR, thanks. _labels = labels _position_ids = position_ids _attention_mask = attention_mask if attention_mask is None: attention_mask = torch.ones_like(input_ids, dtype=torch.bool) else: attention_mask = attention_mask.bool() if position_ids is None: position_ids = torch.arange(0, input_ids.shape[1], dtype=torch.long, device=input_ids.device) if labels is None: labels = torch.full_like(input_ids, IGNORE_INDEX) # remove the padding using attention_mask -- TODO: double check input_ids = [cur_input_ids[cur_attention_mask] for cur_input_ids, cur_attention_mask in zip(input_ids, attention_mask)] labels = [cur_labels[cur_attention_mask] for cur_labels, cur_attention_mask in zip(labels, attention_mask)] new_input_embeds = [] new_labels = [] cur_image_idx = 0 for batch_idx, cur_input_ids in enumerate(input_ids): num_images = (cur_input_ids == IMAGE_TOKEN_INDEX).sum() if num_images == 0: cur_image_features = image_features[cur_image_idx] cur_input_embeds_1 = self.get_model().embed_tokens(cur_input_ids) cur_input_embeds = torch.cat([cur_input_embeds_1, cur_image_features[0:0]], dim=0) new_input_embeds.append(cur_input_embeds) new_labels.append(labels[batch_idx]) cur_image_idx += 1 continue image_token_indices = [-1] + torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0].tolist() + [cur_input_ids.shape[0]] cur_input_ids_noim = [] cur_labels = labels[batch_idx] cur_labels_noim = [] for i in range(len(image_token_indices) - 1): cur_input_ids_noim.append(cur_input_ids[image_token_indices[i]+1:image_token_indices[i+1]]) cur_labels_noim.append(cur_labels[image_token_indices[i]+1:image_token_indices[i+1]]) split_sizes = [x.shape[0] for x in cur_labels_noim] cur_input_embeds = self.get_model().embed_tokens(torch.cat(cur_input_ids_noim)) cur_input_embeds_no_im = torch.split(cur_input_embeds, split_sizes, dim=0) cur_new_input_embeds = [] cur_new_labels = [] for i in range(num_images + 1): cur_new_input_embeds.append(cur_input_embeds_no_im[i]) cur_new_labels.append(cur_labels_noim[i]) if i < num_images: cur_image_features = image_features[cur_image_idx] cur_image_idx += 1 cur_new_input_embeds.append(cur_image_features) cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=cur_labels.device, dtype=cur_labels.dtype)) cur_new_input_embeds = torch.cat(cur_new_input_embeds) cur_new_labels = torch.cat(cur_new_labels) new_input_embeds.append(cur_new_input_embeds) new_labels.append(cur_new_labels) # Truncate sequences to max length as image embeddings can make the sequence longer tokenizer_model_max_length = getattr(self.config, 'tokenizer_model_max_length', None) if tokenizer_model_max_length is not None: new_input_embeds = [x[:tokenizer_model_max_length] for x in new_input_embeds] new_labels = [x[:tokenizer_model_max_length] 
for x in new_labels] # Combine them max_len = max(x.shape[0] for x in new_input_embeds) batch_size = len(new_input_embeds) new_input_embeds_padded = [] new_labels_padded = torch.full((batch_size, max_len), IGNORE_INDEX, dtype=new_labels[0].dtype, device=new_labels[0].device) attention_mask = torch.zeros((batch_size, max_len), dtype=attention_mask.dtype, device=attention_mask.device) position_ids = torch.zeros((batch_size, max_len), dtype=position_ids.dtype, device=position_ids.device) for i, (cur_new_embed, cur_new_labels) in enumerate(zip(new_input_embeds, new_labels)): cur_len = cur_new_embed.shape[0] if getattr(self.config, 'tokenizer_padding_side', 'right') == "left": new_input_embeds_padded.append(torch.cat(( torch.zeros((max_len - cur_len, cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device), cur_new_embed ), dim=0)) if cur_len > 0: new_labels_padded[i, -cur_len:] = cur_new_labels attention_mask[i, -cur_len:] = True position_ids[i, -cur_len:] = torch.arange(0, cur_len, dtype=position_ids.dtype, device=position_ids.device) else: new_input_embeds_padded.append(torch.cat(( cur_new_embed, torch.zeros((max_len - cur_len, cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device) ), dim=0)) if cur_len > 0: new_labels_padded[i, :cur_len] = cur_new_labels attention_mask[i, :cur_len] = True position_ids[i, :cur_len] = torch.arange(0, cur_len, dtype=position_ids.dtype, device=position_ids.device) new_input_embeds = torch.stack(new_input_embeds_padded, dim=0) if _labels is None: new_labels = None else: new_labels = new_labels_padded if _attention_mask is None: attention_mask = None else: attention_mask = attention_mask.to(dtype=_attention_mask.dtype) if _position_ids is None: position_ids = None return None, position_ids, attention_mask, past_key_values, new_input_embeds, new_labels def initialize_vision_tokenizer(self, model_args, tokenizer): if model_args.mm_use_im_patch_token: tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True) self.resize_token_embeddings(len(tokenizer)) if model_args.mm_use_im_start_end:
num_new_tokens = tokenizer.add_tokens([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True)
5
2023-12-09 09:39:13+00:00
4k
Theia-4869/MoSA
src/engine/trainer.py
[ { "identifier": "Evaluator", "path": "src/engine/evaluator.py", "snippet": "class Evaluator():\n \"\"\"\n An evaluator with below logics:\n\n 1. find which eval module to use.\n 2. store the eval results, pretty print it in log file as well.\n \"\"\"\n\n def __init__(\n self,\n ) -> None:\n self.results = defaultdict(dict)\n self.iteration = -1\n self.threshold_end = 0.5\n\n def update_iteration(self, iteration: int) -> None:\n \"\"\"update iteration info\"\"\"\n self.iteration = iteration\n\n def update_result(self, metric: str, value: Union[float, dict]) -> None:\n if self.iteration > -1:\n key_name = \"epoch_\" + str(self.iteration)\n else:\n key_name = \"final\"\n if isinstance(value, float):\n self.results[key_name].update({metric: value})\n else:\n if metric in self.results[key_name]:\n self.results[key_name][metric].update(value)\n else:\n self.results[key_name].update({metric: value})\n\n def classify(self, probs, targets, test_data, multilabel=False):\n \"\"\"\n Evaluate classification result.\n Args:\n probs: np.ndarray for num_data x num_class, predicted probabilities\n targets: np.ndarray for multilabel, list of integers for single label\n test_labels: map test image ids to a list of class labels\n \"\"\"\n if not targets:\n raise ValueError(\n \"When evaluating classification, need at least give targets\")\n\n if multilabel:\n self._eval_multilabel(probs, targets, test_data)\n else:\n self._eval_singlelabel(probs, targets, test_data)\n\n def _eval_singlelabel(\n self,\n scores: np.ndarray,\n targets: List[int],\n eval_type: str\n ) -> None:\n \"\"\"\n if number of labels > 2:\n top1 and topk (5 by default) accuracy\n if number of labels == 2:\n top1 and rocauc\n \"\"\"\n acc_dict = singlelabel.compute_acc_auc(scores, targets)\n\n log_results = {\n k: np.around(v * 100, decimals=2) for k, v in acc_dict.items()\n }\n save_results = acc_dict\n\n self.log_and_update(log_results, save_results, eval_type)\n\n def _eval_multilabel(\n self,\n scores: np.ndarray,\n targets: np.ndarray,\n eval_type: str\n ) -> None:\n num_labels = scores.shape[-1]\n targets = multilabel.multihot(targets, num_labels)\n\n log_results = {}\n ap, ar, mAP, mAR = multilabel.compute_map(scores, targets)\n f1_dict = multilabel.get_best_f1_scores(\n targets, scores, self.threshold_end)\n\n log_results[\"mAP\"] = np.around(mAP * 100, decimals=2)\n log_results[\"mAR\"] = np.around(mAR * 100, decimals=2)\n log_results.update({\n k: np.around(v * 100, decimals=2) for k, v in f1_dict.items()})\n save_results = {\n \"ap\": ap, \"ar\": ar, \"mAP\": mAP, \"mAR\": mAR, \"f1\": f1_dict\n }\n self.log_and_update(log_results, save_results, eval_type)\n\n def log_and_update(self, log_results, save_results, eval_type):\n log_str = \"\"\n for k, result in log_results.items():\n if not isinstance(result, np.ndarray):\n log_str += f\"{k}: {result:.2f}\\t\"\n else:\n log_str += f\"{k}: {list(result)}\\t\"\n logger.info(f\"Classification results with {eval_type}: {log_str}\")\n # save everything\n self.update_result(\"classification\", {eval_type: save_results})" }, { "identifier": "make_scheduler", "path": "src/solver/lr_scheduler.py", "snippet": "def make_scheduler(\n optimizer: optim.Optimizer, train_params: CfgNode\n) -> LambdaLR:\n warmup = train_params.WARMUP_EPOCH\n total_iters = train_params.TOTAL_EPOCH\n\n if train_params.SCHEDULER == \"cosine\":\n scheduler = WarmupCosineSchedule(\n optimizer,\n warmup_steps=warmup,\n t_total=total_iters\n )\n elif train_params.SCHEDULER == \"cosine_hardrestart\":\n scheduler = 
WarmupCosineWithHardRestartsSchedule(\n optimizer,\n warmup_steps=warmup,\n t_total=total_iters\n )\n\n elif train_params.SCHEDULER == \"plateau\":\n scheduler = optim.lr_scheduler.ReduceLROnPlateau(\n optimizer,\n \"max\",\n patience=5,\n verbose=True,\n factor=train_params.LR_DECAY_FACTOR,\n )\n else:\n scheduler = None\n return scheduler" }, { "identifier": "make_optimizer", "path": "src/solver/optimizer.py", "snippet": "def make_optimizer(\n models: List[Any], train_params: CfgNode\n) -> Optimizer:\n params = []\n for model in models:\n # only include learnable params\n if train_params.DBG_TRAINABLE:\n logger.info(\"Trainable params:\")\n\n for key, value in model.named_parameters():\n \n if value.requires_grad:\n\n if train_params.DBG_TRAINABLE:\n logger.info(\"\\t{}, {}, {}\".format(key, value.numel(), value.shape))\n params.append((key, value))\n\n if train_params.WEIGHT_DECAY > 0:\n if train_params.OPTIMIZER == 'adamw':\n\n no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']\n optimizer_grouped_parameters = [\n {'params': [p for n, p in params\n if not any(nd in n for nd in no_decay)],\n 'weight_decay': 0.01},\n {'params': [p for n, p in params\n if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n ]\n optimizer = AdamW(\n optimizer_grouped_parameters,\n lr=train_params.BASE_LR,\n )\n else:\n _params = []\n for p in params:\n key, value = p\n # print(key)\n # if not value.requires_grad:\n # continue\n lr = train_params.BASE_LR\n weight_decay = train_params.WEIGHT_DECAY\n if \"last_layer.bias\" in key:\n # no regularization (weight decay) for last layer's bias\n weight_decay = 0.0\n\n if train_params.BIAS_MULTIPLIER == 1.:\n _params += [{\n \"params\": [value],\n \"lr\": lr,\n \"weight_decay\": weight_decay\n }]\n else:\n if \"bias\" in key and \"last_layer.bias\" not in key:\n # use updated lr for this param\n lr_value = lr * train_params.BIAS_MULTIPLIER\n else:\n lr_value = lr\n\n if train_params.DBG_TRAINABLE:\n logger.info(\"\\t{}, {:.4f}\".format(key, lr_value))\n\n _params += [{\n \"params\": [value],\n \"lr\": lr_value,\n \"weight_decay\": weight_decay\n }]\n\n if train_params.OPTIMIZER == 'adam':\n optimizer = optim.Adam(\n _params,\n lr=train_params.BASE_LR,\n weight_decay=train_params.WEIGHT_DECAY,\n )\n else:\n optimizer = optim.SGD(\n _params,\n train_params.BASE_LR,\n momentum=train_params.MOMENTUM,\n weight_decay=train_params.WEIGHT_DECAY\n )\n return optimizer\n else:\n if train_params.OPTIMIZER == 'adam':\n optimizer = optim.Adam(\n model.parameters(),\n lr=train_params.BASE_LR\n )\n else:\n _params = []\n for p in params:\n key, value = p\n\n lr = train_params.BASE_LR\n\n if train_params.BIAS_MULTIPLIER == 1.:\n _params += [{\n \"params\": [value],\n \"lr\": lr,\n }]\n else:\n if \"bias\" in key and \"last_layer.bias\" not in key:\n # use updated lr for this param\n lr_value = lr * train_params.BIAS_MULTIPLIER\n else:\n lr_value = lr\n\n if train_params.DBG_TRAINABLE:\n logger.info(\"\\t{}, {:.4f}\".format(key, lr_value))\n\n _params += [{\n \"params\": [value],\n \"lr\": lr_value,\n }]\n optimizer = optim.SGD(\n _params,\n train_params.BASE_LR,\n momentum=train_params.MOMENTUM,\n )\n return optimizer" }, { "identifier": "build_loss", "path": "src/solver/losses.py", "snippet": "def build_loss(cfg):\n loss_name = cfg.SOLVER.LOSS\n assert loss_name in LOSS, \\\n f'loss name {loss_name} is not supported'\n loss_fn = LOSS[loss_name]\n if not loss_fn:\n return None\n else:\n return loss_fn(cfg)" }, { "identifier": "symmetric_KL_loss", "path": 
"src/solver/losses.py", "snippet": "def symmetric_KL_loss(input, target, reduction='batchmean'):\n \"\"\" symmetric KL-divergence 1/2*(KL(p||q)+KL(q||p)) \"\"\"\n\n input = input.float()\n target = target.float()\n loss = F.kl_div(F.log_softmax(input, dim=-1, dtype=torch.float32),\n F.softmax(target.detach(), dim=-1, dtype=torch.float32), reduction=reduction) + \\\n F.kl_div(F.log_softmax(target, dim=-1, dtype=torch.float32),\n F.softmax(input.detach(), dim=-1, dtype=torch.float32), reduction=reduction)\n return 0.5 * loss.sum()" }, { "identifier": "deepreg_MSE_loss", "path": "src/solver/losses.py", "snippet": "def deepreg_MSE_loss(input, target, reduction='mean'):\n \"\"\" deep regulerization MSE loss \"\"\"\n\n loss = 0\n for i in range(6):\n loss += F.mse_loss(input[i], target[i], reduction=reduction)\n return loss.sum() / len(input) * 0.01" }, { "identifier": "logging", "path": "src/utils/logging.py", "snippet": "_FORMAT = \"[%(levelname)s: %(filename)s: %(lineno)4d]: %(message)s\"\ndef _suppress_print():\n def print_pass(*objects, sep=\" \", end=\"\\n\", file=sys.stdout, flush=False):\ndef _cached_log_stream(filename):\ndef setup_logging(\n num_gpu, num_shards, output=\"\", name=\"MOSA\", color=True):\ndef setup_single_logging(name, output=\"\"):\ndef get_logger(name):\ndef log_json_stats(stats, sort_keys=True):\n def __init__(self, *args, **kwargs):\n def formatMessage(self, record: logging.LogRecord) -> str:\nclass _ColorfulFormatter(logging.Formatter):" }, { "identifier": "AverageMeter", "path": "src/utils/train_utils.py", "snippet": "class AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self, name, fmt=':f'):\n self.name = name\n self.fmt = fmt\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n def __str__(self):\n fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'\n return fmtstr.format(**self.__dict__)" }, { "identifier": "gpu_mem_usage", "path": "src/utils/train_utils.py", "snippet": "def gpu_mem_usage():\n \"\"\"Computes the GPU memory usage for the current device (GB).\"\"\"\n if not torch.cuda.is_available():\n return 0\n # Number of bytes in a megabyte\n _B_IN_GB = 1024 * 1024 * 1024\n\n mem_usage_bytes = torch.cuda.max_memory_allocated()\n return mem_usage_bytes / _B_IN_GB" } ]
import datetime
import time
import torch
import torch.nn as nn
import os
import shutil
import random
import wandb
from fvcore.common.config import CfgNode
from fvcore.common.checkpoint import Checkpointer
from ..engine.evaluator import Evaluator
from ..solver.lr_scheduler import make_scheduler
from ..solver.optimizer import make_optimizer
from ..solver.losses import build_loss, symmetric_KL_loss, deepreg_MSE_loss
from ..utils import logging
from ..utils.train_utils import AverageMeter, gpu_mem_usage
3,228
#!/usr/bin/env python3
"""
a trainer class
"""
logger = logging.get_logger("MOSA")


class Trainer():
    """
    a trainer with below logics:
    1. Build optimizer, scheduler
    2. Load checkpoints if provided
    3. Train and eval at each epoch
    """
    def __init__(
        self,
        cfg: CfgNode,
        args,
        model: nn.Module,
        evaluator: Evaluator,
        device: torch.device,
    ) -> None:
        self.cfg = cfg
        self.args = args
        self.model = model
        self.device = device

        # solver related
        logger.info("Setting up the optimizer...")
        self.optimizer = make_optimizer([self.model], cfg.SOLVER)
#!/usr/bin/env python3
"""
a trainer class
"""
logger = logging.get_logger("MOSA")


class Trainer():
    """
    a trainer with below logics:
    1. Build optimizer, scheduler
    2. Load checkpoints if provided
    3. Train and eval at each epoch
    """
    def __init__(
        self,
        cfg: CfgNode,
        args,
        model: nn.Module,
        evaluator: Evaluator,
        device: torch.device,
    ) -> None:
        self.cfg = cfg
        self.args = args
        self.model = model
        self.device = device

        # solver related
        logger.info("Setting up the optimizer...")
        self.optimizer = make_optimizer([self.model], cfg.SOLVER)
self.scheduler = make_scheduler(self.optimizer, cfg.SOLVER)
1
2023-12-06 07:50:16+00:00
4k
khwong-c/syn-magia
magia/std/bundles.py
[ { "identifier": "IOBundle", "path": "magia/bundle.py", "snippet": "class IOBundle:\n \"\"\"\n Define a bundle of I/O, which can be used as the input or output of a module.\n An IOBundle can be added with Input and Output.\n However, the bundle cannot be used as normal signals.\n The actual signals can be accessed from `input` and `output` of the instance instead.\n\n We can use `signal_bundle()` to create a SignalBundle that turns all the ports into normal signals,\n which we can connect to the instance of the module and other destinations.\n It can be accessed by individual port by attributes, or connect to multiple instance directly.\n \"\"\"\n\n def __init__(self, owner_instance: Optional[\"Instance\"] = None, **kwargs):\n self._signals = SignalDict()\n self._input_names: list[str] = []\n self._output_names: list[str] = []\n self._owner_instance: Optional[\"Instance\"] = owner_instance\n\n def __add__(self, other: Union[\"IOBundle\", list[Union[Input, Output]], Input, Output]) -> \"IOBundle\":\n new_bundle = IOBundle()\n new_bundle += self\n new_bundle += other\n return new_bundle\n\n def __iadd__(self, other: Union[\"IOBundle\", list[Union[Input, Output]], Input, Output]) -> \"IOBundle\":\n if isinstance(other, IOBundle):\n other = other.inputs + other.outputs\n if isinstance(other, (Input, Output)):\n other = [other]\n\n for port in other:\n if port.name in self.input_names + self.output_names:\n raise KeyError(f\"Port {port.name} is already defined.\")\n\n if port.type == SignalType.INPUT:\n self._input_names.append(port.name)\n elif port.type == SignalType.OUTPUT:\n self._output_names.append(port.name)\n else:\n raise TypeError(f\"Signal Type {port.type} is forbidden in IOBundle.\")\n\n self._signals[port.name] = port.copy(owner_instance=self.owner_instance)\n\n return self\n\n def __getattr__(self, name: str) -> Union[Input, Output]:\n if name.startswith(\"_\"):\n return super().__getattribute__(name)\n if name in self.input_names + self.output_names:\n return self.__getitem__(name)\n return super().__getattribute__(name)\n\n def __setattr__(self, name: str, value: Union[Input, Output]):\n if name.startswith(\"_\"):\n super().__setattr__(name, value)\n if isinstance(value, Signal):\n self.__setitem__(name, value)\n else:\n super().__setattr__(name, value)\n\n def __getitem__(self, item: str) -> Union[Input, Output]:\n return self._signals[item]\n\n def __setitem__(self, key, value):\n self._signals[key] = value\n\n @property\n def inputs(self) -> list[Signal]:\n return [\n signal for signal in self._signals.values()\n if signal.type == SignalType.INPUT\n ]\n\n @property\n def outputs(self) -> list[Signal]:\n return [\n signal for signal in self._signals.values()\n if signal.type == SignalType.OUTPUT\n ]\n\n @property\n def input_names(self) -> list[str]:\n return self._input_names\n\n @property\n def output_names(self) -> list[str]:\n return self._output_names\n\n @property\n def signals(self) -> SignalDict:\n return self._signals\n\n @property\n def owner_instance(self) -> Optional[\"Instance\"]:\n return self._owner_instance\n\n def flip(self, ignore: Optional[list[str]] = None) -> \"IOBundle\":\n \"\"\"\n Create a new IOBundle with the Input and Output inverted.\n Ports specified in the `ignore` arguments will not be inverted.\n\n If `ignore` is not specified, default ports: \"clk\", \"rst_n\", \"reset\" are remain unchanged.\n \"\"\"\n if ignore is None:\n ignore = [\"clk\", \"rst_n\", \"reset\"]\n new_bundle = IOBundle()\n for port in self._signals.values():\n if port.name 
in ignore:\n new_bundle += port\n else:\n new_port_type = {\n SignalType.INPUT: Output,\n SignalType.OUTPUT: Input,\n }[port.type]\n new_port = new_port_type(name=port.name, width=len(port), signed=port.signed)\n new_bundle += new_port\n return new_bundle\n\n def signal_bundle(self, name: Optional[str] = None) -> SignalBundle:\n \"\"\"\n Return a SignalBundle that turns all the ports into normal signals\n The bundle can be connected to the instance of the module and other destinations.\n \"\"\"\n new_bundle = SignalBundle(name)\n for port in self._signals.values():\n new_bundle += Signal(name=port.name, width=len(port), signed=port.signed)\n return new_bundle\n\n def with_name(self, prefix: str = \"\", suffix: str = \"\", upper_case=False) -> \"IOBundle\":\n \"\"\"\n Create a new IOBundle with the name of each port prefixed with `prefix` and suffixed with `suffix`.\n \"\"\"\n new_bundle = IOBundle()\n for port in self._signals.values():\n new_port_type = {\n SignalType.INPUT: Input,\n SignalType.OUTPUT: Output,\n }[port.type]\n new_name = f\"{prefix}{port.name}{suffix}\"\n if upper_case:\n new_name = new_name.upper()\n new_port = new_port_type(name=new_name, width=len(port), signed=port.signed)\n new_bundle += new_port\n return new_bundle" }, { "identifier": "Input", "path": "magia/core.py", "snippet": "class Input(Signal):\n \"\"\"\n Representing an input signal.\n It has no driver, but it is driving other signals.\n It is used by both the module declaration and the module instance.\n \"\"\"\n\n def __init__(\n self,\n name: str, width: int, signed: bool = False,\n owner_instance: Optional[\"Instance\"] = None,\n **kwargs\n ):\n \"\"\"\n I/O ports must have name and width well-defined by designers.\n \"\"\"\n if name is None:\n raise ValueError(\"Input name is not set\")\n if width == 0:\n raise ValueError(\"Input width is not set\")\n\n super().__init__(name=name, width=width, signed=signed, **kwargs)\n self._config.signal_type = SignalType.INPUT\n self._config.owner_instance = owner_instance\n\n @property\n def net_name(self) -> str:\n \"\"\"\n Net Name of I/O must be the same with the name, even they are within an IOBundle\n \"\"\"\n return self.name\n\n def elaborate(self) -> str:\n \"\"\"\n Elaborate the input signal in the module declaration.\n :return: input logic (signed) [...]PORT_NAME\n \"\"\"\n port_decl = self.signal_decl().rstrip(\";\")\n return f\"input {port_decl}\"\n\n def copy(self, owner_instance: Optional[\"Instance\"] = None) -> \"Input\":\n \"\"\"\n Copy the input signal. 
Driver is discarded.\n I/O port can only be assigned to an instance, not a SignalBundle / IOBundle.\n :return: A new input signal with the same configuration.\n \"\"\"\n return Input(\n name=self.name,\n width=len(self),\n signed=self.signed,\n description=self.description,\n owner_instance=owner_instance,\n )" }, { "identifier": "Output", "path": "magia/core.py", "snippet": "class Output(Signal):\n \"\"\"\n Representing an output signal.\n They are the starting points when we elaborate the module.\n It is used by both the module declaration and the module instance.\n \"\"\"\n\n def __init__(\n self,\n name: str, width: int, signed: bool = False,\n owner_instance: Optional[\"Instance\"] = None,\n **kwargs\n ):\n \"\"\"\n I/O ports must have name and width well-defined by designers.\n \"\"\"\n if name is None:\n raise ValueError(\"Output name is not set\")\n if width == 0:\n raise ValueError(\"Output width is not set\")\n super().__init__(name=name, width=width, signed=signed, **kwargs)\n self._config.signal_type = SignalType.OUTPUT\n self._config.owner_instance = owner_instance\n\n @property\n def net_name(self) -> str:\n \"\"\"\n Net Name of I/O must be the same with the name, even they are within an IOBundle\n \"\"\"\n return self.name\n\n def elaborate(self) -> str:\n \"\"\"\n Elaborate the output signal in the module declaration.\n :return: output logic (signed) [...]PORT_NAME\n \"\"\"\n port_decl = self.signal_decl().rstrip(\";\")\n return f\"output {port_decl}\"\n\n def copy(self, owner_instance: Optional[\"Instance\"] = None, **kwargs) -> \"Output\":\n \"\"\"\n Copy the output signal. Driver is discarded.\n I/O port can only be assigned to an instance, not a SignalBundle / IOBundle.\n :return: A new output signal with the same configuration.\n \"\"\"\n return Output(\n name=self.name,\n width=len(self),\n signed=self.signed,\n description=self.description,\n owner_instance=owner_instance,\n )" } ]
from typing import Union
from magia import Input, IOBundle, Output
2,399
class StdIO:
    @classmethod
    def valid_multi(cls, bundle_name: str, data_spec: dict[str, Union[tuple[int, bool], int]], sep="_") -> IOBundle:
        if not isinstance(data_spec, dict):
            raise TypeError("data_spec must be a dict")
        if len(data_spec) == 0:
            raise ValueError("data_spec must not be empty")
        new_bundle = IOBundle()
        for name, spec in data_spec.items():
            if isinstance(spec, tuple):
                width, signed = spec
            else:
                width, signed = spec, False
class StdIO:
    @classmethod
    def valid_multi(cls, bundle_name: str, data_spec: dict[str, Union[tuple[int, bool], int]], sep="_") -> IOBundle:
        if not isinstance(data_spec, dict):
            raise TypeError("data_spec must be a dict")
        if len(data_spec) == 0:
            raise ValueError("data_spec must not be empty")
        new_bundle = IOBundle()
        for name, spec in data_spec.items():
            if isinstance(spec, tuple):
                width, signed = spec
            else:
                width, signed = spec, False
new_bundle += Output(f"{name}", width, signed)
2
2023-12-12 22:50:43+00:00
4k
IBM/AI-assisted-chemical-sensing
src/chemsense/vision/cli/few_shot_analysis.py
[ { "identifier": "setup_basic_logging_for_scripts", "path": "src/chemsense/vision/logging_configuration.py", "snippet": "def setup_basic_logging_for_scripts() -> None:\n \"\"\"Setup basic stdout logging for scripts.\"\"\"\n logging.basicConfig(\n stream=sys.stdout,\n level=logging.INFO,\n format=\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\",\n )" }, { "identifier": "CLASSIFICATION_HEADS", "path": "src/chemsense/vision/modeling/classification.py", "snippet": "CLASSIFICATION_HEADS = {\r\n \"LDA\": LinearDiscriminantAnalysis(),\r\n \"RF\": RandomForestClassifier(n_estimators=50, max_depth=None),\r\n \"KNN\": KNeighborsClassifier(n_neighbors=3),\r\n \"SVM\": svm.SVC(),\r\n \"ET\": ExtraTreesClassifier(),\r\n \"XGB\": GradientBoostingClassifier(),\r\n}\r" }, { "identifier": "ENCODERS_REGISTRY", "path": "src/chemsense/vision/modeling/encoders.py", "snippet": "ENCODERS_REGISTRY = {\n \"mobilenetv2_35_96\": {\n \"processor\": AutoImageProcessor.from_pretrained(\"google/mobilenet_v2_0.35_96\"),\n \"model\": MobileNetV2Model.from_pretrained(\"google/mobilenet_v2_0.35_96\"),\n \"size\": 96,\n },\n \"mobilenetv2_100_224\": {\n \"processor\": AutoImageProcessor.from_pretrained(\"google/mobilenet_v2_1.0_224\"),\n \"model\": MobileNetV2Model.from_pretrained(\"google/mobilenet_v2_1.0_224\"),\n \"size\": 224,\n },\n \"mobilenetv2_140_224\": {\n \"processor\": AutoImageProcessor.from_pretrained(\"google/mobilenet_v2_1.4_224\"),\n \"model\": MobileNetV2Model.from_pretrained(\"google/mobilenet_v2_1.4_224\"),\n \"size\": 224,\n },\n \"resnet_18\": {\n \"processor\": AutoImageProcessor.from_pretrained(\"microsoft/resnet-18\"),\n \"model\": ResNetModel.from_pretrained(\"microsoft/resnet-18\"),\n \"size\": 224,\n },\n \"resnet_50\": {\n \"processor\": AutoImageProcessor.from_pretrained(\"microsoft/resnet-50\"),\n \"model\": ResNetModel.from_pretrained(\"microsoft/resnet-50\"),\n \"size\": 224,\n },\n \"resnet_101\": {\n \"processor\": AutoImageProcessor.from_pretrained(\"microsoft/resnet-101\"),\n \"model\": ResNetModel.from_pretrained(\"microsoft/resnet-101\"),\n \"size\": 224,\n },\n \"vit_base_224\": {\n \"processor\": ViTImageProcessor.from_pretrained(\"google/vit-base-patch16-224\"),\n \"model\": ViTModel.from_pretrained(\"google/vit-base-patch16-224\"),\n \"size\": 224,\n },\n \"vit_base_384\": {\n \"processor\": ViTImageProcessor.from_pretrained(\"google/vit-base-patch16-384\"),\n \"model\": ViTModel.from_pretrained(\"google/vit-base-patch16-384\"),\n \"size\": 384,\n },\n \"vit_large_224\": {\n \"processor\": ViTImageProcessor.from_pretrained(\"google/vit-large-patch16-224\"),\n \"model\": ViTModel.from_pretrained(\"google/vit-large-patch16-224\"),\n \"size\": 224,\n },\n \"beit_base_224\": {\n \"processor\": BeitImageProcessor.from_pretrained(\n \"microsoft/beit-base-patch16-224-pt22k-ft22k\"\n ),\n \"model\": BeitModel.from_pretrained(\n \"microsoft/beit-base-patch16-224-pt22k-ft22k\"\n ),\n \"size\": 224,\n },\n \"beit_base_384\": {\n \"processor\": BeitImageProcessor.from_pretrained(\n \"microsoft/beit-base-patch16-384\"\n ),\n \"model\": BeitModel.from_pretrained(\"microsoft/beit-base-patch16-384\"),\n \"size\": 384,\n },\n \"beit_large_224\": {\n \"processor\": BeitImageProcessor.from_pretrained(\n \"microsoft/beit-large-patch16-224-pt22k-ft22k\"\n ),\n \"model\": BeitModel.from_pretrained(\n \"microsoft/beit-large-patch16-224-pt22k-ft22k\"\n ),\n \"size\": 224,\n },\n}" } ]
import os
import random
import click
import numpy as np
import pandas as pd
import torch.utils.data
from copy import deepcopy
from pathlib import Path
from typing import Dict, List
from sklearn.decomposition import PCA
from sklearn.metrics import accuracy_score
from torchvision import datasets, transforms
from ..logging_configuration import setup_basic_logging_for_scripts
from ..modeling.classification import CLASSIFICATION_HEADS
from ..modeling.encoders import ENCODERS_REGISTRY
1,614
"""Train and test models with few shots and image augmentation.""" __copyright__ = """ LICENSED INTERNAL CODE. PROPERTY OF IBM. IBM Research Licensed Internal Code (C) Copyright IBM Corp. 2023 ALL RIGHTS RESERVED """ num_images = int(os.getenv("NUMBER_OF_IMAGES", 50)) num_rep = int(os.getenv("NUMBER_OF_REPEATS", 50)) @click.command() @click.option("--task", type=str, default="red_wines", help="Dataset name identifier.") @click.option( "--n_comp", type=int, default=10, help="Number of principal components to be used as predictors.", ) @click.option( "--mix_ratio", type=float, default=0.95, help="Fraction of pixel intensity for image mixing and data augmentation. Needs to be between 0 and 1.", ) @click.option( "--batch_size", type=int, default=10, help="Batch size for image loading and processing.", ) @click.option( "--data_path", required=True, type=click.Path(path_type=Path, exists=True), help="Path to image directory.", ) @click.option( "--output_path", required=True, type=click.Path(path_type=Path), help="Path to save classification model validation results.", ) def main( task: str, n_comp: int, mix_ratio: float, batch_size: int, data_path: Path, output_path: Path, ) -> None: setup_basic_logging_for_scripts() w_class = mix_ratio w_other = 1 - w_class data_path = Path.joinpath(data_path, task) data_transforms = transforms.Compose( [ transforms.Resize((224, 224)), transforms.ToTensor(), ] ) dataset = datasets.ImageFolder(data_path, transform=data_transforms) dataloader = torch.utils.data.DataLoader( dataset, batch_size=batch_size, shuffle=False ) class_names = np.array(dataset.classes) Path(output_path).mkdir(exist_ok=True) result_path = Path.joinpath(output_path, task)
"""Train and test models with few shots and image augmentation.""" __copyright__ = """ LICENSED INTERNAL CODE. PROPERTY OF IBM. IBM Research Licensed Internal Code (C) Copyright IBM Corp. 2023 ALL RIGHTS RESERVED """ num_images = int(os.getenv("NUMBER_OF_IMAGES", 50)) num_rep = int(os.getenv("NUMBER_OF_REPEATS", 50)) @click.command() @click.option("--task", type=str, default="red_wines", help="Dataset name identifier.") @click.option( "--n_comp", type=int, default=10, help="Number of principal components to be used as predictors.", ) @click.option( "--mix_ratio", type=float, default=0.95, help="Fraction of pixel intensity for image mixing and data augmentation. Needs to be between 0 and 1.", ) @click.option( "--batch_size", type=int, default=10, help="Batch size for image loading and processing.", ) @click.option( "--data_path", required=True, type=click.Path(path_type=Path, exists=True), help="Path to image directory.", ) @click.option( "--output_path", required=True, type=click.Path(path_type=Path), help="Path to save classification model validation results.", ) def main( task: str, n_comp: int, mix_ratio: float, batch_size: int, data_path: Path, output_path: Path, ) -> None: setup_basic_logging_for_scripts() w_class = mix_ratio w_other = 1 - w_class data_path = Path.joinpath(data_path, task) data_transforms = transforms.Compose( [ transforms.Resize((224, 224)), transforms.ToTensor(), ] ) dataset = datasets.ImageFolder(data_path, transform=data_transforms) dataloader = torch.utils.data.DataLoader( dataset, batch_size=batch_size, shuffle=False ) class_names = np.array(dataset.classes) Path(output_path).mkdir(exist_ok=True) result_path = Path.joinpath(output_path, task)
model_heads = CLASSIFICATION_HEADS.keys()
1
2023-12-05 15:56:12+00:00
4k
batmanlab/DrasCLR
extract_feature.py
[ { "identifier": "Encoder", "path": "models/cnn3d.py", "snippet": "class Encoder(nn.Module):\n\n def __init__(self, rep_dim, moco_dim, num_experts, num_coordinates):\n super(Encoder, self).__init__()\n self.rep_dim = rep_dim\n self.moco_dim = moco_dim\n self.num_experts = num_experts\n self.num_coordinates = num_coordinates\n self.conv1 = Conv3d(1, 8, kernel_size=3, stride=1, padding=1, num_experts=self.num_experts, num_coordinates=self.num_coordinates)\n self.bn1 = nn.BatchNorm3d(8)\n self.act = nn.ELU()\n self.conv2 = Conv3d(8, 8, kernel_size=3, stride=2, padding=1, num_experts=self.num_experts, num_coordinates=self.num_coordinates)\n self.bn2 = nn.BatchNorm3d(8)\n self.downsample1 = Block(8, 16, self.num_experts, self.num_coordinates)\n self.downsample2 = Block(16, 32, self.num_experts, self.num_coordinates)\n self.downsample3 = Block(32, 64, self.num_experts, self.num_coordinates)\n self.conv3 = Conv3d(64, 128, kernel_size=3, stride=1, padding=1, num_experts=self.num_experts, num_coordinates=self.num_coordinates)\n self.bn3 = nn.BatchNorm3d(128)\n self.conv4 = Conv3d(128, rep_dim, kernel_size=3, stride=2, padding=1, num_experts=self.num_experts, num_coordinates=self.num_coordinates)\n self.bn4 = nn.BatchNorm3d(rep_dim)\n self.fc = nn.Linear(rep_dim, moco_dim)\n\n def forward(self, x, loc):\n x = self.conv1(x, loc)\n x = self.bn1(x)\n x = self.act(x)\n x = self.conv2(x, loc)\n x = self.bn2(x)\n x = self.act(x)\n x = self.downsample1(x, loc)\n x = self.downsample2(x, loc)\n x = self.downsample3(x, loc)\n x = self.conv3(x, loc)\n x = self.bn3(x)\n x = self.act(x)\n x = self.conv4(x, loc)\n x = self.bn4(x)\n x = self.act(x)\n h = torch.flatten(x, 1)\n z = self.fc(h)\n return z, h" }, { "identifier": "COPD_dataset", "path": "data/copd_patch.py", "snippet": "class COPD_dataset(Dataset):\n\n def __init__(self, stage, args, patch_transforms=default_transform, neighbor_transforms=default_transform):\n self.stage = stage\n self.args = args\n self.root_dir = args.root_dir\n self.metric_dict = dict() # initialize metric dictionary\n self.patch_transforms = patch_transforms\n self.neighbor_transforms = neighbor_transforms\n\n # atlas patch locations, our refernce file can be found at ./preprocess/misc/atlas_patch_loc.npy\n self.patch_loc = np.load(self.args.root_dir + \"19676E_INSP_STD_JHU_COPD_BSpline_Iso1_patch_loc.npy\")\n # pairwise distance\n self.dists = pairwise_distances(self.patch_loc, metric='euclidean')\n # normalize patch locations\n self.patch_loc = (self.patch_loc / self.patch_loc.max(0)) * 2 - 1 # normalize position to [-1, 1]\n\n self.patch_idx = 0\n self.patch_data = np.load(self.args.root_dir+\"grouped_patch/patch_loc_\"+str(self.patch_idx)+\".npy\")\n # top k nearest patches\n self.k_neighbor_idx = np.argsort(self.dists[self.patch_idx,:])[1: (self.args.k_neighbors+1)]\n neighbor_lst = []\n for k in range(self.args.k_neighbors):\n neighbor_data = np.load(self.args.root_dir+\"grouped_patch/patch_loc_\"+str(self.k_neighbor_idx[k])+\".npy\")\n neighbor_lst.append(neighbor_data[None, :, :, :, :]) # 1 * 9179 * 32 * 32 * 32\n self.neighbor_data = np.concatenate(neighbor_lst, axis=0)\n del neighbor_lst\n\n if stage == 'training':\n # Specific to COPDGene dataset, you can change depends on your needs\n FILE = open(DATA_DIR + \"phase1_Final_10K/phase 1 Pheno/Final10000_Phase1_Rev_28oct16.txt\", \"r\")\n mylist = FILE.readline().strip(\"\\n\").split(\"\\t\")\n metric_idx = [mylist.index(label) for label in self.args.label_name]\n race_idx = mylist.index(\"race\")\n for line in 
FILE.readlines():\n mylist = line.strip(\"\\n\").split(\"\\t\")\n tmp = [mylist[idx] for idx in metric_idx]\n if \"\" in tmp:\n continue\n if self.args.nhw_only and mylist[race_idx] != \"1\":\n continue\n metric_list = []\n for i in range(len(metric_idx)):\n metric_list.append(float(tmp[i]))\n self.metric_dict[mylist[0]] = metric_list\n FILE.close()\n\n if stage == 'testing':\n # Specific to COPDGene dataset, you can change depends on your needs\n self.label_name = self.args.label_name + self.args.label_name_set2\n FILE = open(DATA_DIR + \"phase1_Final_10K/phase 1 Pheno/Final10000_Phase1_Rev_28oct16.txt\", \"r\")\n mylist = FILE.readline().strip(\"\\n\").split(\"\\t\")\n metric_idx = [mylist.index(label) for label in self.label_name]\n for line in FILE.readlines():\n mylist = line.strip(\"\\n\").split(\"\\t\")\n tmp = [mylist[idx] for idx in metric_idx]\n if \"\" in tmp[:3]:\n continue\n metric_list = []\n for i in range(len(metric_idx)):\n if tmp[i] == \"\":\n metric_list.append(-1024)\n else:\n metric_list.append(float(tmp[i]))\n self.metric_dict[mylist[0]] = metric_list + [-1024, -1024, -1024]\n FILE = open(DATA_DIR + \"CT_scan_datasets/CT_visual_scoring/COPDGene_CT_Visual_20JUL17.txt\", \"r\")\n mylist = FILE.readline().strip(\"\\n\").split(\"\\t\")\n metric_idx = [mylist.index(label) for label in self.args.visual_score]\n for line in FILE.readlines():\n mylist = line.strip(\"\\n\").split(\"\\t\")\n if mylist[0] not in self.metric_dict:\n continue\n tmp = [mylist[idx] for idx in metric_idx]\n metric_list = []\n for i in range(len(metric_idx)):\n metric_list.append(float(tmp[i]))\n self.metric_dict[mylist[0]][\n -len(self.args.visual_score) - len(self.args.P2_Pheno):-len(self.args.P2_Pheno)] = metric_list\n FILE.close()\n FILE = open(\n DATA_DIR + 'P1-P2 First 5K Long Data/Subject-flattened- one row per subject/First5000_P1P2_Pheno_Flat24sep16.txt',\n 'r')\n mylist = FILE.readline().strip(\"\\n\").split(\"\\t\")\n metric_idx = [mylist.index(label) for label in self.args.P2_Pheno]\n for line in FILE.readlines():\n mylist = line.strip(\"\\n\").split(\"\\t\")\n if mylist[0] not in self.metric_dict:\n continue\n tmp = [mylist[idx] for idx in metric_idx]\n metric_list = []\n for i in range(len(metric_idx)):\n metric_list.append(float(tmp[i]))\n self.metric_dict[mylist[0]][-len(self.args.P2_Pheno):] = metric_list\n FILE.close()\n\n self.sid_list = []\n for item in glob.glob(self.args.root_dir+\"patch/\"+\"*_patch.npy\"):\n if item.split('/')[-1][:6] not in self.metric_dict:\n continue\n self.sid_list.append(item.split('/')[-1][:-10])\n self.sid_list.sort()\n assert len(self.sid_list) == self.patch_data.shape[0]\n\n print(\"Fold: full\")\n self.sid_list = np.asarray(self.sid_list)\n self.sid_list_len = len(self.sid_list)\n print(stage+\" dataset size:\", self.sid_list_len)\n\n def set_patch_idx(self, patch_idx):\n self.patch_idx = patch_idx\n self.patch_data = np.load(self.args.root_dir+\"grouped_patch/patch_loc_\"+str(self.patch_idx)+\".npy\")\n # top k nearest patches\n self.k_neighbor_idx = np.argsort(self.dists[self.patch_idx,:])[1: (self.args.k_neighbors+1)]\n neighbor_lst = []\n for k in range(self.args.k_neighbors):\n neighbor_data = np.load(self.args.root_dir+\"grouped_patch/patch_loc_\"+str(self.k_neighbor_idx[k])+\".npy\")\n neighbor_lst.append(neighbor_data[None, :, :, :, :]) # 1 * 9179 * 32 * 32 * 32\n self.neighbor_data = np.concatenate(neighbor_lst, axis=0)\n del neighbor_lst\n\n def __len__(self):\n if self.stage == 'training':\n return self.sid_list_len * self.args.num_patch\n 
if self.stage == 'testing':\n return self.sid_list_len\n\n def __getitem__(self, idx):\n\n if self.stage == 'training':\n idx = idx % self.sid_list_len\n\n # patch data\n pch = self.patch_data[idx, :, :, :]\n pch = np.clip(pch, -1024, 240) # clip input intensity to [-1024, 240]\n pch = pch + 1024.\n pch = self.patch_transforms(pch[None, :, :, :])\n pch[0] = pch[0]/632.-1 # Normalize to [-1,1], 632=(1024+240)/2\n pch[1] = pch[1]/632.-1 # Normalize to [-1,1], 632=(1024+240)/2\n # patch location\n patch_loc_idx = self.patch_loc[self.patch_idx, :]\n\n # neighbor data\n ngb = self.neighbor_data[:, idx, :, :, :]\n ngb = np.clip(ngb, -1024, 240) # clip input intensity to [-1024, 240]\n ngb = ngb + 1024.\n ngb = self.neighbor_transforms(ngb)\n ngb = ngb/632.-1 # Normalize to [-1,1], 632=(1024+240)/2\n # neighbor location\n neighor_loc_idx = self.patch_loc[self.k_neighbor_idx, :]\n\n # labels\n key = self.sid_list[idx][:6]\n label = np.asarray(self.metric_dict[key])\n return key, pch, patch_loc_idx, ngb, neighor_loc_idx, label\n\n if self.stage == 'testing':\n sid = self.sid_list[idx]\n\n # read the entire image including 581 patches\n img = np.load(self.root_dir + \"patch/\" + sid + \"_patch.npy\")\n img = np.clip(img, -1024, 240) # clip input intensity to [-1024, 240]\n img = img + 1024.\n img = img[:, None, :, :, :] / 632. - 1 # Normalize to [-1,1], 632=(1024+240)/2\n\n # patch locations for all 581 patches\n patch_loc_idx = self.patch_loc\n\n # study id\n key = self.sid_list[idx][:6]\n\n # labels\n label = np.asarray(self.metric_dict[key]) # extract sid from the first 6 letters\n\n return sid, img, patch_loc_idx, label" } ]
import os
import argparse
import json
import random
import numpy as np
import torch
from easydict import EasyDict as edict
from tqdm import tqdm
from models.cnn3d import Encoder
from data.copd_patch import COPD_dataset
3,143
parser = argparse.ArgumentParser(description='Extract 3D Images Representations')
parser.add_argument('--exp-name', default='./ssl_exp/exp_neighbor_0_128')
parser.add_argument('--checkpoint-patch', default='checkpoint_patch_0001.pth.tar')
parser.add_argument('--batch-size', type=int, default=1)

def main():
    # read configurations
    p = parser.parse_args()
    patch_epoch = p.checkpoint_patch.split('.')[0][-4:]
    with open(os.path.join(p.exp_name, 'configs.json')) as f:
        args = edict(json.load(f))
    args.checkpoint = os.path.join(p.exp_name, p.checkpoint_patch)
    args.batch_size = p.batch_size
    args.patch_rep_dir = os.path.join(p.exp_name, 'patch_rep', patch_epoch)
    os.makedirs(args.patch_rep_dir, exist_ok=True)

    # Set random seed
    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)

    torch.backends.cudnn.benchmark = True
    main_worker(args)

def main_worker(args):
    #args.gpu = 0
    #torch.cuda.set_device(args.gpu)

    # create patch-level encoder
parser = argparse.ArgumentParser(description='Extract 3D Images Representations')
parser.add_argument('--exp-name', default='./ssl_exp/exp_neighbor_0_128')
parser.add_argument('--checkpoint-patch', default='checkpoint_patch_0001.pth.tar')
parser.add_argument('--batch-size', type=int, default=1)

def main():
    # read configurations
    p = parser.parse_args()
    patch_epoch = p.checkpoint_patch.split('.')[0][-4:]
    with open(os.path.join(p.exp_name, 'configs.json')) as f:
        args = edict(json.load(f))
    args.checkpoint = os.path.join(p.exp_name, p.checkpoint_patch)
    args.batch_size = p.batch_size
    args.patch_rep_dir = os.path.join(p.exp_name, 'patch_rep', patch_epoch)
    os.makedirs(args.patch_rep_dir, exist_ok=True)

    # Set random seed
    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)

    torch.backends.cudnn.benchmark = True
    main_worker(args)

def main_worker(args):
    #args.gpu = 0
    #torch.cuda.set_device(args.gpu)

    # create patch-level encoder
model_patch = Encoder(rep_dim=args.rep_dim_patch, moco_dim=args.moco_dim_patch, num_experts=args.num_experts, num_coordinates=args.num_coordinates)
0
2023-12-09 02:33:53+00:00
4k
casiatao/PAD
detection/detectron2/modeling/backbone/vit_adapt.py
[ { "identifier": "Backbone", "path": "detection/detectron2/modeling/backbone/backbone.py", "snippet": "class Backbone(nn.Module, metaclass=ABCMeta):\n \"\"\"\n Abstract base class for network backbones.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n The `__init__` method of any subclass can specify its own set of arguments.\n \"\"\"\n super().__init__()\n\n @abstractmethod\n def forward(self):\n \"\"\"\n Subclasses must override this method, but adhere to the same return type.\n\n Returns:\n dict[str->Tensor]: mapping from feature name (e.g., \"res2\") to tensor\n \"\"\"\n pass\n\n @property\n def size_divisibility(self) -> int:\n \"\"\"\n Some backbones require the input height and width to be divisible by a\n specific integer. This is typically true for encoder / decoder type networks\n with lateral connection (e.g., FPN) for which feature maps need to match\n dimension in the \"bottom up\" and \"top down\" paths. Set to 0 if no specific\n input size divisibility is required.\n \"\"\"\n return 0\n\n @property\n def padding_constraints(self) -> Dict[str, int]:\n \"\"\"\n This property is a generalization of size_divisibility. Some backbones and training\n recipes require specific padding constraints, such as enforcing divisibility by a specific\n integer (e.g., FPN) or padding to a square (e.g., ViTDet with large-scale jitter\n in :paper:vitdet). `padding_constraints` contains these optional items like:\n {\n \"size_divisibility\": int,\n \"square_size\": int,\n # Future options are possible\n }\n `size_divisibility` will read from here if presented and `square_size` indicates the\n square padding size if `square_size` > 0.\n\n TODO: use type of Dict[str, int] to avoid torchscipt issues. The type of padding_constraints\n could be generalized as TypedDict (Python 3.8+) to support more types in the future.\n \"\"\"\n return {}\n\n def output_shape(self):\n \"\"\"\n Returns:\n dict[str->ShapeSpec]\n \"\"\"\n # this is a backward-compatible default\n return {\n name: ShapeSpec(\n channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]\n )\n for name in self._out_features\n }" }, { "identifier": "PatchEmbed", "path": "detection/detectron2/modeling/backbone/utils.py", "snippet": "class PatchEmbed(nn.Module):\n \"\"\"\n Image to Patch Embedding.\n \"\"\"\n\n def __init__(\n self, kernel_size=(16, 16), stride=(16, 16), padding=(0, 0), in_chans=3, embed_dim=768\n ):\n \"\"\"\n Args:\n kernel_size (Tuple): kernel size of the projection layer.\n stride (Tuple): stride of the projection layer.\n padding (Tuple): padding size of the projection layer.\n in_chans (int): Number of input image channels.\n embed_dim (int): embed_dim (int): Patch embedding dimension.\n \"\"\"\n super().__init__()\n\n self.proj = nn.Conv2d(\n in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding\n )\n\n def forward(self, x):\n x = self.proj(x)\n # B C H W -> B H W C\n x = x.permute(0, 2, 3, 1)\n return x" }, { "identifier": "add_decomposed_rel_pos", "path": "detection/detectron2/modeling/backbone/utils.py", "snippet": "def add_decomposed_rel_pos(attn, q, rel_pos_h, rel_pos_w, q_size, k_size):\n \"\"\"\n Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`.\n https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py # noqa B950\n Args:\n attn (Tensor): attention map.\n q (Tensor): query q in the attention layer with shape (B, q_h * q_w, C).\n rel_pos_h (Tensor): relative position embeddings (Lh, C) for 
height axis.\n rel_pos_w (Tensor): relative position embeddings (Lw, C) for width axis.\n q_size (Tuple): spatial sequence size of query q with (q_h, q_w).\n k_size (Tuple): spatial sequence size of key k with (k_h, k_w).\n\n Returns:\n attn (Tensor): attention map with added relative positional embeddings.\n \"\"\"\n q_h, q_w = q_size\n k_h, k_w = k_size\n Rh = get_rel_pos(q_h, k_h, rel_pos_h)\n Rw = get_rel_pos(q_w, k_w, rel_pos_w)\n\n B, _, dim = q.shape\n r_q = q.reshape(B, q_h, q_w, dim)\n rel_h = torch.einsum(\"bhwc,hkc->bhwk\", r_q, Rh)\n rel_w = torch.einsum(\"bhwc,wkc->bhwk\", r_q, Rw)\n\n attn = (\n attn.view(B, q_h, q_w, k_h, k_w) + rel_h[:, :, :, :, None] + rel_w[:, :, :, None, :]\n ).view(B, q_h * q_w, k_h * k_w)\n\n return attn" }, { "identifier": "get_abs_pos", "path": "detection/detectron2/modeling/backbone/utils.py", "snippet": "def get_abs_pos(abs_pos, has_cls_token, hw):\n \"\"\"\n Calculate absolute positional embeddings. If needed, resize embeddings and remove cls_token\n dimension for the original embeddings.\n Args:\n abs_pos (Tensor): absolute positional embeddings with (1, num_position, C).\n has_cls_token (bool): If true, has 1 embedding in abs_pos for cls token.\n hw (Tuple): size of input image tokens.\n\n Returns:\n Absolute positional embeddings after processing with shape (1, H, W, C)\n \"\"\"\n h, w = hw\n if has_cls_token:\n abs_pos = abs_pos[:, 1:]\n xy_num = abs_pos.shape[1]\n size = int(math.sqrt(xy_num))\n assert size * size == xy_num\n\n if size != h or size != w:\n new_abs_pos = F.interpolate(\n abs_pos.reshape(1, size, size, -1).permute(0, 3, 1, 2),\n size=(h, w),\n mode=\"bicubic\",\n align_corners=False,\n )\n\n return new_abs_pos.permute(0, 2, 3, 1)\n else:\n return abs_pos.reshape(1, h, w, -1)" }, { "identifier": "window_partition", "path": "detection/detectron2/modeling/backbone/utils.py", "snippet": "def window_partition(x, window_size):\n \"\"\"\n Partition into non-overlapping windows with padding if needed.\n Args:\n x (tensor): input tokens with [B, H, W, C].\n window_size (int): window size.\n\n Returns:\n windows: windows after partition with [B * num_windows, window_size, window_size, C].\n (Hp, Wp): padded height and width before partition\n \"\"\"\n B, H, W, C = x.shape\n\n pad_h = (window_size - H % window_size) % window_size\n pad_w = (window_size - W % window_size) % window_size\n if pad_h > 0 or pad_w > 0:\n x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h))\n Hp, Wp = H + pad_h, W + pad_w\n\n x = x.view(B, Hp // window_size, window_size, Wp // window_size, window_size, C)\n windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)\n return windows, (Hp, Wp)" }, { "identifier": "window_unpartition", "path": "detection/detectron2/modeling/backbone/utils.py", "snippet": "def window_unpartition(windows, window_size, pad_hw, hw):\n \"\"\"\n Window unpartition into original sequences and removing padding.\n Args:\n x (tensor): input tokens with [B * num_windows, window_size, window_size, C].\n window_size (int): window size.\n pad_hw (Tuple): padded height and width (Hp, Wp).\n hw (Tuple): original height and width (H, W) before padding.\n\n Returns:\n x: unpartitioned sequences with [B, H, W, C].\n \"\"\"\n Hp, Wp = pad_hw\n H, W = hw\n B = windows.shape[0] // (Hp * Wp // window_size // window_size)\n x = windows.view(B, Hp // window_size, Wp // window_size, window_size, window_size, -1)\n x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, Hp, Wp, -1)\n\n if Hp > H or Wp > W:\n x = x[:, :H, :W, :].contiguous()\n 
return x" } ]
import logging
import math
import fvcore.nn.weight_init as weight_init
import torch
import torch.nn as nn
import torch.nn.functional as F
from detectron2.layers import CNNBlockBase, Conv2d, get_norm
from detectron2.modeling.backbone.fpn import _assert_strides_are_log2_contiguous
from .backbone import Backbone
from .utils import (
    PatchEmbed,
    add_decomposed_rel_pos,
    get_abs_pos,
    window_partition,
    window_unpartition,
)
from timm.models.layers import DropPath, Mlp
from fairscale.nn.checkpoint import checkpoint_wrapper
3,319
logger = logging.getLogger(__name__) __all__ = ["ViT_adapt", "SimpleFeaturePyramid", "get_vit_lr_decay_rate"] class Adapter(nn.Module): def __init__(self, d_model, down_size = 64, dropout=0.0, adapter_scalar="frozen", init_value="0.0", adapter_layernorm_option="in", patch_wise_scalar=False): super().__init__() self.n_embd = d_model self.down_size = down_size #_before self.adapter_layernorm_option = adapter_layernorm_option self.adapter_layer_norm_before = None if adapter_layernorm_option == "in" or adapter_layernorm_option == "out": self.adapter_layer_norm_before = nn.LayerNorm(self.n_embd) self.patch_wise_scalar = patch_wise_scalar if patch_wise_scalar: self.scale = None else: if adapter_scalar == "learnable_scalar": self.scale = nn.Parameter(torch.ones(1) * 0.5) else: if init_value != "0.0": self.scale = float(init_value) else: self.register_buffer('scale', torch.ones(1) * 0.5) self.down_proj = nn.Linear(self.n_embd, self.down_size) self.non_linear_func = nn.ReLU() self.up_proj = nn.Linear(self.down_size, self.n_embd) self.dropout = dropout def forward(self, x, add_residual=False, residual=None): residual = x if residual is None else residual if self.adapter_layernorm_option == 'in': x = self.adapter_layer_norm_before(x) down = self.down_proj(x) down = self.non_linear_func(down) down = nn.functional.dropout(down, p=self.dropout, training=self.training) up = self.up_proj(down) if add_residual: output = up + residual else: output = up return output, self.scale class Attention(nn.Module): """Multi-head Attention block with relative position embeddings.""" def __init__( self, dim, num_heads=8, qkv_bias=True, use_rel_pos=False, rel_pos_zero_init=True, input_size=None, ): """ Args: dim (int): Number of input channels. num_heads (int): Number of attention heads. qkv_bias (bool: If True, add a learnable bias to query, key, value. rel_pos (bool): If True, add relative positional embeddings to the attention map. rel_pos_zero_init (bool): If True, zero initialize relative positional parameters. input_size (int or None): Input resolution for calculating the relative positional parameter size. """ super().__init__() self.num_heads = num_heads head_dim = dim // num_heads self.scale = head_dim**-0.5 self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.proj = nn.Linear(dim, dim) self.use_rel_pos = use_rel_pos if self.use_rel_pos: # initialize relative positional embeddings self.rel_pos_h = nn.Parameter(torch.zeros(2 * input_size[0] - 1, head_dim)) self.rel_pos_w = nn.Parameter(torch.zeros(2 * input_size[1] - 1, head_dim)) if not rel_pos_zero_init: nn.init.trunc_normal_(self.rel_pos_h, std=0.02) nn.init.trunc_normal_(self.rel_pos_w, std=0.02) def forward(self, x): B, H, W, _ = x.shape # qkv with shape (3, B, nHead, H * W, C) qkv = self.qkv(x).reshape(B, H * W, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) # q, k, v with shape (B * nHead, H * W, C) q, k, v = qkv.reshape(3, B * self.num_heads, H * W, -1).unbind(0) attn = (q * self.scale) @ k.transpose(-2, -1) if self.use_rel_pos:
logger = logging.getLogger(__name__) __all__ = ["ViT_adapt", "SimpleFeaturePyramid", "get_vit_lr_decay_rate"] class Adapter(nn.Module): def __init__(self, d_model, down_size = 64, dropout=0.0, adapter_scalar="frozen", init_value="0.0", adapter_layernorm_option="in", patch_wise_scalar=False): super().__init__() self.n_embd = d_model self.down_size = down_size #_before self.adapter_layernorm_option = adapter_layernorm_option self.adapter_layer_norm_before = None if adapter_layernorm_option == "in" or adapter_layernorm_option == "out": self.adapter_layer_norm_before = nn.LayerNorm(self.n_embd) self.patch_wise_scalar = patch_wise_scalar if patch_wise_scalar: self.scale = None else: if adapter_scalar == "learnable_scalar": self.scale = nn.Parameter(torch.ones(1) * 0.5) else: if init_value != "0.0": self.scale = float(init_value) else: self.register_buffer('scale', torch.ones(1) * 0.5) self.down_proj = nn.Linear(self.n_embd, self.down_size) self.non_linear_func = nn.ReLU() self.up_proj = nn.Linear(self.down_size, self.n_embd) self.dropout = dropout def forward(self, x, add_residual=False, residual=None): residual = x if residual is None else residual if self.adapter_layernorm_option == 'in': x = self.adapter_layer_norm_before(x) down = self.down_proj(x) down = self.non_linear_func(down) down = nn.functional.dropout(down, p=self.dropout, training=self.training) up = self.up_proj(down) if add_residual: output = up + residual else: output = up return output, self.scale class Attention(nn.Module): """Multi-head Attention block with relative position embeddings.""" def __init__( self, dim, num_heads=8, qkv_bias=True, use_rel_pos=False, rel_pos_zero_init=True, input_size=None, ): """ Args: dim (int): Number of input channels. num_heads (int): Number of attention heads. qkv_bias (bool: If True, add a learnable bias to query, key, value. rel_pos (bool): If True, add relative positional embeddings to the attention map. rel_pos_zero_init (bool): If True, zero initialize relative positional parameters. input_size (int or None): Input resolution for calculating the relative positional parameter size. """ super().__init__() self.num_heads = num_heads head_dim = dim // num_heads self.scale = head_dim**-0.5 self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.proj = nn.Linear(dim, dim) self.use_rel_pos = use_rel_pos if self.use_rel_pos: # initialize relative positional embeddings self.rel_pos_h = nn.Parameter(torch.zeros(2 * input_size[0] - 1, head_dim)) self.rel_pos_w = nn.Parameter(torch.zeros(2 * input_size[1] - 1, head_dim)) if not rel_pos_zero_init: nn.init.trunc_normal_(self.rel_pos_h, std=0.02) nn.init.trunc_normal_(self.rel_pos_w, std=0.02) def forward(self, x): B, H, W, _ = x.shape # qkv with shape (3, B, nHead, H * W, C) qkv = self.qkv(x).reshape(B, H * W, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) # q, k, v with shape (B * nHead, H * W, C) q, k, v = qkv.reshape(3, B * self.num_heads, H * W, -1).unbind(0) attn = (q * self.scale) @ k.transpose(-2, -1) if self.use_rel_pos:
attn = add_decomposed_rel_pos(attn, q, self.rel_pos_h, self.rel_pos_w, (H, W), (H, W))
2
2023-12-13 13:14:36+00:00
4k
pymike00/tinychat
tinychat/ui/chat.py
[ { "identifier": "FONT_FAMILY", "path": "tinychat/settings.py", "snippet": "FONT_FAMILY = \"Verdana\" # Consolas" }, { "identifier": "MAIN_WINDOW_RESOLUTION", "path": "tinychat/settings.py", "snippet": "MAIN_WINDOW_RESOLUTION = \"1200x700\"" }, { "identifier": "MAIN_WINDOW_TITLE", "path": "tinychat/settings.py", "snippet": "MAIN_WINDOW_TITLE = f\"TinyChat LM Client - v{APP_VERSION}\"" }, { "identifier": "get_icon_path", "path": "tinychat/settings.py", "snippet": "def get_icon_path():\n if os.name == \"nt\":\n icon_file_name = \"tinychat.ico\"\n else:\n icon_file_name = \"tinychat.png\" \n if not hasattr(sys, \"frozen\"):\n return os.path.join(os.path.dirname(__file__), icon_file_name)\n else:\n return os.path.join(sys.prefix, icon_file_name)" }, { "identifier": "SettingsFrame", "path": "tinychat/ui/frames.py", "snippet": "class SettingsFrame(ctk.CTkFrame):\n \"\"\"\n Allows model selection and access to api_key settings.\n \"\"\"\n\n def __init__(\n self,\n parent,\n available_models,\n on_model_select_callback,\n on_reset_callback,\n on_export_callback,\n *args,\n **kwargs\n ):\n super().__init__(parent, *args, **kwargs)\n self.grid_columnconfigure(2, weight=1)\n\n # Create model selection menu\n self.model_selection_menu = ctk.CTkOptionMenu(\n self,\n values=available_models,\n command=on_model_select_callback,\n font=ctk.CTkFont(family=\"Arial\", size=13, weight=\"bold\"),\n dropdown_font=ctk.CTkFont(family=\"Arial\", size=13, weight=\"bold\"),\n fg_color=(\"#0C955A\", \"#106A43\"),\n )\n self.model_selection_menu.grid(\n row=0, column=0, padx=(20, 0), pady=(10, 5), sticky=\"w\"\n )\n\n # Create settings button\n self.settings_button = ctk.CTkButton(\n self,\n text=\"Settings\",\n command=self.open_settings_window,\n font=ctk.CTkFont(family=\"Arial\", size=13, weight=\"bold\"),\n fg_color=(\"#0C955A\", \"#106A43\"),\n hover_color=\"#2c6e49\",\n )\n self.settings_button.grid(\n row=0, column=1, padx=(10, 20), pady=(10, 5), sticky=\"e\"\n )\n\n # Create the new_chat button\n self.reset_button = ctk.CTkButton(\n self,\n text=\"New Chat\",\n command=on_reset_callback,\n font=ctk.CTkFont(family=\"Arial\", size=13, weight=\"bold\"),\n fg_color=(\"#0C955A\", \"#106A43\"),\n hover_color=\"#2c6e49\",\n )\n self.reset_button.grid(row=0, column=2, padx=(10, 0), pady=(10, 5), sticky=\"e\")\n\n # Create the export chat button\n self.export_button = ctk.CTkButton(\n self,\n text=\"Export Conversation\",\n command=on_export_callback,\n font=ctk.CTkFont(family=\"Arial\", size=13, weight=\"bold\"),\n fg_color=(\"#0C955A\", \"#106A43\"),\n hover_color=\"#2c6e49\",\n )\n self.export_button.grid(\n row=0, column=3, padx=(10, 20), pady=(10, 5), sticky=\"e\"\n )\n\n def open_settings_window(self):\n \"\"\"\n Open settings window where API keys can be configured.\n \"\"\"\n # TODO: fix layout and refactor\n\n # Create a new top-level window for settings\n settings_window = ctk.CTkToplevel(self)\n settings_window.title(\"API Key Settings\")\n settings_window.geometry(\"600x240\") # Adjusted size to fit API key entries\n settings_window.transient(self) # type:ignore - Set to be on top of the main window\n\n # Configure grid layout\n settings_window.grid_columnconfigure(1, weight=1)\n\n # Add widgets to the settings window for API key entries\n api_key_label_1 = ctk.CTkLabel(settings_window, text=\"OpenAI API Key: \")\n api_key_label_1.grid(row=0, column=0, padx=(20, 2), pady=(20, 2), sticky=\"e\")\n self.api_key_entry_1 = ctk.CTkEntry(settings_window)\n self.api_key_entry_1.insert(0, 
get_api_key(OPENAI_API_KEY_NAME))\n self.api_key_entry_1.grid(\n row=0, column=1, padx=(2, 20), pady=(20, 2), sticky=\"ew\"\n )\n\n api_key_label_2 = ctk.CTkLabel(settings_window, text=\"Mistral API Key: \")\n api_key_label_2.grid(row=1, column=0, padx=(20, 2), pady=(10, 2), sticky=\"e\")\n self.api_key_entry_2 = ctk.CTkEntry(settings_window)\n self.api_key_entry_2.insert(0, get_api_key(MISTRAL_API_KEY_NAME))\n self.api_key_entry_2.grid(\n row=1, column=1, padx=(2, 20), pady=(10, 2), sticky=\"ew\"\n )\n\n api_key_label_3 = ctk.CTkLabel(settings_window, text=\"Cohere API Key: \")\n api_key_label_3.grid(row=2, column=0, padx=(20, 2), pady=(10, 2), sticky=\"e\")\n self.api_key_entry_3 = ctk.CTkEntry(settings_window)\n self.api_key_entry_3.insert(0, get_api_key(COHERE_API_KEY_NAME))\n self.api_key_entry_3.grid(\n row=2, column=1, padx=(2, 20), pady=(10, 2), sticky=\"ew\"\n )\n\n api_key_label_4 = ctk.CTkLabel(settings_window, text=\"Google API Key: \")\n api_key_label_4.grid(row=3, column=0, padx=(20, 2), pady=(10, 2), sticky=\"e\")\n self.api_key_entry_4 = ctk.CTkEntry(settings_window)\n self.api_key_entry_4.insert(0, get_api_key(GOOGLE_API_KEY_NAME))\n self.api_key_entry_4.grid(\n row=3, column=1, padx=(2, 20), pady=(10, 2), sticky=\"ew\"\n )\n\n self.status_label = ctk.CTkLabel(settings_window, text=\"\")\n self.status_label.grid(row=4, column=0, padx=(20, 2), pady=(10, 2), sticky=\"w\")\n\n # Add a close button to the settings window\n close = ctk.CTkButton(\n settings_window,\n text=\"Close\",\n command=settings_window.destroy,\n fg_color=(\"#0C955A\", \"#106A43\"),\n hover_color=\"#2c6e49\",\n )\n close.grid(row=4, column=1, padx=(0, 0), pady=(20, 0), sticky=\"w\")\n\n # Add a save button to the settings window\n save = ctk.CTkButton(\n settings_window,\n text=\"Save Settings\",\n command=self.save_settings,\n fg_color=(\"#0C955A\", \"#106A43\"),\n hover_color=\"#2c6e49\",\n )\n save.grid(row=4, column=1, padx=(150, 0), pady=(20, 0), sticky=\"w\")\n\n def save_settings(self):\n set_api_key(OPENAI_API_KEY_NAME, self.api_key_entry_1.get())\n set_api_key(MISTRAL_API_KEY_NAME, self.api_key_entry_2.get())\n set_api_key(COHERE_API_KEY_NAME, self.api_key_entry_3.get())\n set_api_key(GOOGLE_API_KEY_NAME, self.api_key_entry_4.get())\n self.status_label.configure(text=\"Saved.\")" } ]
import os
import threading
import tkinter as tk
import customtkinter as ctk
from tkinter import PhotoImage
from tinychat.settings import FONT_FAMILY, MAIN_WINDOW_RESOLUTION, MAIN_WINDOW_TITLE
from tinychat.settings import get_icon_path
from tinychat.ui.frames import SettingsFrame
2,009
class ChatApp(ctk.CTk):
    def __init__(self, backend) -> None:
        super().__init__()
        self.set_icon()
        self.model_name = ""

        # Initialize font object to use with the chat text areas
class ChatApp(ctk.CTk):
    def __init__(self, backend) -> None:
        super().__init__()
        self.set_icon()
        self.model_name = ""

        # Initialize font object to use with the chat text areas
chat_font = ctk.CTkFont(family=FONT_FAMILY, size=14)
0
2023-12-11 20:40:02+00:00
4k
nickruggeri/hypergraph-message-passing
test/model/test_numerical.py
[ { "identifier": "approx_log_factorial", "path": "src/model/numerical.py", "snippet": "def approx_log_factorial(a: int | float) -> float:\n \"\"\"Compute :math::`\\log(a!)` utilizing a Ramanujan approximation, see\n https://math.stackexchange.com/questions/152342/ramanujans-approximation-to-factorial\n\n Parameters\n ----------\n a: positive float or integer value\n\n Returns\n -------\n The approximate value of :math::`log(a!)`.\n \"\"\"\n if a == 0 or a == 1:\n return 0\n if a == 2:\n return np.log(a)\n\n m = a * (1 + 4 * a * (1 + 2 * a))\n return a * np.log(a) - a + 0.5 * (1 / 3 * np.log(1 / 30 + m) + LN_PI)" }, { "identifier": "log_binomial_coefficient", "path": "src/model/numerical.py", "snippet": "def log_binomial_coefficient(a: int, b: int, allow_approx: bool = True) -> float:\n \"\"\"Logarithm of the binomial coefficient of a over b.\n\n Parameters\n ----------\n a: integer value\n b: integer value\n allow_approx: allow numerical approximation for factorials of large numbers.\n\n Returns\n -------\n The logarithm of the binomial coefficient of a over b.\n \"\"\"\n if a == b or b == 0:\n return 0.0\n if a < b:\n raise ValueError(\n \"The binomial coefficient is not defined for a smaller than b.\"\n )\n\n if not allow_approx or a - b < 5:\n log_numerator = np.sum(np.log(np.arange(a - b + 1, a + 1)))\n log_denominator = log_factorial(b)\n else:\n log_numerator = approx_log_factorial(a)\n if b > 5:\n log_denominator = approx_log_factorial(a - b) + approx_log_factorial(b)\n else:\n log_denominator = approx_log_factorial(a - b) + log_factorial(b)\n return log_numerator - log_denominator" }, { "identifier": "log_factorial", "path": "src/model/numerical.py", "snippet": "def log_factorial(a: int | float) -> float:\n \"\"\"Compute :math::`log(a!)`.\n\n Parameters\n ----------\n a: positive float or integer value\n\n Returns\n -------\n The value of :math::`log(a!)`.\n \"\"\"\n if a == 0:\n return 0.0\n\n return np.sum(np.log(np.arange(1, a + 1)))" }, { "identifier": "sparse_reduce_lse", "path": "src/model/numerical.py", "snippet": "def sparse_reduce_lse(\n *args: sparse.csc_array | sparse.csr_array,\n) -> sparse.csc_array | sparse.csr_array:\n \"\"\"Perform the elementwise log-sum-exp operation on a sequence of sparse arrays.\n The arrays are assumed to have all the same pattern of non-zero entries, and to have\n sorted indices.\n \"\"\"\n data = np.stack([mat.data for mat in args], axis=1)\n lse_vals = special.logsumexp(data, axis=1)\n\n lse_mat = args[0].copy()\n lse_mat.data = lse_vals\n return lse_mat" } ]
import itertools
import numpy as np
import pytest
from scipy import sparse, special
from src.model.numerical import (
    approx_log_factorial,
    log_binomial_coefficient,
    log_factorial,
    sparse_reduce_lse,
)
1,938
######################################################################################## # Test sparse_reduce_lse # Some arbitrary matrices created by hand. matrix_list = [ [ np.array([[1, 1, 0], [-1, 0, 0], [2, 0, 2]]), np.array([[10, 1, 0], [-1, 0, 0], [3, 0, 1]]), ], [ np.array([[-5, 0, 0, 0], [2, 3, 4, 5], [1.3, -5.1, 0, 1]]), np.array([[2, 0, 0, 0], [1, 2, -1.1, 1.3], [1.3, -5.1, 0, 1]]), np.array([[-1.1, 0, 0, 0], [3, 3, 5, 0.3], [-2.1, 2.0, 0, 1]]), ], [ np.array([[10, 0, 100, 400], [-1, 0.3, 0, 1000], [0, 0, 0, 0]]), np.array([[100, 0, -100, 123], [-40, 10, 0, 1100], [0, 0, 0, 0]]), np.array([[102, 0, -97, 133], [-33, 11, 0, 900], [0, 0, 0, 0]]), ], [ np.array([[10, 0, 100, 400], [-1, 0.3, 0, 1000], [2, 3, 4, 5]]), np.array([[100, 0, -100, 123], [-40, 10, 0, 1100], [-1, 2, 3, 5]]), np.array([[102, 0, -97, 133], [-33, 11, 0, 900], [1, 1, 1, 1]]), np.array([[-2.7, 0, 33, 133], [-33, 11, 0, 900], [1, 1, 1, 1]]), np.array([[-2.7, 0, 33, 133], [-33, 11, 0, 900], [1, 1, 1, 1]]), ], ] # Some additional random matrices. def generate_random_matrices_with_same_non_zeros(rng, shape, n, scale, sparsity): zero_idx = rng.random(shape) > sparsity matrices = [rng.random(shape) * scale for _ in range(n)] for mat in matrices: mat[zero_idx] = 0 return matrices rng = np.random.default_rng(seed=123) shapes = [ (10, 3), (100, 4), (1000, 50), ] scales = [1, 10, 100] n_matrices = [5, 10, 20] sparsity_vals = [0.1, 0.5, 0.9] matrix_list += [ generate_random_matrices_with_same_non_zeros(rng, shape, n, scale, sparsity) for scale in scales for shape in shapes for n in n_matrices for sparsity in sparsity_vals ] @pytest.fixture(params=[sparse.csc_matrix, sparse.csr_matrix]) def sparsity_type(request): return request.param @pytest.fixture(params=range(len(matrix_list))) def sparse_and_dense_matrices(sparsity_type, request): matrices = matrix_list[request.param] sparse_mat = [sparsity_type(mat) for mat in matrices] return matrices, sparse_mat, sparsity_type @pytest.fixture def sparse_and_dense_matrices_and_lse(sparse_and_dense_matrices): matrices, sparse_mat, sparsity_type = sparse_and_dense_matrices lse = sparse_reduce_lse(*sparse_mat) return matrices, sparse_mat, sparsity_type, lse def test_reduce_sparse_lse_type(sparse_and_dense_matrices_and_lse): _, _, sparsity_type, lse = sparse_and_dense_matrices_and_lse assert isinstance(lse, sparsity_type) def test_reduce_sparse_lse_with_dense(sparse_and_dense_matrices_and_lse): matrices, sparse_mat, sparsity_type, lse = sparse_and_dense_matrices_and_lse dense_lse = special.logsumexp(np.stack(matrices, axis=2), axis=2) dense_lse[matrices[0] == 0] = 0 assert np.all(dense_lse == lse) ######################################################################################## # Test log_factorial, approx_log_factorial and log_binomial_coefficient @pytest.mark.parametrize("a", range(100)) def test_stirling_approx_against_log_factorial(a):
######################################################################################## # Test sparse_reduce_lse # Some arbitrary matrices created by hand. matrix_list = [ [ np.array([[1, 1, 0], [-1, 0, 0], [2, 0, 2]]), np.array([[10, 1, 0], [-1, 0, 0], [3, 0, 1]]), ], [ np.array([[-5, 0, 0, 0], [2, 3, 4, 5], [1.3, -5.1, 0, 1]]), np.array([[2, 0, 0, 0], [1, 2, -1.1, 1.3], [1.3, -5.1, 0, 1]]), np.array([[-1.1, 0, 0, 0], [3, 3, 5, 0.3], [-2.1, 2.0, 0, 1]]), ], [ np.array([[10, 0, 100, 400], [-1, 0.3, 0, 1000], [0, 0, 0, 0]]), np.array([[100, 0, -100, 123], [-40, 10, 0, 1100], [0, 0, 0, 0]]), np.array([[102, 0, -97, 133], [-33, 11, 0, 900], [0, 0, 0, 0]]), ], [ np.array([[10, 0, 100, 400], [-1, 0.3, 0, 1000], [2, 3, 4, 5]]), np.array([[100, 0, -100, 123], [-40, 10, 0, 1100], [-1, 2, 3, 5]]), np.array([[102, 0, -97, 133], [-33, 11, 0, 900], [1, 1, 1, 1]]), np.array([[-2.7, 0, 33, 133], [-33, 11, 0, 900], [1, 1, 1, 1]]), np.array([[-2.7, 0, 33, 133], [-33, 11, 0, 900], [1, 1, 1, 1]]), ], ] # Some additional random matrices. def generate_random_matrices_with_same_non_zeros(rng, shape, n, scale, sparsity): zero_idx = rng.random(shape) > sparsity matrices = [rng.random(shape) * scale for _ in range(n)] for mat in matrices: mat[zero_idx] = 0 return matrices rng = np.random.default_rng(seed=123) shapes = [ (10, 3), (100, 4), (1000, 50), ] scales = [1, 10, 100] n_matrices = [5, 10, 20] sparsity_vals = [0.1, 0.5, 0.9] matrix_list += [ generate_random_matrices_with_same_non_zeros(rng, shape, n, scale, sparsity) for scale in scales for shape in shapes for n in n_matrices for sparsity in sparsity_vals ] @pytest.fixture(params=[sparse.csc_matrix, sparse.csr_matrix]) def sparsity_type(request): return request.param @pytest.fixture(params=range(len(matrix_list))) def sparse_and_dense_matrices(sparsity_type, request): matrices = matrix_list[request.param] sparse_mat = [sparsity_type(mat) for mat in matrices] return matrices, sparse_mat, sparsity_type @pytest.fixture def sparse_and_dense_matrices_and_lse(sparse_and_dense_matrices): matrices, sparse_mat, sparsity_type = sparse_and_dense_matrices lse = sparse_reduce_lse(*sparse_mat) return matrices, sparse_mat, sparsity_type, lse def test_reduce_sparse_lse_type(sparse_and_dense_matrices_and_lse): _, _, sparsity_type, lse = sparse_and_dense_matrices_and_lse assert isinstance(lse, sparsity_type) def test_reduce_sparse_lse_with_dense(sparse_and_dense_matrices_and_lse): matrices, sparse_mat, sparsity_type, lse = sparse_and_dense_matrices_and_lse dense_lse = special.logsumexp(np.stack(matrices, axis=2), axis=2) dense_lse[matrices[0] == 0] = 0 assert np.all(dense_lse == lse) ######################################################################################## # Test log_factorial, approx_log_factorial and log_binomial_coefficient @pytest.mark.parametrize("a", range(100)) def test_stirling_approx_against_log_factorial(a):
assert np.allclose(approx_log_factorial(a), log_factorial(a))
2
2023-12-06 22:01:38+00:00
4k
sailfishos-chum/sailfishos-chum.github.io
chumweb/atom_feed.py
[ { "identifier": "CONFIG", "path": "chumweb/config.py", "snippet": "CONFIG = init_config()" }, { "identifier": "Package", "path": "chumweb/package.py", "snippet": "class Package:\n \"\"\"\n Metadata of a RPM package with associated Chum metadata\n \"\"\"\n name: str\n summary: str | None = None\n description: str | Markup | None = None\n title: str | None = None\n icon: RemoteImage | None = None\n version: PackageVersion | None = None\n developer_name: str | None = None\n packager_name: str | None = None\n type: PackageApplicationType = PackageApplicationType.generic\n categories: Set[PackageApplicationCategory] = field(default_factory=lambda: {PackageApplicationCategory.other})\n screenshots: List[RemoteImage] = field(default_factory=list)\n links: Dict[str, str] = field(default_factory=dict)\n debuginfo_package: Self | None = None\n debugsource_package: Self | None = None\n url: str | None = None\n licence: str | None = None\n markdown_url: str | None = None\n repo_url: str | None = None\n packaging_repo_url: str | None = None\n debug_yaml: str | None = None\n debug_yaml_errors: List[Exception] = field(default_factory=list)\n updated: datetime | None = field(default_factory=lambda: datetime.fromtimestamp(0))\n\n repos: Set[str] = field(default_factory=set)\n archs: Set[str] = field(default_factory=set)\n download_size: Dict[str, int] = field(default_factory=dict)\n install_size: Dict[str, int] = field(default_factory=dict)\n download_url: Dict[str, str] = field(default_factory=dict)\n checksum_type: Dict[str, str] = field(default_factory=dict)\n checksum_value: Dict[str, str] = field(default_factory=dict)\n\n @staticmethod\n def from_node(dom_element, repo_arch: str):\n \"\"\"\n Creates a Package class instance from a `<package>` XML node `dom_element` as found in the primary.xml\n metadata in RPM repositories.\n \"\"\"\n\n def try_get_str(name) -> str | None:\n \"\"\"Return content of XML tag with `name` or None\"\"\"\n try:\n return dom_element.getElementsByTagName(name)[0].firstChild.nodeValue\n except (IndexError, AttributeError):\n return None\n\n def try_get_attribute_tags(name, *args: str):\n result = (())\n try:\n el = dom_element.getElementsByTagName(name)[0]\n\n for attr in args:\n result += (el.getAttribute(attr),)\n\n return result\n except IndexError:\n return tuple([None for _ in args])\n\n def try_get_version():\n \"\"\"Parse version\"\"\"\n epoch, ver, rel = try_get_attribute_tags(\"version\", \"epoch\", \"ver\", \"rel\")\n return PackageVersion(epoch, ver, rel)\n\n def name_to_title(name: str):\n name_parts: List[str] = name.split(\"-\")\n if name_parts[0] == \"harbour\" or name_parts[0] == \"openrepos\":\n name_parts.pop(0)\n if name_parts[0].startswith(\"lib\"):\n name_parts[0] = name_parts[0].removeprefix(\"lib\")\n name_parts.append(\"(library)\")\n if name_parts[-1] == \"devel\":\n name_parts[-1] = \"(development files)\"\n\n return \" \".join(map(str.capitalize, name_parts))\n\n def parse_description(description: str, name: str):\n from yaml import safe_load as yaml_load\n from yaml.parser import ParserError\n from yaml.scanner import ScannerError\n\n import re\n # Based on\n # https://github.com/sailfishos-chum/sailfishos-chum-gui/blob/0b2882fad79673b762ca184cd242d02334f1d8d1/src/chumpackage.cpp#L152C1-L152C108\n # Metadata, in YAML format, is put as the last paragraph of the application description. 
Paragraphs are\n # split by two newlines.\n paragraphs = [line for line in re.split(r\"(?m)^\\s*$\", description) if line.strip()]\n if not paragraphs:\n return\n\n yaml_part = paragraphs.pop()\n p.debug_yaml = yaml_part\n try:\n yaml = yaml_load(yaml_part)\n except (ParserError, ScannerError):\n yaml = None\n # If it happens that the description is not YAML, it'll be parsed as a str or generate a ParseError. In that\n # case, add the source back to the description\n if type(yaml) in [str, NoneType]:\n paragraphs.append(yaml_part)\n else:\n # Note: use Dict.get() to avoid IndexError's. We rather have None values\n p.title = yaml.get(\"Title\") or yaml.get(\"PackageName\") or name_to_title(name)\n p.type = yaml.get(\"Type\")\n\n icon_url = yaml.get(\"PackageIcon\") or yaml.get(\"Icon\")\n p.icon = RemoteImage(icon_url) if icon_url else None\n p.screenshots = list(map(lambda s: RemoteImage(s), yaml.get(\"Screenshots\", [])))\n p.developer_name = yaml.get(\"DeveloperName\")\n p.packager_name = yaml.get(\"PackagedBy\")\n\n if \"Custom\" in yaml:\n custom = yaml[\"Custom\"]\n if type(custom) is list:\n custom_list = custom\n custom = {}\n # Handle cases where the Custom value is a list of key-value pairs instead of an object :(\n for list_item in custom_list:\n custom |= {k: v for (k, v) in list_item.items()}\n\n p.repo_url = custom.get(\"Repo\")\n p.packaging_repo_url = custom.get(\"PackagingRepo\")\n p.markdown_url = custom.get(\"DescriptionMD\")\n\n try:\n p.links = {key.lower(): val for key, val in (yaml.get(\"Links\") or yaml.get(\"Url\", {})).items()}\n except AttributeError as e:\n p.debug_yaml_errors.append(e)\n\n try:\n p.categories = set(map(PackageApplicationCategory, yaml[\"Categories\"]))\n except (KeyError, ValueError) as e:\n p.debug_yaml_errors.append(e)\n\n p.description = \"\\n\\n\".join(map(lambda s: s.replace('\\n', ' '), paragraphs))\n\n arch = try_get_str(\"arch\")\n\n p = Package(try_get_str(\"name\"))\n p.repos.add(repo_arch)\n p.archs.add(arch)\n p.summary = try_get_str(\"summary\")\n p.version = try_get_version()\n p.url = try_get_str(\"url\")\n p.title = name_to_title(p.name)\n p.licence = try_get_str(\"rpm:license\")\n p.updated = datetime.fromtimestamp(float(try_get_attribute_tags(\"time\", \"file\")[0]), UTC)\n\n p.download_size[arch], p.install_size[arch] = try_get_attribute_tags(\"size\", \"package\", \"installed\")\n p.download_url[arch] = try_get_attribute_tags(\"location\", \"href\")[0]\n p.checksum_type[arch] = try_get_attribute_tags(\"checksum\", \"type\")[0]\n p.checksum_value[arch] = try_get_str(\"checksum\")\n\n try:\n parse_description(try_get_str(\"description\"), p.name)\n except Exception as e:\n p.description = try_get_str(\"description\")\n p.debug_yaml_errors.append(e)\n\n if p.name.startswith(\"lib\") and PackageApplicationCategory.library not in p.categories:\n p.categories.add(PackageApplicationCategory.library)\n\n return p\n\n def merge_arch(self, other_pkg: Self):\n \"\"\"\n Adds the architecture-specific information from another package to this package\n \"\"\"\n for arch in other_pkg.archs:\n self.repos = self.repos.union(other_pkg.repos)\n self.download_size[arch] = other_pkg.download_size[arch]\n self.install_size[arch] = other_pkg.install_size[arch]\n self.download_url[arch] = other_pkg.download_url[arch]\n self.checksum_type[arch] = other_pkg.checksum_type[arch]\n self.checksum_value[arch] = other_pkg.checksum_value[arch]\n self.archs.add(arch)\n\n def is_app(self) -> bool:\n \"\"\"\n Heuristic to detect whether this is a graphical app 
that users would like to install\n \"\"\"\n return self.type == PackageApplicationType.desktop_application \\\n or self.name.startswith(\"harbour-\") \\\n and not self.is_debug()\n\n def is_debug(self) -> bool:\n return self.name.endswith(\"-debuginfo\") or self.name.endswith(\"-debugsource\")\n\n def web_url(self):\n \"\"\"\n Returns the url for use in the web interface\n \"\"\"\n if self.is_app():\n return f\"apps/{self.name}/\"\n else:\n return f\"pkgs/{self.name}/\"\n\n def get_download_url(self, arch: str) -> Optional[str]:\n # noarch does not have a dedicated repository, use the first available arch I suppose\n # This may be an idea in the category \"not smart\"\n if arch == \"noarch\":\n repo = next(self.repos.__iter__())\n else:\n for repo in self.repos:\n repo_arch = repo.split(\"_\")[1]\n if repo_arch == arch:\n break\n else:\n logger.warning(f\"No repo found for architecture {arch} (package: {self.name})\")\n #assert False, f\"No repo found for architecture {arch} (package: {self.name})\"\n return None\n\n return f\"{CONFIG.repo_url_prefix}{repo}/\" + self.download_url[arch]\n\n\n def caused_requests(self):\n return type(self.markdown_url) == str\n\n def requested_urls(self):\n return [self.markdown_url]\n\n def to_search_dict(self):\n return {\n \"name\": self.name,\n \"title\": self.title,\n \"url\": self.web_url(),\n \"icon\": self.icon.remote_url if self.icon else None,\n \"summary\": self.summary,\n \"description\": self.description,\n \"version\": self.version.to_full_str(),\n \"version_short\": self.version.to_short_str(),\n \"is_app\": self.is_app(),\n \"is_debug\": self.is_debug()\n }" } ]
from datetime import datetime
from typing import List, Optional, Iterable
from xml.dom.minidom import Document, Element
from chumweb import CONFIG
from chumweb.package import Package
2,745
""" This package contains methods for writing Atom feeds """ # Reuse the namespace that the primary.xml.gz file uses REPO_NS = "http://linux.duke.edu/metadata/common" def create_atom_feed(public_url: str, title: str, updated: datetime) -> Document: """ Creates a basic Atom feed, with no entries https://validator.w3.org/feed/docs/atom.html :return: The created feed as an XML Document """ doc = Document() feed = doc.createElementNS("http://www.w3.org/2005/Atom", "feed") feed.setAttribute("xmlns", "http://www.w3.org/2005/Atom") feed.setAttribute("xmlns:repo", "http://linux.duke.edu/metadata/common") doc.appendChild(feed) el_id = _create_simple_element(doc, "id", public_url) feed.appendChild(el_id) el_title = _create_simple_element(doc, "title", title) feed.appendChild(el_title) el_updated = _create_simple_element(doc, "updated", updated.isoformat()) feed.appendChild(el_updated)
""" This package contains methods for writing Atom feeds """ # Reuse the namespace that the primary.xml.gz file uses REPO_NS = "http://linux.duke.edu/metadata/common" def create_atom_feed(public_url: str, title: str, updated: datetime) -> Document: """ Creates a basic Atom feed, with no entries https://validator.w3.org/feed/docs/atom.html :return: The created feed as an XML Document """ doc = Document() feed = doc.createElementNS("http://www.w3.org/2005/Atom", "feed") feed.setAttribute("xmlns", "http://www.w3.org/2005/Atom") feed.setAttribute("xmlns:repo", "http://linux.duke.edu/metadata/common") doc.appendChild(feed) el_id = _create_simple_element(doc, "id", public_url) feed.appendChild(el_id) el_title = _create_simple_element(doc, "title", title) feed.appendChild(el_title) el_updated = _create_simple_element(doc, "updated", updated.isoformat()) feed.appendChild(el_updated)
el_icon = _create_simple_element(doc, "icon", CONFIG.public_url + "static/img/sailfishos-chum.png")
0
2023-12-14 19:25:31+00:00
4k
oVo-HxBots/URLUploadBot
Uploader/echo.py
[ { "identifier": "Translation", "path": "Uploader/script.py", "snippet": "class Translation(object):\n\n START_TEXT = \"\"\"\nHi {} \n\nI am Powerful Url Uploader Bot\n \n\"\"\"\n\n HELP_TEXT = \"\"\"\n\n# Send me the Google Drive | ytdl | direct links.\n\n# Select the desired option.\n\n# Then be relaxed your file will be uploaded soon..\n \n\"\"\"\n\n# give credit to developer\n\n ABOUT_TEXT = \"\"\"\n<b>♻️ My Name</b> : Url Uploader Bot\n\n<b>🌀 Channel</b> : <a href=\"https://t.me/TMWAD\">@TMWAD</a>\n\n<b>🌺 Heroku</b> : <a href=\"https://heroku.com/\">Heroku</a>\n\n<b>📑 Language :</b> <a href=\"https://www.python.org/\">Python 3.10.5</a>\n\n<b>🇵🇲 Framework :</b> <a href=\"https://docs.pyrogram.org/\">Pyrogram 2.0.30</a>\n\n<b>👲 Developer :</b> <a href=\"https://t.me/kinu6\">@kinu6</a>\n\n\"\"\"\n\n PROGRESS = \"\"\"\n🔰 Speed : {3}/s\\n\\n\n🌀 Done : {1}\\n\\n\n🎥 Tᴏᴛᴀʟ sɪᴢᴇ : {2}\\n\\n\n⏳ Tɪᴍᴇ ʟᴇғᴛ : {4}\\n\\n\n\"\"\"\n ID_TEXT = \"\"\"\n🆔 Your Telegram ID 𝐢𝐬 :- <code>{}</code>\n\"\"\"\n\n INFO_TEXT = \"\"\"\n\n 🤹 First Name : <b>{}</b>\n\n 🚴‍♂️ Second Name : <b>{}</b>\n\n 🧑🏻‍🎓 Username : <b>@{}</b>\n\n 🆔 Telegram Id : <code>{}</code>\n\n 📇 Profile Link : <b>{}</b>\n\n 📡 Dc : <b>{}</b>\n\n 📑 Language : <b>{}</b>\n\n 👲 Status : <b>{}</b>\n\"\"\"\n\n START_BUTTONS = InlineKeyboardMarkup(\n [[\n InlineKeyboardButton('❓ Help', callback_data='help'),\n InlineKeyboardButton('🦊 About', callback_data='about')\n ], [\n InlineKeyboardButton('📛 Close', callback_data='close')\n ]]\n )\n HELP_BUTTONS = InlineKeyboardMarkup(\n [[\n InlineKeyboardButton('🏠 Home', callback_data='home'),\n InlineKeyboardButton('🦊 About', callback_data='about')\n ], [\n InlineKeyboardButton('📛 Close', callback_data='close')\n ]]\n )\n ABOUT_BUTTONS = InlineKeyboardMarkup(\n [[\n InlineKeyboardButton('🏠 Home', callback_data='home'),\n InlineKeyboardButton('❓ Help', callback_data='help')\n ], [\n InlineKeyboardButton('📛 Close', callback_data='close')\n ]]\n )\n BUTTONS = InlineKeyboardMarkup(\n [[\n InlineKeyboardButton('📛 Close', callback_data='close')\n ]]\n )\n FORMAT_SELECTION = \"Now Select the desired formats\"\n SET_CUSTOM_USERNAME_PASSWORD = \"\"\"\"\"\"\n DOWNLOAD_START = \"Trying to Download ⌛\\n\\n <i>{} </i>\"\n UPLOAD_START = \"<i>{} </i>\\n\\n📤 Uploading Please Wait \"\n RCHD_TG_API_LIMIT = \"Downloaded in {} seconds.\\nDetected File Size: {}\\nSorry. But, I cannot upload files greater than 2GB due to Telegram API limitations.\"\n AFTER_SUCCESSFUL_UPLOAD_MSG_WITH_TS = \"Dᴏᴡɴʟᴏᴀᴅᴇᴅ ɪɴ {} sᴇᴄᴏɴᴅs.\\n\\nTʜᴀɴᴋs Fᴏʀ Usɪɴɢ Mᴇ\\n\\nUᴘʟᴏᴀᴅᴇᴅ ɪɴ {} sᴇᴄᴏɴᴅs\"\n FF_MPEG_DEL_ETED_CUSTOM_MEDIA = \"✅ Media cleared succesfully.\"\n CUSTOM_CAPTION_UL_FILE = \" \"\n NO_VOID_FORMAT_FOUND = \"ERROR... <code>{}</code>\"\n SLOW_URL_DECED = \"Gosh that seems to be a very slow URL. Since you were screwing my home, I am in no mood to download this file. 
Meanwhile, why don't you try this:==> https://shrtz.me/PtsVnf6 and get me a fast URL so that I can upload to Telegram, without me slowing down for other users.\"" }, { "identifier": "random_char", "path": "Uploader/functions/ran_text.py", "snippet": "def random_char(y):\n return ''.join(random.choice(string.ascii_letters) for _ in range(y))" }, { "identifier": "humanbytes", "path": "Uploader/functions/display_progress.py", "snippet": "def humanbytes(size):\n # https://stackoverflow.com/a/49361727/4723940\n # 2**10 = 1024\n if not size:\n return \"\"\n power = 2**10\n n = 0\n Dic_powerN = {0: ' ', 1: 'K', 2: 'M', 3: 'G', 4: 'T'}\n while size > power:\n size /= power\n n += 1\n return f\"{str(round(size, 2))} {Dic_powerN[n]}B\"" }, { "identifier": "humanbytes", "path": "Uploader/functions/display_progress.py", "snippet": "def humanbytes(size):\n # https://stackoverflow.com/a/49361727/4723940\n # 2**10 = 1024\n if not size:\n return \"\"\n power = 2**10\n n = 0\n Dic_powerN = {0: ' ', 1: 'K', 2: 'M', 3: 'G', 4: 'T'}\n while size > power:\n size /= power\n n += 1\n return f\"{str(round(size, 2))} {Dic_powerN[n]}B\"" } ]
import os
import time
import json
import asyncio
import logging
from opencc import OpenCC
from pyrogram.types import Thumbnail
from pyrogram import Client, filters
from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton
from Uploader.config import Config
from sample_config import Config
from Uploader.script import Translation
from Uploader.functions.ran_text import random_char
from Uploader.functions.display_progress import humanbytes
from Uploader.functions.display_progress import humanbytes
3,432
l = entity.length url = url[o:o + l] if Config.HTTP_PROXY != "": command_to_exec = [ "yt-dlp", "--no-warnings", "--allow-dynamic-mpd", "-j", url, "--proxy", Config.HTTP_PROXY ] else: command_to_exec = [ "yt-dlp", "--no-warnings", "--allow-dynamic-mpd", "-j", url ] if youtube_dl_username is not None: command_to_exec.append("--username") command_to_exec.append(youtube_dl_username) if youtube_dl_password is not None: command_to_exec.append("--password") command_to_exec.append(youtube_dl_password) logger.info(command_to_exec) chk = await bot.send_message( chat_id=update.chat.id, text='Proccesing your ⌛', disable_web_page_preview=True, reply_to_message_id=update.id ) if update.from_user.id not in Config.AUTH_USERS: if str(update.from_user.id) in Config.ADL_BOT_RQ: current_time = time.time() previous_time = Config.ADL_BOT_RQ[str(update.from_user.id)] process_max_timeout = round(Config.PROCESS_MAX_TIMEOUT/60) present_time = round(Config.PROCESS_MAX_TIMEOUT - (current_time - previous_time)) Config.ADL_BOT_RQ[str(update.from_user.id)] = time.time() if round(current_time - previous_time) < Config.PROCESS_MAX_TIMEOUT: await bot.edit_message_text(chat_id=update.chat.id, text=Translation.FREE_USER_LIMIT_Q_SZE.format(process_max_timeout, present_time), disable_web_page_preview=True, message_id=chk.id) return else: Config.ADL_BOT_RQ[str(update.from_user.id)] = time.time() process = await asyncio.create_subprocess_exec( *command_to_exec, # stdout must a pipe to be accessible as process.stdout stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, ) # Wait for the subprocess to finish stdout, stderr = await process.communicate() e_response = stderr.decode().strip() logger.info(e_response) t_response = stdout.decode().strip() # logger.info(t_response) # https://github.com/rg3/youtube-dl/issues/2630#issuecomment-38635239 if e_response and "nonnumeric port" not in e_response: # logger.warn("Status : FAIL", exc.returncode, exc.output) error_message = e_response.replace( "please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; see https://yt-dl.org/update on how to update. Be sure to call youtube-dl with the --verbose flag and include its complete output.", "") if "This video is only available for registered users." 
in error_message: error_message += Translation.SET_CUSTOM_USERNAME_PASSWORD await chk.delete() time.sleep(40.5) await bot.send_message( chat_id=update.chat.id, text=Translation.NO_VOID_FORMAT_FOUND.format(str(error_message)), reply_to_message_id=update.id, disable_web_page_preview=True ) return False if t_response: # logger.info(t_response) x_reponse = t_response if "\n" in x_reponse: x_reponse, _ = x_reponse.split("\n") response_json = json.loads(x_reponse) randem = random_char(5) save_ytdl_json_path = Config.DOWNLOAD_LOCATION + \ "/" + str(update.from_user.id) + f'{randem}' + ".json" with open(save_ytdl_json_path, "w", encoding="utf8") as outfile: json.dump(response_json, outfile, ensure_ascii=False) # logger.info(response_json) inline_keyboard = [] duration = None if "duration" in response_json: duration = response_json["duration"] if "formats" in response_json: for formats in response_json["formats"]: format_id = formats.get("format_id") format_string = formats.get("format_note") if format_string is None: format_string = formats.get("format") if "DASH" in format_string.upper(): continue format_ext = formats.get("ext") if formats.get('filesize'): size = formats['filesize'] elif formats.get('filesize_approx'): size = formats['filesize_approx'] else: size = 0 cb_string_video = "{}|{}|{}|{}".format( "video", format_id, format_ext, randem) cb_string_file = "{}|{}|{}|{}".format( "file", format_id, format_ext, randem) if format_string is not None and not "audio only" in format_string: ikeyboard = [ InlineKeyboardButton( "🎬 " + format_string + " " + format_ext +
# MIT License # Copyright (c) 2022 Hash Minner # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE if bool(os.environ.get("WEBHOOK")): else: logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s') logger = logging.getLogger(__name__) logging.getLogger("pyrogram").setLevel(logging.WARNING) s2tw = OpenCC('s2tw.json').convert @Client.on_message(filters.private & filters.regex(pattern=".*http.*")) async def echo(bot, update): logger.info(update.from_user) url = update.text youtube_dl_username = None youtube_dl_password = None file_name = None if "youtu.be" in url: return await update.reply_text( "**Choose Download type**", reply_markup=InlineKeyboardMarkup( [ [ InlineKeyboardButton( "Audio 🎵", callback_data="ytdl_audio"), InlineKeyboardButton( "Video 🎬", callback_data="ytdl_video") ] ] ), quote=True ) if "|" in url: url_parts = url.split("|") if len(url_parts) == 2: url = url_parts[0] file_name = url_parts[1] elif len(url_parts) == 4: url = url_parts[0] file_name = url_parts[1] youtube_dl_username = url_parts[2] youtube_dl_password = url_parts[3] else: for entity in update.entities: if entity.type == "text_link": url = entity.url elif entity.type == "url": o = entity.offset l = entity.length url = url[o:o + l] if url is not None: url = url.strip() if file_name is not None: file_name = file_name.strip() # https://stackoverflow.com/a/761825/4723940 if youtube_dl_username is not None: youtube_dl_username = youtube_dl_username.strip() if youtube_dl_password is not None: youtube_dl_password = youtube_dl_password.strip() logger.info(url) logger.info(file_name) else: for entity in update.entities: if entity.type == "text_link": url = entity.url elif entity.type == "url": o = entity.offset l = entity.length url = url[o:o + l] if Config.HTTP_PROXY != "": command_to_exec = [ "yt-dlp", "--no-warnings", "--allow-dynamic-mpd", "-j", url, "--proxy", Config.HTTP_PROXY ] else: command_to_exec = [ "yt-dlp", "--no-warnings", "--allow-dynamic-mpd", "-j", url ] if youtube_dl_username is not None: command_to_exec.append("--username") command_to_exec.append(youtube_dl_username) if youtube_dl_password is not None: command_to_exec.append("--password") command_to_exec.append(youtube_dl_password) logger.info(command_to_exec) chk = await bot.send_message( chat_id=update.chat.id, text='Proccesing your ⌛', disable_web_page_preview=True, reply_to_message_id=update.id ) if update.from_user.id not in Config.AUTH_USERS: if str(update.from_user.id) in Config.ADL_BOT_RQ: current_time = time.time() 
previous_time = Config.ADL_BOT_RQ[str(update.from_user.id)] process_max_timeout = round(Config.PROCESS_MAX_TIMEOUT/60) present_time = round(Config.PROCESS_MAX_TIMEOUT - (current_time - previous_time)) Config.ADL_BOT_RQ[str(update.from_user.id)] = time.time() if round(current_time - previous_time) < Config.PROCESS_MAX_TIMEOUT: await bot.edit_message_text(chat_id=update.chat.id, text=Translation.FREE_USER_LIMIT_Q_SZE.format(process_max_timeout, present_time), disable_web_page_preview=True, message_id=chk.id) return else: Config.ADL_BOT_RQ[str(update.from_user.id)] = time.time() process = await asyncio.create_subprocess_exec( *command_to_exec, # stdout must a pipe to be accessible as process.stdout stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, ) # Wait for the subprocess to finish stdout, stderr = await process.communicate() e_response = stderr.decode().strip() logger.info(e_response) t_response = stdout.decode().strip() # logger.info(t_response) # https://github.com/rg3/youtube-dl/issues/2630#issuecomment-38635239 if e_response and "nonnumeric port" not in e_response: # logger.warn("Status : FAIL", exc.returncode, exc.output) error_message = e_response.replace( "please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; see https://yt-dl.org/update on how to update. Be sure to call youtube-dl with the --verbose flag and include its complete output.", "") if "This video is only available for registered users." in error_message: error_message += Translation.SET_CUSTOM_USERNAME_PASSWORD await chk.delete() time.sleep(40.5) await bot.send_message( chat_id=update.chat.id, text=Translation.NO_VOID_FORMAT_FOUND.format(str(error_message)), reply_to_message_id=update.id, disable_web_page_preview=True ) return False if t_response: # logger.info(t_response) x_reponse = t_response if "\n" in x_reponse: x_reponse, _ = x_reponse.split("\n") response_json = json.loads(x_reponse) randem = random_char(5) save_ytdl_json_path = Config.DOWNLOAD_LOCATION + \ "/" + str(update.from_user.id) + f'{randem}' + ".json" with open(save_ytdl_json_path, "w", encoding="utf8") as outfile: json.dump(response_json, outfile, ensure_ascii=False) # logger.info(response_json) inline_keyboard = [] duration = None if "duration" in response_json: duration = response_json["duration"] if "formats" in response_json: for formats in response_json["formats"]: format_id = formats.get("format_id") format_string = formats.get("format_note") if format_string is None: format_string = formats.get("format") if "DASH" in format_string.upper(): continue format_ext = formats.get("ext") if formats.get('filesize'): size = formats['filesize'] elif formats.get('filesize_approx'): size = formats['filesize_approx'] else: size = 0 cb_string_video = "{}|{}|{}|{}".format( "video", format_id, format_ext, randem) cb_string_file = "{}|{}|{}|{}".format( "file", format_id, format_ext, randem) if format_string is not None and not "audio only" in format_string: ikeyboard = [ InlineKeyboardButton( "🎬 " + format_string + " " + format_ext +
" " + humanbytes(size) + " ",
3
2023-12-09 03:24:55+00:00
4k
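The record above drives its format picker off yt-dlp's JSON probe: it runs `yt-dlp -j <url>` as a subprocess, parses the dumped metadata, skips DASH-only entries, and reads `filesize`/`filesize_approx` for each format. Below is a minimal standalone sketch of just that probe step, not the bot's code: it assumes `yt-dlp` is on PATH, reuses only the flags that appear in the record, and the `list_formats` helper name and sample URL are made up for illustration.

```python
import asyncio
import json

async def list_formats(url: str):
    # Probe only: -j dumps metadata as JSON without downloading (same flags as the record).
    cmd = ["yt-dlp", "--no-warnings", "--allow-dynamic-mpd", "-j", url]
    proc = await asyncio.create_subprocess_exec(
        *cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
    )
    stdout, _stderr = await proc.communicate()
    text = stdout.decode().strip()
    if "\n" in text:                       # playlists emit one JSON object per line;
        text = text.split("\n", 1)[0]      # keep the first entry, as the record does
    info = json.loads(text)
    rows = []
    for fmt in info.get("formats", []):
        note = fmt.get("format_note") or fmt.get("format") or ""
        if "DASH" in note.upper():         # the record skips DASH-only entries
            continue
        size = fmt.get("filesize") or fmt.get("filesize_approx") or 0
        rows.append((fmt.get("format_id"), note, fmt.get("ext"), size))
    return rows

if __name__ == "__main__":
    for row in asyncio.run(list_formats("https://example.com/some-video")):
        print(row)
```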
ZS-YANG/FemtoDet-v3
mmdet/models/losses/iou_loss.py
[ { "identifier": "MODELS", "path": "mmdet/registry.py", "snippet": "MODELS = Registry('model', parent=MMENGINE_MODELS, locations=['mmdet.models'])" }, { "identifier": "bbox_overlaps", "path": "mmdet/structures/bbox/bbox_overlaps.py", "snippet": "def bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False, eps=1e-6):\n \"\"\"Calculate overlap between two set of bboxes.\n\n FP16 Contributed by https://github.com/open-mmlab/mmdetection/pull/4889\n Note:\n Assume bboxes1 is M x 4, bboxes2 is N x 4, when mode is 'iou',\n there are some new generated variable when calculating IOU\n using bbox_overlaps function:\n\n 1) is_aligned is False\n area1: M x 1\n area2: N x 1\n lt: M x N x 2\n rb: M x N x 2\n wh: M x N x 2\n overlap: M x N x 1\n union: M x N x 1\n ious: M x N x 1\n\n Total memory:\n S = (9 x N x M + N + M) * 4 Byte,\n\n When using FP16, we can reduce:\n R = (9 x N x M + N + M) * 4 / 2 Byte\n R large than (N + M) * 4 * 2 is always true when N and M >= 1.\n Obviously, N + M <= N * M < 3 * N * M, when N >=2 and M >=2,\n N + 1 < 3 * N, when N or M is 1.\n\n Given M = 40 (ground truth), N = 400000 (three anchor boxes\n in per grid, FPN, R-CNNs),\n R = 275 MB (one times)\n\n A special case (dense detection), M = 512 (ground truth),\n R = 3516 MB = 3.43 GB\n\n When the batch size is B, reduce:\n B x R\n\n Therefore, CUDA memory runs out frequently.\n\n Experiments on GeForce RTX 2080Ti (11019 MiB):\n\n | dtype | M | N | Use | Real | Ideal |\n |:----:|:----:|:----:|:----:|:----:|:----:|\n | FP32 | 512 | 400000 | 8020 MiB | -- | -- |\n | FP16 | 512 | 400000 | 4504 MiB | 3516 MiB | 3516 MiB |\n | FP32 | 40 | 400000 | 1540 MiB | -- | -- |\n | FP16 | 40 | 400000 | 1264 MiB | 276MiB | 275 MiB |\n\n 2) is_aligned is True\n area1: N x 1\n area2: N x 1\n lt: N x 2\n rb: N x 2\n wh: N x 2\n overlap: N x 1\n union: N x 1\n ious: N x 1\n\n Total memory:\n S = 11 x N * 4 Byte\n\n When using FP16, we can reduce:\n R = 11 x N * 4 / 2 Byte\n\n So do the 'giou' (large than 'iou').\n\n Time-wise, FP16 is generally faster than FP32.\n\n When gpu_assign_thr is not -1, it takes more time on cpu\n but not reduce memory.\n There, we can reduce half the memory and keep the speed.\n\n If ``is_aligned`` is ``False``, then calculate the overlaps between each\n bbox of bboxes1 and bboxes2, otherwise the overlaps between each aligned\n pair of bboxes1 and bboxes2.\n\n Args:\n bboxes1 (Tensor): shape (B, m, 4) in <x1, y1, x2, y2> format or empty.\n bboxes2 (Tensor): shape (B, n, 4) in <x1, y1, x2, y2> format or empty.\n B indicates the batch dim, in shape (B1, B2, ..., Bn).\n If ``is_aligned`` is ``True``, then m and n must be equal.\n mode (str): \"iou\" (intersection over union), \"iof\" (intersection over\n foreground) or \"giou\" (generalized intersection over union).\n Default \"iou\".\n is_aligned (bool, optional): If True, then m and n must be equal.\n Default False.\n eps (float, optional): A value added to the denominator for numerical\n stability. 
Default 1e-6.\n\n Returns:\n Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,)\n\n Example:\n >>> bboxes1 = torch.FloatTensor([\n >>> [0, 0, 10, 10],\n >>> [10, 10, 20, 20],\n >>> [32, 32, 38, 42],\n >>> ])\n >>> bboxes2 = torch.FloatTensor([\n >>> [0, 0, 10, 20],\n >>> [0, 10, 10, 19],\n >>> [10, 10, 20, 20],\n >>> ])\n >>> overlaps = bbox_overlaps(bboxes1, bboxes2)\n >>> assert overlaps.shape == (3, 3)\n >>> overlaps = bbox_overlaps(bboxes1, bboxes2, is_aligned=True)\n >>> assert overlaps.shape == (3, )\n\n Example:\n >>> empty = torch.empty(0, 4)\n >>> nonempty = torch.FloatTensor([[0, 0, 10, 9]])\n >>> assert tuple(bbox_overlaps(empty, nonempty).shape) == (0, 1)\n >>> assert tuple(bbox_overlaps(nonempty, empty).shape) == (1, 0)\n >>> assert tuple(bbox_overlaps(empty, empty).shape) == (0, 0)\n \"\"\"\n\n assert mode in ['iou', 'iof', 'giou'], f'Unsupported mode {mode}'\n # Either the boxes are empty or the length of boxes' last dimension is 4\n assert (bboxes1.size(-1) == 4 or bboxes1.size(0) == 0)\n assert (bboxes2.size(-1) == 4 or bboxes2.size(0) == 0)\n\n # Batch dim must be the same\n # Batch dim: (B1, B2, ... Bn)\n assert bboxes1.shape[:-2] == bboxes2.shape[:-2]\n batch_shape = bboxes1.shape[:-2]\n\n rows = bboxes1.size(-2)\n cols = bboxes2.size(-2)\n if is_aligned:\n assert rows == cols\n\n if rows * cols == 0:\n if is_aligned:\n return bboxes1.new(batch_shape + (rows, ))\n else:\n return bboxes1.new(batch_shape + (rows, cols))\n\n area1 = (bboxes1[..., 2] - bboxes1[..., 0]) * (\n bboxes1[..., 3] - bboxes1[..., 1])\n area2 = (bboxes2[..., 2] - bboxes2[..., 0]) * (\n bboxes2[..., 3] - bboxes2[..., 1])\n\n if is_aligned:\n lt = torch.max(bboxes1[..., :2], bboxes2[..., :2]) # [B, rows, 2]\n rb = torch.min(bboxes1[..., 2:], bboxes2[..., 2:]) # [B, rows, 2]\n\n wh = fp16_clamp(rb - lt, min=0)\n overlap = wh[..., 0] * wh[..., 1]\n\n if mode in ['iou', 'giou']:\n union = area1 + area2 - overlap\n else:\n union = area1\n if mode == 'giou':\n enclosed_lt = torch.min(bboxes1[..., :2], bboxes2[..., :2])\n enclosed_rb = torch.max(bboxes1[..., 2:], bboxes2[..., 2:])\n else:\n lt = torch.max(bboxes1[..., :, None, :2],\n bboxes2[..., None, :, :2]) # [B, rows, cols, 2]\n rb = torch.min(bboxes1[..., :, None, 2:],\n bboxes2[..., None, :, 2:]) # [B, rows, cols, 2]\n\n wh = fp16_clamp(rb - lt, min=0)\n overlap = wh[..., 0] * wh[..., 1]\n\n if mode in ['iou', 'giou']:\n union = area1[..., None] + area2[..., None, :] - overlap\n else:\n union = area1[..., None]\n if mode == 'giou':\n enclosed_lt = torch.min(bboxes1[..., :, None, :2],\n bboxes2[..., None, :, :2])\n enclosed_rb = torch.max(bboxes1[..., :, None, 2:],\n bboxes2[..., None, :, 2:])\n\n eps = union.new_tensor([eps])\n union = torch.max(union, eps)\n ious = overlap / union\n if mode in ['iou', 'iof']:\n return ious\n # calculate gious\n enclose_wh = fp16_clamp(enclosed_rb - enclosed_lt, min=0)\n enclose_area = enclose_wh[..., 0] * enclose_wh[..., 1]\n enclose_area = torch.max(enclose_area, eps)\n gious = ious - (enclose_area - union) / enclose_area\n return gious" }, { "identifier": "weighted_loss", "path": "mmdet/models/losses/utils.py", "snippet": "def weighted_loss(loss_func: Callable) -> Callable:\n \"\"\"Create a weighted version of a given loss function.\n\n To use this decorator, the loss function must have the signature like\n `loss_func(pred, target, **kwargs)`. The function only needs to compute\n element-wise loss without any reduction. This decorator will add weight\n and reduction arguments to the function. 
The decorated function will have\n the signature like `loss_func(pred, target, weight=None, reduction='mean',\n avg_factor=None, **kwargs)`.\n\n :Example:\n\n >>> import torch\n >>> @weighted_loss\n >>> def l1_loss(pred, target):\n >>> return (pred - target).abs()\n\n >>> pred = torch.Tensor([0, 2, 3])\n >>> target = torch.Tensor([1, 1, 1])\n >>> weight = torch.Tensor([1, 0, 1])\n\n >>> l1_loss(pred, target)\n tensor(1.3333)\n >>> l1_loss(pred, target, weight)\n tensor(1.)\n >>> l1_loss(pred, target, reduction='none')\n tensor([1., 1., 2.])\n >>> l1_loss(pred, target, weight, avg_factor=2)\n tensor(1.5000)\n \"\"\"\n\n @functools.wraps(loss_func)\n def wrapper(pred: Tensor,\n target: Tensor,\n weight: Optional[Tensor] = None,\n reduction: str = 'mean',\n avg_factor: Optional[int] = None,\n **kwargs) -> Tensor:\n \"\"\"\n Args:\n pred (Tensor): The prediction.\n target (Tensor): Target bboxes.\n weight (Optional[Tensor], optional): The weight of loss for each\n prediction. Defaults to None.\n reduction (str, optional): Options are \"none\", \"mean\" and \"sum\".\n Defaults to 'mean'.\n avg_factor (Optional[int], optional): Average factor that is used\n to average the loss. Defaults to None.\n\n Returns:\n Tensor: Loss tensor.\n \"\"\"\n # get element-wise loss\n loss = loss_func(pred, target, **kwargs)\n loss = weight_reduce_loss(loss, weight, reduction, avg_factor)\n return loss\n\n return wrapper" } ]
import math
import warnings
import torch
import torch.nn as nn
from typing import Optional
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.structures.bbox import bbox_overlaps
from .utils import weighted_loss
3,363
# Copyright (c) OpenMMLab. All rights reserved. @weighted_loss def iou_loss(pred: Tensor, target: Tensor, linear: bool = False, mode: str = 'log', eps: float = 1e-6) -> Tensor: """IoU loss. Computing the IoU loss between a set of predicted bboxes and target bboxes. The loss is calculated as negative log of IoU. Args: pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2), shape (n, 4). target (Tensor): Corresponding gt bboxes, shape (n, 4). linear (bool, optional): If True, use linear scale of loss instead of log scale. Default: False. mode (str): Loss scaling mode, including "linear", "square", and "log". Default: 'log' eps (float): Epsilon to avoid log(0). Return: Tensor: Loss tensor. """ assert mode in ['linear', 'square', 'log'] if linear: mode = 'linear' warnings.warn('DeprecationWarning: Setting "linear=True" in ' 'iou_loss is deprecated, please use "mode=`linear`" ' 'instead.') # avoid fp16 overflow if pred.dtype == torch.float16: fp16 = True pred = pred.to(torch.float32) else: fp16 = False
# Copyright (c) OpenMMLab. All rights reserved. @weighted_loss def iou_loss(pred: Tensor, target: Tensor, linear: bool = False, mode: str = 'log', eps: float = 1e-6) -> Tensor: """IoU loss. Computing the IoU loss between a set of predicted bboxes and target bboxes. The loss is calculated as negative log of IoU. Args: pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2), shape (n, 4). target (Tensor): Corresponding gt bboxes, shape (n, 4). linear (bool, optional): If True, use linear scale of loss instead of log scale. Default: False. mode (str): Loss scaling mode, including "linear", "square", and "log". Default: 'log' eps (float): Epsilon to avoid log(0). Return: Tensor: Loss tensor. """ assert mode in ['linear', 'square', 'log'] if linear: mode = 'linear' warnings.warn('DeprecationWarning: Setting "linear=True" in ' 'iou_loss is deprecated, please use "mode=`linear`" ' 'instead.') # avoid fp16 overflow if pred.dtype == torch.float16: fp16 = True pred = pred.to(torch.float32) else: fp16 = False
ious = bbox_overlaps(pred, target, is_aligned=True).clamp(min=eps)
1
2023-12-11 15:23:03+00:00
4k
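For the IoU loss record, a tiny self-contained sketch may help connect the aligned branch of `bbox_overlaps` to the `mode='log'` case of `iou_loss`. This is not the mmdet implementation (no batching, fp16 handling, weighting, or GIoU), just the same arithmetic applied to two made-up pairs of boxes.

```python
import torch

def aligned_iou(boxes1: torch.Tensor, boxes2: torch.Tensor, eps: float = 1e-6) -> torch.Tensor:
    # Aligned branch of bbox_overlaps: per-row intersection over union for (x1, y1, x2, y2) boxes.
    area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])
    area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])
    lt = torch.max(boxes1[:, :2], boxes2[:, :2])      # intersection top-left
    rb = torch.min(boxes1[:, 2:], boxes2[:, 2:])      # intersection bottom-right
    wh = (rb - lt).clamp(min=0)
    overlap = wh[:, 0] * wh[:, 1]
    union = (area1 + area2 - overlap).clamp(min=eps)
    return overlap / union

pred = torch.tensor([[0.0, 0.0, 10.0, 10.0], [10.0, 10.0, 20.0, 20.0]])
target = torch.tensor([[0.0, 0.0, 10.0, 20.0], [10.0, 10.0, 20.0, 20.0]])

ious = aligned_iou(pred, target).clamp(min=1e-6)
loss = -ious.log()        # mode='log' in iou_loss: perfect overlap gives zero loss
print(ious)               # tensor([0.5000, 1.0000])
print(loss)               # tensor([0.6931, 0.0000])
```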
mit-ll-ai-technology/maite
src/maite/_internals/interop/base_model.py
[ { "identifier": "is_list_of_type", "path": "src/maite/_internals/protocols/type_guards.py", "snippet": "def is_list_of_type(d: Any, guard: Type[T] = Any) -> TypeGuard[List[T]]:\n \"\"\"\n Check if object is a list of dictionaries.\n\n Parameters\n ----------\n d : Any\n The object to check.\n guard : Type[T]\n The type guard of the dictionaries. Defaults to dict.\n\n Returns\n -------\n TypeGuard[List[T]]\n True if object is a list of dictionaries.\n\n Examples\n --------\n >>> is_list_dict([{\"a\": 1}, {\"b\": 2}])\n True\n \"\"\"\n return isinstance(d, (list, tuple)) and isinstance(d[0], guard)" }, { "identifier": "is_typed_dict", "path": "src/maite/_internals/protocols/type_guards.py", "snippet": "def is_typed_dict(object: Any, target: Type[Td]) -> TypeGuard[Td]:\n \"\"\"\n Check if object is a typed dictionary.\n\n Parameters\n ----------\n object : Any\n The object to check.\n\n target : Type[T]\n The type of the dictionary.\n\n Returns\n -------\n TypeGuard[T]\n True if object is a typed dictionary.\n\n Examples\n --------\n >>> from typing_extensions import TypedDict\n >>> class Foo(TypedDict):\n ... a: int\n >>> is_typed_dict({\"a\": 1}, Foo)\n True\n \"\"\"\n if not isinstance(object, dict):\n return False\n\n k_obj = set(object.keys())\n ks = set(target.__annotations__.keys())\n\n if hasattr(target, \"__total__\") and target.__total__:\n return all(k in k_obj for k in ks)\n else:\n return any(k in k_obj for k in ks)" }, { "identifier": "ArrayLike", "path": "src/maite/_internals/protocols/typing.py", "snippet": "T = TypeVar(\"T\")\nP = ParamSpec(\"P\")\nclass ArrayLike(Protocol):\nclass DatumMetadata(Protocol):\nclass HasDataImage(TypedDict):\nclass HasDataLabel(TypedDict):\nclass HasDataBoxes(TypedDict):\nclass HasDataBoxesLabels(HasDataBoxes):\nclass HasDataObjects(TypedDict):\nclass HasDataMetadata(TypedDict):\nclass SupportsImageClassification(HasDataImage, HasDataLabel, HasDataMetadata):\nclass SupportsObjectDetection(HasDataImage, HasDataObjects, HasDataMetadata):\nclass Dataset(Protocol[T_co]):\nclass VisionDataset(Dataset[SupportsImageClassification], Protocol):\nclass ObjectDetectionDataset(Dataset[SupportsObjectDetection], Protocol):\nclass VisionDataLoader(DataLoader[SupportsImageClassification], Protocol):\nclass ObjectDetectionDataLoader(DataLoader[SupportsObjectDetection], Protocol):\nclass Augmentation(Protocol[P, T_co]):\nclass HasLabel(Protocol):\nclass HasBoxes(Protocol):\nclass HasLogits(Protocol):\nclass HasProbs(Protocol):\nclass HasScores(Protocol):\nclass HasDetectionLogits(HasBoxes, HasLogits, Protocol):\nclass HasDetectionProbs(HasProbs, HasBoxes, Protocol):\nclass HasDetectionPredictions(HasBoxes, HasScores, Protocol):\nclass ModelMetadata(Protocol):\nclass Model(Protocol[P, T_co]):\nclass ImageClassifier(Model[P, Union[HasLogits, HasProbs, HasScores]], Protocol[P]):\nclass ObjectDetector(\n Model[\n P,\n Union[HasDetectionLogits, HasDetectionProbs, HasDetectionPredictions],\n ],\n Protocol[P],\n):\nclass Metric(Protocol[P, T_co]):\nclass ModelProvider(Protocol):\nclass DatasetProvider(Protocol):\nclass MetricProvider(Protocol):\nclass ArtifactHubEndpoint(Protocol):\n def __array__(self) -> Any:\n def id(self) -> Hashable: # require id field in a class that is hashable\n def __len__(self) -> int:\n def __getitem__(self, index: Any) -> T_co:\n def __call__(self, *args: P.args, **kwargs: P.kwargs) -> T_co:\n def label(self) -> SupportsArray:\n def boxes(self) -> SupportsArray:\n def logits(self) -> SupportsArray:\n def probs(self) -> SupportsArray:\n def 
scores(self) -> SupportsArray:\n def labels(self) -> SupportsArray:\n def model_name(self) -> str:\n def provider(self) -> str:\n def task(self) -> str:\n def metadata(self) -> ModelMetadata:\n def __call__(self, *args: P.args, **kwargs: P.kwargs) -> T_co:\n def get_labels(self) -> Sequence[str]:\n def reset(self) -> None:\n def update(self, *args: P.args, **kwargs: P.kwargs) -> None:\n def compute(self) -> T_co:\n def to(self, *args: Any, **kwargs: Any) -> Self:\n def help(self, name: str) -> str:\n def list_models(\n self,\n *,\n filter_str: str | List[str] | None = None,\n model_name: str | None = None,\n task: TaskName | None = None,\n ) -> Iterable[Any]:\n def load_model(\n self, model_name: str, task: TaskName | None = None\n ) -> Model[P, T_co]:\n def help(self, name: str) -> str:\n def list_datasets(self) -> Iterable[str]:\n def load_dataset(\n self,\n *,\n dataset_name: str,\n task: TaskName | None = None,\n split: str | None = None,\n ) -> Dataset[T_co]:\n def help(self, name: str) -> str:\n def list_metrics(self) -> Iterable[Any]:\n def load_metric(self, metric_name: str) -> Metric[P, T_co]:\n def __init__(self, path: Any):\n def get_cache_or_reload(self) -> str | os.PathLike[str]:\n def update_options(self) -> Self:" }, { "identifier": "is_pil_image", "path": "src/maite/_internals/interop/utils.py", "snippet": "def is_pil_image(x) -> TypeGuard[ArrayLike]:\n \"\"\"Tests if `x` is a Image array or not.\"\"\"\n\n def _is_pil(x):\n from PIL.Image import Image\n\n return isinstance(x, Image)\n\n return False if not is_pil_available() else _is_pil(x)" } ]
from collections import UserDict
from dataclasses import dataclass
from typing import Any, Callable, Iterable, Optional, Sequence, Tuple, Union
from torch import Tensor
from ..protocols.type_guards import is_list_of_type, is_typed_dict
from ..protocols.typing import ArrayLike, HasDataImage, SupportsArray
from .utils import is_pil_image
import torch as tr
1,667
# Copyright 2023, MASSACHUSETTS INSTITUTE OF TECHNOLOGY # Subject to FAR 52.227-11 – Patent Rights – Ownership by the Contractor (May 2014). # SPDX-License-Identifier: MIT @dataclass class InteropModelMetadata: model_name: str = "" provider: str = "" task: str = "" class BaseModel:
# Copyright 2023, MASSACHUSETTS INSTITUTE OF TECHNOLOGY # Subject to FAR 52.227-11 – Patent Rights – Ownership by the Contractor (May 2014). # SPDX-License-Identifier: MIT @dataclass class InteropModelMetadata: model_name: str = "" provider: str = "" task: str = "" class BaseModel:
preprocessor: Optional[Callable[[Union[HasDataImage, SupportsArray]], HasDataImage]]
2
2023-12-12 15:34:16+00:00
4k
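The maite record imports the `is_typed_dict` type guard, presumably to decide whether an incoming batch already looks like a `HasDataImage`-style dictionary. A rough re-implementation of that key-based check is sketched below so it can run on its own; it returns a plain `bool` instead of a `TypeGuard`, and the `ImageBatch` TypedDict is a stand-in, not the library's class.

```python
from typing import Any, Type, TypedDict  # TypedDict is in typing on Python 3.8+

def looks_like(obj: Any, target: Type[Any]) -> bool:
    # Same idea as is_typed_dict above: with __total__ True every declared key
    # must be present; for a non-total TypedDict any one declared key is enough.
    if not isinstance(obj, dict):
        return False
    have = set(obj.keys())
    want = set(target.__annotations__.keys())
    if getattr(target, "__total__", False):
        return all(k in have for k in want)
    return any(k in have for k in want)

class ImageBatch(TypedDict):
    image: Any

print(looks_like({"image": [[0, 1], [1, 0]]}, ImageBatch))  # True
print(looks_like({"label": 3}, ImageBatch))                 # False
print(looks_like([1, 2, 3], ImageBatch))                    # False (not a dict)
```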
berglh/zen-focus
zf/ui/sidebar_pane.py
[ { "identifier": "About", "path": "zf/ui/about_pane.py", "snippet": "class About():\n \"\"\"\n About class defines the About content pane\n\n It generates the structure in memory to apply to the navigation split view\n \"\"\"\n def __init__(self):\n self.scrolled_window = Gtk.ScrolledWindow()\n self.scrolled_window.set_vexpand(True)\n\n self.content_box = Gtk.Box()\n self.content_box.set_halign(Gtk.Align.CENTER)\n self.content_box.set_valign(Gtk.Align.CENTER)\n\n # Resolve the theme named colours to RGBA strings\n self.accent_fg_color = self.get_color('theme_selected_fg_color')\n self.accent_bg_color = self.get_color('theme_selected_bg_color')\n\n # Load the SVG file using librsvg\n self.svg_data = Rsvg.Handle.new_from_file('icons/hicolor/scalable/apps/zen-focus-logo.svg')\n\n # Set the SVG colours to the theme colours\n # Check: print(self.accent_fg_color, self.accent_bg_color)\n self.svg_style_sheet = f\"\"\"\n #fg1, #fg2, #fg3, #fg4 {{ fill: {self.accent_fg_color} ; }}\n #bg1, #bg2, #bg3 {{ fill: {self.accent_bg_color} ; }}\n #svg5 {{ width: 256px; height: 256px;}}\n \"\"\"\n self.svg_data.set_stylesheet(self.svg_style_sheet.encode())\n\n # Draw the Gtk.Image from the SVG pixel buffer\n self.logo = Gtk.Image(height_request=128, width_request=128)\n self.logo.set_from_pixbuf(self.svg_data.get_pixbuf()) # TODO below\n self.logo.set_margin_end(40)\n\n self.content_box.append(self.logo)\n self.content_box.append(Gtk.Label(label='Zen Focus'))\n\n self.scrolled_window.set_child(self.content_box)\n\n # TODO: #Deprecated since version 4.12: Use [ctor`Gtk`.Image.new_from_paintable] and [ctor`Gdk`.Texture.new_for_pixbuf] instead\n #self.logo = Gtk.Image()\n #self.svg_data.get_pixbuf()\n \n #self.texture = Gdk.Texture.new_for_pixbuf(self.svg_data.get_pixbuf())\n\n # Get the size of the GdkPixbuf\n # pixbuf_width = pixbuf.get_width()\n # pixbuf_height = pixbuf.get_height()\n\n # Create a Gdk.Paintable from the Gdk.Texture\n #self.paintable = Gdk.Texture.new_for_stream_at_scale(None, self.texture, self.texture.get_width(), self.texture.get_height())\n #self.logo = Gtk.Image.new_from_paintable(self.texture)\n\n def set_content(self, button):\n \"\"\"\n set_content sets the About pane content in the navigation split view\n \"\"\"\n content = Gio.Application.get_default().split_view.get_content()\n content.set_title(\"About\")\n content.pane.set_content(self.scrolled_window)\n content.pane.set_reveal_bottom_bars(False)\n\n def get_color(self, named_color):\n \"\"\"\n Return the libadwaita named color in RGBA from CSS provider\n \"\"\"\n label = Gtk.Label(label=\"Coloured Text\")\n label.set_css_classes([f'cc_{named_color}'])\n rgba_color = label.get_color()\n return rgba_color.to_string()" }, { "identifier": "Power", "path": "zf/ui/power_pane.py", "snippet": "class Power():\n \"\"\"\n Power class defines the Power Usage content pane\n\n It generates the structure in memory to apply to the navigation split view\n \"\"\"\n\n def __init__(self):\n self.scrolled_window = Gtk.ScrolledWindow()\n self.scrolled_window.set_vexpand(True)\n\n self.content_box = Gtk.Box()\n self.content_box.set_halign(Gtk.Align.CENTER)\n self.content_box.set_valign(Gtk.Align.CENTER)\n self.content_box.append(Gtk.Label(label='Power Content'))\n\n self.scrolled_window.set_child(self.content_box)\n\n def set_content(self, button):\n \"\"\"\n set_content sets the Power Usage pane content in the navigation split view\n \"\"\"\n content = Gio.Application.get_default().split_view.get_content()\n content.set_title(\"Power Usage\")\n 
content.pane.set_content(self.scrolled_window)\n content.pane.set_reveal_bottom_bars(True)" }, { "identifier": "Processor", "path": "zf/ui/processor_pane.py", "snippet": "class Processor():\n \"\"\"\n Processor class defines the Processor information content pane\n\n It generates the structure in memory to apply to the navigation split view\n \"\"\"\n\n def __init__(self):\n self.scrolled_window = Gtk.ScrolledWindow()\n self.scrolled_window.set_vexpand(True)\n\n self.content_box = Gtk.Box()\n self.content_box.set_halign(Gtk.Align.CENTER)\n self.content_box.set_valign(Gtk.Align.CENTER)\n self.content_box.append(Gtk.Label(label='Processor Info'))\n\n self.scrolled_window.set_child(self.content_box)\n\n def set_content(self, button):\n \"\"\"\n set_content sets the Processor Info pane content in the navigation split view\n \"\"\"\n content = Gio.Application.get_default().split_view.get_content()\n content.set_title(\"Processor Info\")\n content.set_visible(True)\n content.pane.set_content(self.scrolled_window)\n content.pane.set_reveal_bottom_bars(True)" }, { "identifier": "Settings", "path": "zf/ui/settings_pane.py", "snippet": "class Settings():\n\n def __init__(self):\n self.scrolled_window = Gtk.ScrolledWindow()\n self.scrolled_window.set_vexpand(True)\n \n self.preference = Adw.PreferencesPage()\n self.preferences_group = Adw.PreferencesGroup()\n\n # Connect to root application to get config object\n application = Gio.Application.get_default()\n \n # https://gnome.pages.gitlab.gnome.org/libadwaita/doc/1-latest/class.SpinRow.html\n self.interval_seconds = Adw.SpinRow(\n title=\"Interval (Seconds)\",\n subtitle=\"Set the metric update interval\",\n digits=1,\n snap_to_ticks=True,\n can_focus=True,\n selectable=False,\n adjustment=Gtk.Adjustment(\n value=application.config.settings.get_value('interval').get_double(), \n lower=0.1, upper=10.0, step_increment=0.1, page_increment=1, page_size=0\n )\n )\n application.config.settings.bind(\n \"interval\",\n self.interval_seconds,\n \"value\",\n Gio.SettingsBindFlags.DEFAULT\n )\n self.preferences_group.add(self.interval_seconds)\n self.preference.add(self.preferences_group)\n self.scrolled_window.set_child(self.preference)\n\n def set_content(self, button):\n \"\"\"\n set_content sets the Settings pane content in the navigation split view\n \"\"\"\n content = Gio.Application.get_default().split_view.get_content()\n content.set_title(\"Settings\")\n content.set_visible(True)\n content.pane.set_content(self.scrolled_window)\n content.pane.set_reveal_bottom_bars(False)" }, { "identifier": "Temperature", "path": "zf/ui/temperature_pane.py", "snippet": "class Temperature():\n \"\"\"\n Temperature class defines the Temperature content pane\n\n It generates the structure in memory to apply to the navigation split view\n \"\"\"\n\n def __init__(self):\n self.scrolled_window = Gtk.ScrolledWindow()\n self.scrolled_window.set_vexpand(True)\n\n self.content_box = Gtk.Box()\n self.content_box.set_halign(Gtk.Align.CENTER)\n self.content_box.set_valign(Gtk.Align.CENTER)\n self.content_box.append(Gtk.Label(label='Temperature'))\n\n self.scrolled_window.set_child(self.content_box)\n\n def set_content(self, button, content=None):\n \"\"\"\n set_content sets the Temperature pane content in the navigation split view\n \"\"\"\n if content == None:\n content = Gio.Application.get_default().split_view.get_content()\n content.set_title(\"Temperature\")\n content.pane.set_content(self.scrolled_window)\n content.pane.set_reveal_bottom_bars(True)" } ]
from gi.repository import Adw, Gdk, Gio, GObject, Gtk
from .about_pane import About
from .power_pane import Power
from .processor_pane import Processor
from .settings_pane import Settings
from .temperature_pane import Temperature
2,510
class ListItem(): """ ListItem class defines the sidebar button widget """ def __init__(self, title: str, icon: str, pane: object) -> None: self.title = title self.icon = icon self.pane = pane class Sidebar(Adw.NavigationPage): """ Sidebar class defines the sidebar pane for Zen Focus """ def __init__(self): super().__init__() # Primary Settings for Sidebar self.set_title("Zen Focus") self.set_vexpand(True) # Set menu bar min width self.set_size_request(220, -1) # Define sidebar header box self.header_box = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=6) self.theme = Gtk.IconTheme.get_for_display(Gdk.Display.get_default()) self.theme.add_search_path(path='icons') self.header_logo = Gtk.Image.new_from_icon_name("zen-focus-symbolic") self.header_label = Gtk.Label(label="Zen Focus") self.header_box.append(self.header_logo) self.header_box.append(self.header_label) # The sidebar show content button when collapsed self.show_button = Gtk.ToggleButton( icon_name="go-next-symbolic", active=False, visible=False, margin_top=0, margin_bottom=0 ) # Bind to the parent Window split view show-content property self.application = Gio.Application.get_default() self.show_button.bind_property( "active", self.application.split_view, "show-content", GObject.BindingFlags.BIDIRECTIONAL ) # Connect to the 'notify::folded' signal of the Adw.NavigationSplitView to show the button self.application.split_view.connect("notify::collapsed", self.on_split_view_folded, self.show_button) # Add the toolbar and header to the sidebar self.toolbar = Adw.ToolbarView() self.header = Adw.HeaderBar() self.header.set_title_widget(self.header_box) self.header.set_show_back_button(True) self.header.set_can_focus(False) self.header.set_decoration_layout('menu:close') self.header.pack_end(self.show_button) self.toolbar.set_content() self.toolbar.add_top_bar(self.header) self.set_child(self.toolbar) self.list = Gtk.ListBox() self.list.set_vexpand(False) self.list.set_margin_top(12) self.list.set_margin_start(6) self.list.set_margin_end(6) self.list.set_selection_mode(Gtk.SelectionMode.SINGLE) # Connect the signal self.list.connect("row-activated", self.on_row_activated) # The sidebar list items to render as buttons # These need to be defined in the sidebar class otherwise the # the primary Adw.ApplicationWindow and settings is undefined top_list_items = [
class ListItem(): """ ListItem class defines the sidebar button widget """ def __init__(self, title: str, icon: str, pane: object) -> None: self.title = title self.icon = icon self.pane = pane class Sidebar(Adw.NavigationPage): """ Sidebar class defines the sidebar pane for Zen Focus """ def __init__(self): super().__init__() # Primary Settings for Sidebar self.set_title("Zen Focus") self.set_vexpand(True) # Set menu bar min width self.set_size_request(220, -1) # Define sidebar header box self.header_box = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=6) self.theme = Gtk.IconTheme.get_for_display(Gdk.Display.get_default()) self.theme.add_search_path(path='icons') self.header_logo = Gtk.Image.new_from_icon_name("zen-focus-symbolic") self.header_label = Gtk.Label(label="Zen Focus") self.header_box.append(self.header_logo) self.header_box.append(self.header_label) # The sidebar show content button when collapsed self.show_button = Gtk.ToggleButton( icon_name="go-next-symbolic", active=False, visible=False, margin_top=0, margin_bottom=0 ) # Bind to the parent Window split view show-content property self.application = Gio.Application.get_default() self.show_button.bind_property( "active", self.application.split_view, "show-content", GObject.BindingFlags.BIDIRECTIONAL ) # Connect to the 'notify::folded' signal of the Adw.NavigationSplitView to show the button self.application.split_view.connect("notify::collapsed", self.on_split_view_folded, self.show_button) # Add the toolbar and header to the sidebar self.toolbar = Adw.ToolbarView() self.header = Adw.HeaderBar() self.header.set_title_widget(self.header_box) self.header.set_show_back_button(True) self.header.set_can_focus(False) self.header.set_decoration_layout('menu:close') self.header.pack_end(self.show_button) self.toolbar.set_content() self.toolbar.add_top_bar(self.header) self.set_child(self.toolbar) self.list = Gtk.ListBox() self.list.set_vexpand(False) self.list.set_margin_top(12) self.list.set_margin_start(6) self.list.set_margin_end(6) self.list.set_selection_mode(Gtk.SelectionMode.SINGLE) # Connect the signal self.list.connect("row-activated", self.on_row_activated) # The sidebar list items to render as buttons # These need to be defined in the sidebar class otherwise the # the primary Adw.ApplicationWindow and settings is undefined top_list_items = [
ListItem("Temperatures", 'temp-symbolic', Temperature()),
4
2023-12-07 21:58:54+00:00
4k
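The sidebar record wires each row to a pane object through the small `ListItem` holder (title, icon, pane), and every pane exposes a `set_content` method that installs its widget in the navigation split view. The GTK parts are hard to show self-contained, so the sketch below keeps only the dispatch idea with plain Python stand-ins: the `FakePane` class, the handler body, and the second row's icon name are made up; only the "Temperatures"/'temp-symbolic' entry comes from the record.

```python
class ListItem:
    """Plain data holder, mirroring the record's sidebar entries."""
    def __init__(self, title: str, icon: str, pane: object) -> None:
        self.title = title
        self.icon = icon
        self.pane = pane

class FakePane:
    # Stand-in for the Adw/Gtk panes (About, Power, Temperature, ...).
    def __init__(self, name: str) -> None:
        self.name = name
    def set_content(self, button=None) -> None:
        print(f"split view now shows: {self.name}")

items = [
    ListItem("Temperatures", "temp-symbolic", FakePane("Temperature")),
    ListItem("Settings", "settings-symbolic", FakePane("Settings")),
]

def on_row_activated(row_index: int) -> None:
    # Row activation hands control to the selected pane, roughly what the
    # Gtk.ListBox "row-activated" handler has to do in the record.
    items[row_index].pane.set_content(None)

on_row_activated(0)   # split view now shows: Temperature
on_row_activated(1)   # split view now shows: Settings
```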
jupyter-server/pycrdt-websocket
tests/test_ystore.py
[ { "identifier": "SQLiteYStore", "path": "pycrdt_websocket/ystore.py", "snippet": "class SQLiteYStore(BaseYStore):\n \"\"\"A YStore which uses an SQLite database.\n Unlike file-based YStores, the Y updates of all documents are stored in the same database.\n\n Subclass to point to your database file:\n\n ```py\n class MySQLiteYStore(SQLiteYStore):\n db_path = \"path/to/my_ystore.db\"\n ```\n \"\"\"\n\n db_path: str = \"ystore.db\"\n # Determines the \"time to live\" for all documents, i.e. how recent the\n # latest update of a document must be before purging document history.\n # Defaults to never purging document history (None).\n document_ttl: int | None = None\n path: str\n lock: Lock\n db_initialized: Event\n\n def __init__(\n self,\n path: str,\n metadata_callback: Callable[[], Awaitable[bytes] | bytes] | None = None,\n log: Logger | None = None,\n ) -> None:\n \"\"\"Initialize the object.\n\n Arguments:\n path: The file path used to store the updates.\n metadata_callback: An optional callback to call to get the metadata.\n log: An optional logger.\n \"\"\"\n self.path = path\n self.metadata_callback = metadata_callback\n self.log = log or getLogger(__name__)\n self.lock = Lock()\n self.db_initialized = Event()\n\n async def start(self, *, task_status: TaskStatus[None] = TASK_STATUS_IGNORED):\n \"\"\"Start the SQLiteYStore.\n\n Arguments:\n task_status: The status to set when the task has started.\n \"\"\"\n if self._starting:\n return\n else:\n self._starting = True\n\n if self._task_group is not None:\n raise RuntimeError(\"YStore already running\")\n\n async with create_task_group() as self._task_group:\n self._task_group.start_soon(self._init_db)\n self.started.set()\n self._starting = False\n task_status.started()\n\n async def _init_db(self):\n create_db = False\n move_db = False\n if not await anyio.Path(self.db_path).exists():\n create_db = True\n else:\n async with self.lock:\n async with aiosqlite.connect(self.db_path) as db:\n cursor = await db.execute(\n \"SELECT count(name) FROM sqlite_master WHERE type='table' and name='yupdates'\"\n )\n table_exists = (await cursor.fetchone())[0]\n if table_exists:\n cursor = await db.execute(\"pragma user_version\")\n version = (await cursor.fetchone())[0]\n if version != self.version:\n move_db = True\n create_db = True\n else:\n create_db = True\n if move_db:\n new_path = await get_new_path(self.db_path)\n self.log.warning(f\"YStore version mismatch, moving {self.db_path} to {new_path}\")\n await anyio.Path(self.db_path).rename(new_path)\n if create_db:\n async with self.lock:\n async with aiosqlite.connect(self.db_path) as db:\n await db.execute(\n \"CREATE TABLE yupdates (path TEXT NOT NULL, yupdate BLOB, metadata BLOB, timestamp REAL NOT NULL)\"\n )\n await db.execute(\n \"CREATE INDEX idx_yupdates_path_timestamp ON yupdates (path, timestamp)\"\n )\n await db.execute(f\"PRAGMA user_version = {self.version}\")\n await db.commit()\n self.db_initialized.set()\n\n async def read(self) -> AsyncIterator[tuple[bytes, bytes, float]]: # type: ignore\n \"\"\"Async iterator for reading the store content.\n\n Returns:\n A tuple of (update, metadata, timestamp) for each update.\n \"\"\"\n await self.db_initialized.wait()\n try:\n async with self.lock:\n async with aiosqlite.connect(self.db_path) as db:\n async with db.execute(\n \"SELECT yupdate, metadata, timestamp FROM yupdates WHERE path = ?\",\n (self.path,),\n ) as cursor:\n found = False\n async for update, metadata, timestamp in cursor:\n found = True\n yield update, metadata, 
timestamp\n if not found:\n raise YDocNotFound\n except Exception:\n raise YDocNotFound\n\n async def write(self, data: bytes) -> None:\n \"\"\"Store an update.\n\n Arguments:\n data: The update to store.\n \"\"\"\n await self.db_initialized.wait()\n async with self.lock:\n async with aiosqlite.connect(self.db_path) as db:\n # first, determine time elapsed since last update\n cursor = await db.execute(\n \"SELECT timestamp FROM yupdates WHERE path = ? ORDER BY timestamp DESC LIMIT 1\",\n (self.path,),\n )\n row = await cursor.fetchone()\n diff = (time.time() - row[0]) if row else 0\n\n if self.document_ttl is not None and diff > self.document_ttl:\n # squash updates\n ydoc = Doc()\n async with db.execute(\n \"SELECT yupdate FROM yupdates WHERE path = ?\", (self.path,)\n ) as cursor:\n async for update, in cursor:\n ydoc.apply_update(update)\n # delete history\n await db.execute(\"DELETE FROM yupdates WHERE path = ?\", (self.path,))\n # insert squashed updates\n squashed_update = ydoc.get_update()\n metadata = await self.get_metadata()\n await db.execute(\n \"INSERT INTO yupdates VALUES (?, ?, ?, ?)\",\n (self.path, squashed_update, metadata, time.time()),\n )\n\n # finally, write this update to the DB\n metadata = await self.get_metadata()\n await db.execute(\n \"INSERT INTO yupdates VALUES (?, ?, ?, ?)\",\n (self.path, data, metadata, time.time()),\n )\n await db.commit()" }, { "identifier": "TempFileYStore", "path": "pycrdt_websocket/ystore.py", "snippet": "class TempFileYStore(FileYStore):\n \"\"\"A YStore which uses the system's temporary directory.\n Files are writen under a common directory.\n To prefix the directory name (e.g. /tmp/my_prefix_b4whmm7y/):\n\n ```py\n class PrefixTempFileYStore(TempFileYStore):\n prefix_dir = \"my_prefix_\"\n ```\n \"\"\"\n\n prefix_dir: str | None = None\n base_dir: str | None = None\n\n def __init__(\n self,\n path: str,\n metadata_callback: Callable[[], Awaitable[bytes] | bytes] | None = None,\n log: Logger | None = None,\n ):\n \"\"\"Initialize the object.\n\n Arguments:\n path: The file path used to store the updates.\n metadata_callback: An optional callback to call to get the metadata.\n log: An optional logger.\n \"\"\"\n full_path = str(Path(self.get_base_dir()) / path)\n super().__init__(full_path, metadata_callback=metadata_callback, log=log)\n\n def get_base_dir(self) -> str:\n \"\"\"Get the base directory where the update file is written.\n\n Returns:\n The base directory path.\n \"\"\"\n if self.base_dir is None:\n self.make_directory()\n assert self.base_dir is not None\n return self.base_dir\n\n def make_directory(self):\n \"\"\"Create the base directory where the update file is written.\"\"\"\n type(self).base_dir = tempfile.mkdtemp(prefix=self.prefix_dir)" } ]
import os
import tempfile
import time
import aiosqlite
import pytest
from pathlib import Path
from unittest.mock import patch
from pycrdt_websocket.ystore import SQLiteYStore, TempFileYStore
1,962
class MetadataCallback: def __init__(self): self.i = 0 async def __call__(self): res = str(self.i).encode() self.i += 1 return res class MyTempFileYStore(TempFileYStore): prefix_dir = "test_temp_" MY_SQLITE_YSTORE_DB_PATH = str(Path(tempfile.mkdtemp(prefix="test_sql_")) / "ystore.db")
class MetadataCallback: def __init__(self): self.i = 0 async def __call__(self): res = str(self.i).encode() self.i += 1 return res class MyTempFileYStore(TempFileYStore): prefix_dir = "test_temp_" MY_SQLITE_YSTORE_DB_PATH = str(Path(tempfile.mkdtemp(prefix="test_sql_")) / "ystore.db")
class MySQLiteYStore(SQLiteYStore):
0
2023-12-08 10:38:31+00:00
4k
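The ystore record's docstrings spell out the intended configuration surface: subclass `SQLiteYStore` to point all documents at one database file (optionally setting `document_ttl`, after which a document's history is squashed), or subclass `TempFileYStore` to prefix the temporary directory. A minimal configuration sketch along those lines, with made-up paths, room name, and TTL:

```python
from pycrdt_websocket.ystore import SQLiteYStore, TempFileYStore

class MySQLiteYStore(SQLiteYStore):
    # All documents share this one SQLite file (see the SQLiteYStore docstring).
    db_path = "path/to/my_ystore.db"
    # Squash a document's stored update history once more than an hour has
    # passed since its latest update; the default (None) never purges.
    document_ttl = 3600

class PrefixTempFileYStore(TempFileYStore):
    # Update files land under e.g. /tmp/my_prefix_XXXXXXXX/<path>.
    prefix_dir = "my_prefix_"

# One store instance per document path / room name.
store = MySQLiteYStore("my-room")
```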
juniberry/PacketIRC
packetirc.py
[ { "identifier": "LOG_FILE", "path": "settings.py", "snippet": "LOG_FILE = \"packetirc.log\"" }, { "identifier": "LOG_LEVEL", "path": "settings.py", "snippet": "LOG_LEVEL = logging.INFO" }, { "identifier": "SERVER", "path": "settings.py", "snippet": "SERVER = \"\"" }, { "identifier": "PORT", "path": "settings.py", "snippet": "PORT = 6667" }, { "identifier": "PASS", "path": "settings.py", "snippet": "PASS = \"\"" }, { "identifier": "CHANNEL", "path": "settings.py", "snippet": "CHANNEL = \"#Testing\"" }, { "identifier": "HIDE_SERVER", "path": "settings.py", "snippet": "HIDE_SERVER = True" }, { "identifier": "MAX_RETRIES", "path": "settings.py", "snippet": "MAX_RETRIES = 3" }, { "identifier": "RETRY_DELAY", "path": "settings.py", "snippet": "RETRY_DELAY = 5 # seconds" }, { "identifier": "HELP_INFO", "path": "settings.py", "snippet": "HELP_INFO = \"\"\"\nPacketIRC commands:\n /quit [message] - Disconnect from the server with optional message.\n /msg <nickname> <message> - Send a private message to the specified user.\n /join <channel> - Join the specified channel.\n /names - Shows a list of users in the channel.\n /topic [new topic] - Set a new topic for the current channel or request the topic.\n /away [message] - Set an away message or clear the away status.\n /whois <nickname> - Retrieves information about the specified user.\n /help - Display this help message.\n\"\"\"" }, { "identifier": "WELCOME_MESSAGE", "path": "settings.py", "snippet": "WELCOME_MESSAGE = \"\"\"\nWelcome to PacketIRC!\nType /help for a list of commands.\n\"\"\"" }, { "identifier": "BAD_WORDS_FILE", "path": "settings.py", "snippet": "BAD_WORDS_FILE = \"bad_words.txt\"" }, { "identifier": "BAD_WORDS_FILTER", "path": "settings.py", "snippet": "BAD_WORDS_FILTER = False" } ]
import socket
import threading
import random
import time
import logging
import re
import irc.client
import os
import sys
from settings import LOG_FILE, LOG_LEVEL, SERVER, PORT, PASS, CHANNEL, HIDE_SERVER, MAX_RETRIES, RETRY_DELAY, HELP_INFO, WELCOME_MESSAGE, BAD_WORDS_FILE, BAD_WORDS_FILTER
2,606
def on_namreply(self, connection, event): """ Triggered when joining a channel or requesting NAMES. """ channel = event.arguments[1] names = event.arguments[2].split() # Print the names directly print(f"Users in {channel}: {', '.join(names)}") def on_quit(self, connection, event): """ Triggered when a luser quits in a channel we are in. """ nickname = event.source.nick reason = event.arguments[0] if event.arguments else "" print(f"* {nickname} has quit ({reason})") def on_privmsg(self, connection, event): """ Triggered when a user sends us a directed PRIVMSG. """ sender = event.source.nick message = event.arguments[0] print(f"** {sender}: {message}") def on_pubmsg(self, connection, event): """ Triggered from a PRIVMSG sent to a channel we are in. """ # Handle public messages received in the channel nickname = event.source.nick message = event.arguments[0] print(f"<{nickname}> {message}") def on_action(self, connection, event): """ Triggered by emotive ACTIONs, be they on a channel or directed. """ nickname = event.source.nick message = event.arguments[0] channel = event.target print(f"* {nickname} {message}") def on_topicprotected(self, connection, event): """ Apparently this is supposed to trigger when we try to change the topic but are not permitted to. """ print(f"** You don't have permission to change the topic.") # TODO: ## User doesn't have perm to set topic. ## This seems to be broken? def on_topic(self, connection, event): """ Triggered by the server to indicate that the topic has been changed. """ who = event.source.nick #channel = event.target new_topic = event.arguments[0] print(f"* {who} changed the topic to: {new_topic}") def on_currenttopic(self, connection, event): """ Triggered by the server to indicate the current topic of a channel, from our query request. """ channel = event.arguments[0] topic = event.arguments[1] print(f"** {channel}: {topic}") def on_list(self, connection, event): """ Handles the event for LISTing channels. This method is called for each channel in the list. This can be a firehose ...we might want to put a flood or limit on this eventually...soon..ish """ channel = event.arguments[0] if event.arguments else '' user_count = event.arguments[1] if len(event.arguments) > 1 else '' topic = event.arguments[2] if len(event.arguments) > 2 else '' # Truncate topic to 60 characters if longer. if len(topic) > 60: topic = topic[:57] + '...' print(f"{channel} [{user_count}] {topic}") def handle_user_input(irc_client): """ Continuously handle user input and processes IRC style commands. This handler is run within it's own thread aside from the PacketIRC client class. """ global is_running # Threaded runloop. Toggle is_running to False to exit the user interface thread. while is_running: try: # Fetch user input, strip whitespace and log it. message = input().strip() logging.info(f"{callsign} >>> {message}") # Check to see if the user message is a command. if message.startswith('/'): # It likely is, try to process it. # # Split the message into command and command_args parts = message.split(' ', 1) command = parts[0].lower() command_args = parts[1] if len(parts) > 1 else "" # /QUIT - Disconnect and exit with optional quit message. if command == '/quit': # Set the running state flag off, to exit thread runloop. is_running = False # If the user specified a message, use it, otherwise plug in 73. quit_message = command_args if command_args else "73" # We checking for naughty words? If so clean them.
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ ______ _ _____ ______ ______ (_____ \ | | _ (_____|_____ \ / _____) _____) )___ ____| | _ ____| |_ _ _____) ) / | ____/ _ |/ ___) | / ) _ ) _) | | (_____ (| | | | ( ( | ( (___| |< ( (/ /| |__ _| |_ | | \_____ |_| \_||_|\____)_| \_)____)\___|_____) |_|\______) PacketIRC is a bandwidth-conscious IRC client specifically designed for packet radio communication. It includes a client-side implementation with simplified IRC functionalities. File: client.py Author: Daria Juniper @juniberry Date: 10-Dec-2023 Changes: 12-Dec-2023 - Initial version 1.0 beta. """ # Import settings from an external configuration file. # Globals VERSION = 'v1.1b' BAD_WORDS = [] HOME_PATH = os.path.dirname(os.path.abspath(__file__)) # Grab home path for use with logging et al. # State is_running = True # Initialize logging. logging.basicConfig(filename=os.path.join(HOME_PATH, LOG_FILE), filemode='w', level=LOG_LEVEL, format='%(asctime)s - %(levelname)s - %(message)s') # PacketIRC Client Class class PacketIRCClient(irc.client.SimpleIRCClient): """ PacketIRCClient class extends irc.client.SimpleIRCClient. It includes methods for handling various IRC events and actions. """ def __init__(self, callsign): """ Initialize the IRC client with a callsign and current_channel property. The callsign should be passed from the packet switch and the client is designed to only operate on a single channel for sanity/bandwidth. """ super().__init__() self.callsign = callsign self.current_channel = None def on_disconnect(self, connection, event): global is_running is_running = False logging.info(f"{callsign} Disconnected from server.") print("** Disconnected.") def on_error(self, connection, event): """ Handle any errors encountered during the IRC session. We will not inform the user since many of these errors can be unhelpful or contain information you dont wish broadcast on the air. So we log it. """ logging.error(f"{callsign} on_error(): {event}") def on_motdstart(self, connection, event): """ Triggered when the MOTD listing begins. """ print("** Message of the Day") def on_motd(self, connection, event): """ Dump out lines of the MOTD Apparently this is only fired once? But could be multiple times? """ for line in event.arguments: print(line) def on_notice(self, connection, event): """ Handle Notices Notices can come from the server, users and sometimes seemingly out of the aether. """ source = event.source.nick if event.source else "SERVER" text = event.arguments[0] print(f"-{source}- {text}") def on_welcome(self, connection, event): """ Triggered when initially connected to an IRC server. We are going to use this to set up our initial channel if set in settings. """ server_name = connection.get_server_name() print(f"** Connected to {server_name}") # Request client send a keepalive message every 30 sec. connection.set_keepalive(30) # If CHANNEL is specified in settings.py then join it. if CHANNEL: connection.join(CHANNEL) def on_whoisuser(self, connection, event): """ Triggered when the server returns query info for a WHOIS """ nick = event.arguments[0] username = event.arguments[1] hostname = event.arguments[2] server = event.arguments[3] real_name = event.arguments[4] print(f"** WHOIS for {nick}") print(f" {username}@{hostname}") # Not all IRCd's will return the server, so this needs to be optional. if not all(char in ' *' for char in server): print(f" Server: {server}") print(f" Name: {real_name}") def on_nicknameinuse(self, connection, event): """ Nickname is in use! Oh noes! 
Let's do something silly like randomly pick a number, tack that on to the callsign and back away slowly............ >_> """ self.callsign += "_" + str(random.randint(0, 999)) connection.nick(self.callsign) def on_join(self, connection, event): """ Triggered when the user joins a channel (including the user). If this is us joining a channel, action it as such. If this is a new user joining our channel, action it as.... """ nickname = event.source.nick channel = event.target # If the target of the notice is us, we're the ones joining. if nickname == self.connection.get_nickname(): # Reset current channel if we're joining a new one self.current_channel = channel print(f"** Joined {channel}") # Request the topic for the new channel connection.topic(channel) else: # Nope, just another luser joining the idle sesh. print(f"* {nickname} has joined {channel}") def on_part(self, connection, event): """ Triggered when a luser leaves a channel. """ nickname = event.source.nick channel = event.target reason = event.arguments[0] if event.arguments else "" print(f"* {nickname} has left {channel} ({reason})") def on_namreply(self, connection, event): """ Triggered when joining a channel or requesting NAMES. """ channel = event.arguments[1] names = event.arguments[2].split() # Print the names directly print(f"Users in {channel}: {', '.join(names)}") def on_quit(self, connection, event): """ Triggered when a luser quits in a channel we are in. """ nickname = event.source.nick reason = event.arguments[0] if event.arguments else "" print(f"* {nickname} has quit ({reason})") def on_privmsg(self, connection, event): """ Triggered when a user sends us a directed PRIVMSG. """ sender = event.source.nick message = event.arguments[0] print(f"** {sender}: {message}") def on_pubmsg(self, connection, event): """ Triggered from a PRIVMSG sent to a channel we are in. """ # Handle public messages received in the channel nickname = event.source.nick message = event.arguments[0] print(f"<{nickname}> {message}") def on_action(self, connection, event): """ Triggered by emotive ACTIONs, be they on a channel or directed. """ nickname = event.source.nick message = event.arguments[0] channel = event.target print(f"* {nickname} {message}") def on_topicprotected(self, connection, event): """ Apparently this is supposed to trigger when we try to change the topic but are not permitted to. """ print(f"** You don't have permission to change the topic.") # TODO: ## User doesn't have perm to set topic. ## This seems to be broken? def on_topic(self, connection, event): """ Triggered by the server to indicate that the topic has been changed. """ who = event.source.nick #channel = event.target new_topic = event.arguments[0] print(f"* {who} changed the topic to: {new_topic}") def on_currenttopic(self, connection, event): """ Triggered by the server to indicate the current topic of a channel, from our query request. """ channel = event.arguments[0] topic = event.arguments[1] print(f"** {channel}: {topic}") def on_list(self, connection, event): """ Handles the event for LISTing channels. This method is called for each channel in the list. This can be a firehose ...we might want to put a flood or limit on this eventually...soon..ish """ channel = event.arguments[0] if event.arguments else '' user_count = event.arguments[1] if len(event.arguments) > 1 else '' topic = event.arguments[2] if len(event.arguments) > 2 else '' # Truncate topic to 60 characters if longer. if len(topic) > 60: topic = topic[:57] + '...' 
print(f"{channel} [{user_count}] {topic}") def handle_user_input(irc_client): """ Continuously handle user input and processes IRC style commands. This handler is run within it's own thread aside from the PacketIRC client class. """ global is_running # Threaded runloop. Toggle is_running to False to exit the user interface thread. while is_running: try: # Fetch user input, strip whitespace and log it. message = input().strip() logging.info(f"{callsign} >>> {message}") # Check to see if the user message is a command. if message.startswith('/'): # It likely is, try to process it. # # Split the message into command and command_args parts = message.split(' ', 1) command = parts[0].lower() command_args = parts[1] if len(parts) > 1 else "" # /QUIT - Disconnect and exit with optional quit message. if command == '/quit': # Set the running state flag off, to exit thread runloop. is_running = False # If the user specified a message, use it, otherwise plug in 73. quit_message = command_args if command_args else "73" # We checking for naughty words? If so clean them.
if BAD_WORDS_FILTER:
12
2023-12-13 19:08:48+00:00
4k
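PacketIRC's input loop treats anything starting with '/' as a command: it splits the line on the first space, lower-cases the command, and keeps the remainder as arguments (falling back to defaults such as the "73" quit message). The helper below isolates just that parsing step so it can be tried outside the client; the function name and the sample callsign are made up.

```python
def parse_command(message: str):
    """Split one line of user input the way handle_user_input does above."""
    message = message.strip()
    if not message.startswith("/"):
        return None, message                 # ordinary channel text, not a command
    parts = message.split(" ", 1)
    command = parts[0].lower()               # e.g. '/quit', '/msg', '/topic'
    args = parts[1] if len(parts) > 1 else ""
    return command, args

print(parse_command("/QUIT gone for coffee"))   # ('/quit', 'gone for coffee')
print(parse_command("/msg N0CALL hello"))       # ('/msg', 'N0CALL hello')
print(parse_command("cq cq cq de N0CALL"))      # (None, 'cq cq cq de N0CALL')
```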
Tps-F/rvc-onnx-test
onnxlib/modules.py
[ { "identifier": "commons", "path": "onnxlib/commons.py", "snippet": "def init_weights(m, mean=0.0, std=0.01):\ndef get_padding(kernel_size, dilation=1):\ndef kl_divergence(m_p, logs_p, m_q, logs_q):\ndef rand_gumbel(shape):\ndef rand_gumbel_like(x):\ndef slice_segments(x, ids_str, segment_size=4):\ndef slice_segments2(x, ids_str, segment_size=4):\ndef rand_slice_segments(x, x_lengths=None, segment_size=4):\ndef get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4):\ndef add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):\ndef cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):\ndef subsequent_mask(length):\ndef fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):\ndef convert_pad_shape(pad_shape: List[List[int]]) -> List[int]:\ndef shift_1d(x):\ndef sequence_mask(length: torch.Tensor, max_length: Optional[int] = None):\ndef generate_path(duration, mask):\ndef clip_grad_value_(parameters, clip_value, norm_type=2):" }, { "identifier": "get_padding", "path": "onnxlib/commons.py", "snippet": "def get_padding(kernel_size, dilation=1):\n return int((kernel_size * dilation - dilation) / 2)" }, { "identifier": "init_weights", "path": "onnxlib/commons.py", "snippet": "def init_weights(m, mean=0.0, std=0.01):\n classname = m.__class__.__name__\n if classname.find(\"Conv\") != -1:\n m.weight.data.normal_(mean, std)" }, { "identifier": "piecewise_rational_quadratic_transform", "path": "onnxlib/transforms.py", "snippet": "def piecewise_rational_quadratic_transform(\n inputs,\n unnormalized_widths,\n unnormalized_heights,\n unnormalized_derivatives,\n inverse=False,\n tails=None,\n tail_bound=1.0,\n min_bin_width=DEFAULT_MIN_BIN_WIDTH,\n min_bin_height=DEFAULT_MIN_BIN_HEIGHT,\n min_derivative=DEFAULT_MIN_DERIVATIVE,\n):\n if tails is None:\n spline_fn = rational_quadratic_spline\n spline_kwargs = {}\n else:\n spline_fn = unconstrained_rational_quadratic_spline\n spline_kwargs = {\"tails\": tails, \"tail_bound\": tail_bound}\n\n outputs, logabsdet = spline_fn(\n inputs=inputs,\n unnormalized_widths=unnormalized_widths,\n unnormalized_heights=unnormalized_heights,\n unnormalized_derivatives=unnormalized_derivatives,\n inverse=inverse,\n min_bin_width=min_bin_width,\n min_bin_height=min_bin_height,\n min_derivative=min_derivative,\n **spline_kwargs\n )\n return outputs, logabsdet" } ]
import math
import torch
from typing import Optional, Tuple
from torch import nn
from torch.nn import Conv1d
from torch.nn import functional as F
from torch.nn.utils import remove_weight_norm, weight_norm
from onnxlib import commons
from onnxlib.commons import get_padding, init_weights
from onnxlib.transforms import piecewise_rational_quadratic_transform
2,163
class DDSConv(nn.Module): """ Dialted and Depth-Separable Convolution """ def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0): super(DDSConv, self).__init__() self.channels = channels self.kernel_size = kernel_size self.n_layers = n_layers self.p_dropout = float(p_dropout) self.drop = nn.Dropout(float(p_dropout)) self.convs_sep = nn.ModuleList() self.convs_1x1 = nn.ModuleList() self.norms_1 = nn.ModuleList() self.norms_2 = nn.ModuleList() for i in range(n_layers): dilation = kernel_size**i padding = (kernel_size * dilation - dilation) // 2 self.convs_sep.append( nn.Conv1d( channels, channels, kernel_size, groups=channels, dilation=dilation, padding=padding, ) ) self.convs_1x1.append(nn.Conv1d(channels, channels, 1)) self.norms_1.append(LayerNorm(channels)) self.norms_2.append(LayerNorm(channels)) def forward(self, x, x_mask, g: Optional[torch.Tensor] = None): if g is not None: x = x + g for i in range(self.n_layers): y = self.convs_sep[i](x * x_mask) y = self.norms_1[i](y) y = F.gelu(y) y = self.convs_1x1[i](y) y = self.norms_2[i](y) y = F.gelu(y) y = self.drop(y) x = x + y return x * x_mask class WN(torch.nn.Module): def __init__( self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0, ): super(WN, self).__init__() assert kernel_size % 2 == 1 self.hidden_channels = hidden_channels self.kernel_size = (kernel_size,) self.dilation_rate = dilation_rate self.n_layers = n_layers self.gin_channels = gin_channels self.p_dropout = float(p_dropout) self.in_layers = torch.nn.ModuleList() self.res_skip_layers = torch.nn.ModuleList() self.drop = nn.Dropout(float(p_dropout)) if gin_channels != 0: cond_layer = torch.nn.Conv1d( gin_channels, 2 * hidden_channels * n_layers, 1 ) self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight") for i in range(n_layers): dilation = dilation_rate**i padding = int((kernel_size * dilation - dilation) / 2) in_layer = torch.nn.Conv1d( hidden_channels, 2 * hidden_channels, kernel_size, dilation=dilation, padding=padding, ) in_layer = torch.nn.utils.weight_norm(in_layer, name="weight") self.in_layers.append(in_layer) # last one is not necessary if i < n_layers - 1: res_skip_channels = 2 * hidden_channels else: res_skip_channels = hidden_channels res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1) res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight") self.res_skip_layers.append(res_skip_layer) def forward( self, x: torch.Tensor, x_mask: torch.Tensor, g: Optional[torch.Tensor] = None ): output = torch.zeros_like(x) n_channels_tensor = torch.IntTensor([self.hidden_channels]) if g is not None: g = self.cond_layer(g) for i, (in_layer, res_skip_layer) in enumerate( zip(self.in_layers, self.res_skip_layers) ): x_in = in_layer(x) if g is not None: cond_offset = i * 2 * self.hidden_channels g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :] else: g_l = torch.zeros_like(x_in)
LRELU_SLOPE = 0.1 class LayerNorm(nn.Module): def __init__(self, channels, eps=1e-5): super(LayerNorm, self).__init__() self.channels = channels self.eps = eps self.gamma = nn.Parameter(torch.ones(channels)) self.beta = nn.Parameter(torch.zeros(channels)) def forward(self, x): x = x.transpose(1, -1) x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps) return x.transpose(1, -1) class ConvReluNorm(nn.Module): def __init__( self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout, ): super(ConvReluNorm, self).__init__() self.in_channels = in_channels self.hidden_channels = hidden_channels self.out_channels = out_channels self.kernel_size = kernel_size self.n_layers = n_layers self.p_dropout = float(p_dropout) assert n_layers > 1, "Number of layers should be larger than 0." self.conv_layers = nn.ModuleList() self.norm_layers = nn.ModuleList() self.conv_layers.append( nn.Conv1d( in_channels, hidden_channels, kernel_size, padding=kernel_size // 2 ) ) self.norm_layers.append(LayerNorm(hidden_channels)) self.relu_drop = nn.Sequential(nn.ReLU(), nn.Dropout(float(p_dropout))) for _ in range(n_layers - 1): self.conv_layers.append( nn.Conv1d( hidden_channels, hidden_channels, kernel_size, padding=kernel_size // 2, ) ) self.norm_layers.append(LayerNorm(hidden_channels)) self.proj = nn.Conv1d(hidden_channels, out_channels, 1) self.proj.weight.data.zero_() self.proj.bias.data.zero_() def forward(self, x, x_mask): x_org = x for i in range(self.n_layers): x = self.conv_layers[i](x * x_mask) x = self.norm_layers[i](x) x = self.relu_drop(x) x = x_org + self.proj(x) return x * x_mask class DDSConv(nn.Module): """ Dialted and Depth-Separable Convolution """ def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0): super(DDSConv, self).__init__() self.channels = channels self.kernel_size = kernel_size self.n_layers = n_layers self.p_dropout = float(p_dropout) self.drop = nn.Dropout(float(p_dropout)) self.convs_sep = nn.ModuleList() self.convs_1x1 = nn.ModuleList() self.norms_1 = nn.ModuleList() self.norms_2 = nn.ModuleList() for i in range(n_layers): dilation = kernel_size**i padding = (kernel_size * dilation - dilation) // 2 self.convs_sep.append( nn.Conv1d( channels, channels, kernel_size, groups=channels, dilation=dilation, padding=padding, ) ) self.convs_1x1.append(nn.Conv1d(channels, channels, 1)) self.norms_1.append(LayerNorm(channels)) self.norms_2.append(LayerNorm(channels)) def forward(self, x, x_mask, g: Optional[torch.Tensor] = None): if g is not None: x = x + g for i in range(self.n_layers): y = self.convs_sep[i](x * x_mask) y = self.norms_1[i](y) y = F.gelu(y) y = self.convs_1x1[i](y) y = self.norms_2[i](y) y = F.gelu(y) y = self.drop(y) x = x + y return x * x_mask class WN(torch.nn.Module): def __init__( self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0, ): super(WN, self).__init__() assert kernel_size % 2 == 1 self.hidden_channels = hidden_channels self.kernel_size = (kernel_size,) self.dilation_rate = dilation_rate self.n_layers = n_layers self.gin_channels = gin_channels self.p_dropout = float(p_dropout) self.in_layers = torch.nn.ModuleList() self.res_skip_layers = torch.nn.ModuleList() self.drop = nn.Dropout(float(p_dropout)) if gin_channels != 0: cond_layer = torch.nn.Conv1d( gin_channels, 2 * hidden_channels * n_layers, 1 ) self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight") for i in range(n_layers): dilation = dilation_rate**i padding = int((kernel_size * dilation - dilation) / 
2) in_layer = torch.nn.Conv1d( hidden_channels, 2 * hidden_channels, kernel_size, dilation=dilation, padding=padding, ) in_layer = torch.nn.utils.weight_norm(in_layer, name="weight") self.in_layers.append(in_layer) # last one is not necessary if i < n_layers - 1: res_skip_channels = 2 * hidden_channels else: res_skip_channels = hidden_channels res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1) res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight") self.res_skip_layers.append(res_skip_layer) def forward( self, x: torch.Tensor, x_mask: torch.Tensor, g: Optional[torch.Tensor] = None ): output = torch.zeros_like(x) n_channels_tensor = torch.IntTensor([self.hidden_channels]) if g is not None: g = self.cond_layer(g) for i, (in_layer, res_skip_layer) in enumerate( zip(self.in_layers, self.res_skip_layers) ): x_in = in_layer(x) if g is not None: cond_offset = i * 2 * self.hidden_channels g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :] else: g_l = torch.zeros_like(x_in)
acts = commons.fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor)
0
2023-12-09 04:08:04+00:00
4k
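The onnxlib.commons.get_padding helper in the record above computes the padding that keeps a dilated 1-D convolution length-preserving. A minimal standalone sketch of that behavior (plain PyTorch with made-up tensor sizes, not the repository's module):

import torch
from torch import nn

def get_padding(kernel_size, dilation=1):
    # same formula as onnxlib.commons.get_padding in the context above
    return int((kernel_size * dilation - dilation) / 2)

x = torch.randn(1, 8, 100)  # (batch, channels, time); sizes are arbitrary
for k, d in [(3, 1), (5, 2), (7, 4)]:
    conv = nn.Conv1d(8, 8, k, dilation=d, padding=get_padding(k, d))
    # for odd kernel sizes this padding keeps the time dimension unchanged
    assert conv(x).shape[-1] == x.shape[-1]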
zengydd/ProphDR
Models/Proph_DR.py
[ { "identifier": "load_config", "path": "utils/optimizer.py", "snippet": "def load_config(path):\n with open(path, 'r') as f:\n return EasyDict(yaml.safe_load(f))" }, { "identifier": "get_optimizer", "path": "utils/optimizer.py", "snippet": "def get_optimizer(cfg, model):\n if cfg.type == 'adam':\n return torch.optim.Adam(\n model.parameters(),\n lr=cfg.lr,\n weight_decay=cfg.weight_decay,\n betas=(cfg.beta1, cfg.beta2, )\n )\n else:\n raise NotImplementedError('Optimizer not supported: %s' % cfg.type)" }, { "identifier": "get_scheduler", "path": "utils/optimizer.py", "snippet": "def get_scheduler(cfg, optimizer):\n if cfg.type == 'plateau':\n return torch.optim.lr_scheduler.ReduceLROnPlateau(\n optimizer,\n factor=cfg.factor,\n patience=cfg.patience,\n min_lr=cfg.min_lr\n )\n else:\n raise NotImplementedError('Scheduler not supported: %s' % cfg.type)" }, { "identifier": "nested_dict_factory", "path": "utils/load.py", "snippet": "def nested_dict_factory():\n return defaultdict(nested_dict_factory)" }, { "identifier": "load_pickle", "path": "utils/load.py", "snippet": "def load_pickle(path):\n\tf = open(path, \"rb\")\n\tdata = pickle.load(f)\n\tf.close()\n\treturn data" }, { "identifier": "save_pickle", "path": "utils/load.py", "snippet": "def save_pickle(data, path):\n\tf = open(path, \"wb\")\n\tpickle.dump(data, f)\n\tf.close()" }, { "identifier": "EarlyStopping", "path": "utils/load.py", "snippet": "class EarlyStopping:\n \"\"\"Early stops the training if validation loss doesn't improve after a given patience.\"\"\"\n def __init__(self, patience=10, verbose=False, delta=0):\n self.patience = patience\n self.verbose = verbose\n self.counter = 0\n self.best_score = None\n self.early_stop = False\n self.val_loss_min = np.Inf\n self.delta = delta\n\n def __call__(self, val_loss, model, model_dir):\n score = -val_loss\n if self.best_score is None:\n self.best_score = score\n self.save_checkpoint(val_loss, model, model_dir)\n elif score < self.best_score + self.delta:\n self.counter += 1\n print(f'EarlyStopping counter: {self.counter} out of {self.patience}')\n if self.counter >= self.patience:\n self.early_stop = True\n else:\n print('best_score:',self.best_score, 'now:',score)\n self.best_score = score\n self.save_checkpoint(val_loss, model, model_dir)\n self.counter = 0\n\n def save_checkpoint(self, val_loss, model, model_dir):\n '''Saves model when validation loss decrease.'''\n if self.verbose:\n print(f'Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}). 
Saving model ...')\n # torch.save(model.state_dict(), 'checkpoint.pt')\n print('now best_score:', self.best_score)\n torch.save({'model_state_dict': model.state_dict()}, model_dir + '/checkpoint.pt')\n self.val_loss_min = val_loss" }, { "identifier": "FocalLoss", "path": "utils/load.py", "snippet": "class FocalLoss(nn.Module):\n def __init__(self, alpha=1, gamma=2, logits=False, reduction=True):\n super(FocalLoss, self).__init__()\n self.alpha = alpha\n self.gamma = gamma\n self.logits = logits\n self.reduction = reduction\n\n def forward(self, inputs, targets):\n if self.logits:\n BCE_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction='none')\n else:\n BCE_loss = F.binary_cross_entropy(inputs, targets, reduction='none')\n pt = torch.exp(-BCE_loss)\n F_loss = self.alpha * (1-pt)**self.gamma * BCE_loss\n\n if self.reduction:\n return torch.mean(F_loss)\n else:\n return F_loss" }, { "identifier": "mydata", "path": "utils/mydata.py", "snippet": "class mydata(data.Dataset):\n def __init__(self, list_ID, label, res_df, drug_smiles_df, omic_encode_dict):\n 'Initialization'\n self.list_ID = list_ID\n self.label = label\n self.res_df = res_df \n self.drug_smiles_df = drug_smiles_df\n self.omic_encode_dict = omic_encode_dict\n\n def __len__(self):\n 'Denotes the total number of samples'\n return len(self.list_ID)\n\n def __getitem__(self, index):\n label = self.label[index]\n ID = self.list_ID[index]\n drug_id = self.res_df.iloc[ID]['DRUG_ID']\n cosmic_id = self.res_df.iloc[ID]['COSMIC_ID']\n drug_f = self.drug_smiles_df.loc[drug_id]['smiles']\n omic_f = self.omic_encode_dict[str(cosmic_id)]\n \n return drug_id, cosmic_id, drug_f, omic_f, label" }, { "identifier": "dataset_split", "path": "utils/mydata.py", "snippet": "def dataset_split(res_df, random=4, stratify=None):\n if stratify == None:\n train_set, val_test_set = train_test_split(res_df, test_size=0.2, random_state=random)\n val_set, test_set = train_test_split(val_test_set, test_size=0.5, random_state=random)\n else:\n train_set, val_test_set = train_test_split(res_df, test_size=0.2, random_state=random, stratify=res_df[stratify])\n # print('ct', val_test_set['binary'].tolist())\n val_set, test_set = train_test_split(val_test_set, test_size=0.5, random_state=random, stratify=val_test_set[stratify])\n print('Responses:{}'.format(res_df.shape[0]))\n print('Train:{}'.format(train_set.shape[0]))\n print('Val:{}'.format(val_set.shape[0]))\n print('Test:{}'.format(test_set.shape[0]))\n print('train_DRUG:{}, val_DRUG:{}, test_DRUG:{}'.format(len(train_set['DRUG_ID'].value_counts()), len(set(val_set['DRUG_ID'])), len(set(test_set['DRUG_ID']))))\n print('train_cell:{}, val_cell:{}, test_cell:{}'.format(len(set(train_set['COSMIC_ID'])), len(set(val_set['COSMIC_ID'])), len(set(test_set['COSMIC_ID']))))\n return train_set, val_set, test_set" }, { "identifier": "encoder_D_pred", "path": "utils/Drug_encode.py", "snippet": "def encoder_D_pred(smiles):\n h_global, h_atom = bert_atom_embedding(smiles)\n # print('h_global', h_global)\n f_pad, max_len, valid_lens = unreg_atomf_list2tensor_pred(h_atom)\n valid_lenD_list = [valid_lens]*4\n valid_lens = torch.tensor(valid_lenD_list)\n encode_D_pred = np.vstack((h_global, f_pad))\n encode_D_pred_list = [encode_D_pred]*4\n encode_D_pred = torch.stack([torch.tensor(arr) for arr in list(encode_D_pred_list)])\n return encode_D_pred, valid_lens" }, { "identifier": "kbert", "path": "utils/Drug_encode.py", "snippet": "def kbert(drug_id_list):\n drug_encode_df = pd.read_csv(drug_std_dir + 
'drug_smiles_k_bert.csv')\n feature_list = []\n for i, drug_id in enumerate(drug_id_list):\n drug_id = drug_id.item()\n drug_global_f = drug_encode_df.loc[drug_encode_df['drug_id']==drug_id, 'pretrain_feature_1':'pretrain_feature_768'].values.ravel()\n drug_global_f =drug_global_f.reshape(-1, drug_global_f.shape[0])\n drug_a_f = atom_pad_dict[drug_id]\n # 第0位是global token\n f = np.vstack((drug_global_f, drug_a_f))\n # print(f.shape)\n feature_list.append(f)\n return feature_list" }, { "identifier": "CCNet", "path": "Models/RCCA_ca.py", "snippet": "class CCNet(nn.Module):\n def __init__(self, dim, recurrence=2):\n super(CCNet, self).__init__()\n self.ccnet = RCCAModule(dim, in_channels=1, out_channels=512, recurrence=recurrence)\n def forward(self, x):\n output, attn_list = self.ccnet(x)\n return output, attn_list" }, { "identifier": "cross_EncoderBlock_G", "path": "Models/cross_attention_dual.py", "snippet": "class cross_EncoderBlock_G(nn.Module):\n \"\"\"Transformer编码器块\"\"\"\n def __init__(self, query_size, key_size, value_size, num_hiddens,\n num_heads, norm_shape,\n dropout=0.1, bias=False, **kwargs):\n super(cross_EncoderBlock_G, self).__init__(**kwargs)\n\n self.cross_attention = cross_MultiHeadAttention_G(\n query_size, key_size, value_size, num_hiddens, num_heads, dropout, bias)\n self.addnorm_q = AddNorm_Q(norm_shape, query_size, num_hiddens, dropout)\n self.linear = nn.Linear(num_hiddens, num_hiddens)\n def forward(self, q, k, v, valid_lens):\n attn_output, attn_w = self.cross_attention(q, k, v, valid_lens)\n\n out = self.addnorm_q(q, attn_output)\n return out, attn_w" }, { "identifier": "cross_EncoderBlock_D", "path": "Models/cross_attention_dual.py", "snippet": "class cross_EncoderBlock_D(nn.Module):\n \"\"\"Transformer编码器块\"\"\"\n def __init__(self, query_size, key_size, value_size, num_hiddens,\n num_heads, norm_shape,\n dropout=0.1, bias=False, **kwargs):\n super(cross_EncoderBlock_D, self).__init__(**kwargs)\n # print('query_size', query_size)\n self.cross_attention = cross_MultiHeadAttention_D(\n query_size, key_size, value_size, num_hiddens, num_heads, dropout, bias)\n # self.norm_shape = [self.len_q, self.h_dim]\n self.addnorm_q = AddNorm_Q(norm_shape, query_size, num_hiddens, dropout)\n # self.addnorm = AddNorm(norm_shape, dropout)\n self.linear = nn.Linear(num_hiddens, num_hiddens)\n def forward(self, q, k, v, valid_lens):\n attn_output, attn_w = self.cross_attention(q, k, v, valid_lens)\n # print('attn_output', attn_output.shape)\n # print('attn_w', attn_w.shape)\n out = self.addnorm_q(q, attn_output)\n return out, attn_w" } ]
import os, sys import pandas as pd import numpy as np import random import copy import time import datetime import math import pickle import optuna import yaml import torch import torch.nn as nn import torch.nn.functional as F from torch.utils import data from torch.nn.parallel import DataParallel from torch.autograd import Variable from torch.optim.lr_scheduler import ReduceLROnPlateau, MultiStepLR from sklearn.model_selection import train_test_split, KFold from sklearn.model_selection import StratifiedKFold from sklearn.preprocessing import StandardScaler from sklearn.impute import SimpleImputer from torch.utils.tensorboard import SummaryWriter from torch.utils.data import SequentialSampler from prettytable import PrettyTable from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error from sklearn.metrics import roc_auc_score, average_precision_score, accuracy_score, roc_curve, f1_score, precision_recall_curve from lifelines.utils import concordance_index from scipy.stats import pearsonr,spearmanr from utils.optimizer import load_config, get_optimizer, get_scheduler from collections import defaultdict from utils.load import nested_dict_factory, load_pickle, save_pickle, EarlyStopping, FocalLoss from utils.mydata import mydata, dataset_split from utils.Drug_encode import encoder_D_pred, kbert from Models.RCCA_ca import CCNet from Models.cross_attention_dual import cross_EncoderBlock_G, cross_EncoderBlock_D from Models.k_bert.atom_embedding_generator import bert_atom_embedding
3147
os.environ['NUMEXPR_MAX_THREADS'] = '32' sys.path.append("..") torch.set_default_dtype(torch.float32) config = './utils/train_res.yml' config = load_config(config) device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') float2str = lambda x: '%0.4f' % x root = os.getcwd() drug_smiles_df = pd.read_csv(os.path.join(root,'data_collect/drug_smiles_atom_pad.csv'), index_col='drug_id') def unreg_atomf_list2tensor_pred(f, max_len=96): # 进行padding操作,是对一个batchlist里的数据 valid_l = len(f) f_pad = np.pad(f,((0, max_len-f.shape[0]),(0,0)), constant_values=0) return f_pad, max_len, valid_l def encoder_D_pred(smiles): h_global, h_atom = bert_atom_embedding(smiles) # print('h_global', h_global) f_pad, max_len, valid_lens = unreg_atomf_list2tensor_pred(h_atom) valid_lenD_list = [valid_lens] valid_lens = torch.tensor(valid_lenD_list) encode_D_pred = np.vstack((h_global, f_pad)) encode_D_pred_list = [encode_D_pred] encode_D_pred = torch.stack([torch.tensor(arr) for arr in list(encode_D_pred_list)]) return encode_D_pred, valid_lens def encoder_D(drug_id): drug_id_list = list(drug_id.cpu()) valid_lenD_list = drug_smiles_df.loc[drug_id_list]['valid_lens'].to_list() valid_lenD_list = [i+1 for i in valid_lenD_list] valid_lens = torch.tensor(valid_lenD_list)
os.environ['NUMEXPR_MAX_THREADS'] = '32' sys.path.append("..") torch.set_default_dtype(torch.float32) config = './utils/train_res.yml' config = load_config(config) device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') float2str = lambda x: '%0.4f' % x root = os.getcwd() drug_smiles_df = pd.read_csv(os.path.join(root,'data_collect/drug_smiles_atom_pad.csv'), index_col='drug_id') def unreg_atomf_list2tensor_pred(f, max_len=96): # 进行padding操作,是对一个batchlist里的数据 valid_l = len(f) f_pad = np.pad(f,((0, max_len-f.shape[0]),(0,0)), constant_values=0) return f_pad, max_len, valid_l def encoder_D_pred(smiles): h_global, h_atom = bert_atom_embedding(smiles) # print('h_global', h_global) f_pad, max_len, valid_lens = unreg_atomf_list2tensor_pred(h_atom) valid_lenD_list = [valid_lens] valid_lens = torch.tensor(valid_lenD_list) encode_D_pred = np.vstack((h_global, f_pad)) encode_D_pred_list = [encode_D_pred] encode_D_pred = torch.stack([torch.tensor(arr) for arr in list(encode_D_pred_list)]) return encode_D_pred, valid_lens def encoder_D(drug_id): drug_id_list = list(drug_id.cpu()) valid_lenD_list = drug_smiles_df.loc[drug_id_list]['valid_lens'].to_list() valid_lenD_list = [i+1 for i in valid_lenD_list] valid_lens = torch.tensor(valid_lenD_list)
encode_D_list = kbert(drug_id_list)
11
2023-12-13 11:56:08+00:00
4k
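The encoder_D_pred / unreg_atomf_list2tensor_pred snippet above zero-pads per-atom embeddings to a fixed length and stacks a molecule-level token in row 0. An illustrative sketch of that padding step (the 768-dim features and the 17-atom molecule are assumptions for the demo, not values from the repository):

import numpy as np

def pad_atom_features(f, max_len=96):
    # mirrors unreg_atomf_list2tensor_pred above: zero-pad the atom axis to max_len
    valid_len = len(f)
    f_pad = np.pad(f, ((0, max_len - f.shape[0]), (0, 0)), constant_values=0)
    return f_pad, valid_len

h_atom = np.random.rand(17, 768)   # hypothetical per-atom embeddings (17 atoms)
h_global = np.random.rand(768)     # hypothetical molecule-level embedding
f_pad, valid_len = pad_atom_features(h_atom)
drug_feat = np.vstack((h_global, f_pad))  # row 0 is the global token, as in the snippet
print(drug_feat.shape, valid_len)         # (97, 768) 17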
merlresearch/PixPNet
pixpnet/symbolic/models.py
[ { "identifier": "_make_divisible", "path": "pixpnet/symbolic/misc.py", "snippet": "def _make_divisible(v: float, divisor: int, min_value: Optional[int] = None) -> int:\n \"\"\"\n This function is taken from the original tf repo.\n It ensures that all layers have a channel number that is divisible by 8\n It can be seen here:\n https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py\n \"\"\"\n if min_value is None:\n min_value = divisor\n new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)\n # Make sure that round down does not go down by more than 10%.\n if new_v < 0.9 * v:\n new_v += divisor\n return new_v" }, { "identifier": "_overwrite_named_param", "path": "pixpnet/symbolic/misc.py", "snippet": "def _overwrite_named_param(kwargs: Dict[str, Any], param: str, new_value: V) -> None:\n if param in kwargs:\n if kwargs[param] != new_value:\n raise ValueError(f\"The parameter '{param}' expected value \" f\"{new_value} but got {kwargs[param]} instead.\")\n else:\n kwargs[param] = new_value" }, { "identifier": "sym_scope", "path": "pixpnet/symbolic/misc.py", "snippet": "@contextmanager\ndef sym_scope(name):\n try:\n _SYM_NAME_STACK.append(name)\n yield\n finally:\n _SYM_NAME_STACK.pop()" }, { "identifier": "get_logger", "path": "pixpnet/utils.py", "snippet": "def get_logger(name):\n logging.basicConfig(\n format=\"%(asctime)s[%(process)d][%(levelname)s] %(message)s\",\n datefmt=\"%Y-%m-%dT%H:%M:%S\",\n )\n logger = logging.getLogger(name)\n logger.setLevel(os.environ.get(\"PIXPNET_LOG_LEVEL\", \"INFO\"))\n return logger" } ]
import copy import math import os import os.path as osp import pickle import sys import numpy as np import pixpnet import pixpnet.symbolic.index_layers as nn import torch from collections import OrderedDict from functools import partial from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Type, Union, cast from filelock import FileLock from pixpnet.symbolic.misc import _make_divisible, _overwrite_named_param, sym_scope from pixpnet.utils import get_logger
2123
else: os.makedirs(save_dir, exist_ok=True) write_data = { "out": out, "intermediates": [(k, v) for k, v in intermediates.items()], } with open(save_path, "wb") as fp: pickle.dump(write_data, fp) fp.flush() def load_cache(model_name, height, width, num_classes=1, insert_at=None): save_dir = _get_cache_dir(model_name, height, width, num_classes, insert_at) save_path = osp.join(save_dir, "rf_data.pkl") with open(save_path, "rb") as fp: sys.modules["ngn"] = pixpnet # legacy naming data = pickle.load(fp) logger.info(f'Reusing cached data "{save_path}"') out = data["out"] intermediates = OrderedDict(((k, v) for k, v in data["intermediates"])) return out, intermediates def compute_rf_data(model_name, height, width, num_classes=1, insert_at=None): name_is_name = isinstance(model_name, str) lock_path = _get_cache_lockfile(model_name, height, width, num_classes, insert_at) with FileLock(lock_path): if name_is_name and check_cache(model_name, height, width, num_classes, insert_at): try: out, intermediates = load_cache(model_name, height, width, num_classes, insert_at) except pickle.UnpicklingError: logger.warning("UnpicklingError when loading rf data! " "Recomputing...") else: return out, intermediates # It is not in the cache at this point. if name_is_name: try: sym_model_cls = globals()[model_name] except KeyError: raise ValueError(f'Invalid name "{model_name}". Valid: ' f"{[*globals().keys()]}") else: sym_model_cls = model_name img_shape = (height, width) with unique_syms() as ctx: x = ctx.Tensor(shape=(1, 1, *img_shape), name="x") sym_model = sym_model_cls(num_classes=num_classes) if insert_at: _, rf_data_from_x = compute_rf_data(model_name, height, width, num_classes) shape_at_insert_layer = rf_data_from_x[insert_at].shape with unique_syms() as ctx: intermediate_x = ctx.Tensor(shape=shape_at_insert_layer, name="intermediate_x") out, intermediates = sym_model(x, intermediate_x=intermediate_x, insert_at=insert_at) else: out, intermediates = sym_model(x) if name_is_name: write_cache(out, intermediates, model_name, height, width, num_classes, insert_at) return out, intermediates class StochasticDepth(nn.Module): """ See :func:`stochastic_depth`. """ def __init__(self, p: float, mode: str) -> None: super().__init__() self.p = p self.mode = mode def forward(self, input: Tensor) -> Tensor: return input def __repr__(self) -> str: s = f"{self.__class__.__name__}(p={self.p}, mode={self.mode})" return s def conv3x3(in_planes: int, out_planes: int, stride: int = 1, groups: int = 1, dilation: int = 1) -> nn.Conv2d: """3x3 convolution with padding""" return nn.Conv2d( in_planes, out_planes, kernel_size=3, stride=stride, padding=dilation, groups=groups, bias=False, dilation=dilation, ) def conv1x1(in_planes: int, out_planes: int, stride: int = 1) -> nn.Conv2d: """1x1 convolution""" return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) class BasicBlock(nn.Module): expansion: int = 1 def __init__( self, inplanes: int, planes: int, stride: int = 1, downsample: Optional[nn.Module] = None, groups: int = 1, base_width: int = 64, dilation: int = 1, norm_layer: Optional[Callable[..., nn.Module]] = None, ) -> None: super().__init__() if groups != 1 or base_width != 64: raise ValueError("BasicBlock only supports groups=1 and base_width=64") if dilation > 1: raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Copyright (c) 2022-2023 Mitsubishi Electric Research Laboratories (MERL) # Copyright (c) PyTorch Contributors 2022 # # SPDX-License-Identifier: AGPL-3.0-or-later # SPDX-License-Identifier: BSD-3-Clause # Code largely based on PyTorch https://github.com/pytorch/pytorch logger = get_logger(__name__) major, minor = sys.version_info[:2] if major > 3 or (major == 3 and minor >= 9): OrderedDict_T = OrderedDict else: OrderedDict_T = Dict unique_syms = nn.unique_syms Tensor = nn.Tensor ROOT_DIR = osp.dirname(osp.dirname(osp.realpath(pixpnet.__file__))) CACHE_DIR = osp.join(ROOT_DIR, "rf_cache") def _get_cache_dir(model_name, height, width, num_classes, insert_at): insert_at_args = (f"insert_at_{insert_at}",) if insert_at else () return osp.join(CACHE_DIR, model_name, f"{height}x{width}", f"{num_classes}_classes", *insert_at_args) def _get_cache_lockfile(model_name, height, width, num_classes, insert_at): os.makedirs(CACHE_DIR, exist_ok=True) insert_at_str = f"__insert_at_{insert_at}" if insert_at else "" return osp.join(CACHE_DIR, f".{model_name}__{height}x{width}__{num_classes}_classes{insert_at_str}" f".lock") def check_cache(model_name, height, width, num_classes=1, insert_at=None): save_dir = _get_cache_dir(model_name, height, width, num_classes, insert_at) save_path = osp.join(save_dir, "rf_data.pkl") if os.path.isfile(save_path): return save_path def _serialize_ndarray(arr: np.ndarray): return { "shape": arr.shape, "data": [v.serialize() for v in arr.flat], } def _deserialize_ndarray(data): return np.asarray([nn.HypercubeCollection.deserialize(arr_indices) for arr_indices in data["data"]]).reshape( data["shape"] ) def write_cache(out, intermediates, model_name, height, width, num_classes, insert_at): save_dir = _get_cache_dir(model_name, height, width, num_classes, insert_at) save_path = osp.join(save_dir, "rf_data.pkl") if os.path.isfile(save_path): logger.warning(f'Will overwrite "{save_path}" which already exists') else: os.makedirs(save_dir, exist_ok=True) write_data = { "out": out, "intermediates": [(k, v) for k, v in intermediates.items()], } with open(save_path, "wb") as fp: pickle.dump(write_data, fp) fp.flush() def load_cache(model_name, height, width, num_classes=1, insert_at=None): save_dir = _get_cache_dir(model_name, height, width, num_classes, insert_at) save_path = osp.join(save_dir, "rf_data.pkl") with open(save_path, "rb") as fp: sys.modules["ngn"] = pixpnet # legacy naming data = pickle.load(fp) logger.info(f'Reusing cached data "{save_path}"') out = data["out"] intermediates = OrderedDict(((k, v) for k, v in data["intermediates"])) return out, intermediates def compute_rf_data(model_name, height, width, num_classes=1, insert_at=None): name_is_name = isinstance(model_name, str) lock_path = _get_cache_lockfile(model_name, height, width, num_classes, insert_at) with FileLock(lock_path): if name_is_name and check_cache(model_name, height, width, num_classes, insert_at): try: out, intermediates = load_cache(model_name, height, width, num_classes, insert_at) except pickle.UnpicklingError: logger.warning("UnpicklingError when loading rf data! " "Recomputing...") else: return out, intermediates # It is not in the cache at this point. if name_is_name: try: sym_model_cls = globals()[model_name] except KeyError: raise ValueError(f'Invalid name "{model_name}". 
Valid: ' f"{[*globals().keys()]}") else: sym_model_cls = model_name img_shape = (height, width) with unique_syms() as ctx: x = ctx.Tensor(shape=(1, 1, *img_shape), name="x") sym_model = sym_model_cls(num_classes=num_classes) if insert_at: _, rf_data_from_x = compute_rf_data(model_name, height, width, num_classes) shape_at_insert_layer = rf_data_from_x[insert_at].shape with unique_syms() as ctx: intermediate_x = ctx.Tensor(shape=shape_at_insert_layer, name="intermediate_x") out, intermediates = sym_model(x, intermediate_x=intermediate_x, insert_at=insert_at) else: out, intermediates = sym_model(x) if name_is_name: write_cache(out, intermediates, model_name, height, width, num_classes, insert_at) return out, intermediates class StochasticDepth(nn.Module): """ See :func:`stochastic_depth`. """ def __init__(self, p: float, mode: str) -> None: super().__init__() self.p = p self.mode = mode def forward(self, input: Tensor) -> Tensor: return input def __repr__(self) -> str: s = f"{self.__class__.__name__}(p={self.p}, mode={self.mode})" return s def conv3x3(in_planes: int, out_planes: int, stride: int = 1, groups: int = 1, dilation: int = 1) -> nn.Conv2d: """3x3 convolution with padding""" return nn.Conv2d( in_planes, out_planes, kernel_size=3, stride=stride, padding=dilation, groups=groups, bias=False, dilation=dilation, ) def conv1x1(in_planes: int, out_planes: int, stride: int = 1) -> nn.Conv2d: """1x1 convolution""" return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) class BasicBlock(nn.Module): expansion: int = 1 def __init__( self, inplanes: int, planes: int, stride: int = 1, downsample: Optional[nn.Module] = None, groups: int = 1, base_width: int = 64, dilation: int = 1, norm_layer: Optional[Callable[..., nn.Module]] = None, ) -> None: super().__init__() if groups != 1 or base_width != 64: raise ValueError("BasicBlock only supports groups=1 and base_width=64") if dilation > 1: raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
with sym_scope("conv1"):
2
2023-12-06 23:49:31+00:00
4k
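The _make_divisible helper in the PixPNet context above rounds a channel count to the nearest multiple of a divisor while never dropping more than 10% below the requested value. A small self-contained check of that behavior (test values chosen for illustration):

def _make_divisible(v, divisor, min_value=None):
    # logic copied from pixpnet.symbolic.misc._make_divisible above
    if min_value is None:
        min_value = divisor
    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
    if new_v < 0.9 * v:  # never round down by more than 10%
        new_v += divisor
    return new_v

assert _make_divisible(37.4, 8) == 40   # rounds to the nearest multiple of 8
assert _make_divisible(32.0, 8) == 32   # already divisible, unchanged
assert _make_divisible(11.0, 8) == 16   # 8 would be >10% below 11, so bump up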
dhh1995/MeGraph
megraph/datasets/utils/graph_generators.py
[ { "identifier": "barabasi_albert", "path": "megraph/datasets/utils/graph_generation.py", "snippet": "def barabasi_albert(N, degree=None, seed=None):\n \"\"\"Creates a random graph according to the Barabási-Albert preferential attachment model\n of size N and where nodes are atteched with degree edges\"\"\"\n if degree is None:\n # degree = int(random.random() * (N - 1)) + 1\n degree = np.random.randint(1, N)\n return nx.barabasi_albert_graph(N, degree, seed)" }, { "identifier": "caterpillar", "path": "megraph/datasets/utils/graph_generation.py", "snippet": "def caterpillar(N, seed=None):\n \"\"\"Creates a random caterpillar graph with a backbone of size b (drawn from U[1, N)), and N − b\n pendent vertices uniformly connected to the backbone.\"\"\"\n if seed is not None:\n np.random.seed(seed)\n B = np.random.randint(low=1, high=N)\n G = nx.empty_graph(N)\n for i in range(1, B):\n G.add_edge(i - 1, i)\n for i in range(B, N):\n G.add_edge(i, np.random.randint(B))\n return G" }, { "identifier": "caveman", "path": "megraph/datasets/utils/graph_generation.py", "snippet": "def caveman(N):\n \"\"\"Creates a caveman graph of m cliques of size k, with m and k as close as possible\"\"\"\n m = 1\n for i in range(1, int(math.sqrt(N)) + 1):\n if N % i == 0:\n m = i\n return nx.caveman_graph(m, N // m)" }, { "identifier": "erdos_renyi", "path": "megraph/datasets/utils/graph_generation.py", "snippet": "def erdos_renyi(N, degree=None, seed=None):\n \"\"\"Creates an Erdős-Rényi or binomial graph of size N with degree/N probability of edge creation\"\"\"\n p = random.random() if degree is None else degree / N\n return nx.fast_gnp_random_graph(N, p, seed, directed=False)" }, { "identifier": "generate_graph_geo", "path": "megraph/datasets/utils/graph_generation.py", "snippet": "def generate_graph_geo(num_nodes, dimensions=2, theta=200.0, rate=1.0):\n \"\"\"Creates a connected graph.\n\n The graphs are geographic threshold graphs, but with added edges via a\n minimum spanning tree algorithm, to ensure all nodes are connected.\n\n Args:\n num_nodes: number of nodes per graph.\n dimensions: (optional) An `int` number of dimensions for the positions.\n Default= 2.\n theta: (optional) A `float` threshold parameters for the geographic\n threshold graph's threshold. Large values (1000+) make mostly trees. Try\n 20-60 for good non-trees. Default=1000.0.\n rate: (optional) A rate parameter for the node weight exponential sampling\n distribution. 
Default= 1.0.\n\n Returns:\n The graph.\n \"\"\"\n # Create geographic threshold graph.\n pos_array = np.random.uniform(size=(num_nodes, dimensions))\n pos = dict(enumerate(pos_array))\n weight = dict(enumerate(np.random.exponential(rate, size=num_nodes)))\n geo_graph = nx.geographical_threshold_graph(\n num_nodes, theta, pos=pos, weight=weight\n )\n\n # Create minimum spanning tree across geo_graph's nodes.\n distances = spatial.distance.squareform(spatial.distance.pdist(pos_array))\n i_, j_ = np.meshgrid(range(num_nodes), range(num_nodes), indexing=\"ij\")\n weighted_edges = list(zip(i_.ravel(), j_.ravel(), distances.ravel()))\n mst_graph = nx.Graph()\n mst_graph.add_weighted_edges_from(weighted_edges, weight=DISTANCE_WEIGHT_NAME)\n mst_graph = nx.minimum_spanning_tree(mst_graph, weight=DISTANCE_WEIGHT_NAME)\n # Put geo_graph's node attributes into the mst_graph.\n for i in mst_graph.nodes():\n mst_graph.nodes[i].update(geo_graph.nodes[i])\n\n # Compose the graphs.\n combined_graph = nx.compose_all((mst_graph, geo_graph.copy()))\n # Put all distance weights into edge attributes.\n for i, j in combined_graph.edges():\n combined_graph.get_edge_data(i, j).setdefault(\n DISTANCE_WEIGHT_NAME, distances[i, j]\n )\n return combined_graph, mst_graph, geo_graph" }, { "identifier": "generate_graph_sbm", "path": "megraph/datasets/utils/graph_generation.py", "snippet": "def generate_graph_sbm(num_nodes, min_block=5, max_block=15):\n connected = False\n while not connected:\n # generate random blocks\n remains = num_nodes\n lower_bound = min_block\n block_sizes = []\n while True:\n a = random.randint(lower_bound, max_block)\n if remains - a < lower_bound:\n break\n block_sizes.append(a)\n remains -= a\n block_sizes.append(remains)\n assert np.sum(block_sizes) == num_nodes\n # generate random prob\n num_blocks = len(block_sizes)\n intra_block_probs = np.random.rand(num_blocks) * 0.2 + 0.3\n inter_block_probs = np.random.rand(num_blocks, num_blocks) * 0.005 + 0.0005\n inter_block_probs = (inter_block_probs + inter_block_probs.T) / 2\n eye = np.eye(num_blocks)\n block_probs = intra_block_probs * eye + inter_block_probs * (1 - eye)\n # generate graph\n graph = nx.stochastic_block_model(block_sizes, block_probs, seed=2022)\n connected = nx.is_connected(graph)\n return graph" }, { "identifier": "grid", "path": "megraph/datasets/utils/graph_generation.py", "snippet": "def grid(N):\n \"\"\"Creates a m x k 2d grid graph with N = m*k and m and k as close as possible\"\"\"\n m = 1\n for i in range(1, int(math.sqrt(N)) + 1):\n if N % i == 0:\n m = i\n # return nx.grid_2d_graph(m, N // m)\n # change to manual edges\n return grid_graph(m, N // m)" }, { "identifier": "ladder", "path": "megraph/datasets/utils/graph_generation.py", "snippet": "def ladder(N):\n \"\"\"Creates a ladder graph of N nodes: two rows of N/2 nodes, with each pair connected by a single edge.\n In case N is odd another node is attached to the first one.\"\"\"\n G = nx.ladder_graph(N // 2)\n if N % 2 != 0:\n G.add_node(N - 1)\n G.add_edge(0, N - 1)\n return G" }, { "identifier": "line", "path": "megraph/datasets/utils/graph_generation.py", "snippet": "def line(N):\n \"\"\"Creates a graph composed of N nodes in a line\"\"\"\n return nx.path_graph(N)" }, { "identifier": "lobster", "path": "megraph/datasets/utils/graph_generation.py", "snippet": "def lobster(N, seed=None):\n \"\"\"Creates a random Lobster graph with a backbone of size b (drawn from U[1, N)), and p (drawn\n from U[1, N - b ]) pendent vertices uniformly connected to the backbone, and 
additional\n N - b - p pendent vertices uniformly connected to the previous pendent vertices\"\"\"\n if seed is not None:\n np.random.seed(seed)\n B = np.random.randint(low=1, high=N)\n F = np.random.randint(low=B + 1, high=N + 1)\n G = nx.empty_graph(N)\n for i in range(1, B):\n G.add_edge(i - 1, i)\n for i in range(B, F):\n G.add_edge(i, np.random.randint(B))\n for i in range(F, N):\n G.add_edge(i, np.random.randint(low=B, high=F))\n return G" }, { "identifier": "star", "path": "megraph/datasets/utils/graph_generation.py", "snippet": "def star(N):\n \"\"\"Creates a graph composed by one center node connected N-1 outer nodes\"\"\"\n return nx.star_graph(N - 1)" }, { "identifier": "tree", "path": "megraph/datasets/utils/graph_generation.py", "snippet": "def tree(N, seed=None):\n \"\"\"Creates a tree of size N with a power law degree distribution\"\"\"\n return nx.random_powerlaw_tree(N, seed=seed, tries=10000)" }, { "identifier": "sample_between_min_max", "path": "megraph/rng_utils.py", "snippet": "def sample_between_min_max(min_max: List[Union[int, float]]) -> Union[int, float]:\n \"\"\"Sample a number within [min, max].\"\"\"\n mi, ma = min_max\n if type(mi) is int:\n return random.randint(mi, ma + 1)\n return random.rand() * (ma - mi) + mi" }, { "identifier": "sample_from_mixture", "path": "megraph/rng_utils.py", "snippet": "def sample_from_mixture(mix):\n return random.choice(list(mix.keys()), p=list(mix.values()))" }, { "identifier": "sample_partition", "path": "megraph/rng_utils.py", "snippet": "def sample_partition(n: int, m: int, method: str = \"sep\") -> List[int]:\n \"\"\"Sample a partition of n objects into m parts.\"\"\"\n if n < 0 or m <= 0:\n raise ValueError(f\"No valid partition for {n} objects and {m} parts.\")\n support_methods = [\"sep\", \"iter\"]\n if not (method in support_methods):\n raise ValueError(\n f\"Invalid method {method}, only {support_methods} are supported.\"\n )\n if method == \"sep\":\n sep = [0, n]\n for i in range(m - 1):\n sep.append(sample_between_min_max([0, n]))\n sep = sorted(sep)\n return [sep[i + 1] - sep[i] for i in range(m)]\n else:\n parts = []\n for i in range(m):\n c = sample_between_min_max([0, n])\n n -= c\n parts.append(c)\n return parts" } ]
from collections import deque from enum import Enum from functools import partial from typing import List, Tuple from megraph.datasets.utils.graph_generation import (barabasi_albert, caterpillar, caveman, erdos_renyi, generate_graph_geo, generate_graph_sbm, grid, ladder, line, lobster, star, tree) from megraph.rng_utils import (sample_between_min_max, sample_from_mixture, sample_partition) from IPython import embed import networkx as nx import numpy as np import numpy.random as random
3047
#! /usr/bin/env python3 # -*- coding: utf-8 -*- # File : graph_generators.py # Author : Honghua Dong, Yang Yu # Email : [email protected], [email protected] # # Distributed under terms of the MIT license. __all__ = [ "generate_graph_pseudotree", "generate_graph_cycle", "get_random_graph_builder", "generate_pseudotree", ] def sample_random_edge(g: nx.Graph): n = g.number_of_nodes() while True: u, v = random.randint(n), random.randint(n) if (not g.has_edge(u, v)) and (u != v): return u, v def generate_graph_pseudotree( num_nodes: int, cycle_ratio_min_max: List[float] = [0.3, 0.6], partition_method: str = "sep", ) -> Tuple[nx.DiGraph, int]: """[v2] Generate a random tree with sampled cycle length""" cycle_ratio = sample_between_min_max(cycle_ratio_min_max) cycle_len = max(min(3, num_nodes), int(num_nodes * cycle_ratio)) g = nx.cycle_graph(cycle_len) expander_sizes = sample_partition( num_nodes - cycle_len, cycle_len, method=partition_method ) cur_idx = cycle_len for i in range(cycle_len): tree_size = expander_sizes[i] + 1 # the root if tree_size > 1: tree = nx.random_tree(tree_size) # Merge tree to g while the root of the tree is node i on g re_index = lambda x: i if x == 0 else cur_idx + x - 1 for u, v in tree.edges(): g.add_edge(re_index(u), re_index(v)) cur_idx += tree_size - 1 return g, cycle_len def generate_graph_cycle(n: int) -> nx.DiGraph: return nx.cycle_graph(n) def generate_graph_blooming(n: int, degree=None, edge_factor=0.2) -> nx.DiGraph: """A fractal tree plus some random edges""" degree = degree or 2 g = nx.empty_graph(n) edges = [] cur = 1 q = deque([0]) while cur < n: x = q.popleft() for _ in range(degree): if cur < n: edges.append((x, cur)) q.append(cur) cur += 1 g.add_edges_from(edges) # random new edges for _ in range(int(n * edge_factor)): u, v = sample_random_edge(g) g.add_edge(u, v) return g # Graph generators and default graph scales GRAPH_GENERATORS_PAIRS = [ ("er", erdos_renyi), ("ba", barabasi_albert), ("grid", grid), ("caveman", caveman), ("tree", tree),
#! /usr/bin/env python3 # -*- coding: utf-8 -*- # File : graph_generators.py # Author : Honghua Dong, Yang Yu # Email : [email protected], [email protected] # # Distributed under terms of the MIT license. __all__ = [ "generate_graph_pseudotree", "generate_graph_cycle", "get_random_graph_builder", "generate_pseudotree", ] def sample_random_edge(g: nx.Graph): n = g.number_of_nodes() while True: u, v = random.randint(n), random.randint(n) if (not g.has_edge(u, v)) and (u != v): return u, v def generate_graph_pseudotree( num_nodes: int, cycle_ratio_min_max: List[float] = [0.3, 0.6], partition_method: str = "sep", ) -> Tuple[nx.DiGraph, int]: """[v2] Generate a random tree with sampled cycle length""" cycle_ratio = sample_between_min_max(cycle_ratio_min_max) cycle_len = max(min(3, num_nodes), int(num_nodes * cycle_ratio)) g = nx.cycle_graph(cycle_len) expander_sizes = sample_partition( num_nodes - cycle_len, cycle_len, method=partition_method ) cur_idx = cycle_len for i in range(cycle_len): tree_size = expander_sizes[i] + 1 # the root if tree_size > 1: tree = nx.random_tree(tree_size) # Merge tree to g while the root of the tree is node i on g re_index = lambda x: i if x == 0 else cur_idx + x - 1 for u, v in tree.edges(): g.add_edge(re_index(u), re_index(v)) cur_idx += tree_size - 1 return g, cycle_len def generate_graph_cycle(n: int) -> nx.DiGraph: return nx.cycle_graph(n) def generate_graph_blooming(n: int, degree=None, edge_factor=0.2) -> nx.DiGraph: """A fractal tree plus some random edges""" degree = degree or 2 g = nx.empty_graph(n) edges = [] cur = 1 q = deque([0]) while cur < n: x = q.popleft() for _ in range(degree): if cur < n: edges.append((x, cur)) q.append(cur) cur += 1 g.add_edges_from(edges) # random new edges for _ in range(int(n * edge_factor)): u, v = sample_random_edge(g) g.add_edge(u, v) return g # Graph generators and default graph scales GRAPH_GENERATORS_PAIRS = [ ("er", erdos_renyi), ("ba", barabasi_albert), ("grid", grid), ("caveman", caveman), ("tree", tree),
("ladder", ladder),
7
2023-12-12 04:17:13+00:00
4k
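generate_graph_pseudotree above spreads the non-cycle nodes over the cycle using sample_partition with the "sep" method. A standalone sketch of that partition sampling (a simplified re-implementation for illustration, not the repository helper itself):

import numpy.random as random

def sample_partition_sep(n, m):
    # "sep" method of megraph.rng_utils.sample_partition (see context above):
    # draw m-1 cut points uniformly in [0, n], sort them, take consecutive gaps
    sep = sorted([0, n] + [int(random.randint(0, n + 1)) for _ in range(m - 1)])
    return [sep[i + 1] - sep[i] for i in range(m)]

parts = sample_partition_sep(10, 4)
assert sum(parts) == 10 and len(parts) == 4   # e.g. [2, 0, 3, 5]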
gpavanb1/NODEFit
examples/fit_data.py
[ { "identifier": "DEVICE", "path": "nodefit/constants.py", "snippet": "DEVICE = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")" }, { "identifier": "NeuralODE", "path": "nodefit/neural_ode.py", "snippet": "class NeuralODE:\n def __init__(self, neural_net: nn.Module, t, data):\n \"\"\"\n A class representing a neural ordinary differential equation (Neural ODE) model to fit time-series data.\n\n Args:\n neural_net (nn.Module): The neural network model.\n t (array-like): Time values for the data.\n data (array-like): Observed data to be fitted by the Neural ODE.\n\n Attributes:\n neural_net (nn.Module): The neural network model.\n t (torch.Tensor): Time values for the data.\n data (torch.Tensor): Observed data to be fitted by the Neural ODE.\n nn_data (torch.Tensor): Neural network-generated data after fitting.\n optimizer (torch.optim.Optimizer): The optimizer used for training the neural network.\n y0 (torch.Tensor): Initial state for solving the ODE.\n\n Note:\n This class assumes that the provided neural network (`neural_net`) has a compatible architecture.\n \"\"\"\n self.neural_net = neural_net\n self.t = torch.tensor(t).double().to(DEVICE)\n self.data = torch.tensor(data).double().to(DEVICE)\n self.nn_data = None\n self.optimizer = optim.Adam(self.neural_net.parameters())\n\n (nsteps, _) = self.data.shape\n if len(self.t) != nsteps:\n raise Exception('Time array not in correct shape')\n\n self.y0 = self.data[0].clone().to(DEVICE)\n\n def predict(self, t, y):\n \"\"\"\n Predicts the next state using the neural network.\n\n Args:\n t (float): The current time.\n y (torch.Tensor): The current state.\n\n Returns:\n torch.Tensor: The predicted next state.\n \"\"\"\n combined = torch.cat(\n [torch.tensor([t]).to(DEVICE), y.clone().to(DEVICE)], dim=0).to(DEVICE)\n return self.neural_net(combined)\n\n def loss(self):\n \"\"\"\n Computes the mean squared error loss between observed and predicted data.\n\n Returns:\n torch.Tensor: The loss value.\n \"\"\"\n if self.data is None:\n raise Exception('Load the data before training')\n\n criterion = nn.MSELoss()\n\n self.nn_data = odeint(self.predict, self.y0, self.t).to(DEVICE)\n\n loss_tensor = criterion(self.data, self.nn_data)\n return loss_tensor\n\n def train(self, num_epochs, print_every=100):\n \"\"\"\n Trains the neural network to fit the observed data.\n\n Args:\n num_epochs (int): The number of training epochs.\n print_every (int): Print loss every `print_every` epochs.\n\n Returns:\n None\n \"\"\"\n for i in tqdm(range(num_epochs)):\n self.optimizer.zero_grad()\n self.loss().backward()\n self.optimizer.step()\n\n # Print the loss every 100 epochs\n if i % print_every == 0:\n print(f'Epoch {i}/{num_epochs}, Loss: {self.loss().item()}')\n\n def extrapolate(self, tf, npts=20):\n \"\"\"\n Extrapolates the solution of the Neural ODE to future time points.\n\n Args:\n tf (float): The final time for extrapolation.\n npts (int): Number of points for extrapolation.\n\n Returns:\n dict: Dictionary containing extrapolated times and corresponding values.\n \"\"\"\n self.t = self.t.cpu()\n tinit = self.t[-1].item()\n tspan = np.linspace(tinit, tf, npts)\n self.t = self.t.to(DEVICE)\n result = odeint(\n self.predict, self.nn_data[-1].clone().to(DEVICE), torch.tensor(tspan).to(DEVICE)).to(DEVICE)\n return {\"time\": tspan, \"values\": result}\n\n def plot(self, extra_data=None):\n \"\"\"\n Plots the observed data, Neural Network solution, and extrapolated data (if provided).\n\n Args:\n extra_data (dict): Dictionary containing 
extrapolated time and corresponding values.\n\n Returns:\n None\n \"\"\"\n if self.data is None:\n raise Exception('Load data before plotting')\n if self.nn_data is None:\n raise Exception('Fit neural network before plotting')\n\n # Convert the arrays to numpy arrays for easier plotting\n t_np = self.t.cpu().numpy()\n data_np = self.data.cpu().numpy()\n nn_data_np = self.nn_data.detach().cpu().numpy()\n if extra_data is not None:\n extra_data_np = extra_data['values'].detach().cpu().numpy()\n\n # Plot each line separately\n plt.figure(figsize=(10, 6))\n\n # Plot time series\n for i in range(data_np.shape[1]):\n plt.plot(t_np, data_np[:, i],\n label=f'Trained Data {i + 1}', marker='o')\n\n # Plot Neural Network solution\n plt.gca().set_prop_cycle(None)\n for i in range(nn_data_np.shape[1]):\n plt.plot(t_np, nn_data_np[:, i],\n label=f'NN Solution {i + 1}', marker='x')\n\n # Plot extrapolated data\n if extra_data is not None:\n plt.gca().set_prop_cycle(None)\n for i in range(extra_data_np.shape[1]):\n plt.plot(extra_data['time'], extra_data_np[:, i],\n label=f'Extrapolated NN Solution {i + 1}', marker='x', linestyle='dotted')\n\n # Add labels and a legend\n plt.xlabel('Time')\n plt.ylabel('Value')\n plt.legend()\n\n # Show the plot\n plt.show()" }, { "identifier": "NeuralSDE", "path": "nodefit/neural_sde.py", "snippet": "class NeuralSDE:\n def __init__(self, drift_nn: nn.Module, diffusion_nn: nn.Module, t, data, batch_size=2):\n \"\"\"\n Initializes a Neural SDE model.\n\n Parameters:\n - drift_nn (nn.Module): Neural network representing the drift term in the SDE.\n - diffusion_nn (nn.Module): Neural network representing the diffusion term in the SDE.\n - t (numpy.ndarray or list): Time array.\n - data (numpy.ndarray or list): Time series data.\n - batch_size (int): Number of trajectories in each batch. Default is 2.\n \"\"\"\n self.sde = SDE(drift_nn, diffusion_nn)\n self.t = torch.tensor(t).double().to(DEVICE)\n self.data = torch.tensor(data).double().to(DEVICE)\n self.nn_data = None\n self.batch_size = batch_size\n\n (nsteps, _) = self.data.shape\n if len(self.t) != nsteps:\n raise Exception('Time array not in correct shape')\n\n self.y0 = self.data[0].clone().repeat(batch_size, 1).to(DEVICE)\n\n def loss(self):\n \"\"\"\n Computes the loss between the observed data and the Neural SDE predictions.\n\n Returns:\n torch.Tensor: Loss value.\n \"\"\"\n if self.data is None:\n raise Exception('Load the data before training')\n\n criterion = nn.MSELoss()\n\n self.nn_data = sdeint(self.sde, self.y0, self.t,\n method=self.sde.numerical_method).to(DEVICE)\n repeated_data = self.data.unsqueeze(2).repeat(\n 1, 1, self.batch_size).to(DEVICE)\n\n loss_tensor = criterion(repeated_data, self.nn_data)\n return loss_tensor\n\n def train(self, num_epochs, print_every=100):\n \"\"\"\n Trains the Neural SDE model using gradient descent.\n\n Parameters:\n - num_epochs (int): Number of training epochs.\n - print_every (int): Frequency of printing the loss during training. 
Default is 100.\n \"\"\"\n for i in tqdm(range(num_epochs)):\n self.sde.drift_opt.zero_grad()\n self.sde.diffusion_opt.zero_grad()\n self.loss().backward()\n self.sde.drift_opt.step()\n self.sde.diffusion_opt.step()\n\n # Print the loss every 100 epochs\n if i % print_every == 0:\n print(f'Epoch {i}/{num_epochs}, Loss: {self.loss().item()}')\n\n def extrapolate(self, tf, npts=20):\n \"\"\"\n Extrapolates the Neural SDE solution beyond the observed time range.\n\n Parameters:\n - tf (float): Final time for extrapolation.\n - npts (int): Number of points for extrapolation. Default is 20.\n\n Returns:\n dict: Extrapolated time and values.\n \"\"\"\n self.t = self.t.cpu()\n tinit = self.t[-1].item()\n tspan = np.linspace(tinit, tf, npts)\n self.t = self.t.to(DEVICE)\n result = sdeint(\n self.sde, self.nn_data[-1].clone().to(\n DEVICE), torch.tensor(tspan).to(DEVICE),\n method=self.sde.numerical_method).to(DEVICE)\n return {\"time\": tspan, \"values\": result}\n\n def plot(self, extra_data=None):\n \"\"\"\n Plots the observed data, Neural Network solution, and extrapolated data (if provided).\n\n Args:\n extra_data (dict): Dictionary containing extrapolated time and corresponding values. Note that the plot is performed for the mean of all trajectories in the batch\n\n Returns:\n None\n \"\"\"\n if self.data is None:\n raise Exception('Load data before plotting')\n if self.nn_data is None:\n raise Exception('Fit neural network before plotting')\n\n # Convert the arrays to numpy arrays for easier plotting\n t_np = self.t.cpu().numpy()\n data_np = self.data.cpu().numpy()\n nn_data_np = self.nn_data.detach().cpu().numpy()\n if extra_data is not None:\n extra_data_np = extra_data['values'].detach().cpu().numpy()\n\n # Plot each line separately\n plt.figure(figsize=(10, 6))\n\n # Plot time series\n for i in range(data_np.shape[1]):\n plt.plot(t_np, data_np[:, i],\n label=f'Trained Data {i + 1}', marker='o')\n\n # Plot Neural Network solution\n plt.gca().set_prop_cycle(None)\n for i in range(nn_data_np.shape[1]):\n plt.plot(t_np, np.mean(nn_data_np[:, i, :], axis=1),\n label=f'NN Solution {i + 1}', marker='x')\n\n # Plot extrapolated data\n if extra_data is not None:\n plt.gca().set_prop_cycle(None)\n for i in range(extra_data_np.shape[1]):\n plt.plot(extra_data['time'], np.mean(extra_data_np[:, i, :], axis=1),\n label=f'Extrapolated NN Solution {i + 1}', marker='x', linestyle='dotted')\n\n # Add labels and a legend\n plt.xlabel('Time')\n plt.ylabel('Value')\n plt.legend()\n\n # Show the plot\n plt.show()" } ]
import numpy as np import torch.nn as nn from nodefit.constants import DEVICE from nodefit.neural_ode import NeuralODE from nodefit.neural_sde import NeuralSDE
3081
### # DEFINE NETWORKS ### # Neural ODE parameters ndim, drift_nhidden, diffusion_nhidden = 2, 10, 2 drift_nn = nn.Sequential( nn.Linear(ndim+1, drift_nhidden), nn.Sigmoid(), nn.Linear(drift_nhidden, ndim) ).double().to(DEVICE) diffusion_nn = nn.Sequential( nn.Linear(ndim+1, diffusion_nhidden), nn.Sigmoid(), nn.Linear(diffusion_nhidden, ndim) ).double().to(DEVICE) ### # PROVIDE DATA ### t = np.linspace(0, 5, 10) # Provide data as list of lists with starting condition data = np.array([[1., 1.], [1.52210594, 1.23757532], [2.0570346, 1.37814989], [2.47603815, 1.46040018], [2.75026795, 1.50703724], [2.91602961, 1.5343292], [3.01170625, 1.5498438], [3.06584853, 1.5585547], [3.09827458, 1.56379774], [3.11650095, 1.56674226]]) ### # FIT USING NEURALODE ### print('Performing fit using Neural ODE...')
### # DEFINE NETWORKS ### # Neural ODE parameters ndim, drift_nhidden, diffusion_nhidden = 2, 10, 2 drift_nn = nn.Sequential( nn.Linear(ndim+1, drift_nhidden), nn.Sigmoid(), nn.Linear(drift_nhidden, ndim) ).double().to(DEVICE) diffusion_nn = nn.Sequential( nn.Linear(ndim+1, diffusion_nhidden), nn.Sigmoid(), nn.Linear(diffusion_nhidden, ndim) ).double().to(DEVICE) ### # PROVIDE DATA ### t = np.linspace(0, 5, 10) # Provide data as list of lists with starting condition data = np.array([[1., 1.], [1.52210594, 1.23757532], [2.0570346, 1.37814989], [2.47603815, 1.46040018], [2.75026795, 1.50703724], [2.91602961, 1.5343292], [3.01170625, 1.5498438], [3.06584853, 1.5585547], [3.09827458, 1.56379774], [3.11650095, 1.56674226]]) ### # FIT USING NEURALODE ### print('Performing fit using Neural ODE...')
neural_ode = NeuralODE(drift_nn, t, data)
1
2023-12-12 18:10:25+00:00
4k
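A hedged continuation of the fit_data.py example above, using only the NeuralODE methods shown in this record's context (train, extrapolate, plot) and the drift_nn, t, data already defined in the snippet; the epoch count and extrapolation horizon are arbitrary choices, not values from the repository:

# hypothetical continuation of the script above
neural_ode = NeuralODE(drift_nn, t, data)
neural_ode.train(1000, print_every=100)         # fit drift_nn to the 2-D trajectory
extra = neural_ode.extrapolate(10.0, npts=20)   # integrate the learned ODE out to t = 10
neural_ode.plot(extra_data=extra)               # data vs. NN solution vs. extrapolation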
SJTU-Quant/SUNNY-GNN
train/utils.py
[ { "identifier": "snexgnn", "path": "models/snexgnn.py", "snippet": "class ExtractorMLP(nn.Module):\nclass SNexGNN(nn.Module):\nclass SNexHGN(SNexGNN):\n def __init__(self, in_dim, bias=True):\n def forward(self, emb):\n def __init__(self, pret_encoder, encoder, extractor, in_dim, target_ntype, n_heads=1, dropout=0.5):\n def set_config(self, config):\n def sampling(self, att_log_logit, training):\n def sparsity(self, edge_mask, eps=1e-6):\n def get_cts_mask(self, g, topk, k):\n def cts_loss(self, anchor_emb, pos_emb, neg_emb, pos_logits, neg_logits, labels):\n def sim_matrix(anchor, aug):\n def loss(self, edge_mask, logits, labels):\n def batched_emb(self, g, x, batched_edge_mask, idx):\n def get_edge_att(self, g, all_emb, e_batch, h_target):\n def calc_att(mask, hop_batch, k=1):\n def forward(self, g, all_emb, labels, training=False, explain=False, epoch=0):\n def __init__(self, pret_encoder, encoder, extractor, in_dim, target_ntype, n_heads=1, dropout=0.5):\n def get_edge_att_hetero(self, g, all_emb, e_batch, h_target, etype, ntypes):\n def calc_att(mask, hop_batch, k):\n def batched_emb_hetero(self, g, x, batched_edge_mask, n_samples, idx):\n def forward(self, g, all_emb, labels, training=False, epoch=0):" }, { "identifier": "hgn", "path": "models/hgn.py", "snippet": "class Attention(nn.Module):\nclass myHeteroGATConv(nn.Module):\nclass SimpleHeteroHGN(BaseGNN):\n def __init__(self, hidden_dim, attn_drop):\n def forward(self, embeds):\n def __init__(\n self,\n edge_feats,\n num_etypes,\n in_feats,\n out_feats,\n num_heads,\n feat_drop=0.0,\n attn_drop=0.0,\n negative_slope=0.2,\n residual=False,\n activation=None,\n allow_zero_in_degree=False,\n bias=False,\n alpha=0.0,\n share_weight=False,\n ):\n def reset_parameters(self):\n def set_allow_zero_in_degree(self, set_value):\n def forward(self, graph, nfeat, res_attn=None, edge_weight=None):\n def __init__(\n self,\n edge_dim,\n num_etypes,\n in_dims,\n num_hidden,\n num_classes,\n num_layers,\n heads,\n feat_drop,\n attn_drop,\n negative_slope,\n residual,\n alpha,\n shared_weight=False,\n ):\n def set_graph(self, g):\n def forward(self, h, target_ntype=None, offset=None, get_att=False, pooling=False):\n def get_emb(self, x, edge_weight=None, get_att=False, pooling=False):\n def get_all_emb(self, x, edge_weight=None):\n def loss(self, x, target_ntype, target_node, label):" }, { "identifier": "gat", "path": "models/gat.py", "snippet": "class Identity(nn.Module):\nclass GATConv(nn.Module):\nclass GAT(BaseGNN):\n def __init__(self):\n def forward(self, x):\n def __init__(self,\n in_feats,\n out_feats,\n num_heads,\n feat_drop=0.,\n attn_drop=0.,\n negative_slope=0.2,\n residual=False,\n activation=None,\n allow_zero_in_degree=False,\n bias=True):\n def reset_parameters(self):\n def set_allow_zero_in_degree(self, set_value):\n def forward(self, graph, feat, edge_weight, get_attention=False):\n def __init__(self, in_dim, hidden_dim, out_dim, num_heads, num_classes, drop=0.5):\n def set_graph(self, g):\n def forward(self, h, offset=None, get_att=False):\n def get_emb(self, x, edge_weight=None, get_att=False):\n def get_all_emb(self, x, edge_weight=None):" }, { "identifier": "gcn", "path": "models/gcn.py", "snippet": "class GCN(BaseGNN):\n def __init__(self, in_dim, hidden_dim, out_dim, num_classes, drop=0.5):\n def set_graph(self, g):\n def forward(self, h, offset=None):\n def get_emb(self, x, edge_weight=None):\n def get_all_emb(self, x, edge_weight=None):" } ]
import os import torch import dgl from tqdm import tqdm from models import snexgnn, hgn, gat, gcn
1799
def edge_hop_mask(sg, target_ntype=None, k=2): is_homogeneous = sg.is_homogeneous if not is_homogeneous: edge_types = sg.etypes node_types = sg.ntypes sg = dgl.to_homogeneous(sg) src_target = torch.nonzero(sg.ndata['_TYPE']==node_types.index(target_ntype))[0].item() else: src_target = 0 e_h_mask = torch.tensor([], dtype=torch.bool) src = [[src_target]] for i in range(k): one_hop_sampler = dgl.dataloading.MultiLayerFullNeighborSampler(1) one_hop_loader = dgl.dataloading.DataLoader(sg, src[i], one_hop_sampler, batch_size=1, shuffle=False) neighbors = [] h_mask = torch.zeros(sg.number_of_edges(), dtype=torch.bool) for j, (ng, _, _) in enumerate(one_hop_loader): ng_lst = ng.numpy().tolist() neighbors.extend(ng_lst) edge_ids = sg.edge_ids(ng, [src[i][j]]*len(ng)) h_mask[edge_ids] = 1 src.append(list(set(neighbors))) e_h_mask = torch.cat((e_h_mask, h_mask.unsqueeze(0)), dim=0) if not is_homogeneous: e_h_mask_dict = {} for i in range(len(edge_types)): etype = edge_types[i] a = torch.nonzero(sg.edata[dgl.ETYPE] == i).view(-1) e_h_mask_dict[etype] = e_h_mask[:, a].T return e_h_mask_dict return e_h_mask.T def accuracy(y_pred, y_true): y_true = y_true.squeeze().long() preds = y_pred.max(1)[1].type_as(y_true) correct = preds.eq(y_true).double() correct = correct.sum().item() return correct / len(y_true) def get_model(cfg): graph_path = cfg.graph_path index_path = cfg.index_path method = cfg.method data_hyparams = cfg.hyparams['data'] dataset = cfg.dataset ckpt_dir = cfg.ckpt_dir encoder_type = cfg.encoder_type num_classes = data_hyparams['num_classes'] target_ntype = data_hyparams['target_ntype'] n_layer = 2 gs, _ = dgl.load_graphs(graph_path) g = gs[0] if g.is_homogeneous: g = dgl.add_self_loop(g) in_dim = {n: g.nodes[n].data['nfeat'].shape[1] for n in g.ntypes} info = torch.load(index_path) if method == 'gat': model = gat.GAT(in_dim[target_ntype], 256, 64, [8, 1], num_classes) elif method == 'gcn': model = gcn.GCN(in_dim[target_ntype], 256, 64, num_classes) elif method == 'simplehgn': edge_type_num = len(g.etypes)
def edge_hop_mask(sg, target_ntype=None, k=2): is_homogeneous = sg.is_homogeneous if not is_homogeneous: edge_types = sg.etypes node_types = sg.ntypes sg = dgl.to_homogeneous(sg) src_target = torch.nonzero(sg.ndata['_TYPE']==node_types.index(target_ntype))[0].item() else: src_target = 0 e_h_mask = torch.tensor([], dtype=torch.bool) src = [[src_target]] for i in range(k): one_hop_sampler = dgl.dataloading.MultiLayerFullNeighborSampler(1) one_hop_loader = dgl.dataloading.DataLoader(sg, src[i], one_hop_sampler, batch_size=1, shuffle=False) neighbors = [] h_mask = torch.zeros(sg.number_of_edges(), dtype=torch.bool) for j, (ng, _, _) in enumerate(one_hop_loader): ng_lst = ng.numpy().tolist() neighbors.extend(ng_lst) edge_ids = sg.edge_ids(ng, [src[i][j]]*len(ng)) h_mask[edge_ids] = 1 src.append(list(set(neighbors))) e_h_mask = torch.cat((e_h_mask, h_mask.unsqueeze(0)), dim=0) if not is_homogeneous: e_h_mask_dict = {} for i in range(len(edge_types)): etype = edge_types[i] a = torch.nonzero(sg.edata[dgl.ETYPE] == i).view(-1) e_h_mask_dict[etype] = e_h_mask[:, a].T return e_h_mask_dict return e_h_mask.T def accuracy(y_pred, y_true): y_true = y_true.squeeze().long() preds = y_pred.max(1)[1].type_as(y_true) correct = preds.eq(y_true).double() correct = correct.sum().item() return correct / len(y_true) def get_model(cfg): graph_path = cfg.graph_path index_path = cfg.index_path method = cfg.method data_hyparams = cfg.hyparams['data'] dataset = cfg.dataset ckpt_dir = cfg.ckpt_dir encoder_type = cfg.encoder_type num_classes = data_hyparams['num_classes'] target_ntype = data_hyparams['target_ntype'] n_layer = 2 gs, _ = dgl.load_graphs(graph_path) g = gs[0] if g.is_homogeneous: g = dgl.add_self_loop(g) in_dim = {n: g.nodes[n].data['nfeat'].shape[1] for n in g.ntypes} info = torch.load(index_path) if method == 'gat': model = gat.GAT(in_dim[target_ntype], 256, 64, [8, 1], num_classes) elif method == 'gcn': model = gcn.GCN(in_dim[target_ntype], 256, 64, num_classes) elif method == 'simplehgn': edge_type_num = len(g.etypes)
model = hgn.SimpleHeteroHGN(32, edge_type_num, in_dim, 32, num_classes, n_layer,
1
2023-12-12 02:46:00+00:00
4k
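A minimal, self-contained sketch of the per-hop edge-mask idea that edge_hop_mask above builds with DGL neighbor sampling: for each hop out to k, mark the edges that point into the current frontier, then grow the frontier from their source nodes. The function name, the toy edge list, and the seed node are invented for illustration; this is plain PyTorch, not the repository's DGL-based implementation.

import torch

def simple_edge_hop_mask(src_nodes, dst_nodes, seed, k=2):
    # src_nodes/dst_nodes: 1-D LongTensors describing directed edges dst <- src.
    # Returns a (num_edges, k) bool tensor: column i marks edges reached at hop i+1.
    num_edges = src_nodes.shape[0]
    masks = []
    frontier = {int(seed)}
    for _ in range(k):
        hop_mask = torch.zeros(num_edges, dtype=torch.bool)
        next_frontier = set()
        for eid in range(num_edges):
            if int(dst_nodes[eid]) in frontier:   # edge points into the current frontier
                hop_mask[eid] = True
                next_frontier.add(int(src_nodes[eid]))
        masks.append(hop_mask)
        frontier = next_frontier
    return torch.stack(masks, dim=0).T            # shape: (num_edges, k)

# toy graph: edges 1->0, 2->0, 3->1, 3->2; seed node 0
src = torch.tensor([1, 2, 3, 3])
dst = torch.tensor([0, 0, 1, 2])
print(simple_edge_hop_mask(src, dst, seed=0, k=2))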
dvmazur/mixtral-offloading
src/custom_layers.py
[ { "identifier": "pack_4bit_u8_common", "path": "src/packing.py", "snippet": "def pack_4bit_u8_common(W_q: torch.Tensor):\n height = W_q.size(0)\n assert height % 2 == 0\n \n W_q = W_q.to(torch.uint8)\n p = (W_q[::2, ...] << 4) | (W_q[1::2, ...])\n\n return PackedTensor(p.to(torch.uint8))" }, { "identifier": "pack_2bit_u8_common", "path": "src/packing.py", "snippet": "def pack_2bit_u8_common(W_q: torch.Tensor):\n W_q = W_q.to(torch.uint8)\n height = W_q.size(0)\n p = (W_q[::4, ...] << 6) | (W_q[1::4, ...] << 4) | (W_q[2::4, ...] << 2) | (W_q[3::4, ...])\n\n return PackedTensor(p)" }, { "identifier": "unpack_4bit_u8_common", "path": "src/packing.py", "snippet": "def unpack_4bit_u8_common(W_q: torch.Tensor):\n height = W_q.size(0)\n W_q = W_q.to(torch.uint8)\n result = torch.empty([2 * height] + list(W_q.shape[1:]),\n dtype=torch.uint8, device=W_q.device)\n result[::2, ...] = (W_q >> 4)\n result[1::2, ...] = (W_q & 0b1111)\n\n return result" }, { "identifier": "unpack_2bit_u8_common", "path": "src/packing.py", "snippet": "def unpack_2bit_u8_common(W_q: torch.Tensor):\n W_q = W_q.to(torch.uint8)\n height = W_q.size(0)\n result = torch.empty([4 * height] + list(W_q.shape[1:]),\n dtype=torch.uint8, device=W_q.device)\n result[::4, ...] = (W_q >> 6) & 0b11\n result[1::4, ...] = (W_q >> 4) & 0b11\n result[2::4, ...] = (W_q >> 2) & 0b11\n result[3::4, ...] = W_q & 0b11\n\n return result" }, { "identifier": "triton_matmul4_transpose", "path": "src/triton_kernels.py", "snippet": "def triton_matmul4_transpose(groupsize: int, a: torch.FloatTensor, qweight: torch.IntTensor, scales: torch.FloatTensor, zeros: torch.FloatTensor, bias: Optional[torch.FloatTensor] = None) -> torch.FloatTensor:\n \"\"\"\n Compute the matrix multiplication C = A x B + bias.\n Where B is quantized using GPTQ and groupsize = -1 into 4-bit values.\n\n A is of shape (M, K) float16\n qweight is of shape (N//2, K) int32\n scales is of shape (G, K) float16\n zeros is of shape (G, K) float16\n bias is of shape (1, N) float16\n\n groupsize is the number of infeatures in each group.\n G = N // groupsize\n \n C = A @ qweight.T\n Returns C of shape (..., N) float16\n \"\"\"\n assert a.shape[-1] == (qweight.shape[1])\n assert a.is_contiguous(), \"A must be contiguous\"\n assert scales.shape[1] == zeros.shape[1]\n assert scales.shape[1] == qweight.shape[1]\n\n # Flatten a into (-1, K)\n x = a.view(-1, a.shape[-1])\n\n M, K = x.shape\n N = qweight.shape[0] * 2\n # This is based on the possible BLOCK_SIZE_Ks\n# assert K % 16 == 0 and K % 32 == 0 and K % 64 == 0 and K % 128 == 0, \"K must be a multiple of 16, 32, 64, and 128\"\n # This is based on the possible BLOCK_SIZE_Ns\n# assert N % 16 == 0 and N % 32 == 0 and N % 64 == 0 and N % 128 == 0 and N % 256 == 0, \"N must be a multiple of 16, 32, 64, 128, and 256\"\n # This is based on the possible BLOCK_SIZE_Ks\n# assert groupsize % 32 == 0 and groupsize % 64 == 0 and groupsize % 128 == 0, \"groupsize must be a multiple of 32, 64, and 128\"\n\n c = torch.empty((M, N), device='cuda', dtype=torch.float16)\n\n grid = lambda META: (\n triton.cdiv(M, META['BLOCK_SIZE_M']) * triton.cdiv(N, META['BLOCK_SIZE_N']),\n )\n matmul4_kernel_transpose[grid](\n x, qweight, c,\n scales, zeros,\n M, N, K,\n x.stride(0), x.stride(1),\n qweight.stride(0), qweight.stride(1),\n c.stride(0), c.stride(1),\n scales.stride(0), scales.stride(1),\n zeros.stride(0), zeros.stride(1),\n groupsize, groupsize == N,\n )\n \n # Reshape c\n c = c.view(a.shape[:-1] + (N,)) # (..., N)\n\n # Add bias\n if bias is not None:\n c = c 
+ bias\n\n return c" }, { "identifier": "triton_matmul3_transpose", "path": "src/triton_kernels.py", "snippet": "def triton_matmul3_transpose(groupsize: int, a: torch.FloatTensor, qweight: torch.IntTensor, scales: torch.FloatTensor, zeros: torch.FloatTensor, N: int, bias: Optional[torch.FloatTensor] = None) -> torch.FloatTensor:\n \"\"\"\n Compute the matrix multiplication C = A x B + bias.\n Where B is quantized using GPTQ and groupsize = -1 into 4-bit values.\n\n A is of shape (M, K) float16\n qweight is of shape (ceil(N / 10), K) int32\n scales is of shape (G, K) float16\n zeros is of shape (G, K) float16\n bias is of shape (1, N) float16\n\n groupsize is the number of infeatures in each group.\n G = N // groupsize\n \n C = A @ qweight.T\n Returns C of shape (..., N) float16\n \"\"\"\n \n assert a.shape[-1] == (qweight.shape[1])\n assert a.is_contiguous(), \"A must be contiguous\"\n assert scales.shape[1] == zeros.shape[1]\n assert scales.shape[1] == qweight.shape[1]\n\n # Flatten a into (-1, K)\n x = a.view(-1, a.shape[-1])\n\n M, K = x.shape\n assert 0 <= (qweight.shape[0] * 10 - N) < 10\n\n c = torch.empty((M, N), device='cuda', dtype=torch.float16)\n\n grid = lambda META: (\n triton.cdiv(M, META['BLOCK_SIZE_M']) * triton.cdiv(N, META['BLOCK_SIZE_N']),\n )\n matmul3_kernel_transpose[grid](\n x, qweight, c,\n scales, zeros,\n M, N, K,\n x.stride(0), x.stride(1),\n qweight.stride(0), qweight.stride(1),\n c.stride(0), c.stride(1),\n scales.stride(0), scales.stride(1),\n zeros.stride(0), zeros.stride(1),\n groupsize, groupsize == N,\n )\n \n # Reshape c\n c = c.view(a.shape[:-1] + (N,)) # (..., N)\n\n # Add bias\n if bias is not None:\n c = c + bias\n\n return c" }, { "identifier": "triton_matmul2_transpose", "path": "src/triton_kernels.py", "snippet": "def triton_matmul2_transpose(groupsize: int, a: torch.FloatTensor, qweight: torch.IntTensor, scales: torch.FloatTensor, zeros: torch.FloatTensor, bias: Optional[torch.FloatTensor] = None) -> torch.FloatTensor:\n \"\"\"\n Compute the matrix multiplication C = A x B + bias.\n Where B is quantized using GPTQ and groupsize = -1 into 4-bit values.\n\n A is of shape (M, K) float16\n qweight is of shape (N // 4, K) int32\n scales is of shape (G, K) float16\n zeros is of shape (G, K) float16\n bias is of shape (1, N) float16\n\n groupsize is the number of infeatures in each group.\n G = N // groupsize\n \n C = A @ qweight.T\n Returns C of shape (..., N) float16\n \"\"\"\n \n assert a.shape[-1] == (qweight.shape[1])\n assert a.is_contiguous(), \"A must be contiguous\"\n assert scales.shape[1] == zeros.shape[1]\n assert scales.shape[1] == qweight.shape[1]\n\n # Flatten a into (-1, K)\n x = a.view(-1, a.shape[-1])\n\n M, K = x.shape\n N = qweight.shape[0] * 4\n # This is based on the possible BLOCK_SIZE_Ks\n# assert K % 16 == 0 and K % 32 == 0 and K % 64 == 0 and K % 128 == 0, \"K must be a multiple of 16, 32, 64, and 128\"\n # This is based on the possible BLOCK_SIZE_Ns\n# assert N % 16 == 0 and N % 32 == 0 and N % 64 == 0 and N % 128 == 0 and N % 256 == 0, \"N must be a multiple of 16, 32, 64, 128, and 256\"\n # This is based on the possible BLOCK_SIZE_Ks\n# assert groupsize % 32 == 0 and groupsize % 64 == 0 and groupsize % 128 == 0, \"groupsize must be a multiple of 32, 64, and 128\"\n\n c = torch.empty((M, N), device='cuda', dtype=torch.float16)\n\n grid = lambda META: (\n triton.cdiv(M, META['BLOCK_SIZE_M']) * triton.cdiv(N, META['BLOCK_SIZE_N']),\n )\n matmul2_kernel_transpose[grid](\n x, qweight, c,\n scales, zeros,\n M, N, K,\n x.stride(0), 
x.stride(1),\n qweight.stride(0), qweight.stride(1),\n c.stride(0), c.stride(1),\n scales.stride(0), scales.stride(1),\n zeros.stride(0), zeros.stride(1),\n groupsize, groupsize == N,\n )\n \n # Reshape c\n c = c.view(a.shape[:-1] + (N,)) # (..., N)\n\n # Add bias\n if bias is not None:\n c = c + bias\n\n return c" } ]
import copy import functools import torch from transformers.models.mixtral.configuration_mixtral import MixtralConfig from transformers.activations import ACT2FN from typing import Dict, Any from hqq.core.quantize import HQQLinear, Quantizer from torch import nn from torch.nn import functional as F from .packing import pack_4bit_u8_common, pack_2bit_u8_common, unpack_4bit_u8_common, unpack_2bit_u8_common from .triton_kernels import triton_matmul4_transpose, triton_matmul3_transpose, triton_matmul2_transpose
3,384
class HQQLinearTritonSavable(HQQLinear): def __init__(self, layer, quant_config, meta=None, **kwargs): """ Example how to get meta: >>>> meta1 = HQQLinearSavable.get_hqq_meta((hidden_dim, ffn_dim), quant_config) >>>> meta2 = HQQLinearSavable.get_hqq_meta((ffn_dim, hidden_dim), quant_config) """ assert quant_config['weight_quant_params']['nbits'] in [2, 3, 4] super().__init__(layer, quant_config, **kwargs) if not hasattr(self, 'meta'): assert meta is not None self.meta = copy.deepcopy(meta) self._register_state_dict_hook(self._add_to_state_dict_hook) self._register_load_state_dict_pre_hook(self._load_from_state_dict_hook) def quantize(self, *args, **kwargs): super().quantize(*args, **kwargs) # repacking self.repack() def repack(self): if self.W_q.shape != self.meta['shape']: W_q = Quantizer.unpack[self.meta['packing']](self.W_q) sh = self.meta['shape'] W_q = W_q.reshape((-1,) + sh[1:]) W_q = W_q[:sh[0], ...] self.W_q = Quantizer.pack[self.meta['packing']](W_q) def forward(self, x): return self.forward_triton(x) def set_backend(self, backend): pass @torch.inference_mode() def forward_triton(self, x): assert self.ready, "model was not quantized" assert self.meta['axis'] == 0 W_q, meta = self.W_q, self.meta del_keys = [] if 'quant_scale' in meta and meta['quant_scale']: meta['scale'] = Quantizer.dequantize(meta['scale_q'], meta['meta_scale']); del_keys.append('scale') if 'quant_zero' in meta and meta['quant_zero']: meta['zero'] = Quantizer.dequantize(meta['zero_q'], meta['meta_zero']); del_keys.append('zero') K = meta['shape'][1] N = meta['shape'][0] if self.meta['nbits'] == 4: fn = triton_matmul4_transpose elif self.meta['nbits'] == 3: fn = functools.partial(triton_matmul3_transpose, N=N) elif self.meta['nbits'] == 2:
class HQQLinearTritonSavable(HQQLinear): def __init__(self, layer, quant_config, meta=None, **kwargs): """ Example how to get meta: >>>> meta1 = HQQLinearSavable.get_hqq_meta((hidden_dim, ffn_dim), quant_config) >>>> meta2 = HQQLinearSavable.get_hqq_meta((ffn_dim, hidden_dim), quant_config) """ assert quant_config['weight_quant_params']['nbits'] in [2, 3, 4] super().__init__(layer, quant_config, **kwargs) if not hasattr(self, 'meta'): assert meta is not None self.meta = copy.deepcopy(meta) self._register_state_dict_hook(self._add_to_state_dict_hook) self._register_load_state_dict_pre_hook(self._load_from_state_dict_hook) def quantize(self, *args, **kwargs): super().quantize(*args, **kwargs) # repacking self.repack() def repack(self): if self.W_q.shape != self.meta['shape']: W_q = Quantizer.unpack[self.meta['packing']](self.W_q) sh = self.meta['shape'] W_q = W_q.reshape((-1,) + sh[1:]) W_q = W_q[:sh[0], ...] self.W_q = Quantizer.pack[self.meta['packing']](W_q) def forward(self, x): return self.forward_triton(x) def set_backend(self, backend): pass @torch.inference_mode() def forward_triton(self, x): assert self.ready, "model was not quantized" assert self.meta['axis'] == 0 W_q, meta = self.W_q, self.meta del_keys = [] if 'quant_scale' in meta and meta['quant_scale']: meta['scale'] = Quantizer.dequantize(meta['scale_q'], meta['meta_scale']); del_keys.append('scale') if 'quant_zero' in meta and meta['quant_zero']: meta['zero'] = Quantizer.dequantize(meta['zero_q'], meta['meta_zero']); del_keys.append('zero') K = meta['shape'][1] N = meta['shape'][0] if self.meta['nbits'] == 4: fn = triton_matmul4_transpose elif self.meta['nbits'] == 3: fn = functools.partial(triton_matmul3_transpose, N=N) elif self.meta['nbits'] == 2:
fn = triton_matmul2_transpose
6
2023-12-15 03:32:35+00:00
4k
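A short round-trip sketch of the 4-bit packing scheme shown in pack_4bit_u8_common / unpack_4bit_u8_common from the record above: pairs of 4-bit values are interleaved into one uint8 (even rows in the high nibble, odd rows in the low nibble), halving the first dimension. Function names and the tensor shape here are made up for the example; the real module also handles 2-bit packing and PackedTensor wrapping, which this sketch omits.

import torch

def pack_4bit_u8(w_q):
    # Pack two 4-bit rows into one uint8 row: row 2i goes to the high nibble,
    # row 2i+1 to the low nibble.
    assert w_q.size(0) % 2 == 0
    w_q = w_q.to(torch.uint8)
    return (w_q[::2] << 4) | w_q[1::2]

def unpack_4bit_u8(packed):
    # Inverse operation: split each uint8 row back into two 4-bit rows.
    out = torch.empty([2 * packed.size(0)] + list(packed.shape[1:]), dtype=torch.uint8)
    out[::2] = packed >> 4
    out[1::2] = packed & 0b1111
    return out

w = torch.randint(0, 16, (8, 5)).to(torch.uint8)   # pretend quantized weights in [0, 15]
assert torch.equal(unpack_4bit_u8(pack_4bit_u8(w)), w)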
CircleRadon/Osprey
osprey/model/osprey_arch.py
[ { "identifier": "build_vision_tower", "path": "osprey/model/multimodal_encoder/builder.py", "snippet": "def build_vision_tower(vision_tower_cfg, delay_load=False):\n\n return CLIPVisionTower(args=vision_tower_cfg)\n\n raise ValueError(f'Unknown vision tower: {vision_tower}')" }, { "identifier": "build_vision_projector", "path": "osprey/model/multimodal_projector/builder.py", "snippet": "def build_vision_projector(config, delay_load=False, **kwargs):\n mm_hidden_size = getattr(config, 'mm_hidden_size', 768)\n projector_type = getattr(config, 'mm_projector_type', 'linear')\n\n if projector_type == 'linear':\n return nn.Linear(mm_hidden_size, config.hidden_size)\n\n mlp_gelu_match = re.match(r'^mlp(\\d+)x_gelu$', projector_type)\n if mlp_gelu_match:\n mlp_depth = int(mlp_gelu_match.group(1))\n modules = [nn.Linear(mm_hidden_size, config.hidden_size)]\n for _ in range(1, mlp_depth):\n modules.append(nn.GELU())\n modules.append(nn.Linear(config.hidden_size, config.hidden_size))\n return nn.Sequential(*modules)\n\n if projector_type == 'identity':\n return IdentityMap()\n\n raise ValueError(f'Unknown projector type: {projector_type}')" }, { "identifier": "IGNORE_INDEX", "path": "osprey/constants.py", "snippet": "IGNORE_INDEX = -100" }, { "identifier": "IMAGE_TOKEN_INDEX", "path": "osprey/constants.py", "snippet": "IMAGE_TOKEN_INDEX = -200" }, { "identifier": "DEFAULT_IMAGE_PATCH_TOKEN", "path": "osprey/constants.py", "snippet": "DEFAULT_IMAGE_PATCH_TOKEN = \"<im_patch>\"" }, { "identifier": "DEFAULT_IM_START_TOKEN", "path": "osprey/constants.py", "snippet": "DEFAULT_IM_START_TOKEN = \"<im_start>\"" }, { "identifier": "DEFAULT_IM_END_TOKEN", "path": "osprey/constants.py", "snippet": "DEFAULT_IM_END_TOKEN = \"<im_end>\"" } ]
from abc import ABC, abstractmethod from .multimodal_encoder.builder import build_vision_tower from .multimodal_projector.builder import build_vision_projector from osprey.constants import IGNORE_INDEX, IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_PATCH_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN import torch import torch.nn as nn
3,374
cur_image_features = image_features[cur_image_idx] image_token_start = image_token_indices[0] if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False): cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start-1]).detach()) cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start-1:image_token_start])) cur_new_input_embeds.append(cur_image_features) cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start+1:image_token_start+2])) if labels is not None: cur_new_labels.append(cur_labels[:image_token_start]) cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype)) cur_new_labels.append(cur_labels[image_token_start:image_token_start+1]) cur_labels = cur_labels[image_token_start+2:] else: cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start])) cur_new_input_embeds.append(cur_image_features) if labels is not None: cur_new_labels.append(cur_labels[:image_token_start]) cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype)) cur_labels = cur_labels[image_token_start+1:] cur_image_idx += 1 if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False): cur_input_ids = cur_input_ids[image_token_start+2:] else: cur_input_ids = cur_input_ids[image_token_start+1:] image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0] if cur_input_ids.numel() > 0: if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False): mask_idx = torch.nonzero(cur_input_ids==self.tokenizer.convert_tokens_to_ids(['<mask>'])[0]) _l = 0 for i, idx in enumerate(mask_idx): cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[_l:idx[0]]).detach()) ## mask cur_new_input_embeds.append(mask_feats[batch_idx][i:i+1].detach()) ## pos cur_new_input_embeds.append(pos_feats[batch_idx][i:i+1].detach()) if labels is not None: cur_labels[idx[0]:idx[0]+2] = torch.full((2,), IGNORE_INDEX, device=labels.device, dtype=labels.dtype) _l = idx[0]+2 if _l< len(cur_input_ids): cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[_l:]).detach()) else: mask_idx = torch.nonzero(cur_input_ids==self.tokenizer.convert_tokens_to_ids(['<mask>'])[0]) assert len(mask_idx) == len(mask_feats[batch_idx]), "mask num not equal to mask feats" _l = 0 for i, idx in enumerate(mask_idx): cur_raw_new_input_embeds = self.get_model().embed_tokens(cur_input_ids[_l:idx[0]]) cur_new_input_embeds.append(cur_raw_new_input_embeds) ## mask cur_new_input_embeds.append(mask_feats[batch_idx][i:i+1].to(cur_raw_new_input_embeds.dtype)) ## pos cur_new_input_embeds.append(pos_feats[batch_idx][i:i+1].to(cur_raw_new_input_embeds.dtype)) if labels is not None: cur_labels[idx[0]:idx[0]+2] = torch.full((2,), IGNORE_INDEX, device=labels.device, dtype=labels.dtype) _l = idx[0]+2 if _l< len(cur_input_ids): cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[_l:])) if labels is not None: cur_new_labels.append(cur_labels) cur_new_input_embeds = [x.to(device=self.device) for x in cur_new_input_embeds] cur_new_input_embeds = torch.cat(cur_new_input_embeds, dim=0) new_input_embeds.append(cur_new_input_embeds) if labels is not None: cur_new_labels = torch.cat(cur_new_labels, dim=0) new_labels.append(cur_new_labels) if 
any(x.shape != new_input_embeds[0].shape for x in new_input_embeds): max_len = max(x.shape[0] for x in new_input_embeds) new_input_embeds_align = [] for cur_new_embed in new_input_embeds: cur_new_embed = torch.cat((cur_new_embed, torch.zeros((max_len - cur_new_embed.shape[0], cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device)), dim=0) new_input_embeds_align.append(cur_new_embed) new_input_embeds = torch.stack(new_input_embeds_align, dim=0) if labels is not None: new_labels_align = [] _new_labels = new_labels for cur_new_label in new_labels: cur_new_label = torch.cat((cur_new_label, torch.full((max_len - cur_new_label.shape[0],), IGNORE_INDEX, dtype=cur_new_label.dtype, device=cur_new_label.device)), dim=0) new_labels_align.append(cur_new_label) new_labels = torch.stack(new_labels_align, dim=0) if attention_mask is not None: new_attention_mask = [] for cur_attention_mask, cur_new_labels, cur_new_labels_align in zip(attention_mask, _new_labels, new_labels): new_attn_mask_pad_left = torch.full((cur_new_labels.shape[0] - labels.shape[1],), True, dtype=attention_mask.dtype, device=attention_mask.device) new_attn_mask_pad_right = torch.full((cur_new_labels_align.shape[0] - cur_new_labels.shape[0],), False, dtype=attention_mask.dtype, device=attention_mask.device) cur_new_attention_mask = torch.cat((new_attn_mask_pad_left, cur_attention_mask, new_attn_mask_pad_right), dim=0) new_attention_mask.append(cur_new_attention_mask) attention_mask = torch.stack(new_attention_mask, dim=0) assert attention_mask.shape == new_labels.shape else: new_input_embeds = torch.stack(new_input_embeds, dim=0) if labels is not None: new_labels = torch.stack(new_labels, dim=0) if attention_mask is not None: new_attn_mask_pad_left = torch.full((attention_mask.shape[0], new_input_embeds.shape[1] - input_ids.shape[1]), True, dtype=attention_mask.dtype, device=attention_mask.device) attention_mask = torch.cat((new_attn_mask_pad_left, attention_mask), dim=1) assert attention_mask.shape == new_input_embeds.shape[:2] return None, attention_mask, past_key_values, new_input_embeds, new_labels def initialize_vision_tokenizer(self, model_args, tokenizer): if model_args.mm_use_im_patch_token: tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True) self.resize_token_embeddings(len(tokenizer)) mask_tokens = ['<mask>', '<pos>'] num_new_tokens = tokenizer.add_tokens(mask_tokens, special_tokens=True) if model_args.mm_use_im_start_end:
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. class OspreyMetaModel: def __init__(self, config): super(OspreyMetaModel, self).__init__(config) if hasattr(config, "mm_vision_tower"): self.vision_tower = build_vision_tower(config, delay_load=False) self.mm_projector = build_vision_projector(config) def get_vision_tower(self): vision_tower = getattr(self, 'vision_tower', None) if type(vision_tower) is list: vision_tower = vision_tower[0] return vision_tower def initialize_vision_modules(self, model_args, fsdp=None): vision_tower = model_args.vision_tower pretrain_mm_mlp_adapter = model_args.pretrain_mm_mlp_adapter if not hasattr(self.config, "mm_vision_tower"): self.config.mm_vision_tower = vision_tower vision_tower = build_vision_tower(model_args) if fsdp is not None and len(fsdp) > 0: self.vision_tower = [self.vision_tower] else: self.vision_tower = vision_tower self.config.use_mm_proj = True self.config.mm_projector_type = getattr(model_args, 'mm_projector_type', 'linear') self.mm_projector = build_vision_projector(self.config) if pretrain_mm_mlp_adapter is not None: print("***********load projector_weights********") mm_projector_weights = torch.load(pretrain_mm_mlp_adapter, map_location='cpu') def get_w(weights, keyword): return {k.split(keyword + '.')[1]: v for k, v in weights.items() if keyword in k} self.mm_projector.load_state_dict(get_w(mm_projector_weights, 'mm_projector')) class OspreyMetaForCausalLM(ABC): def __init__(self): super(OspreyMetaForCausalLM, self).__init__() @abstractmethod def get_model(self): pass def get_vision_tower(self): return self.get_model().get_vision_tower() def encode_images(self, images): image_features, image_features_dict = self.get_model().get_vision_tower()(images) self.get_model().mm_projector.to(device=image_features.device, dtype=image_features.dtype) image_features = self.get_model().mm_projector(image_features) return image_features, image_features_dict def prepare_inputs_labels_for_multimodal( self, input_ids, masks, attention_mask, past_key_values, labels, images ): vision_tower = self.get_vision_tower() if vision_tower is None or images is None or input_ids.shape[1] == 1: if past_key_values is not None and vision_tower is not None and images is not None and input_ids.shape[1] == 1: attention_mask = torch.ones((attention_mask.shape[0], past_key_values[-1][-1].shape[-2] + 1), dtype=attention_mask.dtype, device=attention_mask.device) return input_ids, attention_mask, past_key_values, None, labels if type(images) is list or images.ndim == 5: concat_images = torch.cat([image for image in images], dim=0) image_features, image_features_dict = self.encode_images(concat_images) split_sizes = [image.shape[0] for image in images] image_features = torch.split(image_features, split_sizes, dim=0) image_features = [x.flatten(0, 1).to(concat_images.device) for x in image_features] else: image_features, image_features_dict = self.encode_images(images) mask_feats, pos_feats = self.mask_extractor(image_features_dict, masks) new_input_embeds = [] new_labels = [] if labels is not 
None else None cur_image_idx = 0 for batch_idx, cur_input_ids in enumerate(input_ids): if (cur_input_ids == IMAGE_TOKEN_INDEX).sum() == 0: # multimodal LLM, but the current sample is not multimodal # FIXME: this is a hacky fix, for deepspeed zero3 to work half_len = cur_input_ids.shape[0] // 2 cur_image_features = image_features[cur_image_idx] cur_input_embeds_1 = self.get_model().embed_tokens(cur_input_ids[:half_len]) cur_input_embeds_2 = self.get_model().embed_tokens(cur_input_ids[half_len:]) cur_input_embeds = torch.cat([cur_input_embeds_1, cur_image_features[0:0], cur_input_embeds_2], dim=0) new_input_embeds.append(cur_input_embeds) if labels is not None: new_labels.append(labels[batch_idx]) cur_image_idx += 1 continue image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0] cur_new_input_embeds = [] if labels is not None: cur_labels = labels[batch_idx] cur_new_labels = [] assert cur_labels.shape == cur_input_ids.shape while image_token_indices.numel() > 0: cur_image_features = image_features[cur_image_idx] image_token_start = image_token_indices[0] if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False): cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start-1]).detach()) cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start-1:image_token_start])) cur_new_input_embeds.append(cur_image_features) cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start+1:image_token_start+2])) if labels is not None: cur_new_labels.append(cur_labels[:image_token_start]) cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype)) cur_new_labels.append(cur_labels[image_token_start:image_token_start+1]) cur_labels = cur_labels[image_token_start+2:] else: cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start])) cur_new_input_embeds.append(cur_image_features) if labels is not None: cur_new_labels.append(cur_labels[:image_token_start]) cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype)) cur_labels = cur_labels[image_token_start+1:] cur_image_idx += 1 if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False): cur_input_ids = cur_input_ids[image_token_start+2:] else: cur_input_ids = cur_input_ids[image_token_start+1:] image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0] if cur_input_ids.numel() > 0: if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False): mask_idx = torch.nonzero(cur_input_ids==self.tokenizer.convert_tokens_to_ids(['<mask>'])[0]) _l = 0 for i, idx in enumerate(mask_idx): cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[_l:idx[0]]).detach()) ## mask cur_new_input_embeds.append(mask_feats[batch_idx][i:i+1].detach()) ## pos cur_new_input_embeds.append(pos_feats[batch_idx][i:i+1].detach()) if labels is not None: cur_labels[idx[0]:idx[0]+2] = torch.full((2,), IGNORE_INDEX, device=labels.device, dtype=labels.dtype) _l = idx[0]+2 if _l< len(cur_input_ids): cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[_l:]).detach()) else: mask_idx = torch.nonzero(cur_input_ids==self.tokenizer.convert_tokens_to_ids(['<mask>'])[0]) assert len(mask_idx) == len(mask_feats[batch_idx]), "mask num not equal to mask 
feats" _l = 0 for i, idx in enumerate(mask_idx): cur_raw_new_input_embeds = self.get_model().embed_tokens(cur_input_ids[_l:idx[0]]) cur_new_input_embeds.append(cur_raw_new_input_embeds) ## mask cur_new_input_embeds.append(mask_feats[batch_idx][i:i+1].to(cur_raw_new_input_embeds.dtype)) ## pos cur_new_input_embeds.append(pos_feats[batch_idx][i:i+1].to(cur_raw_new_input_embeds.dtype)) if labels is not None: cur_labels[idx[0]:idx[0]+2] = torch.full((2,), IGNORE_INDEX, device=labels.device, dtype=labels.dtype) _l = idx[0]+2 if _l< len(cur_input_ids): cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[_l:])) if labels is not None: cur_new_labels.append(cur_labels) cur_new_input_embeds = [x.to(device=self.device) for x in cur_new_input_embeds] cur_new_input_embeds = torch.cat(cur_new_input_embeds, dim=0) new_input_embeds.append(cur_new_input_embeds) if labels is not None: cur_new_labels = torch.cat(cur_new_labels, dim=0) new_labels.append(cur_new_labels) if any(x.shape != new_input_embeds[0].shape for x in new_input_embeds): max_len = max(x.shape[0] for x in new_input_embeds) new_input_embeds_align = [] for cur_new_embed in new_input_embeds: cur_new_embed = torch.cat((cur_new_embed, torch.zeros((max_len - cur_new_embed.shape[0], cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device)), dim=0) new_input_embeds_align.append(cur_new_embed) new_input_embeds = torch.stack(new_input_embeds_align, dim=0) if labels is not None: new_labels_align = [] _new_labels = new_labels for cur_new_label in new_labels: cur_new_label = torch.cat((cur_new_label, torch.full((max_len - cur_new_label.shape[0],), IGNORE_INDEX, dtype=cur_new_label.dtype, device=cur_new_label.device)), dim=0) new_labels_align.append(cur_new_label) new_labels = torch.stack(new_labels_align, dim=0) if attention_mask is not None: new_attention_mask = [] for cur_attention_mask, cur_new_labels, cur_new_labels_align in zip(attention_mask, _new_labels, new_labels): new_attn_mask_pad_left = torch.full((cur_new_labels.shape[0] - labels.shape[1],), True, dtype=attention_mask.dtype, device=attention_mask.device) new_attn_mask_pad_right = torch.full((cur_new_labels_align.shape[0] - cur_new_labels.shape[0],), False, dtype=attention_mask.dtype, device=attention_mask.device) cur_new_attention_mask = torch.cat((new_attn_mask_pad_left, cur_attention_mask, new_attn_mask_pad_right), dim=0) new_attention_mask.append(cur_new_attention_mask) attention_mask = torch.stack(new_attention_mask, dim=0) assert attention_mask.shape == new_labels.shape else: new_input_embeds = torch.stack(new_input_embeds, dim=0) if labels is not None: new_labels = torch.stack(new_labels, dim=0) if attention_mask is not None: new_attn_mask_pad_left = torch.full((attention_mask.shape[0], new_input_embeds.shape[1] - input_ids.shape[1]), True, dtype=attention_mask.dtype, device=attention_mask.device) attention_mask = torch.cat((new_attn_mask_pad_left, attention_mask), dim=1) assert attention_mask.shape == new_input_embeds.shape[:2] return None, attention_mask, past_key_values, new_input_embeds, new_labels def initialize_vision_tokenizer(self, model_args, tokenizer): if model_args.mm_use_im_patch_token: tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True) self.resize_token_embeddings(len(tokenizer)) mask_tokens = ['<mask>', '<pos>'] num_new_tokens = tokenizer.add_tokens(mask_tokens, special_tokens=True) if model_args.mm_use_im_start_end:
num_new_tokens = tokenizer.add_tokens([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True)
6
2023-12-17 16:21:45+00:00
4k
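The alignment loop at the end of prepare_inputs_labels_for_multimodal above right-pads each spliced embedding sequence with zeros and each label sequence with IGNORE_INDEX so the batch can be stacked. A stripped-down sketch of just that padding step, with invented shapes and a hypothetical helper name:

import torch

IGNORE_INDEX = -100  # same sentinel value the snippet above uses for padded label positions

def pad_embeds_and_labels(embeds_list, labels_list, hidden):
    # Right-pad each (L_i, hidden) embedding with zeros and each (L_i,) label
    # vector with IGNORE_INDEX so the batch can be stacked.
    max_len = max(e.shape[0] for e in embeds_list)
    padded_e, padded_l = [], []
    for e, l in zip(embeds_list, labels_list):
        pad = max_len - e.shape[0]
        padded_e.append(torch.cat([e, torch.zeros(pad, hidden)], dim=0))
        padded_l.append(torch.cat([l, torch.full((pad,), IGNORE_INDEX, dtype=l.dtype)], dim=0))
    return torch.stack(padded_e), torch.stack(padded_l)

embeds = [torch.randn(5, 8), torch.randn(3, 8)]
labels = [torch.zeros(5, dtype=torch.long), torch.zeros(3, dtype=torch.long)]
e, l = pad_embeds_and_labels(embeds, labels, hidden=8)
print(e.shape, l.shape)   # torch.Size([2, 5, 8]) torch.Size([2, 5])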
3DTopia/OpenLRM
lrm/inferrer.py
[ { "identifier": "LRMGenerator", "path": "lrm/models/generator.py", "snippet": "class LRMGenerator(nn.Module):\n \"\"\"\n Full model of the large reconstruction model.\n \"\"\"\n def __init__(self, camera_embed_dim: int, rendering_samples_per_ray: int,\n transformer_dim: int, transformer_layers: int, transformer_heads: int,\n triplane_low_res: int, triplane_high_res: int, triplane_dim: int,\n encoder_freeze: bool = True, encoder_model_name: str = 'facebook/dino-vitb16', encoder_feat_dim: int = 768):\n super().__init__()\n \n # attributes\n self.encoder_feat_dim = encoder_feat_dim\n self.camera_embed_dim = camera_embed_dim\n\n # modules\n self.encoder = DinoWrapper(\n model_name=encoder_model_name,\n freeze=encoder_freeze,\n )\n self.camera_embedder = CameraEmbedder(\n raw_dim=12+4, embed_dim=camera_embed_dim,\n )\n self.transformer = TriplaneTransformer(\n inner_dim=transformer_dim, num_layers=transformer_layers, num_heads=transformer_heads,\n image_feat_dim=encoder_feat_dim,\n camera_embed_dim=camera_embed_dim,\n triplane_low_res=triplane_low_res, triplane_high_res=triplane_high_res, triplane_dim=triplane_dim,\n )\n self.synthesizer = TriplaneSynthesizer(\n triplane_dim=triplane_dim, samples_per_ray=rendering_samples_per_ray,\n )\n\n def forward_planes(self, image, camera):\n # image: [N, C_img, H_img, W_img]\n # camera: [N, D_cam_raw]\n assert image.shape[0] == camera.shape[0], \"Batch size mismatch for image and camera\"\n N = image.shape[0]\n\n # encode image\n image_feats = self.encoder(image)\n assert image_feats.shape[-1] == self.encoder_feat_dim, \\\n f\"Feature dimension mismatch: {image_feats.shape[-1]} vs {self.encoder_feat_dim}\"\n\n # embed camera\n camera_embeddings = self.camera_embedder(camera)\n assert camera_embeddings.shape[-1] == self.camera_embed_dim, \\\n f\"Feature dimension mismatch: {camera_embeddings.shape[-1]} vs {self.camera_embed_dim}\"\n\n # transformer generating planes\n planes = self.transformer(image_feats, camera_embeddings)\n assert planes.shape[0] == N, \"Batch size mismatch for planes\"\n assert planes.shape[1] == 3, \"Planes should have 3 channels\"\n\n return planes\n\n def forward(self, image, source_camera, render_cameras, render_size: int):\n # image: [N, C_img, H_img, W_img]\n # source_camera: [N, D_cam_raw]\n # render_cameras: [N, M, D_cam_render]\n # render_size: int\n assert image.shape[0] == source_camera.shape[0], \"Batch size mismatch for image and source_camera\"\n assert image.shape[0] == render_cameras.shape[0], \"Batch size mismatch for image and render_cameras\"\n N, M = render_cameras.shape[:2]\n\n planes = self.forward_planes(image, source_camera)\n\n # render target views\n render_results = self.synthesizer(planes, render_cameras, render_size)\n assert render_results['images_rgb'].shape[0] == N, \"Batch size mismatch for render_results\"\n assert render_results['images_rgb'].shape[1] == M, \"Number of rendered views should be consistent with render_cameras\"\n\n return {\n 'planes': planes,\n **render_results,\n }" }, { "identifier": "build_camera_principle", "path": "lrm/cam_utils.py", "snippet": "def build_camera_principle(RT: torch.Tensor, intrinsics: torch.Tensor):\n \"\"\"\n RT: (N, 3, 4)\n intrinsics: (N, 3, 2), [[fx, fy], [cx, cy], [width, height]]\n \"\"\"\n fx, fy, cx, cy = get_normalized_camera_intrinsics(intrinsics)\n return torch.cat([\n RT.reshape(-1, 12),\n fx.unsqueeze(-1), fy.unsqueeze(-1), cx.unsqueeze(-1), cy.unsqueeze(-1),\n ], dim=-1)" }, { "identifier": "build_camera_standard", "path": "lrm/cam_utils.py", 
"snippet": "def build_camera_standard(RT: torch.Tensor, intrinsics: torch.Tensor):\n \"\"\"\n RT: (N, 3, 4)\n intrinsics: (N, 3, 2), [[fx, fy], [cx, cy], [width, height]]\n \"\"\"\n E = compose_extrinsic_RT(RT)\n fx, fy, cx, cy = get_normalized_camera_intrinsics(intrinsics)\n I = torch.stack([\n torch.stack([fx, torch.zeros_like(fx), cx], dim=-1),\n torch.stack([torch.zeros_like(fy), fy, cy], dim=-1),\n torch.tensor([[0, 0, 1]], dtype=torch.float32, device=RT.device).repeat(RT.shape[0], 1),\n ], dim=1)\n return torch.cat([\n E.reshape(-1, 16),\n I.reshape(-1, 9),\n ], dim=-1)" }, { "identifier": "center_looking_at_camera_pose", "path": "lrm/cam_utils.py", "snippet": "def center_looking_at_camera_pose(camera_position: torch.Tensor, look_at: torch.Tensor = None, up_world: torch.Tensor = None):\n \"\"\"\n camera_position: (M, 3)\n look_at: (3)\n up_world: (3)\n return: (M, 3, 4)\n \"\"\"\n # by default, looking at the origin and world up is pos-z\n if look_at is None:\n look_at = torch.tensor([0, 0, 0], dtype=torch.float32)\n if up_world is None:\n up_world = torch.tensor([0, 0, 1], dtype=torch.float32)\n look_at = look_at.unsqueeze(0).repeat(camera_position.shape[0], 1)\n up_world = up_world.unsqueeze(0).repeat(camera_position.shape[0], 1)\n\n z_axis = camera_position - look_at\n z_axis = z_axis / z_axis.norm(dim=-1, keepdim=True)\n x_axis = torch.cross(up_world, z_axis)\n x_axis = x_axis / x_axis.norm(dim=-1, keepdim=True)\n y_axis = torch.cross(z_axis, x_axis)\n y_axis = y_axis / y_axis.norm(dim=-1, keepdim=True)\n extrinsics = torch.stack([x_axis, y_axis, z_axis, camera_position], dim=-1)\n return extrinsics" } ]
import torch import math import os import imageio import mcubes import trimesh import numpy as np import argparse from PIL import Image from .models.generator import LRMGenerator from .cam_utils import build_camera_principle, build_camera_standard, center_looking_at_camera_pose from huggingface_hub import hf_hub_download
2,703
# Copyright (c) 2023, Zexin He # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. class LRMInferrer: def __init__(self, model_name: str): self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') _checkpoint = self._load_checkpoint(model_name) _model_weights, _model_kwargs = _checkpoint['weights'], _checkpoint['kwargs']['model'] self.model = self._build_model(_model_kwargs, _model_weights).eval() self.infer_kwargs = _checkpoint['kwargs']['infer'] def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): pass def _load_checkpoint(self, model_name: str, cache_dir = './.cache'): # download checkpoint if not exists local_dir = os.path.join(cache_dir, model_name) if not os.path.exists(local_dir): os.makedirs(local_dir, exist_ok=True) if not os.path.exists(os.path.join(local_dir, f'model.pth')): # os.system(f'wget -O {os.path.join(cache_dir, f"{model_name}.pth")} https://zxhezexin.com/modelzoo/openlrm/{model_name}.pth') # raise FileNotFoundError(f"Checkpoint {model_name} not found in {cache_dir}") repo_id = f'zxhezexin/{model_name}' config_path = hf_hub_download(repo_id=repo_id, filename='config.json', local_dir=local_dir) model_path = hf_hub_download(repo_id=repo_id, filename=f'model.pth', local_dir=local_dir) else: model_path = os.path.join(local_dir, f'model.pth') checkpoint = torch.load(model_path, map_location=self.device) return checkpoint def _build_model(self, model_kwargs, model_weights): model = LRMGenerator(**model_kwargs).to(self.device) model.load_state_dict(model_weights) print(f"======== Loaded model from checkpoint ========") return model @staticmethod def _get_surrounding_views(M: int = 160, radius: float = 2.0, height: float = 0.8): # M: number of surrounding views # radius: camera dist to center # height: height of the camera # return: (M, 3, 4) assert M > 0 assert radius > 0 camera_positions = [] projected_radius = math.sqrt(radius ** 2 - height ** 2) for i in range(M): theta = 2 * math.pi * i / M - math.pi / 2 x = projected_radius * math.cos(theta) y = projected_radius * math.sin(theta) z = height camera_positions.append([x, y, z]) camera_positions = torch.tensor(camera_positions, dtype=torch.float32) extrinsics = center_looking_at_camera_pose(camera_positions) return extrinsics @staticmethod def _default_intrinsics(): # return: (3, 2) fx = fy = 384 cx = cy = 256 w = h = 512 intrinsics = torch.tensor([ [fx, fy], [cx, cy], [w, h], ], dtype=torch.float32) return intrinsics def _default_source_camera(self, batch_size: int = 1): # return: (N, D_cam_raw) dist_to_center = 2 canonical_camera_extrinsics = torch.tensor([[ [1, 0, 0, 0], [0, 0, -1, -dist_to_center], [0, 1, 0, 0], ]], dtype=torch.float32) canonical_camera_intrinsics = self._default_intrinsics().unsqueeze(0) source_camera = build_camera_principle(canonical_camera_extrinsics, canonical_camera_intrinsics) return source_camera.repeat(batch_size, 1) def _default_render_cameras(self, batch_size: int = 1): # return: (N, M, D_cam_render) render_camera_extrinsics = self._get_surrounding_views() 
render_camera_intrinsics = self._default_intrinsics().unsqueeze(0).repeat(render_camera_extrinsics.shape[0], 1, 1)
# Copyright (c) 2023, Zexin He # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. class LRMInferrer: def __init__(self, model_name: str): self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') _checkpoint = self._load_checkpoint(model_name) _model_weights, _model_kwargs = _checkpoint['weights'], _checkpoint['kwargs']['model'] self.model = self._build_model(_model_kwargs, _model_weights).eval() self.infer_kwargs = _checkpoint['kwargs']['infer'] def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): pass def _load_checkpoint(self, model_name: str, cache_dir = './.cache'): # download checkpoint if not exists local_dir = os.path.join(cache_dir, model_name) if not os.path.exists(local_dir): os.makedirs(local_dir, exist_ok=True) if not os.path.exists(os.path.join(local_dir, f'model.pth')): # os.system(f'wget -O {os.path.join(cache_dir, f"{model_name}.pth")} https://zxhezexin.com/modelzoo/openlrm/{model_name}.pth') # raise FileNotFoundError(f"Checkpoint {model_name} not found in {cache_dir}") repo_id = f'zxhezexin/{model_name}' config_path = hf_hub_download(repo_id=repo_id, filename='config.json', local_dir=local_dir) model_path = hf_hub_download(repo_id=repo_id, filename=f'model.pth', local_dir=local_dir) else: model_path = os.path.join(local_dir, f'model.pth') checkpoint = torch.load(model_path, map_location=self.device) return checkpoint def _build_model(self, model_kwargs, model_weights): model = LRMGenerator(**model_kwargs).to(self.device) model.load_state_dict(model_weights) print(f"======== Loaded model from checkpoint ========") return model @staticmethod def _get_surrounding_views(M: int = 160, radius: float = 2.0, height: float = 0.8): # M: number of surrounding views # radius: camera dist to center # height: height of the camera # return: (M, 3, 4) assert M > 0 assert radius > 0 camera_positions = [] projected_radius = math.sqrt(radius ** 2 - height ** 2) for i in range(M): theta = 2 * math.pi * i / M - math.pi / 2 x = projected_radius * math.cos(theta) y = projected_radius * math.sin(theta) z = height camera_positions.append([x, y, z]) camera_positions = torch.tensor(camera_positions, dtype=torch.float32) extrinsics = center_looking_at_camera_pose(camera_positions) return extrinsics @staticmethod def _default_intrinsics(): # return: (3, 2) fx = fy = 384 cx = cy = 256 w = h = 512 intrinsics = torch.tensor([ [fx, fy], [cx, cy], [w, h], ], dtype=torch.float32) return intrinsics def _default_source_camera(self, batch_size: int = 1): # return: (N, D_cam_raw) dist_to_center = 2 canonical_camera_extrinsics = torch.tensor([[ [1, 0, 0, 0], [0, 0, -1, -dist_to_center], [0, 1, 0, 0], ]], dtype=torch.float32) canonical_camera_intrinsics = self._default_intrinsics().unsqueeze(0) source_camera = build_camera_principle(canonical_camera_extrinsics, canonical_camera_intrinsics) return source_camera.repeat(batch_size, 1) def _default_render_cameras(self, batch_size: int = 1): # return: (N, M, D_cam_render) render_camera_extrinsics = self._get_surrounding_views() 
render_camera_intrinsics = self._default_intrinsics().unsqueeze(0).repeat(render_camera_extrinsics.shape[0], 1, 1)
render_cameras = build_camera_standard(render_camera_extrinsics, render_camera_intrinsics)
2
2023-12-20 10:52:01+00:00
4k
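A compact sketch of the camera setup used by LRMInferrer._get_surrounding_views and center_looking_at_camera_pose above: place M cameras on a horizontal ring (shrinking the ring radius so the full distance to the origin stays equal to the requested radius) and build look-at extrinsics toward the origin. Function names and the default M=8 are illustrative, not the repository's values (the inferrer uses M=160).

import math
import torch
import torch.nn.functional as F

def ring_camera_positions(m=8, radius=2.0, height=0.8):
    # Positions on a circle at fixed height; ring radius chosen so that
    # sqrt(r^2 + height^2) == radius, as in the snippet above.
    r = math.sqrt(radius ** 2 - height ** 2)
    pts = []
    for i in range(m):
        theta = 2 * math.pi * i / m - math.pi / 2
        pts.append([r * math.cos(theta), r * math.sin(theta), height])
    return torch.tensor(pts, dtype=torch.float32)

def look_at_origin(camera_position, up=(0.0, 0.0, 1.0)):
    # Build (M, 3, 4) extrinsics whose z-axis points from the origin toward the
    # camera, mirroring center_looking_at_camera_pose in lrm/cam_utils.py.
    up_world = torch.tensor(up).expand_as(camera_position)
    z = F.normalize(camera_position, dim=-1)
    x = F.normalize(torch.cross(up_world, z, dim=-1), dim=-1)
    y = F.normalize(torch.cross(z, x, dim=-1), dim=-1)
    return torch.stack([x, y, z, camera_position], dim=-1)

print(look_at_origin(ring_camera_positions()).shape)  # torch.Size([8, 3, 4])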
xinghaochen/TinySAM
tinysam/predictor.py
[ { "identifier": "Sam", "path": "tinysam/modeling/sam.py", "snippet": "class Sam(nn.Module):\n mask_threshold: float = 0.0\n image_format: str = \"RGB\"\n\n def __init__(\n self,\n image_encoder: Union[ImageEncoderViT, TinyViT],\n prompt_encoder: PromptEncoder,\n mask_decoder: MaskDecoder,\n pixel_mean: List[float] = [123.675, 116.28, 103.53],\n pixel_std: List[float] = [58.395, 57.12, 57.375],\n ) -> None:\n \"\"\"\n SAM predicts object masks from an image and input prompts.\n\n Arguments:\n image_encoder (ImageEncoderViT): The backbone used to encode the\n image into image embeddings that allow for efficient mask prediction.\n prompt_encoder (PromptEncoder): Encodes various types of input prompts.\n mask_decoder (MaskDecoder): Predicts masks from the image embeddings\n and encoded prompts.\n pixel_mean (list(float)): Mean values for normalizing pixels in the input image.\n pixel_std (list(float)): Std values for normalizing pixels in the input image.\n \"\"\"\n super().__init__()\n self.image_encoder = image_encoder\n self.prompt_encoder = prompt_encoder\n self.mask_decoder = mask_decoder\n self.register_buffer(\"pixel_mean\", torch.Tensor(pixel_mean).view(-1, 1, 1), False)\n self.register_buffer(\"pixel_std\", torch.Tensor(pixel_std).view(-1, 1, 1), False)\n\n @property\n def device(self) -> Any:\n return self.pixel_mean.device\n\n @torch.no_grad()\n def forward(\n self,\n batched_input: List[Dict[str, Any]],\n multimask_output: bool,\n ) -> List[Dict[str, torch.Tensor]]:\n \"\"\"\n Predicts masks end-to-end from provided images and prompts.\n If prompts are not known in advance, using SamPredictor is\n recommended over calling the model directly.\n\n Arguments:\n batched_input (list(dict)): A list over input images, each a\n dictionary with the following keys. A prompt key can be\n excluded if it is not present.\n 'image': The image as a torch tensor in 3xHxW format,\n already transformed for input to the model.\n 'original_size': (tuple(int, int)) The original size of\n the image before transformation, as (H, W).\n 'point_coords': (torch.Tensor) Batched point prompts for\n this image, with shape BxNx2. Already transformed to the\n input frame of the model.\n 'point_labels': (torch.Tensor) Batched labels for point prompts,\n with shape BxN.\n 'boxes': (torch.Tensor) Batched box inputs, with shape Bx4.\n Already transformed to the input frame of the model.\n 'mask_inputs': (torch.Tensor) Batched mask inputs to the model,\n in the form Bx1xHxW.\n multimask_output (bool): Whether the model should predict multiple\n disambiguating masks, or return a single mask.\n\n Returns:\n (list(dict)): A list over input images, where each element is\n as dictionary with the following keys.\n 'masks': (torch.Tensor) Batched binary mask predictions,\n with shape BxCxHxW, where B is the number of input prompts,\n C is determined by multimask_output, and (H, W) is the\n original size of the image.\n 'iou_predictions': (torch.Tensor) The model's predictions\n of mask quality, in shape BxC.\n 'low_res_logits': (torch.Tensor) Low resolution logits with\n shape BxCxHxW, where H=W=256. 
Can be passed as mask input\n to subsequent iterations of prediction.\n \"\"\"\n input_images = torch.stack([self.preprocess(x[\"image\"]) for x in batched_input], dim=0)\n image_embeddings = self.image_encoder(input_images)\n\n outputs = []\n for image_record, curr_embedding in zip(batched_input, image_embeddings):\n if \"point_coords\" in image_record:\n points = (image_record[\"point_coords\"], image_record[\"point_labels\"])\n else:\n points = None\n sparse_embeddings, dense_embeddings = self.prompt_encoder(\n points=points,\n boxes=image_record.get(\"boxes\", None),\n masks=image_record.get(\"mask_inputs\", None),\n )\n low_res_masks, iou_predictions = self.mask_decoder(\n image_embeddings=curr_embedding.unsqueeze(0),\n image_pe=self.prompt_encoder.get_dense_pe(),\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n multimask_output=multimask_output,\n )\n masks = self.postprocess_masks(\n low_res_masks,\n input_size=image_record[\"image\"].shape[-2:],\n original_size=image_record[\"original_size\"],\n )\n masks = masks > self.mask_threshold\n outputs.append(\n {\n \"masks\": masks,\n \"iou_predictions\": iou_predictions,\n \"low_res_logits\": low_res_masks,\n }\n )\n return outputs\n\n def postprocess_masks(\n self,\n masks: torch.Tensor,\n input_size: Tuple[int, ...],\n original_size: Tuple[int, ...],\n ) -> torch.Tensor:\n \"\"\"\n Remove padding and upscale masks to the original image size.\n\n Arguments:\n masks (torch.Tensor): Batched masks from the mask_decoder,\n in BxCxHxW format.\n input_size (tuple(int, int)): The size of the image input to the\n model, in (H, W) format. Used to remove padding.\n original_size (tuple(int, int)): The original size of the image\n before resizing for input to the model, in (H, W) format.\n\n Returns:\n (torch.Tensor): Batched masks in BxCxHxW format, where (H, W)\n is given by original_size.\n \"\"\"\n masks = F.interpolate(\n masks,\n (self.image_encoder.img_size, self.image_encoder.img_size),\n mode=\"bilinear\",\n align_corners=False,\n )\n masks = masks[..., : input_size[0], : input_size[1]]\n masks = F.interpolate(masks, original_size, mode=\"bilinear\", align_corners=False)\n return masks\n\n def preprocess(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Normalize pixel values and pad to a square input.\"\"\"\n # Normalize colors\n x = (x - self.pixel_mean) / self.pixel_std\n\n # Pad\n h, w = x.shape[-2:]\n padh = self.image_encoder.img_size - h\n padw = self.image_encoder.img_size - w\n x = F.pad(x, (0, padw, 0, padh))\n return x" }, { "identifier": "ResizeLongestSide", "path": "tinysam/utils/transforms.py", "snippet": "class ResizeLongestSide:\n \"\"\"\n Resizes images to the longest side 'target_length', as well as provides\n methods for resizing coordinates and boxes. Provides methods for\n transforming both numpy array and batched torch tensors.\n \"\"\"\n\n def __init__(self, target_length: int) -> None:\n self.target_length = target_length\n\n def apply_image(self, image: np.ndarray) -> np.ndarray:\n \"\"\"\n Expects a numpy array with shape HxWxC in uint8 format.\n \"\"\"\n target_size = self.get_preprocess_shape(image.shape[0], image.shape[1], self.target_length)\n return np.array(resize(to_pil_image(image), target_size))\n\n def apply_coords(self, coords: np.ndarray, original_size: Tuple[int, ...]) -> np.ndarray:\n \"\"\"\n Expects a numpy array of length 2 in the final dimension. 
Requires the\n original image size in (H, W) format.\n \"\"\"\n old_h, old_w = original_size\n new_h, new_w = self.get_preprocess_shape(\n original_size[0], original_size[1], self.target_length\n )\n coords = deepcopy(coords).astype(float)\n coords[..., 0] = coords[..., 0] * (new_w / old_w)\n coords[..., 1] = coords[..., 1] * (new_h / old_h)\n return coords\n\n def apply_boxes(self, boxes: np.ndarray, original_size: Tuple[int, ...]) -> np.ndarray:\n \"\"\"\n Expects a numpy array shape Bx4. Requires the original image size\n in (H, W) format.\n \"\"\"\n boxes = self.apply_coords(boxes.reshape(-1, 2, 2), original_size)\n return boxes.reshape(-1, 4)\n\n def apply_image_torch(self, image: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Expects batched images with shape BxCxHxW and float format. This\n transformation may not exactly match apply_image. apply_image is\n the transformation expected by the model.\n \"\"\"\n # Expects an image in BCHW format. May not exactly match apply_image.\n target_size = self.get_preprocess_shape(image.shape[2], image.shape[3], self.target_length)\n return F.interpolate(\n image, target_size, mode=\"bilinear\", align_corners=False, antialias=True\n )\n\n def apply_coords_torch(\n self, coords: torch.Tensor, original_size: Tuple[int, ...]\n ) -> torch.Tensor:\n \"\"\"\n Expects a torch tensor with length 2 in the last dimension. Requires the\n original image size in (H, W) format.\n \"\"\"\n old_h, old_w = original_size\n new_h, new_w = self.get_preprocess_shape(\n original_size[0], original_size[1], self.target_length\n )\n coords = deepcopy(coords).to(torch.float)\n coords[..., 0] = coords[..., 0] * (new_w / old_w)\n coords[..., 1] = coords[..., 1] * (new_h / old_h)\n return coords\n\n def apply_boxes_torch(\n self, boxes: torch.Tensor, original_size: Tuple[int, ...]\n ) -> torch.Tensor:\n \"\"\"\n Expects a torch tensor with shape Bx4. Requires the original image\n size in (H, W) format.\n \"\"\"\n boxes = self.apply_coords_torch(boxes.reshape(-1, 2, 2), original_size)\n return boxes.reshape(-1, 4)\n\n @staticmethod\n def get_preprocess_shape(oldh: int, oldw: int, long_side_length: int) -> Tuple[int, int]:\n \"\"\"\n Compute the output size given input size and target long side length.\n \"\"\"\n scale = long_side_length * 1.0 / max(oldh, oldw)\n newh, neww = oldh * scale, oldw * scale\n neww = int(neww + 0.5)\n newh = int(newh + 0.5)\n return (newh, neww)" } ]
import numpy as np import torch from .modeling import Sam from typing import Optional, Tuple from .utils.transforms import ResizeLongestSide
2,718
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.


class SamPredictor:
    def __init__(
        self,
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.


class SamPredictor:
    def __init__(
        self,
sam_model: Sam,
0
2023-12-19 11:25:54+00:00
4k
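The ResizeLongestSide transform in the record above rescales images so their longer side matches a target length and rescales prompt coordinates by the same factors. A self-contained sketch of that rule; the target length 1024 and the sample coordinates are example values, not taken from the repository's configuration.

import numpy as np

def get_preprocess_shape(oldh, oldw, long_side_length):
    # Same rule as ResizeLongestSide.get_preprocess_shape: scale so the longer
    # side equals long_side_length, rounding each side to the nearest integer.
    scale = long_side_length / max(oldh, oldw)
    return int(oldh * scale + 0.5), int(oldw * scale + 0.5)

def apply_coords(coords, original_size, long_side_length=1024):
    # Rescale (…, 2) point coordinates from the original (H, W) frame into the
    # resized frame, mirroring ResizeLongestSide.apply_coords.
    old_h, old_w = original_size
    new_h, new_w = get_preprocess_shape(old_h, old_w, long_side_length)
    coords = coords.astype(float).copy()
    coords[..., 0] *= new_w / old_w
    coords[..., 1] *= new_h / old_h
    return coords

print(get_preprocess_shape(480, 640, 1024))            # (768, 1024)
print(apply_coords(np.array([[320.0, 240.0]]), (480, 640)))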
VikParuchuri/texify
benchmark.py
[ { "identifier": "batch_inference", "path": "texify/inference.py", "snippet": "def batch_inference(images, model, processor, temperature=settings.TEMPERATURE, max_tokens=settings.MAX_TOKENS):\n images = [image.convert(\"RGB\") for image in images]\n encodings = processor(images=images, return_tensors=\"pt\", add_special_tokens=False)\n pixel_values = encodings[\"pixel_values\"].to(model.dtype)\n pixel_values = pixel_values.to(model.device)\n\n additional_kwargs = {}\n if temperature > 0:\n additional_kwargs[\"temperature\"] = temperature\n additional_kwargs[\"do_sample\"] = True\n additional_kwargs[\"top_p\"] = 0.95\n\n generated_ids = model.generate(\n pixel_values=pixel_values,\n max_new_tokens=max_tokens,\n decoder_start_token_id=processor.tokenizer.bos_token_id,\n **additional_kwargs,\n )\n\n generated_text = processor.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)\n generated_text = [postprocess(text) for text in generated_text]\n return generated_text" }, { "identifier": "load_model", "path": "texify/model/model.py", "snippet": "def load_model(checkpoint=settings.MODEL_CHECKPOINT, device=settings.TORCH_DEVICE_MODEL, dtype=settings.MODEL_DTYPE):\n config = get_config(checkpoint)\n AutoModel.register(VariableDonutSwinConfig, VariableDonutSwinModel)\n\n model = VisionEncoderDecoderModel.from_pretrained(checkpoint, config=config, torch_dtype=dtype)\n model = model.to(device)\n model = model.eval()\n print(f\"Loaded texify model to {device} with {dtype} dtype\")\n return model" }, { "identifier": "load_processor", "path": "texify/model/processor.py", "snippet": "def load_processor():\n AutoImageProcessor.register(VariableDonutSwinConfig, VariableDonutImageProcessor)\n processor = VariableDonutProcessor.from_pretrained(settings.MODEL_CHECKPOINT)\n processor.image_processor.max_size = settings.MAX_IMAGE_SIZE\n processor.image_processor.size = [settings.MAX_IMAGE_SIZE[\"height\"], settings.MAX_IMAGE_SIZE[\"width\"]]\n processor.image_processor.image_mean = IMAGE_MEAN\n processor.image_processor.image_std = IMAGE_STD\n processor.image_processor.train = False\n\n processor.tokenizer.model_max_length = settings.MAX_TOKENS\n processor.train = False\n return processor" }, { "identifier": "settings", "path": "texify/settings.py", "snippet": "class Settings(BaseSettings):\n class Config:\n TORCH_DEVICE: Optional[str] = None\n MAX_TOKENS: int = 384 # Will not work well above 768, since it was not trained with more\n MAX_IMAGE_SIZE: Dict = {\"height\": 420, \"width\": 420}\n MODEL_CHECKPOINT: str = \"vikp/texify\"\n BATCH_SIZE: int = 16 # Should use ~5GB of RAM\n DATA_DIR: str = \"data\"\n TEMPERATURE: float = 0.0 # Temperature for generation, 0.0 means greedy\n def TORCH_DEVICE_MODEL(self) -> str:\n def CUDA(self) -> bool:\n def MODEL_DTYPE(self) -> torch.dtype:" } ]
import argparse
import os.path
import random
import time
import evaluate
import json
import base64
import io
import torch
from functools import partial
from tabulate import tabulate
from tqdm import tqdm
from texify.inference import batch_inference
from texify.model.model import load_model
from texify.model.processor import load_processor
from PIL import Image
from texify.settings import settings
from rapidfuzz.distance import Levenshtein
from pix2tex.cli import LatexOCR
from nougat.postprocessing import markdown_compatible
from nougat.utils.checkpoint import get_checkpoint
from nougat.utils.dataset import ImageDataset
from nougat.utils.device import move_to_device
from nougat import NougatModel
1,827
text = text.replace("$", "") text = text.replace("\[", "") text = text.replace("\]", "") text = text.replace("\(", "") text = text.replace("\)", "") text = text.strip() return text def score_text(predictions, references): bleu = evaluate.load("bleu") bleu_results = bleu.compute(predictions=predictions, references=references) meteor = evaluate.load('meteor') meteor_results = meteor.compute(predictions=predictions, references=references) lev_dist = [] for p, r in zip(predictions, references): lev_dist.append(Levenshtein.normalized_distance(p, r)) return { 'bleu': bleu_results["bleu"], 'meteor': meteor_results['meteor'], 'edit': sum(lev_dist) / len(lev_dist) } def image_to_pil(image): decoded = base64.b64decode(image) return Image.open(io.BytesIO(decoded)) def load_images(source_data): images = [sd["image"] for sd in source_data] images = [image_to_pil(image) for image in images] return images def inference_texify(source_data, model, processor): images = load_images(source_data) write_data = [] for i in tqdm(range(0, len(images), settings.BATCH_SIZE), desc="Texify inference"): batch = images[i:i+settings.BATCH_SIZE] text = batch_inference(batch, model, processor) for j, t in enumerate(text): eq_idx = i + j write_data.append({"text": t, "equation": source_data[eq_idx]["equation"]}) return write_data def inference_pix2tex(source_data): model = LatexOCR() images = load_images(source_data) write_data = [] for i in tqdm(range(len(images)), desc="Pix2tex inference"): try: text = model(images[i]) except ValueError: # Happens when resize fails text = "" write_data.append({"text": text, "equation": source_data[i]["equation"]}) return write_data def image_to_bmp(image): img_out = io.BytesIO() image.save(img_out, format="BMP") return img_out def inference_nougat(source_data, batch_size=1): # Load images, then convert to bmp format for nougat images = load_images(source_data) images = [image_to_bmp(image) for image in images] predictions = [] ckpt = get_checkpoint(None, model_tag="0.1.0-small") model = NougatModel.from_pretrained(ckpt) if settings.TORCH_DEVICE_MODEL != "cpu": move_to_device(model, bf16=settings.CUDA, cuda=settings.CUDA) model.eval() dataset = ImageDataset( images, partial(model.encoder.prepare_input, random_padding=False), ) # Batch sizes higher than 1 explode memory usage on CPU/MPS dataloader = torch.utils.data.DataLoader( dataset, batch_size=batch_size, pin_memory=True, shuffle=False, ) for idx, sample in tqdm(enumerate(dataloader), desc="Nougat inference", total=len(dataloader)): model.config.max_length = settings.MAX_TOKENS model_output = model.inference(image_tensors=sample, early_stopping=False) output = [markdown_compatible(o) for o in model_output["predictions"]] predictions.extend(output) return predictions def main(): parser = argparse.ArgumentParser(description="Benchmark the performance of texify.") parser.add_argument("--data_path", type=str, help="Path to JSON file with source images/equations", default=os.path.join(settings.DATA_DIR, "bench_data.json")) parser.add_argument("--result_path", type=str, help="Path to JSON file to save results to.", default=os.path.join(settings.DATA_DIR, "bench_results.json")) parser.add_argument("--max", type=int, help="Maximum number of images to benchmark.", default=None) parser.add_argument("--pix2tex", action="store_true", help="Run pix2tex scoring", default=False) parser.add_argument("--nougat", action="store_true", help="Run nougat scoring", default=False) args = parser.parse_args() source_path = os.path.abspath(args.data_path) 
result_path = os.path.abspath(args.result_path) os.makedirs(os.path.dirname(result_path), exist_ok=True)
def normalize_text(text): # Replace fences text = text.replace("$", "") text = text.replace("\[", "") text = text.replace("\]", "") text = text.replace("\(", "") text = text.replace("\)", "") text = text.strip() return text def score_text(predictions, references): bleu = evaluate.load("bleu") bleu_results = bleu.compute(predictions=predictions, references=references) meteor = evaluate.load('meteor') meteor_results = meteor.compute(predictions=predictions, references=references) lev_dist = [] for p, r in zip(predictions, references): lev_dist.append(Levenshtein.normalized_distance(p, r)) return { 'bleu': bleu_results["bleu"], 'meteor': meteor_results['meteor'], 'edit': sum(lev_dist) / len(lev_dist) } def image_to_pil(image): decoded = base64.b64decode(image) return Image.open(io.BytesIO(decoded)) def load_images(source_data): images = [sd["image"] for sd in source_data] images = [image_to_pil(image) for image in images] return images def inference_texify(source_data, model, processor): images = load_images(source_data) write_data = [] for i in tqdm(range(0, len(images), settings.BATCH_SIZE), desc="Texify inference"): batch = images[i:i+settings.BATCH_SIZE] text = batch_inference(batch, model, processor) for j, t in enumerate(text): eq_idx = i + j write_data.append({"text": t, "equation": source_data[eq_idx]["equation"]}) return write_data def inference_pix2tex(source_data): model = LatexOCR() images = load_images(source_data) write_data = [] for i in tqdm(range(len(images)), desc="Pix2tex inference"): try: text = model(images[i]) except ValueError: # Happens when resize fails text = "" write_data.append({"text": text, "equation": source_data[i]["equation"]}) return write_data def image_to_bmp(image): img_out = io.BytesIO() image.save(img_out, format="BMP") return img_out def inference_nougat(source_data, batch_size=1): # Load images, then convert to bmp format for nougat images = load_images(source_data) images = [image_to_bmp(image) for image in images] predictions = [] ckpt = get_checkpoint(None, model_tag="0.1.0-small") model = NougatModel.from_pretrained(ckpt) if settings.TORCH_DEVICE_MODEL != "cpu": move_to_device(model, bf16=settings.CUDA, cuda=settings.CUDA) model.eval() dataset = ImageDataset( images, partial(model.encoder.prepare_input, random_padding=False), ) # Batch sizes higher than 1 explode memory usage on CPU/MPS dataloader = torch.utils.data.DataLoader( dataset, batch_size=batch_size, pin_memory=True, shuffle=False, ) for idx, sample in tqdm(enumerate(dataloader), desc="Nougat inference", total=len(dataloader)): model.config.max_length = settings.MAX_TOKENS model_output = model.inference(image_tensors=sample, early_stopping=False) output = [markdown_compatible(o) for o in model_output["predictions"]] predictions.extend(output) return predictions def main(): parser = argparse.ArgumentParser(description="Benchmark the performance of texify.") parser.add_argument("--data_path", type=str, help="Path to JSON file with source images/equations", default=os.path.join(settings.DATA_DIR, "bench_data.json")) parser.add_argument("--result_path", type=str, help="Path to JSON file to save results to.", default=os.path.join(settings.DATA_DIR, "bench_results.json")) parser.add_argument("--max", type=int, help="Maximum number of images to benchmark.", default=None) parser.add_argument("--pix2tex", action="store_true", help="Run pix2tex scoring", default=False) parser.add_argument("--nougat", action="store_true", help="Run nougat scoring", default=False) args = parser.parse_args() 
source_path = os.path.abspath(args.data_path) result_path = os.path.abspath(args.result_path) os.makedirs(os.path.dirname(result_path), exist_ok=True)
model = load_model()
1
2023-12-18 22:59:58+00:00
4k
dcharatan/pixelsplat
src/model/decoder/cuda_splatting.py
[ { "identifier": "get_fov", "path": "src/geometry/projection.py", "snippet": "def get_fov(intrinsics: Float[Tensor, \"batch 3 3\"]) -> Float[Tensor, \"batch 2\"]:\n intrinsics_inv = intrinsics.inverse()\n\n def process_vector(vector):\n vector = torch.tensor(vector, dtype=torch.float32, device=intrinsics.device)\n vector = einsum(intrinsics_inv, vector, \"b i j, j -> b i\")\n return vector / vector.norm(dim=-1, keepdim=True)\n\n left = process_vector([0, 0.5, 1])\n right = process_vector([1, 0.5, 1])\n top = process_vector([0.5, 0, 1])\n bottom = process_vector([0.5, 1, 1])\n fov_x = (left * right).sum(dim=-1).acos()\n fov_y = (top * bottom).sum(dim=-1).acos()\n return torch.stack((fov_x, fov_y), dim=-1)" }, { "identifier": "homogenize_points", "path": "src/geometry/projection.py", "snippet": "def homogenize_points(\n points: Float[Tensor, \"*batch dim\"],\n) -> Float[Tensor, \"*batch dim+1\"]:\n \"\"\"Convert batched points (xyz) to (xyz1).\"\"\"\n return torch.cat([points, torch.ones_like(points[..., :1])], dim=-1)" }, { "identifier": "depth_to_relative_disparity", "path": "src/model/encoder/epipolar/conversions.py", "snippet": "def depth_to_relative_disparity(\n depth: Float[Tensor, \"*#batch\"],\n near: Float[Tensor, \"*#batch\"],\n far: Float[Tensor, \"*#batch\"],\n eps: float = 1e-10,\n) -> Float[Tensor, \" *batch\"]:\n \"\"\"Convert depth to relative disparity, where 0 is near and 1 is far\"\"\"\n disp_near = 1 / (near + eps)\n disp_far = 1 / (far + eps)\n disp = 1 / (depth + eps)\n return 1 - (disp - disp_far) / (disp_near - disp_far + eps)" } ]
from math import isqrt
from typing import Literal
from diff_gaussian_rasterization import (
    GaussianRasterizationSettings,
    GaussianRasterizer,
)
from einops import einsum, rearrange, repeat
from jaxtyping import Float
from torch import Tensor
from ...geometry.projection import get_fov, homogenize_points
from ..encoder.epipolar.conversions import depth_to_relative_disparity
import torch
2,894
return torch.stack(all_images) def render_cuda_orthographic( extrinsics: Float[Tensor, "batch 4 4"], width: Float[Tensor, " batch"], height: Float[Tensor, " batch"], near: Float[Tensor, " batch"], far: Float[Tensor, " batch"], image_shape: tuple[int, int], background_color: Float[Tensor, "batch 3"], gaussian_means: Float[Tensor, "batch gaussian 3"], gaussian_covariances: Float[Tensor, "batch gaussian 3 3"], gaussian_sh_coefficients: Float[Tensor, "batch gaussian 3 d_sh"], gaussian_opacities: Float[Tensor, "batch gaussian"], fov_degrees: float = 0.1, use_sh: bool = True, dump: dict | None = None, ) -> Float[Tensor, "batch 3 height width"]: b, _, _ = extrinsics.shape h, w = image_shape assert use_sh or gaussian_sh_coefficients.shape[-1] == 1 _, _, _, n = gaussian_sh_coefficients.shape degree = isqrt(n) - 1 shs = rearrange(gaussian_sh_coefficients, "b g xyz n -> b g n xyz").contiguous() # Create fake "orthographic" projection by moving the camera back and picking a # small field of view. fov_x = torch.tensor(fov_degrees, device=extrinsics.device).deg2rad() tan_fov_x = (0.5 * fov_x).tan() distance_to_near = (0.5 * width) / tan_fov_x tan_fov_y = 0.5 * height / distance_to_near fov_y = (2 * tan_fov_y).atan() near = near + distance_to_near far = far + distance_to_near move_back = torch.eye(4, dtype=torch.float32, device=extrinsics.device) move_back[2, 3] = -distance_to_near extrinsics = extrinsics @ move_back # Escape hatch for visualization/figures. if dump is not None: dump["extrinsics"] = extrinsics dump["fov_x"] = fov_x dump["fov_y"] = fov_y dump["near"] = near dump["far"] = far projection_matrix = get_projection_matrix( near, far, repeat(fov_x, "-> b", b=b), fov_y ) projection_matrix = rearrange(projection_matrix, "b i j -> b j i") view_matrix = rearrange(extrinsics.inverse(), "b i j -> b j i") full_projection = view_matrix @ projection_matrix all_images = [] all_radii = [] for i in range(b): # Set up a tensor for the gradients of the screen-space means. mean_gradients = torch.zeros_like(gaussian_means[i], requires_grad=True) try: mean_gradients.retain_grad() except Exception: pass settings = GaussianRasterizationSettings( image_height=h, image_width=w, tanfovx=tan_fov_x, tanfovy=tan_fov_y, bg=background_color[i], scale_modifier=1.0, viewmatrix=view_matrix[i], projmatrix=full_projection[i], sh_degree=degree, campos=extrinsics[i, :3, 3], prefiltered=False, # This matches the original usage. debug=False, ) rasterizer = GaussianRasterizer(settings) row, col = torch.triu_indices(3, 3) image, radii = rasterizer( means3D=gaussian_means[i], means2D=mean_gradients, shs=shs[i] if use_sh else None, colors_precomp=None if use_sh else shs[i, :, 0, :], opacities=gaussian_opacities[i, ..., None], cov3D_precomp=gaussian_covariances[i, :, row, col], ) all_images.append(image) all_radii.append(radii) return torch.stack(all_images) DepthRenderingMode = Literal["depth", "disparity", "relative_disparity", "log"] def render_depth_cuda( extrinsics: Float[Tensor, "batch 4 4"], intrinsics: Float[Tensor, "batch 3 3"], near: Float[Tensor, " batch"], far: Float[Tensor, " batch"], image_shape: tuple[int, int], gaussian_means: Float[Tensor, "batch gaussian 3"], gaussian_covariances: Float[Tensor, "batch gaussian 3 3"], gaussian_opacities: Float[Tensor, "batch gaussian"], scale_invariant: bool = True, mode: DepthRenderingMode = "depth", ) -> Float[Tensor, "batch height width"]: # Specify colors according to Gaussian depths. 
camera_space_gaussians = einsum( extrinsics.inverse(), homogenize_points(gaussian_means), "b i j, b g j -> b g i" ) fake_color = camera_space_gaussians[..., 2] if mode == "disparity": fake_color = 1 / fake_color elif mode == "relative_disparity":
def get_projection_matrix( near: Float[Tensor, " batch"], far: Float[Tensor, " batch"], fov_x: Float[Tensor, " batch"], fov_y: Float[Tensor, " batch"], ) -> Float[Tensor, "batch 4 4"]: """Maps points in the viewing frustum to (-1, 1) on the X/Y axes and (0, 1) on the Z axis. Differs from the OpenGL version in that Z doesn't have range (-1, 1) after transformation and that Z is flipped. """ tan_fov_x = (0.5 * fov_x).tan() tan_fov_y = (0.5 * fov_y).tan() top = tan_fov_y * near bottom = -top right = tan_fov_x * near left = -right (b,) = near.shape result = torch.zeros((b, 4, 4), dtype=torch.float32, device=near.device) result[:, 0, 0] = 2 * near / (right - left) result[:, 1, 1] = 2 * near / (top - bottom) result[:, 0, 2] = (right + left) / (right - left) result[:, 1, 2] = (top + bottom) / (top - bottom) result[:, 3, 2] = 1 result[:, 2, 2] = far / (far - near) result[:, 2, 3] = -(far * near) / (far - near) return result def render_cuda( extrinsics: Float[Tensor, "batch 4 4"], intrinsics: Float[Tensor, "batch 3 3"], near: Float[Tensor, " batch"], far: Float[Tensor, " batch"], image_shape: tuple[int, int], background_color: Float[Tensor, "batch 3"], gaussian_means: Float[Tensor, "batch gaussian 3"], gaussian_covariances: Float[Tensor, "batch gaussian 3 3"], gaussian_sh_coefficients: Float[Tensor, "batch gaussian 3 d_sh"], gaussian_opacities: Float[Tensor, "batch gaussian"], scale_invariant: bool = True, use_sh: bool = True, ) -> Float[Tensor, "batch 3 height width"]: assert use_sh or gaussian_sh_coefficients.shape[-1] == 1 # Make sure everything is in a range where numerical issues don't appear. if scale_invariant: scale = 1 / near extrinsics = extrinsics.clone() extrinsics[..., :3, 3] = extrinsics[..., :3, 3] * scale[:, None] gaussian_covariances = gaussian_covariances * (scale[:, None, None, None] ** 2) gaussian_means = gaussian_means * scale[:, None, None] near = near * scale far = far * scale _, _, _, n = gaussian_sh_coefficients.shape degree = isqrt(n) - 1 shs = rearrange(gaussian_sh_coefficients, "b g xyz n -> b g n xyz").contiguous() b, _, _ = extrinsics.shape h, w = image_shape fov_x, fov_y = get_fov(intrinsics).unbind(dim=-1) tan_fov_x = (0.5 * fov_x).tan() tan_fov_y = (0.5 * fov_y).tan() projection_matrix = get_projection_matrix(near, far, fov_x, fov_y) projection_matrix = rearrange(projection_matrix, "b i j -> b j i") view_matrix = rearrange(extrinsics.inverse(), "b i j -> b j i") full_projection = view_matrix @ projection_matrix all_images = [] all_radii = [] for i in range(b): # Set up a tensor for the gradients of the screen-space means. mean_gradients = torch.zeros_like(gaussian_means[i], requires_grad=True) try: mean_gradients.retain_grad() except Exception: pass settings = GaussianRasterizationSettings( image_height=h, image_width=w, tanfovx=tan_fov_x[i].item(), tanfovy=tan_fov_y[i].item(), bg=background_color[i], scale_modifier=1.0, viewmatrix=view_matrix[i], projmatrix=full_projection[i], sh_degree=degree, campos=extrinsics[i, :3, 3], prefiltered=False, # This matches the original usage. 
debug=False, ) rasterizer = GaussianRasterizer(settings) row, col = torch.triu_indices(3, 3) image, radii = rasterizer( means3D=gaussian_means[i], means2D=mean_gradients, shs=shs[i] if use_sh else None, colors_precomp=None if use_sh else shs[i, :, 0, :], opacities=gaussian_opacities[i, ..., None], cov3D_precomp=gaussian_covariances[i, :, row, col], ) all_images.append(image) all_radii.append(radii) return torch.stack(all_images) def render_cuda_orthographic( extrinsics: Float[Tensor, "batch 4 4"], width: Float[Tensor, " batch"], height: Float[Tensor, " batch"], near: Float[Tensor, " batch"], far: Float[Tensor, " batch"], image_shape: tuple[int, int], background_color: Float[Tensor, "batch 3"], gaussian_means: Float[Tensor, "batch gaussian 3"], gaussian_covariances: Float[Tensor, "batch gaussian 3 3"], gaussian_sh_coefficients: Float[Tensor, "batch gaussian 3 d_sh"], gaussian_opacities: Float[Tensor, "batch gaussian"], fov_degrees: float = 0.1, use_sh: bool = True, dump: dict | None = None, ) -> Float[Tensor, "batch 3 height width"]: b, _, _ = extrinsics.shape h, w = image_shape assert use_sh or gaussian_sh_coefficients.shape[-1] == 1 _, _, _, n = gaussian_sh_coefficients.shape degree = isqrt(n) - 1 shs = rearrange(gaussian_sh_coefficients, "b g xyz n -> b g n xyz").contiguous() # Create fake "orthographic" projection by moving the camera back and picking a # small field of view. fov_x = torch.tensor(fov_degrees, device=extrinsics.device).deg2rad() tan_fov_x = (0.5 * fov_x).tan() distance_to_near = (0.5 * width) / tan_fov_x tan_fov_y = 0.5 * height / distance_to_near fov_y = (2 * tan_fov_y).atan() near = near + distance_to_near far = far + distance_to_near move_back = torch.eye(4, dtype=torch.float32, device=extrinsics.device) move_back[2, 3] = -distance_to_near extrinsics = extrinsics @ move_back # Escape hatch for visualization/figures. if dump is not None: dump["extrinsics"] = extrinsics dump["fov_x"] = fov_x dump["fov_y"] = fov_y dump["near"] = near dump["far"] = far projection_matrix = get_projection_matrix( near, far, repeat(fov_x, "-> b", b=b), fov_y ) projection_matrix = rearrange(projection_matrix, "b i j -> b j i") view_matrix = rearrange(extrinsics.inverse(), "b i j -> b j i") full_projection = view_matrix @ projection_matrix all_images = [] all_radii = [] for i in range(b): # Set up a tensor for the gradients of the screen-space means. mean_gradients = torch.zeros_like(gaussian_means[i], requires_grad=True) try: mean_gradients.retain_grad() except Exception: pass settings = GaussianRasterizationSettings( image_height=h, image_width=w, tanfovx=tan_fov_x, tanfovy=tan_fov_y, bg=background_color[i], scale_modifier=1.0, viewmatrix=view_matrix[i], projmatrix=full_projection[i], sh_degree=degree, campos=extrinsics[i, :3, 3], prefiltered=False, # This matches the original usage. 
debug=False, ) rasterizer = GaussianRasterizer(settings) row, col = torch.triu_indices(3, 3) image, radii = rasterizer( means3D=gaussian_means[i], means2D=mean_gradients, shs=shs[i] if use_sh else None, colors_precomp=None if use_sh else shs[i, :, 0, :], opacities=gaussian_opacities[i, ..., None], cov3D_precomp=gaussian_covariances[i, :, row, col], ) all_images.append(image) all_radii.append(radii) return torch.stack(all_images) DepthRenderingMode = Literal["depth", "disparity", "relative_disparity", "log"] def render_depth_cuda( extrinsics: Float[Tensor, "batch 4 4"], intrinsics: Float[Tensor, "batch 3 3"], near: Float[Tensor, " batch"], far: Float[Tensor, " batch"], image_shape: tuple[int, int], gaussian_means: Float[Tensor, "batch gaussian 3"], gaussian_covariances: Float[Tensor, "batch gaussian 3 3"], gaussian_opacities: Float[Tensor, "batch gaussian"], scale_invariant: bool = True, mode: DepthRenderingMode = "depth", ) -> Float[Tensor, "batch height width"]: # Specify colors according to Gaussian depths. camera_space_gaussians = einsum( extrinsics.inverse(), homogenize_points(gaussian_means), "b i j, b g j -> b g i" ) fake_color = camera_space_gaussians[..., 2] if mode == "disparity": fake_color = 1 / fake_color elif mode == "relative_disparity":
fake_color = depth_to_relative_disparity(
2
2023-12-20 19:45:59+00:00
4k
FoundationVision/GLEE
app/GLEE/glee/backbone/resnet.py
[ { "identifier": "Backbone", "path": "app/GLEE/glee/backbone/backbone.py", "snippet": "class Backbone(nn.Module):\n \"\"\"\n Abstract base class for network backbones.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n The `__init__` method of any subclass can specify its own set of arguments.\n \"\"\"\n super().__init__()\n\n def forward(self):\n \"\"\"\n Subclasses must override this method, but adhere to the same return type.\n\n Returns:\n dict[str->Tensor]: mapping from feature name (e.g., \"res2\") to tensor\n \"\"\"\n pass\n\n @property\n def size_divisibility(self) -> int:\n \"\"\"\n Some backbones require the input height and width to be divisible by a\n specific integer. This is typically true for encoder / decoder type networks\n with lateral connection (e.g., FPN) for which feature maps need to match\n dimension in the \"bottom up\" and \"top down\" paths. Set to 0 if no specific\n input size divisibility is required.\n \"\"\"\n return 0\n\n def output_shape(self):\n \"\"\"\n Returns:\n dict[str->ShapeSpec]\n \"\"\"\n # this is a backward-compatible default\n return {\n name: ShapeSpec(\n channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]\n )\n for name in self._out_features\n }" }, { "identifier": "register_backbone", "path": "app/GLEE/glee/backbone/registry.py", "snippet": "def register_backbone(fn):\n module_name_split = fn.__module__.split('.')\n model_name = module_name_split[-1]\n _model_entrypoints[model_name] = fn\n return fn" } ]
import pickle
import numpy as np
import fvcore.nn.weight_init as weight_init
import torch
import torch.nn.functional as F
from typing import Any, Dict
from torch import nn
from .backbone import Backbone
from .registry import register_backbone
from detectron2.layers import (
    CNNBlockBase,
    Conv2d,
    DeformConv,
    ModulatedDeformConv,
    ShapeSpec,
    get_norm,
)
from detectron2.utils.file_io import PathManager
3,089
Examples: :: stage = ResNet.make_stage( BottleneckBlock, 3, in_channels=16, out_channels=64, bottleneck_channels=16, num_groups=1, stride_per_block=[2, 1, 1], dilations_per_block=[1, 1, 2] ) Usually, layers that produce the same feature map spatial size are defined as one "stage" (in :paper:`FPN`). Under such definition, ``stride_per_block[1:]`` should all be 1. """ blocks = [] for i in range(num_blocks): curr_kwargs = {} for k, v in kwargs.items(): if k.endswith("_per_block"): assert len(v) == num_blocks, ( f"Argument '{k}' of make_stage should have the " f"same length as num_blocks={num_blocks}." ) newk = k[: -len("_per_block")] assert newk not in kwargs, f"Cannot call make_stage with both {k} and {newk}!" curr_kwargs[newk] = v[i] else: curr_kwargs[k] = v blocks.append( block_class(in_channels=in_channels, out_channels=out_channels, **curr_kwargs) ) in_channels = out_channels return blocks @staticmethod def make_default_stages(depth, block_class=None, **kwargs): """ Created list of ResNet stages from pre-defined depth (one of 18, 34, 50, 101, 152). If it doesn't create the ResNet variant you need, please use :meth:`make_stage` instead for fine-grained customization. Args: depth (int): depth of ResNet block_class (type): the CNN block class. Has to accept `bottleneck_channels` argument for depth > 50. By default it is BasicBlock or BottleneckBlock, based on the depth. kwargs: other arguments to pass to `make_stage`. Should not contain stride and channels, as they are predefined for each depth. Returns: list[list[CNNBlockBase]]: modules in all stages; see arguments of :class:`ResNet.__init__`. """ num_blocks_per_stage = { 18: [2, 2, 2, 2], 34: [3, 4, 6, 3], 50: [3, 4, 6, 3], 101: [3, 4, 23, 3], 152: [3, 8, 36, 3], }[depth] if block_class is None: block_class = BasicBlock if depth < 50 else BottleneckBlock if depth < 50: in_channels = [64, 64, 128, 256] out_channels = [64, 128, 256, 512] else: in_channels = [64, 256, 512, 1024] out_channels = [256, 512, 1024, 2048] ret = [] for (n, s, i, o) in zip(num_blocks_per_stage, [1, 2, 2, 2], in_channels, out_channels): if depth >= 50: kwargs["bottleneck_channels"] = o // 4 ret.append( ResNet.make_stage( block_class=block_class, num_blocks=n, stride_per_block=[s] + [1] * (n - 1), in_channels=i, out_channels=o, **kwargs, ) ) return ret ResNetBlockBase = CNNBlockBase """ Alias for backward compatibiltiy. """ def make_stage(*args, **kwargs): """ Deprecated alias for backward compatibiltiy. """ return ResNet.make_stage(*args, **kwargs) def _convert_ndarray_to_tensor(state_dict: Dict[str, Any]) -> None: """ In-place convert all numpy arrays in the state_dict to torch tensor. Args: state_dict (dict): a state-dict to be loaded to the model. Will be modified. """ # model could be an OrderedDict with _metadata attribute # (as returned by Pytorch's state_dict()). We should preserve these # properties. for k in list(state_dict.keys()): v = state_dict[k] if not isinstance(v, np.ndarray) and not isinstance(v, torch.Tensor): raise ValueError( "Unsupported type found in checkpoint! {}: {}".format(k, type(v)) ) if not isinstance(v, torch.Tensor): state_dict[k] = torch.from_numpy(v)
# Copyright (c) Facebook, Inc. and its affiliates. __all__ = [ "ResNetBlockBase", "BasicBlock", "BottleneckBlock", "DeformBottleneckBlock", "BasicStem", "ResNet", "make_stage", "get_resnet_backbone", ] class BasicBlock(CNNBlockBase): """ The basic residual block for ResNet-18 and ResNet-34 defined in :paper:`ResNet`, with two 3x3 conv layers and a projection shortcut if needed. """ def __init__(self, in_channels, out_channels, *, stride=1, norm="BN"): """ Args: in_channels (int): Number of input channels. out_channels (int): Number of output channels. stride (int): Stride for the first conv. norm (str or callable): normalization for all conv layers. See :func:`layers.get_norm` for supported format. """ super().__init__(in_channels, out_channels, stride) if in_channels != out_channels: self.shortcut = Conv2d( in_channels, out_channels, kernel_size=1, stride=stride, bias=False, norm=get_norm(norm, out_channels), ) else: self.shortcut = None self.conv1 = Conv2d( in_channels, out_channels, kernel_size=3, stride=stride, padding=1, bias=False, norm=get_norm(norm, out_channels), ) self.conv2 = Conv2d( out_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=False, norm=get_norm(norm, out_channels), ) for layer in [self.conv1, self.conv2, self.shortcut]: if layer is not None: # shortcut can be None weight_init.c2_msra_fill(layer) def forward(self, x): out = self.conv1(x) out = F.relu_(out) out = self.conv2(out) if self.shortcut is not None: shortcut = self.shortcut(x) else: shortcut = x out += shortcut out = F.relu_(out) return out class BottleneckBlock(CNNBlockBase): """ The standard bottleneck residual block used by ResNet-50, 101 and 152 defined in :paper:`ResNet`. It contains 3 conv layers with kernels 1x1, 3x3, 1x1, and a projection shortcut if needed. """ def __init__( self, in_channels, out_channels, *, bottleneck_channels, stride=1, num_groups=1, norm="BN", stride_in_1x1=False, dilation=1, ): """ Args: bottleneck_channels (int): number of output channels for the 3x3 "bottleneck" conv layers. num_groups (int): number of groups for the 3x3 conv layer. norm (str or callable): normalization for all conv layers. See :func:`layers.get_norm` for supported format. stride_in_1x1 (bool): when stride>1, whether to put stride in the first 1x1 convolution or the bottleneck 3x3 convolution. dilation (int): the dilation rate of the 3x3 conv layer. 
""" super().__init__(in_channels, out_channels, stride) if in_channels != out_channels: self.shortcut = Conv2d( in_channels, out_channels, kernel_size=1, stride=stride, bias=False, norm=get_norm(norm, out_channels), ) else: self.shortcut = None # The original MSRA ResNet models have stride in the first 1x1 conv # The subsequent fb.torch.resnet and Caffe2 ResNe[X]t implementations have # stride in the 3x3 conv stride_1x1, stride_3x3 = (stride, 1) if stride_in_1x1 else (1, stride) self.conv1 = Conv2d( in_channels, bottleneck_channels, kernel_size=1, stride=stride_1x1, bias=False, norm=get_norm(norm, bottleneck_channels), ) self.conv2 = Conv2d( bottleneck_channels, bottleneck_channels, kernel_size=3, stride=stride_3x3, padding=1 * dilation, bias=False, groups=num_groups, dilation=dilation, norm=get_norm(norm, bottleneck_channels), ) self.conv3 = Conv2d( bottleneck_channels, out_channels, kernel_size=1, bias=False, norm=get_norm(norm, out_channels), ) for layer in [self.conv1, self.conv2, self.conv3, self.shortcut]: if layer is not None: # shortcut can be None weight_init.c2_msra_fill(layer) # Zero-initialize the last normalization in each residual branch, # so that at the beginning, the residual branch starts with zeros, # and each residual block behaves like an identity. # See Sec 5.1 in "Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour": # "For BN layers, the learnable scaling coefficient γ is initialized # to be 1, except for each residual block's last BN # where γ is initialized to be 0." # nn.init.constant_(self.conv3.norm.weight, 0) # TODO this somehow hurts performance when training GN models from scratch. # Add it as an option when we need to use this code to train a backbone. def forward(self, x): out = self.conv1(x) out = F.relu_(out) out = self.conv2(out) out = F.relu_(out) out = self.conv3(out) if self.shortcut is not None: shortcut = self.shortcut(x) else: shortcut = x out += shortcut out = F.relu_(out) return out class DeformBottleneckBlock(CNNBlockBase): """ Similar to :class:`BottleneckBlock`, but with :paper:`deformable conv <deformconv>` in the 3x3 convolution. 
""" def __init__( self, in_channels, out_channels, *, bottleneck_channels, stride=1, num_groups=1, norm="BN", stride_in_1x1=False, dilation=1, deform_modulated=False, deform_num_groups=1, ): super().__init__(in_channels, out_channels, stride) self.deform_modulated = deform_modulated if in_channels != out_channels: self.shortcut = Conv2d( in_channels, out_channels, kernel_size=1, stride=stride, bias=False, norm=get_norm(norm, out_channels), ) else: self.shortcut = None stride_1x1, stride_3x3 = (stride, 1) if stride_in_1x1 else (1, stride) self.conv1 = Conv2d( in_channels, bottleneck_channels, kernel_size=1, stride=stride_1x1, bias=False, norm=get_norm(norm, bottleneck_channels), ) if deform_modulated: deform_conv_op = ModulatedDeformConv # offset channels are 2 or 3 (if with modulated) * kernel_size * kernel_size offset_channels = 27 else: deform_conv_op = DeformConv offset_channels = 18 self.conv2_offset = Conv2d( bottleneck_channels, offset_channels * deform_num_groups, kernel_size=3, stride=stride_3x3, padding=1 * dilation, dilation=dilation, ) self.conv2 = deform_conv_op( bottleneck_channels, bottleneck_channels, kernel_size=3, stride=stride_3x3, padding=1 * dilation, bias=False, groups=num_groups, dilation=dilation, deformable_groups=deform_num_groups, norm=get_norm(norm, bottleneck_channels), ) self.conv3 = Conv2d( bottleneck_channels, out_channels, kernel_size=1, bias=False, norm=get_norm(norm, out_channels), ) for layer in [self.conv1, self.conv2, self.conv3, self.shortcut]: if layer is not None: # shortcut can be None weight_init.c2_msra_fill(layer) nn.init.constant_(self.conv2_offset.weight, 0) nn.init.constant_(self.conv2_offset.bias, 0) def forward(self, x): out = self.conv1(x) out = F.relu_(out) if self.deform_modulated: offset_mask = self.conv2_offset(out) offset_x, offset_y, mask = torch.chunk(offset_mask, 3, dim=1) offset = torch.cat((offset_x, offset_y), dim=1) mask = mask.sigmoid() out = self.conv2(out, offset, mask) else: offset = self.conv2_offset(out) out = self.conv2(out, offset) out = F.relu_(out) out = self.conv3(out) if self.shortcut is not None: shortcut = self.shortcut(x) else: shortcut = x out += shortcut out = F.relu_(out) return out class BasicStem(CNNBlockBase): """ The standard ResNet stem (layers before the first residual block), with a conv, relu and max_pool. """ def __init__(self, in_channels=3, out_channels=64, norm="BN"): """ Args: norm (str or callable): norm after the first conv layer. See :func:`layers.get_norm` for supported format. """ super().__init__(in_channels, out_channels, 4) self.in_channels = in_channels self.conv1 = Conv2d( in_channels, out_channels, kernel_size=7, stride=2, padding=3, bias=False, norm=get_norm(norm, out_channels), ) weight_init.c2_msra_fill(self.conv1) def forward(self, x): x = self.conv1(x) x = F.relu_(x) x = F.max_pool2d(x, kernel_size=3, stride=2, padding=1) return x class ResNet(Backbone): """ Implement :paper:`ResNet`. """ def __init__(self, stem, stages, num_classes=None, out_features=None, freeze_at=0): """ Args: stem (nn.Module): a stem module stages (list[list[CNNBlockBase]]): several (typically 4) stages, each contains multiple :class:`CNNBlockBase`. num_classes (None or int): if None, will not perform classification. Otherwise, will create a linear layer. out_features (list[str]): name of the layers whose outputs should be returned in forward. Can be anything in "stem", "linear", or "res2" ... If None, will return the output of the last layer. freeze_at (int): The number of stages at the beginning to freeze. 
see :meth:`freeze` for detailed explanation. """ super().__init__() self.stem = stem self.num_classes = num_classes current_stride = self.stem.stride self._out_feature_strides = {"stem": current_stride} self._out_feature_channels = {"stem": self.stem.out_channels} self.stage_names, self.stages = [], [] if out_features is not None: # Avoid keeping unused layers in this module. They consume extra memory # and may cause allreduce to fail num_stages = max( [{"res2": 1, "res3": 2, "res4": 3, "res5": 4}.get(f, 0) for f in out_features] ) stages = stages[:num_stages] for i, blocks in enumerate(stages): assert len(blocks) > 0, len(blocks) for block in blocks: assert isinstance(block, CNNBlockBase), block name = "res" + str(i + 2) stage = nn.Sequential(*blocks) self.add_module(name, stage) self.stage_names.append(name) self.stages.append(stage) self._out_feature_strides[name] = current_stride = int( current_stride * np.prod([k.stride for k in blocks]) ) self._out_feature_channels[name] = curr_channels = blocks[-1].out_channels self.stage_names = tuple(self.stage_names) # Make it static for scripting if num_classes is not None: self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) self.linear = nn.Linear(curr_channels, num_classes) # Sec 5.1 in "Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour": # "The 1000-way fully-connected layer is initialized by # drawing weights from a zero-mean Gaussian with standard deviation of 0.01." nn.init.normal_(self.linear.weight, std=0.01) name = "linear" if out_features is None: out_features = [name] self._out_features = out_features assert len(self._out_features) children = [x[0] for x in self.named_children()] for out_feature in self._out_features: assert out_feature in children, "Available children: {}".format(", ".join(children)) self.freeze(freeze_at) def forward(self, x): """ Args: x: Tensor of shape (N,C,H,W). H, W must be a multiple of ``self.size_divisibility``. Returns: dict[str->Tensor]: names and the corresponding features """ assert x.dim() == 4, f"ResNet takes an input of shape (N, C, H, W). Got {x.shape} instead!" outputs = {} x = self.stem(x) if "stem" in self._out_features: outputs["stem"] = x for name, stage in zip(self.stage_names, self.stages): x = stage(x) if name in self._out_features: outputs[name] = x if self.num_classes is not None: x = self.avgpool(x) x = torch.flatten(x, 1) x = self.linear(x) if "linear" in self._out_features: outputs["linear"] = x return outputs def output_shape(self): return { name: ShapeSpec( channels=self._out_feature_channels[name], stride=self._out_feature_strides[name] ) for name in self._out_features } def freeze(self, freeze_at=0): """ Freeze the first several stages of the ResNet. Commonly used in fine-tuning. Layers that produce the same feature map spatial size are defined as one "stage" by :paper:`FPN`. Args: freeze_at (int): number of stages to freeze. `1` means freezing the stem. `2` means freezing the stem and one residual stage, etc. Returns: nn.Module: this ResNet itself """ if freeze_at >= 1: self.stem.freeze() for idx, stage in enumerate(self.stages, start=2): if freeze_at >= idx: for block in stage.children(): block.freeze() return self @staticmethod def make_stage(block_class, num_blocks, *, in_channels, out_channels, **kwargs): """ Create a list of blocks of the same type that forms one ResNet stage. Args: block_class (type): a subclass of CNNBlockBase that's used to create all blocks in this stage. A module of this type must not change spatial resolution of inputs unless its stride != 1. 
num_blocks (int): number of blocks in this stage in_channels (int): input channels of the entire stage. out_channels (int): output channels of **every block** in the stage. kwargs: other arguments passed to the constructor of `block_class`. If the argument name is "xx_per_block", the argument is a list of values to be passed to each block in the stage. Otherwise, the same argument is passed to every block in the stage. Returns: list[CNNBlockBase]: a list of block module. Examples: :: stage = ResNet.make_stage( BottleneckBlock, 3, in_channels=16, out_channels=64, bottleneck_channels=16, num_groups=1, stride_per_block=[2, 1, 1], dilations_per_block=[1, 1, 2] ) Usually, layers that produce the same feature map spatial size are defined as one "stage" (in :paper:`FPN`). Under such definition, ``stride_per_block[1:]`` should all be 1. """ blocks = [] for i in range(num_blocks): curr_kwargs = {} for k, v in kwargs.items(): if k.endswith("_per_block"): assert len(v) == num_blocks, ( f"Argument '{k}' of make_stage should have the " f"same length as num_blocks={num_blocks}." ) newk = k[: -len("_per_block")] assert newk not in kwargs, f"Cannot call make_stage with both {k} and {newk}!" curr_kwargs[newk] = v[i] else: curr_kwargs[k] = v blocks.append( block_class(in_channels=in_channels, out_channels=out_channels, **curr_kwargs) ) in_channels = out_channels return blocks @staticmethod def make_default_stages(depth, block_class=None, **kwargs): """ Created list of ResNet stages from pre-defined depth (one of 18, 34, 50, 101, 152). If it doesn't create the ResNet variant you need, please use :meth:`make_stage` instead for fine-grained customization. Args: depth (int): depth of ResNet block_class (type): the CNN block class. Has to accept `bottleneck_channels` argument for depth > 50. By default it is BasicBlock or BottleneckBlock, based on the depth. kwargs: other arguments to pass to `make_stage`. Should not contain stride and channels, as they are predefined for each depth. Returns: list[list[CNNBlockBase]]: modules in all stages; see arguments of :class:`ResNet.__init__`. """ num_blocks_per_stage = { 18: [2, 2, 2, 2], 34: [3, 4, 6, 3], 50: [3, 4, 6, 3], 101: [3, 4, 23, 3], 152: [3, 8, 36, 3], }[depth] if block_class is None: block_class = BasicBlock if depth < 50 else BottleneckBlock if depth < 50: in_channels = [64, 64, 128, 256] out_channels = [64, 128, 256, 512] else: in_channels = [64, 256, 512, 1024] out_channels = [256, 512, 1024, 2048] ret = [] for (n, s, i, o) in zip(num_blocks_per_stage, [1, 2, 2, 2], in_channels, out_channels): if depth >= 50: kwargs["bottleneck_channels"] = o // 4 ret.append( ResNet.make_stage( block_class=block_class, num_blocks=n, stride_per_block=[s] + [1] * (n - 1), in_channels=i, out_channels=o, **kwargs, ) ) return ret ResNetBlockBase = CNNBlockBase """ Alias for backward compatibiltiy. """ def make_stage(*args, **kwargs): """ Deprecated alias for backward compatibiltiy. """ return ResNet.make_stage(*args, **kwargs) def _convert_ndarray_to_tensor(state_dict: Dict[str, Any]) -> None: """ In-place convert all numpy arrays in the state_dict to torch tensor. Args: state_dict (dict): a state-dict to be loaded to the model. Will be modified. """ # model could be an OrderedDict with _metadata attribute # (as returned by Pytorch's state_dict()). We should preserve these # properties. for k in list(state_dict.keys()): v = state_dict[k] if not isinstance(v, np.ndarray) and not isinstance(v, torch.Tensor): raise ValueError( "Unsupported type found in checkpoint! 
{}: {}".format(k, type(v)) ) if not isinstance(v, torch.Tensor): state_dict[k] = torch.from_numpy(v)
@register_backbone
1
2023-12-15 01:12:36+00:00
4k
nianhua99/PandoraNext-Helper
app.py
[ { "identifier": "ApiResponse", "path": "util/api_response.py", "snippet": "class ApiResponse:\n\n @staticmethod\n def success(data):\n return jsonify({\n 'status': 0,\n 'message': '请求成功',\n 'data': data\n })\n\n @staticmethod\n def error(message, status=-1):\n return jsonify({\n 'status': status,\n 'message': message\n }), 500\n\n @staticmethod\n def unauthorized(message):\n return jsonify({\n 'status': 444,\n 'message': message\n }), 444" }, { "identifier": "auth", "path": "auth/auth.py", "snippet": "def validate_hcaptcha_response(token):\ndef login2():\nDASHBOARD_PERMISSION = {\n 'id': '9710971640510357',\n 'parentId': '',\n 'label': 'sys.menu.analysis',\n 'name': 'Analysis',\n 'type': 1,\n 'route': 'home',\n 'icon': 'ic-analysis',\n 'order': 1,\n 'component': '/dashboard/analysis/index.tsx',\n}\nTOKEN_PERMISSION = {\n 'id': '9100714781927721',\n 'parentId': '',\n 'label': 'sys.menu.token',\n 'name': 'Token',\n 'icon': 'ph:key',\n 'type': 0,\n 'route': 'token',\n 'order': 2,\n 'children': [\n {\n 'id': '84269992294009655',\n 'parentId': '9100714781927721',\n 'label': 'sys.menu.account',\n 'name': 'Account',\n 'type': 1,\n 'route': 'account',\n 'component': '/token/account/index.tsx',\n },\n {\n 'id': '84269992294009656',\n 'parentId': '9100714781927721',\n 'hide': False,\n 'label': 'sys.menu.share',\n 'name': 'Share',\n 'type': 1,\n 'route': 'share',\n 'component': '/token/share/index.tsx',\n }\n ],\n}\nPERMISSION_LIST = [\n DASHBOARD_PERMISSION,\n TOKEN_PERMISSION,\n]\nADMIN_ROLE = {\n 'id': '4281707933534332',\n 'name': 'Admin',\n 'label': 'Admin',\n 'status': 1,\n 'order': 1,\n 'desc': 'Super Admin',\n 'permission': PERMISSION_LIST,\n}" }, { "identifier": "db", "path": "model.py", "snippet": "class User(db.Model):\n def keys(self):\n def __getitem__(self, item):\n def __repr__(self):" } ]
import json
import os
import re
import secrets
import account
import share
import sys_info
from datetime import date, datetime
from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
from flask import Flask, redirect, url_for, send_from_directory
from flask.json.provider import JSONProvider
from flask_bootstrap import Bootstrap5
from flask_migrate import Migrate
from flask_moment import Moment
from flask_apscheduler import APScheduler
from loguru import logger
from flask_jwt_extended import JWTManager
from util.api_response import ApiResponse
from auth import auth
from model import db
1,954
logger.error("请配置PandoraNext相关环境变量") exit(1) else: app.config.update( pandora_path=PANDORA_NEXT_PATH, pandora_domain=PANDORA_NEXT_DOMAIN ) with open(os.path.join(PANDORA_NEXT_PATH, 'config.json'), 'r') as f: config = json.load(f) # 检查setup_password是否已经配置和密码强度 # 密码强度要求:8-16位,包含数字、字母、特殊字符 logger.info(config) if config['setup_password'] is None: logger.error('请先配置setup_password') exit(1) elif re.match(r'^(?=.*[a-zA-Z])(?=.*\d).{8,}$', config['setup_password']) is None: logger.error('setup_password强度不符合要求,请重新配置') exit(1) app.config.update(setup_password=config['setup_password']) # 必须配置proxy_api_prefix,且不少于8位,同时包含字母和数字 if not config['proxy_api_prefix'] or re.match(r'^(?=.*[a-zA-Z])(?=.*\d).{8,}$', config['proxy_api_prefix']) is None: logger.error('请配置proxy_api_prefix') exit(1) app.config.update(proxy_api_prefix=config['proxy_api_prefix']) DISABLE_CAPTCHA = os.getenv('DISABLE_CAPTCHA') # 检查验证码是否已经配置 if DISABLE_CAPTCHA: logger.warning('已关闭验证码配置,建议您开启验证码') app.config.update( license_id=config['license_id'], captcha_enabled=False, ) elif config['captcha'] and config['captcha']['provider'] and config['captcha']['provider'] == 'hcaptcha': app.config.update( license_id=config['license_id'], captcha_enabled=True, captcha_provider=config['captcha']['provider'], captcha_site_key=config['captcha']['site_key'], captcha_secret_key=config['captcha']['site_secret'] ) else: logger.warning('未读取到有效的 hcaptcha 配置,建议您开启验证码') app.config.update( license_id=config['license_id'], captcha_enabled=False, ) check_require_config() # scheduler jobstore app.config['SCHEDULER_JOBSTORES'] = { 'default': SQLAlchemyJobStore(url='sqlite:///' + os.path.join(app.config['pandora_path'], DATABASE)) } scheduler = APScheduler() scheduler.init_app(app) scheduler.start() app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(app.config['pandora_path'], DATABASE) app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True db.init_app(app) def include_object(object, name, type_, reflected, compare_to): if ( type_ == "table" and name == "apscheduler_jobs" ): return False else: return True migrate = Migrate(include_object=include_object) migrate.init_app(app, db) def format_datetime(value): """Format a datetime to a string.""" if value is None: return "" return value.strftime('%Y-%m-%d %H:%M:%S') class JSONEncoder(json.JSONEncoder): def default(self, o): if isinstance(o, datetime): return o.strftime('%Y-%m-%d %H:%M:%S') elif isinstance(o, date): return o.strftime('%Y-%m-%d') elif hasattr(o, 'keys') and hasattr(o, '__getitem__'): return dict(o) raise TypeError(f'Object of type {o.__class__.__name__} ' f'is not JSON serializable') class StandardJSONProvider(JSONProvider): def dumps(self, obj, **kwargs): # 使用自定义的JSON编码器进行序列化 return json.dumps(obj, cls=JSONEncoder, **kwargs) def loads(self, s, **kwargs): return json.loads(s, **kwargs) app.json = StandardJSONProvider(app) @app.route('/', defaults={'path': ''}) @app.route('/<path:path>') def catch_all(path): return app.send_static_file("index.html") def create_app():
DATABASE = 'helper.db' app = Flask(__name__, static_folder='frontend/dist', static_url_path='/') Bootstrap5(app) Moment().init_app(app) # 生成随机的secret_key app.secret_key = secrets.token_hex(16) jwt = JWTManager(app) @jwt.unauthorized_loader def custom_unauthorized_callback(error_string): return ApiResponse.unauthorized(error_string, ) @jwt.invalid_token_loader def custom_invalid_token_callback(error_string): return ApiResponse.unauthorized(error_string, ) @jwt.expired_token_loader def custom_expired_token_callback(error_string, expired_token): return ApiResponse.unauthorized(error_string, ) # # @app.context_processor # def context_api_prefix(): # return dict(api_prefix='/api') def check_require_config(): PANDORA_NEXT_PATH = os.getenv('PANDORA_NEXT_PATH') # 如果PANDORA_NEXT_PATH 为空则检查/data下是否存在config.json if not PANDORA_NEXT_PATH: if os.path.exists('/data/config.json'): PANDORA_NEXT_PATH = '/data' else: logger.error("请配置PandoraNext相关环境变量") exit(1) PANDORA_NEXT_DOMAIN = os.getenv('PANDORA_NEXT_DOMAIN') if not PANDORA_NEXT_DOMAIN: logger.error("请配置PandoraNext相关环境变量") exit(1) else: app.config.update( pandora_path=PANDORA_NEXT_PATH, pandora_domain=PANDORA_NEXT_DOMAIN ) with open(os.path.join(PANDORA_NEXT_PATH, 'config.json'), 'r') as f: config = json.load(f) # 检查setup_password是否已经配置和密码强度 # 密码强度要求:8-16位,包含数字、字母、特殊字符 logger.info(config) if config['setup_password'] is None: logger.error('请先配置setup_password') exit(1) elif re.match(r'^(?=.*[a-zA-Z])(?=.*\d).{8,}$', config['setup_password']) is None: logger.error('setup_password强度不符合要求,请重新配置') exit(1) app.config.update(setup_password=config['setup_password']) # 必须配置proxy_api_prefix,且不少于8位,同时包含字母和数字 if not config['proxy_api_prefix'] or re.match(r'^(?=.*[a-zA-Z])(?=.*\d).{8,}$', config['proxy_api_prefix']) is None: logger.error('请配置proxy_api_prefix') exit(1) app.config.update(proxy_api_prefix=config['proxy_api_prefix']) DISABLE_CAPTCHA = os.getenv('DISABLE_CAPTCHA') # 检查验证码是否已经配置 if DISABLE_CAPTCHA: logger.warning('已关闭验证码配置,建议您开启验证码') app.config.update( license_id=config['license_id'], captcha_enabled=False, ) elif config['captcha'] and config['captcha']['provider'] and config['captcha']['provider'] == 'hcaptcha': app.config.update( license_id=config['license_id'], captcha_enabled=True, captcha_provider=config['captcha']['provider'], captcha_site_key=config['captcha']['site_key'], captcha_secret_key=config['captcha']['site_secret'] ) else: logger.warning('未读取到有效的 hcaptcha 配置,建议您开启验证码') app.config.update( license_id=config['license_id'], captcha_enabled=False, ) check_require_config() # scheduler jobstore app.config['SCHEDULER_JOBSTORES'] = { 'default': SQLAlchemyJobStore(url='sqlite:///' + os.path.join(app.config['pandora_path'], DATABASE)) } scheduler = APScheduler() scheduler.init_app(app) scheduler.start() app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(app.config['pandora_path'], DATABASE) app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True db.init_app(app) def include_object(object, name, type_, reflected, compare_to): if ( type_ == "table" and name == "apscheduler_jobs" ): return False else: return True migrate = Migrate(include_object=include_object) migrate.init_app(app, db) def format_datetime(value): """Format a datetime to a string.""" if value is None: return "" return value.strftime('%Y-%m-%d %H:%M:%S') class JSONEncoder(json.JSONEncoder): def default(self, o): if isinstance(o, datetime): return o.strftime('%Y-%m-%d %H:%M:%S') elif isinstance(o, date): return o.strftime('%Y-%m-%d') elif hasattr(o, 'keys') and hasattr(o, 
'__getitem__'): return dict(o) raise TypeError(f'Object of type {o.__class__.__name__} ' f'is not JSON serializable') class StandardJSONProvider(JSONProvider): def dumps(self, obj, **kwargs): # 使用自定义的JSON编码器进行序列化 return json.dumps(obj, cls=JSONEncoder, **kwargs) def loads(self, s, **kwargs): return json.loads(s, **kwargs) app.json = StandardJSONProvider(app) @app.route('/', defaults={'path': ''}) @app.route('/<path:path>') def catch_all(path): return app.send_static_file("index.html") def create_app():
app.register_blueprint(auth.auth_bp, url_prefix='/api')
1
2023-12-18 13:18:50+00:00
4k
SHI-Labs/VCoder
vcoder_llava/eval/model_vqa_mmbench.py
[ { "identifier": "IMAGE_TOKEN_INDEX", "path": "vcoder_llava/constants.py", "snippet": "IMAGE_TOKEN_INDEX = -200" }, { "identifier": "DEFAULT_IMAGE_TOKEN", "path": "vcoder_llava/constants.py", "snippet": "DEFAULT_IMAGE_TOKEN = \"<image>\"" }, { "identifier": "conv_templates", "path": "vcoder_llava/vcoder_conversation.py", "snippet": "class SeparatorStyle(Enum):\nclass VCoderConversation:\n SINGLE = auto()\n TWO = auto()\n MPT = auto()\n PLAIN = auto()\n LLAMA_2 = auto()\n W, H = image.size\n H, W = longest_edge, shortest_edge\n H, W = shortest_edge, longest_edge\n W, H = seg.size\n H, W = longest_edge, shortest_edge\n H, W = shortest_edge, longest_edge\n W, H = depth.size\n H, W = longest_edge, shortest_edge\n H, W = shortest_edge, longest_edge\n W, H = image.size\n H, W = longest_edge, shortest_edge\n H, W = shortest_edge, longest_edge\n W, H = seg.size\n H, W = longest_edge, shortest_edge\n H, W = shortest_edge, longest_edge\n W, H = depth.size\n H, W = longest_edge, shortest_edge\n H, W = shortest_edge, longest_edge\n def get_prompt(self):\n def append_message(self, role, message):\n def get_images(self, return_pil=False):\n def expand2square(pil_img, background_color=(122, 116, 104)):\n def get_segs(self, return_pil=False):\n def expand2square(pil_img, background_color=(122, 116, 104)):\n def get_depths(self, return_pil=False):\n def expand2square(pil_img, background_color=(122, 116, 104)):\n def to_gradio_chatbot(self):\n def copy(self):\n def dict(self):" }, { "identifier": "load_pretrained_model", "path": "vcoder_llava/model/builder.py", "snippet": "def load_pretrained_model(model_path, model_base, model_name, load_8bit=False, load_4bit=False, device_map=\"auto\", device=\"cuda\"):\n kwargs = {\"device_map\": device_map}\n\n if load_8bit:\n kwargs['load_in_8bit'] = True\n elif load_4bit:\n kwargs['load_in_4bit'] = True\n kwargs['quantization_config'] = BitsAndBytesConfig(\n load_in_4bit=True,\n bnb_4bit_compute_dtype=torch.float16,\n bnb_4bit_use_double_quant=True,\n bnb_4bit_quant_type='nf4'\n )\n else:\n kwargs['torch_dtype'] = torch.float16\n if 'llava' in model_name.lower():\n # Load LLaVA model\n if 'lora' in model_name.lower() and model_base is None:\n warnings.warn('There is `lora` in model name but no `model_base` is provided. If you are loading a LoRA model, please provide the `model_base` argument. 
Detailed instruction: https://github.com/haotian-liu/LLaVA#launch-a-model-worker-lora-weights-unmerged.')\n if 'lora' in model_name.lower() and model_base is not None:\n lora_cfg_pretrained = AutoConfig.from_pretrained(model_path)\n tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)\n if 'vcoder_it' in model_name.lower():\n print('Loading VCoder LLaVA from base model...')\n model = VCoderITLlavaLlamaForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, config=lora_cfg_pretrained, **kwargs)\n else:\n print('Loading LLaVA from base model...')\n model = LlavaLlamaForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, config=lora_cfg_pretrained, **kwargs)\n token_num, tokem_dim = model.lm_head.out_features, model.lm_head.in_features\n if model.lm_head.weight.shape[0] != token_num:\n model.lm_head.weight = torch.nn.Parameter(torch.empty(token_num, tokem_dim, device=model.device, dtype=model.dtype))\n model.model.embed_tokens.weight = torch.nn.Parameter(torch.empty(token_num, tokem_dim, device=model.device, dtype=model.dtype))\n\n print('Loading additional weights...')\n if os.path.exists(os.path.join(model_path, 'non_lora_trainables.bin')):\n non_lora_trainables = torch.load(os.path.join(model_path, 'non_lora_trainables.bin'), map_location='cpu')\n else:\n # this is probably from HF Hub\n from huggingface_hub import hf_hub_download\n def load_from_hf(repo_id, filename, subfolder=None):\n cache_file = hf_hub_download(\n repo_id=repo_id,\n filename=filename,\n subfolder=subfolder)\n return torch.load(cache_file, map_location='cpu')\n non_lora_trainables = load_from_hf(model_path, 'non_lora_trainables.bin')\n non_lora_trainables = {(k[11:] if k.startswith('base_model.') else k): v for k, v in non_lora_trainables.items()}\n if any(k.startswith('model.model.') for k in non_lora_trainables):\n non_lora_trainables = {(k[6:] if k.startswith('model.') else k): v for k, v in non_lora_trainables.items()}\n model.load_state_dict(non_lora_trainables, strict=False)\n\n from peft import PeftModel\n print('Loading LoRA weights...')\n model = PeftModel.from_pretrained(model, model_path)\n print('Merging LoRA weights...')\n model = model.merge_and_unload()\n print('Model is loaded...')\n elif model_base is not None:\n # this may be mm projector only\n print('Loading LLaVA from base model...')\n tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)\n cfg_pretrained = AutoConfig.from_pretrained(model_path)\n model = LlavaLlamaForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, config=cfg_pretrained, **kwargs)\n\n mm_projector_weights = torch.load(os.path.join(model_path, 'mm_projector.bin'), map_location='cpu')\n mm_projector_weights = {k: v.to(torch.float16) for k, v in mm_projector_weights.items()}\n model.load_state_dict(mm_projector_weights, strict=False)\n else:\n if 'vcoder_it_llava' in model_name.lower():\n print('Loading VCoder LLaVA from base model...')\n tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)\n model = VCoderITLlavaLlamaForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs)\n elif 'vcoder_ds_llava' in model_name.lower():\n print('Loading VCoder LLaVA from base model...')\n tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)\n model = VCoderDSLlavaLlamaForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs)\n elif 'vcoder_llava' in model_name.lower():\n print('Loading VCoder LLaVA from base model...')\n tokenizer = AutoTokenizer.from_pretrained(model_path, 
use_fast=False)\n model = VCoderLlavaLlamaForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs) \n else:\n print('Loading LLaVA from base model...')\n tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)\n model = LlavaLlamaForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs)\n else:\n # Load language model\n if model_base is not None:\n # PEFT model\n from peft import PeftModel\n tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)\n model = AutoModelForCausalLM.from_pretrained(model_base, torch_dtype=torch.float16, low_cpu_mem_usage=True, device_map=\"auto\")\n print(f\"Loading LoRA weights from {model_path}\")\n model = PeftModel.from_pretrained(model, model_path)\n print(f\"Merging weights\")\n model = model.merge_and_unload()\n print('Convert to FP16...')\n model.to(torch.float16)\n else:\n use_fast = False\n if 'mpt' in model_name.lower():\n tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=True)\n model = AutoModelForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, trust_remote_code=True, **kwargs)\n else:\n tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)\n model = AutoModelForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs)\n\n image_processor = None\n\n if hasattr(model.config, \"max_sequence_length\"):\n context_len = model.config.max_sequence_length\n else:\n context_len = 2048\n\n if 'llava' in model_name.lower():\n vision_tower = model.get_vision_tower()\n if not vision_tower.is_loaded:\n vision_tower.load_model()\n vision_tower.to(device=device, dtype=torch.float16)\n image_processor = vision_tower.image_processor\n \n seg_image_processor = None\n if 'vcoder' in model_name.lower():\n seg_image_processor = image_processor\n \n depth_image_processor = None\n if \"ds\" in model_name.lower():\n depth_image_processor = image_processor\n\n model.requires_grad_(False)\n return tokenizer, model, image_processor, seg_image_processor, depth_image_processor, context_len" }, { "identifier": "disable_torch_init", "path": "vcoder_llava/utils.py", "snippet": "def disable_torch_init():\n \"\"\"\n Disable the redundant torch default initialization to accelerate model creation.\n \"\"\"\n import torch\n setattr(torch.nn.Linear, \"reset_parameters\", lambda self: None)\n setattr(torch.nn.LayerNorm, \"reset_parameters\", lambda self: None)" }, { "identifier": "tokenizer_image_token", "path": "vcoder_llava/mm_utils.py", "snippet": "def tokenizer_image_token(prompt, tokenizer, image_token_index=IMAGE_TOKEN_INDEX, return_tensors=None):\n prompt_chunks = [tokenizer(chunk).input_ids for chunk in prompt.split('<image>')]\n\n def insert_separator(X, sep):\n return [ele for sublist in zip(X, [sep]*len(X)) for ele in sublist][:-1]\n\n input_ids = []\n offset = 0\n if len(prompt_chunks) > 0 and len(prompt_chunks[0]) > 0 and prompt_chunks[0][0] == tokenizer.bos_token_id:\n offset = 1\n input_ids.append(prompt_chunks[0][0])\n\n for x in insert_separator(prompt_chunks, [image_token_index] * (offset + 1)):\n input_ids.extend(x[offset:])\n\n if return_tensors is not None:\n if return_tensors == 'pt':\n return torch.tensor(input_ids, dtype=torch.long)\n raise ValueError(f'Unsupported tensor type: {return_tensors}')\n return input_ids" }, { "identifier": "process_images", "path": "vcoder_llava/mm_utils.py", "snippet": "def process_images(images, image_processor, model_cfg):\n image_aspect_ratio = getattr(model_cfg, \"image_aspect_ratio\", None)\n new_images = []\n if 
image_aspect_ratio == 'pad':\n for image in images:\n image = expand2square(image, tuple(int(x*255) for x in image_processor.image_mean))\n image = image_processor.preprocess(image, return_tensors='pt')['pixel_values'][0]\n new_images.append(image)\n else:\n return image_processor(images, return_tensors='pt')['pixel_values']\n if all(x.shape == new_images[0].shape for x in new_images):\n new_images = torch.stack(new_images, dim=0)\n return new_images" }, { "identifier": "load_image_from_base64", "path": "vcoder_llava/mm_utils.py", "snippet": "def load_image_from_base64(image):\n return Image.open(BytesIO(base64.b64decode(image)))" }, { "identifier": "get_model_name_from_path", "path": "vcoder_llava/mm_utils.py", "snippet": "def get_model_name_from_path(model_path):\n model_path = model_path.strip(\"/\")\n model_paths = model_path.split(\"/\")\n if model_paths[-1].startswith('checkpoint-'):\n return model_paths[-2] + \"_\" + model_paths[-1]\n else:\n return model_paths[-1]" } ]
import argparse
import torch
import os
import json
import pandas as pd
import shortuuid
import math
from tqdm import tqdm

from vcoder_llava.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN
from vcoder_llava.vcoder_conversation import conv_templates, SeparatorStyle
from vcoder_llava.model.builder import load_pretrained_model
from vcoder_llava.utils import disable_torch_init
from vcoder_llava.mm_utils import tokenizer_image_token, process_images, load_image_from_base64, get_model_name_from_path
from PIL import Image
3,234
all_options = ['A', 'B', 'C', 'D']


def split_list(lst, n):
    """Split a list into n (roughly) equal-sized chunks"""
    chunk_size = math.ceil(len(lst) / n)  # integer division
    return [lst[i:i+chunk_size] for i in range(0, len(lst), chunk_size)]


def get_chunk(lst, n, k):
    chunks = split_list(lst, n)
    return chunks[k]


def is_none(value):
    if value is None:
        return True
    if type(value) is float and math.isnan(value):
        return True
    if type(value) is str and value.lower() == 'nan':
        return True
    if type(value) is str and value.lower() == 'none':
        return True
    return False


def get_options(row, options):
    parsed_options = []
    for option in options:
        option_value = row[option]
        if is_none(option_value):
            break
        parsed_options.append(option_value)
    return parsed_options


def eval_model(args):
    # Model
    disable_torch_init()
    model_path = os.path.expanduser(args.model_path)
all_options = ['A', 'B', 'C', 'D']


def split_list(lst, n):
    """Split a list into n (roughly) equal-sized chunks"""
    chunk_size = math.ceil(len(lst) / n)  # integer division
    return [lst[i:i+chunk_size] for i in range(0, len(lst), chunk_size)]


def get_chunk(lst, n, k):
    chunks = split_list(lst, n)
    return chunks[k]


def is_none(value):
    if value is None:
        return True
    if type(value) is float and math.isnan(value):
        return True
    if type(value) is str and value.lower() == 'nan':
        return True
    if type(value) is str and value.lower() == 'none':
        return True
    return False


def get_options(row, options):
    parsed_options = []
    for option in options:
        option_value = row[option]
        if is_none(option_value):
            break
        parsed_options.append(option_value)
    return parsed_options


def eval_model(args):
    # Model
    disable_torch_init()
    model_path = os.path.expanduser(args.model_path)
model_name = get_model_name_from_path(model_path)
8
2023-12-17 07:46:27+00:00
4k
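A minimal sketch of how eval_model in the VCoder LLaVA row above typically continues after its gold next line; the returned tuple mirrors the end of the load_pretrained_model snippet quoted in the row's context, while the positional argument order and the args.model_base attribute are assumptions not taken from the row:

    # inside eval_model(args), right after model_path = os.path.expanduser(args.model_path)
    model_name = get_model_name_from_path(model_path)
    tokenizer, model, image_processor, seg_image_processor, depth_image_processor, context_len = \
        load_pretrained_model(model_path, args.model_base, model_name)  # argument order is an assumption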
galatolofederico/microchain
examples/calculator.py
[ { "identifier": "OpenAITextGenerator", "path": "microchain/models/generators.py", "snippet": "class OpenAITextGenerator:\n def __init__(self, *, model, api_key, api_base, temperature=0.9, top_p=1, max_tokens=512):\n try:\n import openai\n except ImportError:\n raise ImportError(\"Please install OpenAI python library using pip install openai\")\n \n self.model = model\n self.api_key = api_key\n self.api_base = api_base\n self.temperature = temperature\n self.top_p = top_p\n self.max_tokens = max_tokens\n\n self.client = openai.OpenAI(\n api_key=self.api_key,\n base_url=self.api_base\n )\n \n def __call__(self, prompt, stop=None):\n import openai\n assert isinstance(prompt, str), \"prompt must be a string https://platform.openai.com/docs/guides/text-generation/chat-completions-api\"\n\n try:\n response = self.client.completions.create(\n model=self.model,\n prompt=prompt,\n temperature=self.temperature,\n max_tokens=self.max_tokens,\n top_p=self.top_p,\n stop=stop\n )\n except openai.error.OpenAIError as e:\n print(colored(f\"Error: {e}\", \"red\"))\n return \"Error: timeout\"\n \n output = response.choices[0].text.strip()\n\n return output" }, { "identifier": "HFChatTemplate", "path": "microchain/models/templates.py", "snippet": "class HFChatTemplate:\n def __init__(self, template):\n try:\n import transformers\n except ImportError:\n raise ImportError(\"Please install transformers python library using pip install transformers\")\n \n try:\n import jinja2\n except ImportError:\n raise ImportError(\"Please install jinja2 python library using pip install jinja2\")\n\n self.tokenizer = transformers.AutoTokenizer.from_pretrained(template)\n\n def __call__(self, prompt):\n return self.tokenizer.apply_chat_template(prompt, tokenize=False, add_generation_prompt=True)" }, { "identifier": "LLM", "path": "microchain/models/llm.py", "snippet": "class LLM:\n def __init__(self, *, generator, templates=[]):\n if not isinstance(templates, list):\n templates = [templates]\n \n self.generator = generator\n self.templates = templates\n \n def __call__(self, prompt, stop=None):\n for template in self.templates:\n prompt = template(prompt)\n\n return self.generator(prompt, stop=stop)" }, { "identifier": "Function", "path": "microchain/engine/function.py", "snippet": "class Function:\n def __init__(self):\n self.call_signature = inspect.signature(self.__call__) \n self.call_parameters = []\n for name, parameter in self.call_signature.parameters.items():\n if parameter.annotation == inspect._empty:\n raise ValueError(f\"Parameter {name} must have an annotation\")\n \n self.call_parameters.append(dict(\n name=name,\n annotation=parameter.annotation\n ))\n self.state = None\n self.engine = None\n \n def bind(self, *, state, engine):\n self.state = state\n self.engine = engine\n\n @property\n def name(self):\n return type(self).__name__\n\n @property\n def example(self):\n if not isinstance(self.example_args, list):\n raise ValueError(\"example_args must be a list\")\n if len(self.example_args) != len(self.call_parameters):\n raise ValueError(f\"example_args must have the same length as call_parameters ({len(self.call_parameters)})\")\n\n bound = self.call_signature.bind(*self.example_args)\n \n return f\"{self.name}({', '.join([f'{name}={value}' for name, value in bound.arguments.items()])})\"\n \n @property\n def signature(self):\n arguments = [f\"{parameter['name']}: {parameter['annotation'].__name__}\" for parameter in self.call_parameters]\n return f\"{self.name}({', '.join(arguments)})\"\n\n @property\n def 
help(self):\n return f\"{self.signature}\\n{self.description}.\\nExample: {self.example}\\n\"\n\n @property\n def error(self):\n return f\"Error: wrong format. Use {self.signature}. Example: {self.example}. Please try again.\"\n\n def check_bind(self):\n if self.state is None:\n raise ValueError(\"You must register the function to an Engine\")\n\n def safe_call(self, args, kwargs):\n self.check_bind()\n try:\n return FunctionResult.SUCCESS, str(self.__call__(*args, **kwargs))\n except Exception as e:\n print(colored(f\"Exception in Function call {e}\", \"red\"))\n print(colored(''.join(traceback.TracebackException.from_exception(e).format()), \"red\"))\n return FunctionResult.ERROR, self.error\n\n def __call__(self, command):\n raise NotImplementedError" }, { "identifier": "Engine", "path": "microchain/engine/engine.py", "snippet": "class Engine:\n def __init__(self, state=dict()):\n self.state = state\n self.functions = dict()\n self.help_called = False\n self.agent = None\n \n def register(self, function):\n self.functions[function.name] = function\n function.bind(state=self.state, engine=self)\n\n def bind(self, agent):\n self.agent = agent\n\n def stop(self):\n if self.agent is None:\n raise ValueError(\"You must bind the engine to an agent before stopping\")\n self.agent.stop()\n\n def execute(self, command):\n if self.agent is None:\n raise ValueError(\"You must bind the engine to an agent before executing commands\")\n if not self.help_called:\n raise ValueError(\"You never accessed the help property. Building a prompt without including the help string is a very bad idea.\")\n try:\n tree = ast.parse(command)\n except SyntaxError:\n return FunctionResult.ERROR, f\"Error: syntax error in command {command}. Please try again.\"\n \n if len(tree.body) != 1:\n return FunctionResult.ERROR, f\"Error: unknown command {command}. Please try again.\"\n\n if not isinstance(tree.body[0], ast.Expr):\n return FunctionResult.ERROR, f\"Error: unknown command {command}. Please try again.\"\n\n if not isinstance(tree.body[0].value, ast.Call):\n return FunctionResult.ERROR, f\"Error: the command {command} must be a function call. Please try again.\"\n \n if not isinstance(tree.body[0].value.func, ast.Name):\n return FunctionResult.ERROR, f\"Error: the command {command} must be a function call. Please try again.\"\n\n function_name = tree.body[0].value.func.id\n function_args = tree.body[0].value.args\n function_kwargs = tree.body[0].value.keywords\n\n for arg in function_args:\n if not isinstance(arg, ast.Constant):\n return FunctionResult.ERROR, f\"Error: the command {command} must be a function call, you cannot use variables. Please try again.\"\n\n for kwarg in function_kwargs:\n if not isinstance(kwarg, ast.keyword):\n return FunctionResult.ERROR, f\"Error: the command {command} must be a function call, you cannot use variables. Please try again.\"\n if not isinstance(kwarg.value, ast.Constant):\n return FunctionResult.ERROR, f\"Error: the command {command} must be a function call, you cannot use variables. Please try again.\"\n\n function_args = [arg.value for arg in function_args]\n function_kwargs = {kwarg.arg: kwarg.value.value for kwarg in function_kwargs}\n\n if function_name not in self.functions:\n return FunctionResult.ERROR, f\"Error: unknown command {command}. 
Please try again.\"\n \n if len(function_args) + len(function_kwargs) != len(self.functions[function_name].call_parameters):\n return FunctionResult.ERROR, self.functions[function_name].error\n\n return self.functions[function_name].safe_call(args=function_args, kwargs=function_kwargs) \n \n @property\n def help(self):\n self.help_called = True\n return \"\\n\".join([f.help for f in self.functions.values()])" }, { "identifier": "Agent", "path": "microchain/engine/agent.py", "snippet": "class Agent:\n def __init__(self, llm, engine):\n self.llm = llm\n self.engine = engine\n self.max_tries = 10\n self.prompt = None\n self.bootstrap = []\n self.do_stop = False\n\n self.engine.bind(self)\n self.reset()\n\n def reset(self):\n self.history = []\n self.do_stop = False\n\n def build_initial_messages(self):\n self.history = [\n dict(\n role=\"user\",\n content=self.prompt\n ),\n ]\n for command in self.bootstrap:\n result, output = self.engine.execute(command)\n if result == FunctionResult.ERROR:\n raise Exception(f\"Your bootstrap commands contain an error. output={output}\")\n\n print(colored(f\">> {command}\", \"blue\"))\n print(colored(f\"{output}\", \"green\"))\n\n self.history.append(dict(\n role=\"assistant\",\n content=command\n ))\n self.history.append(dict(\n role=\"user\",\n content=output\n ))\n \n def clean_reply(self, reply):\n reply = reply.replace(\"\\_\", \"_\")\n reply = reply.strip()\n reply = reply[:reply.rfind(\")\")+1]\n return reply\n\n def stop(self):\n self.do_stop = True\n\n def step(self):\n result = FunctionResult.ERROR\n temp_messages = []\n tries = 0\n abort = False\n output = \"\"\n reply = \"\"\n while result != FunctionResult.SUCCESS:\n tries += 1\n\n if self.do_stop:\n abort = True\n break\n\n if tries > self.max_tries:\n print(colored(f\"Tried {self.max_tries} times (agent.max_tries) Aborting\", \"red\"))\n abort = True\n break\n \n reply = self.llm(self.history + temp_messages, stop=[\"\\n\"])\n reply = self.clean_reply(reply)\n\n if len(reply) < 2:\n print(colored(\"Error: empty reply, aborting\", \"red\"))\n abort = True\n break\n\n print(colored(f\">> {reply}\", \"yellow\"))\n \n result, output = self.engine.execute(reply)\n\n if result == FunctionResult.ERROR:\n print(colored(output, \"red\"))\n temp_messages.append(dict(\n role=\"assistant\",\n content=reply\n ))\n temp_messages.append(dict(\n role=\"user\",\n content=output\n ))\n else:\n print(colored(output, \"green\"))\n break\n \n return dict(\n abort=abort,\n reply=reply,\n output=output,\n )\n\n def run(self, iterations=10):\n if self.prompt is None:\n raise ValueError(\"You must set a prompt before running the agent\")\n\n print(colored(f\"prompt:\\n{self.prompt}\", \"blue\"))\n print(colored(f\"Running {iterations} iterations\", \"green\"))\n\n self.reset()\n self.build_initial_messages()\n\n for it in range(iterations):\n if self.do_stop:\n break\n\n step_output = self.step()\n \n if step_output[\"abort\"]:\n break\n\n self.history.append(dict(\n role=\"assistant\",\n content=step_output[\"reply\"]\n ))\n self.history.append(dict(\n role=\"user\",\n content=step_output[\"output\"]\n ))\n \n print(colored(f\"Finished {iterations} iterations\", \"green\"))" }, { "identifier": "Reasoning", "path": "microchain/functions.py", "snippet": "class Reasoning(Function):\n @property\n def description(self):\n return \"Use this function for your internal reasoning\"\n\n @property\n def example_args(self):\n return [\"The next step to take is...\"]\n\n def __call__(self, reasoning: str):\n return f\"The 
reasoning has been recorded\"" }, { "identifier": "Stop", "path": "microchain/functions.py", "snippet": "class Stop(Function):\n @property\n def description(self):\n return \"Use this function to stop the program\"\n\n @property\n def example_args(self):\n return []\n\n def __call__(self):\n self.engine.stop()\n return \"The program has been stopped\"" } ]
import os
import random

from dotenv import load_dotenv  # pip install python-dotenv

from microchain import OpenAITextGenerator, HFChatTemplate, LLM, Function, Engine, Agent
from microchain.functions import Reasoning, Stop
3,210
class Sum(Function):
    @property
    def description(self):
        return "Use this function to compute the sum of two numbers"

    @property
    def example_args(self):
        return [2, 2]

    def __call__(self, a: float, b: float):
        return a + b


class Product(Function):
    @property
    def description(self):
        return "Use this function to compute the product of two numbers"

    @property
    def example_args(self):
        return [2, 2]

    def __call__(self, a: float, b: float):
        return a * b


load_dotenv()

generator = OpenAITextGenerator(
    model=os.environ["MODEL_NAME"],
    api_key=os.environ["API_KEY"],
    api_base=os.environ["API_BASE"],
    temperature=0.7
)
template = HFChatTemplate(os.environ["TEMPLATE_NAME"])
llm = LLM(generator=generator, templates=[template])

engine = Engine()
engine.register(Reasoning())
class Sum(Function):
    @property
    def description(self):
        return "Use this function to compute the sum of two numbers"

    @property
    def example_args(self):
        return [2, 2]

    def __call__(self, a: float, b: float):
        return a + b


class Product(Function):
    @property
    def description(self):
        return "Use this function to compute the product of two numbers"

    @property
    def example_args(self):
        return [2, 2]

    def __call__(self, a: float, b: float):
        return a * b


load_dotenv()

generator = OpenAITextGenerator(
    model=os.environ["MODEL_NAME"],
    api_key=os.environ["API_KEY"],
    api_base=os.environ["API_BASE"],
    temperature=0.7
)
template = HFChatTemplate(os.environ["TEMPLATE_NAME"])
llm = LLM(generator=generator, templates=[template])

engine = Engine()
engine.register(Reasoning())
engine.register(Stop())
7
2023-12-19 10:57:56+00:00
4k
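A short sketch of how the calculator example above usually finishes after its gold next line, using only the Engine/Agent API quoted in the row's context; the prompt wording, bootstrap command and iteration count are assumptions:

engine.register(Stop())
engine.register(Sum())
engine.register(Product())

agent = Agent(llm, engine)
# engine.help lists each registered function's signature, description and example,
# and reading it also satisfies the help_called check inside Engine.execute
agent.prompt = f"Act as a calculator. You can use the following functions:\n\n{engine.help}\nOnly answer with function calls."
agent.bootstrap = ['Reasoning("I will compute the result step by step")']
agent.run(iterations=10)

Reading engine.help before agent.run() matters here: Engine.execute refuses to run any command if the help property was never accessed.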
OSU-NLP-Group/SeeAct
src/offline_experiments/screenshot_generation/image_annotation.py
[ { "identifier": "convert_elements2detections", "path": "src/data_utils/image_utils.py", "snippet": "def convert_elements2detections(candidate_elements):\n \"\"\"\n Extract element coordinates\n Parse candidate elements coordinates and convert into sv Detection objects\n \"\"\"\n boxes = []\n for box_id, element in enumerate(candidate_elements):\n bounding_box_rect = json.loads(element['attributes'])['bounding_box_rect'].strip().split(',')\n x1 = float(bounding_box_rect[0])\n y1 = float(bounding_box_rect[1])\n w = float(bounding_box_rect[2])\n h = float(bounding_box_rect[3])\n boxes.append([x1, y1, x1 + w, y1 + h])\n # Format bounding box into transformers output format to convert into supervision detection\n transformer_results = {\n \"boxes\": torch.tensor(boxes),\n \"scores\": torch.tensor([0.5 for item in boxes]),\n \"labels\": torch.tensor([1 for item in boxes])\n }\n detections = sv.Detections.from_transformers(transformer_results)\n return detections" }, { "identifier": "extract_topk_elements", "path": "src/data_utils/image_utils.py", "snippet": "def extract_topk_elements(all_elements, k):\n topk_elements = []\n for element in all_elements:\n rank = element['rank']\n score = element['score']\n if rank < k:\n topk_elements.append(copy.deepcopy(element))\n return topk_elements" }, { "identifier": "extract_elements_by_ids", "path": "src/data_utils/image_utils.py", "snippet": "def extract_elements_by_ids(all_elements, ids):\n \"\"\"\n Extract elements specified by the list of element_id\n To prevent order change, we will keep the return element the same order as the ids input\n \"\"\"\n output = []\n for element in all_elements:\n element_id = element['backend_node_id']\n if element_id in ids:\n output.append(element)\n\n # Order output element to be identical with ids input\n element_dict = {}\n for element in all_elements:\n element_id = element['backend_node_id']\n element_dict[element_id] = element\n ordered_output = []\n for element_id in ids:\n ordered_output.append(element_dict[element_id])\n\n return ordered_output" }, { "identifier": "batch_elements_by_locality", "path": "src/data_utils/image_utils.py", "snippet": "def batch_elements_by_locality(elements, num_choices):\n # Sort elements by y1 location (ascending order)\n sorted_elements = sorted(elements, key=lambda x: float(\n json.loads(x['attributes'])['bounding_box_rect'].strip().split(',')[1]))\n\n batches = []\n while len(sorted_elements) > 1:\n batch = sorted_elements[: num_choices]\n sorted_elements = sorted_elements[num_choices:]\n batches.append(batch)\n\n return batches" }, { "identifier": "batch_elements_by_locality_16_16_17", "path": "src/data_utils/image_utils.py", "snippet": "def batch_elements_by_locality_16_16_17(elements):\n # Sort elements by y1 location (ascending order)\n sorted_elements = sorted(elements, key=lambda x: float(\n json.loads(x['attributes'])['bounding_box_rect'].strip().split(',')[1]))\n\n batches = []\n # First batch: 16\n batch = sorted_elements[: 16]\n sorted_elements = sorted_elements[16:]\n batches.append(batch)\n\n # Second batch: 17\n batch = sorted_elements[: 17]\n sorted_elements = sorted_elements[17:]\n batches.append(batch)\n\n # Third batch: 17\n batch = sorted_elements[: 17]\n sorted_elements = sorted_elements[17:]\n batches.append(batch)\n\n return batches" }, { "identifier": "data_format_input_multichoice", "path": "src/data_utils/format_prompt_utils.py", "snippet": "def data_format_input_multichoice(\n sample, candidate_ids, gt=-1, previous_k=5, keep_html_brackets=False\n):\n 
# Parse html into a dom tree\n dom_tree = lxml.etree.fromstring(sample[\"cleaned_html\"])\n dom_tree, node_to_keep = data_prune_tree(dom_tree, candidate_ids)\n tree_repr, id_mapping = get_tree_repr(\n dom_tree, id_mapping={}, keep_html_brackets=keep_html_brackets\n )\n candidate_nodes = dom_tree.xpath(\"//*[@backend_node_id]\")\n choices = []\n for idx, node in enumerate(candidate_nodes):\n temp = get_tree_repr(\n node,\n id_mapping=id_mapping,\n keep_html_brackets=keep_html_brackets,\n )\n choices.append(\n [\n node.attrib[\"backend_node_id\"],\n \" \".join(\n get_tree_repr(\n node,\n id_mapping=id_mapping,\n keep_html_brackets=keep_html_brackets,\n )[0].split()[:10]\n ),\n ]\n )\n gt = id_mapping.get(gt, -1)\n seq_input = (\n \"Based on the HTML webpage above, try to complete the following task:\\n\"\n f\"Task: {sample['confirmed_task']}\\n\"\n f\"Previous actions:\\n\"\n )\n if len(sample[\"previous_actions\"]) > 0:\n for action in sample[\"previous_actions\"][-previous_k:]:\n seq_input += f\"{action}\\n\"\n else:\n seq_input += \"None\\n\"\n seq_input += (\n \"What should be the next action? Please select from the following choices \"\n \"(If the correct action is not in the page above, please select A. 'None of the above'):\\n\\n\"\n \"A. None of the above\\n\"\n )\n for idx, choice in enumerate(choices):\n # convert to ascii A, B, C, D, ...\n seq_input += f\"{chr(66 + idx)}. {choice[1]}\\n\"\n if gt == -1:\n seq_target = \"A.\"\n else:\n gt += 1\n current_action_op = sample[\"operation\"][\"op\"]\n current_action_value = sample[\"operation\"][\"value\"]\n seq_target = f\"{chr(65 + gt)}.\\n\" f\"Action: {current_action_op}\\n\"\n if current_action_op != \"CLICK\":\n seq_target += f\"Value: {current_action_value}\"\n return tree_repr, seq_input, seq_target, choices, node_to_keep" } ]
import json
import os
import jsonlines
import base64
import numpy as np
import cv2
import copy
import argparse
import supervision as sv
import torch
import pickle as pkl
from tqdm import tqdm

from src.data_utils.image_utils import convert_elements2detections
from src.data_utils.image_utils import extract_topk_elements, extract_elements_by_ids
from src.data_utils.image_utils import batch_elements_by_locality, batch_elements_by_locality_16_16_17
from src.data_utils.format_prompt_utils import data_format_input_multichoice
2,627
# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. def run(args): with open(args.selected_set_task_id_path, 'rb') as f: selected_set_task_id_dict = pkl.load(f) selected_task_ids = selected_set_task_id_dict[args.split] # Path to the raw_dump containing screenshot source data screenshot_dump_path = args.screenshot_dump_path # Set the image output directory output_dir = args.output_dir if not os.path.exists(output_dir): os.mkdir(output_dir) # Path to dumped query data (Taken from Mind2Web experiment sample before sending into LLM inference) query_source_path = args.query_source_path with open(query_source_path, 'r') as f: all_queries = json.load(f) # setup annotators bounding_box_annotator = sv.BoundingBoxAnnotator( thickness=2 ) candidate_label_annotator = sv.LabelAnnotator( color_lookup=sv.ColorLookup.INDEX, text_position=sv.Position.BOTTOM_LEFT, text_scale=0.5, text_color=sv.Color.white(), color=sv.Color.black(), text_thickness=1 ) # Enumerate each task in query data and generate screenshots for i, task in tqdm(enumerate(all_queries)): if len(task) == 2: continue task_action_id = task[0] task_id, action_id = task_action_id.strip().split("_") if task_id not in selected_task_ids: continue # Load Image source data single_screenshot_path = os.path.join(screenshot_dump_path, task_id, "processed/screenshot.json") if os.path.exists(single_screenshot_path): with open(single_screenshot_path) as f: scrshots_task = json.load(f) else: continue # Output Path task_dir = os.path.join(output_dir, task_action_id) if not os.path.exists(task_dir): os.mkdir(task_dir) image_dir = os.path.join(output_dir, task_action_id, "images") if not os.path.exists(image_dir): os.mkdir(image_dir) actid2scrshots_task = {} for scrshot in scrshots_task: tsd_act_uid = scrshot["action_uid"] actid2scrshots_task[tsd_act_uid] = scrshot scrshot = actid2scrshots_task[action_id] inference_batches = task[1] sample = task[2] # Prepare Image bef_tsd = scrshot["before"]["screenshot"] bef_tsd = np.frombuffer(base64.b64decode(bef_tsd), np.uint8) bef_img = cv2.imdecode(bef_tsd, cv2.IMREAD_COLOR) # Collect all elements all_elements = [] positive_elements = sample['pos_candidates'] negative_elements = sample['neg_candidates'] all_elements.extend(positive_elements) all_elements.extend(negative_elements) # Prepare top-50 elements and batch into 3 batches with 20 choices top_50_elements = extract_topk_elements(all_elements, k=50) if args.num_choice == -1: choice_batches = batch_elements_by_locality_16_16_17(top_50_elements) else: choice_batches = batch_elements_by_locality(top_50_elements, num_choices=args.num_choice) to_run = [] for batch_idx, candidate_elements in enumerate(choice_batches): temp = copy.deepcopy(sample) # Prepare question, choices, etc. 
candidate_element_ids = [item['backend_node_id'] for item in candidate_elements] seq_context, seq_in, _, choices, node_to_keep = data_format_input_multichoice( temp, candidate_element_ids, -1, keep_html_brackets=True ) temp['context_html'] = seq_context temp['context_node_ids'] = copy.deepcopy(list(node_to_keep)) temp['question'] = seq_in # Reorder Choices temp['choices'] = choices temp['image_path'] = os.path.join("", task_action_id, "images") # Choices will be reordered after data_format_input_multichoice, need to reorder candidate_element_ids # Align candidate_element_ids with choices candidate_element_ids = [item[0] for item in choices] # Align candidate_elements with choices candidate_elements = extract_elements_by_ids(all_elements, ids=candidate_element_ids) # Prepare Images
# -*- coding: utf-8 -*- # Copyright (c) 2024 OSU Natural Language Processing Group # # Licensed under the OpenRAIL-S License; # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.licenses.ai/ai-pubs-open-rails-vz1 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. def run(args): with open(args.selected_set_task_id_path, 'rb') as f: selected_set_task_id_dict = pkl.load(f) selected_task_ids = selected_set_task_id_dict[args.split] # Path to the raw_dump containing screenshot source data screenshot_dump_path = args.screenshot_dump_path # Set the image output directory output_dir = args.output_dir if not os.path.exists(output_dir): os.mkdir(output_dir) # Path to dumped query data (Taken from Mind2Web experiment sample before sending into LLM inference) query_source_path = args.query_source_path with open(query_source_path, 'r') as f: all_queries = json.load(f) # setup annotators bounding_box_annotator = sv.BoundingBoxAnnotator( thickness=2 ) candidate_label_annotator = sv.LabelAnnotator( color_lookup=sv.ColorLookup.INDEX, text_position=sv.Position.BOTTOM_LEFT, text_scale=0.5, text_color=sv.Color.white(), color=sv.Color.black(), text_thickness=1 ) # Enumerate each task in query data and generate screenshots for i, task in tqdm(enumerate(all_queries)): if len(task) == 2: continue task_action_id = task[0] task_id, action_id = task_action_id.strip().split("_") if task_id not in selected_task_ids: continue # Load Image source data single_screenshot_path = os.path.join(screenshot_dump_path, task_id, "processed/screenshot.json") if os.path.exists(single_screenshot_path): with open(single_screenshot_path) as f: scrshots_task = json.load(f) else: continue # Output Path task_dir = os.path.join(output_dir, task_action_id) if not os.path.exists(task_dir): os.mkdir(task_dir) image_dir = os.path.join(output_dir, task_action_id, "images") if not os.path.exists(image_dir): os.mkdir(image_dir) actid2scrshots_task = {} for scrshot in scrshots_task: tsd_act_uid = scrshot["action_uid"] actid2scrshots_task[tsd_act_uid] = scrshot scrshot = actid2scrshots_task[action_id] inference_batches = task[1] sample = task[2] # Prepare Image bef_tsd = scrshot["before"]["screenshot"] bef_tsd = np.frombuffer(base64.b64decode(bef_tsd), np.uint8) bef_img = cv2.imdecode(bef_tsd, cv2.IMREAD_COLOR) # Collect all elements all_elements = [] positive_elements = sample['pos_candidates'] negative_elements = sample['neg_candidates'] all_elements.extend(positive_elements) all_elements.extend(negative_elements) # Prepare top-50 elements and batch into 3 batches with 20 choices top_50_elements = extract_topk_elements(all_elements, k=50) if args.num_choice == -1: choice_batches = batch_elements_by_locality_16_16_17(top_50_elements) else: choice_batches = batch_elements_by_locality(top_50_elements, num_choices=args.num_choice) to_run = [] for batch_idx, candidate_elements in enumerate(choice_batches): temp = copy.deepcopy(sample) # Prepare question, choices, etc. 
candidate_element_ids = [item['backend_node_id'] for item in candidate_elements] seq_context, seq_in, _, choices, node_to_keep = data_format_input_multichoice( temp, candidate_element_ids, -1, keep_html_brackets=True ) temp['context_html'] = seq_context temp['context_node_ids'] = copy.deepcopy(list(node_to_keep)) temp['question'] = seq_in # Reorder Choices temp['choices'] = choices temp['image_path'] = os.path.join("", task_action_id, "images") # Choices will be reordered after data_format_input_multichoice, need to reorder candidate_element_ids # Align candidate_element_ids with choices candidate_element_ids = [item[0] for item in choices] # Align candidate_elements with choices candidate_elements = extract_elements_by_ids(all_elements, ids=candidate_element_ids) # Prepare Images
candidate_detections = convert_elements2detections(candidate_elements)
0
2023-12-21 18:22:11+00:00
4k
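A sketch of how the annotation loop in the SeeAct row above usually proceeds from its gold next line, assuming the standard supervision annotate(scene=..., detections=..., labels=...) call and an invented output file name:

            candidate_detections = convert_elements2detections(candidate_elements)
            # labels start at 'B' because choice 'A' is reserved for "None of the above"
            candidate_labels = [chr(66 + idx) for idx in range(len(candidate_elements))]

            annotated_img = bounding_box_annotator.annotate(
                scene=bef_img.copy(), detections=candidate_detections
            )
            annotated_img = candidate_label_annotator.annotate(
                scene=annotated_img, detections=candidate_detections, labels=candidate_labels
            )
            cv2.imwrite(os.path.join(image_dir, f"{batch_idx}.jpg"), annotated_img)  # file name is an assumption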
DeepWok/mase
machop/chop/passes/graph/transforms/quantize/quant_parsers/parse_quant_config.py
[ { "identifier": "cp_multi_values", "path": "machop/chop/passes/graph/transforms/quantize/quant_parsers/utils.py", "snippet": "def cp_multi_values(\n src: dict, dst: dict, src_keys: tuple, dst_keys: tuple = None, strict: bool = True\n):\n \"\"\"Copy multiple values from src dict to dst dict.\"\"\"\n if dst_keys is None:\n for key in src_keys:\n if not strict and key not in src:\n continue\n dst[key] = deepcopy(src[key])\n else:\n for src_key, dst_key in zip(src_keys, dst_keys):\n if not strict and src_key not in src:\n continue\n dst[dst_key] = deepcopy(src[src_key])" }, { "identifier": "has_multi_keys", "path": "machop/chop/passes/graph/transforms/quantize/quant_parsers/utils.py", "snippet": "def has_multi_keys(src: dict, keys: tuple):\n \"\"\"Check if src dict has multiple keys.\"\"\"\n for key in keys:\n if key not in src:\n return False\n return True" } ]
from functools import partial

from .utils import cp_multi_values, has_multi_keys
2,100
), "data_in_entries": ( "data_in_width", "data_in_exponent_width", "data_in_exponent_bias", ), "bias_entries": ("bias_width", "bias_exponent_width", "bias_exponent_bias"), }, "minifloat_denorm": { "weight_entries": ( "weight_width", "weight_exponent_width", "weight_exponent_bias", ), "data_in_entries": ( "data_in_width", "data_in_exponent_width", "data_in_exponent_bias", ), "bias_entries": ("bias_width", "bias_exponent_width", "bias_exponent_bias"), }, "log": { "weight_entries": ("weight_width", "weight_exponent_bias"), "data_in_entries": ("data_in_width", "data_in_exponent_bias"), "bias_entries": ("bias_width", "bias_exponent_bias"), }, "block_fp": { "weight_entries": ( "weight_width", "weight_exponent_width", "weight_exponent_bias", "weight_block_size", ), "data_in_entries": ( "data_in_width", "data_in_exponent_width", "data_in_exponent_bias", "data_in_block_size", ), "bias_entries": ( "bias_width", "bias_exponent_width", "bias_exponent_bias", "bias_block_size", ), }, "block_minifloat": { "weight_entries": ( "weight_width", "weight_exponent_width", "weight_exponent_bias_width", "weight_block_size", ), "data_in_entries": ( "data_in_width", "data_in_exponent_width", "data_in_exponent_bias_width", "data_in_block_size", ), "bias_entries": ( "bias_width", "bias_exponent_width", "bias_exponent_bias_width", "bias_block_size", ), }, "block_log": { "weight_entries": ( "weight_width", "weight_exponent_bias_width", "weight_block_size", ), "data_in_entries": ( "data_in_width", "data_in_exponent_bias_width", "data_in_block_size", ), "bias_entries": ( "bias_width", "bias_exponent_bias_width", "bias_block_size", ), }, } """ cp_<entry_name> functions A collection of functions to copy values from a src config to a parsed config. """ def cp_name(config: dict, p_config: dict, entries=None, strict: bool = True): cp_multi_values(config, p_config, ("name",), strict=strict) def cp_bypass(config: dict, p_config: dict, entries=None, strict: bool = True): cp_multi_values(config, p_config, ("bypass",), strict=strict) def cp_weight_entries(config: dict, p_config: dict, entries: dict, strict: bool = True): cp_multi_values(config, p_config, entries["weight_entries"], strict=strict) def cp_data_in_entries( config: dict, p_config: dict, entries: dict, strict: bool = True ): cp_multi_values(config, p_config, entries["data_in_entries"], strict=strict) def cp_data_out_entries(config: dict, p_config: dict, entries: dict): cp_multi_values(config, p_config, entries["data_out_entries"]) def cp_bias_entries(config: dict, p_config: dict, entries: dict, strict: bool = True): cp_multi_values(config, p_config, entries["bias_entries"], strict=strict) def cp_weight_entries_to_bias( config: dict, p_config: dict, entries: dict, strict: bool = True ):
""" QUANT_ARITH_ENTRIES A mapping from (quantization arithmetic name) to (a mapping from (operand name) to (operand quantization spec name)) Example A fixed point quantized value is defined by (width, frac_width), thus the mapping is defined as follows: ```python "fixed": { "weight_entries": ("weight_width", "weight_frac_width"), "data_in_entries": ("data_in_width", "data_in_frac_width"), "bias_entries": ("bias_width", "bias_frac_width"), }, ``` """ QUANT_ARITH_ENTRIES = { # <arith_name> : {<operand_name> : (<operand_quantization_spec_name>,)} "integer": { "weight_entries": ("weight_width", "weight_frac_width"), "data_in_entries": ("data_in_width", "data_in_frac_width"), "bias_entries": ("bias_width", "bias_frac_width"), }, "fixed": { "weight_entries": ("weight_width", "weight_frac_width"), "data_in_entries": ("data_in_width", "data_in_frac_width"), "bias_entries": ("bias_width", "bias_frac_width"), }, "lutnet": { "weight_entries": ( "weight_width", "weight_frac_width", "weight_binarization_level", "weight_input_expanded", "weight_k", "weight_in_dim", ), "data_in_entries": ( "data_in_width", "data_in_frac_width", "data_in_binarization_level", # binarization_level (int): which level of binarization is applied, "binarized_weight" is only weights binarized others is no binarization "data_in_input_expanded", # input_expanded (bool): If set to True, means all LUT's inputs are considered during calculations , else only the first input will considered and the remaining will be masked. "data_in_k", # k entries of a LUT "data_in_levels", # data_in_levels (int): number of residual levels to use in lutnet "data_in_dim", # data input dimension (this is needed by convolution) ), "bias_entries": ( "bias_width", "bias_frac_width", "bias_binarization_level", "bias_input_expanded", "bias_k", "bias_in_dim", ), }, "logicnets": { "weight_entries": ( # TODO: change update_node_meta.py to take optional argument so this can be removed "weight_width", "weight_frac_width", ), "bias_entries": ( "bias_width", "bias_frac_width", ), "data_in_entries": ( "data_in_width", "data_in_frac_width", ), "data_out_entries": ( "data_out_width", "data_out_frac_width", ), "additional_layers_entries": { "additional_layers_inputs", "additional_layers_outputs", }, }, "binary": { "weight_entries": ( "weight_width", "weight_stochastic", "weight_bipolar", ), "data_in_entries": ( "data_in_width", "data_in_stochastic", "data_in_bipolar", ), "bias_entries": ( "bias_width", "bias_stochastic", "bias_bipolar", ), }, "binary_residual": { "weight_entries": ( "weight_width", "weight_stochastic", "weight_bipolar", "binary_training", ), "data_in_entries": ( "data_in_width", "data_in_stochastic", "data_in_bipolar", "data_in_residual_sign", "data_in_levels", # data_in_levels (int): number of residual levels to use in lutnet ), "bias_entries": ( "bias_width", "bias_stochastic", "bias_bipolar", ), }, "ternary": { "weight_entries": ( "weight_width", "weight_scaling_factor", "weight_mean", "weight_median", "weight_max", ), "data_in_entries": ( "data_in_width", "data_in_scaling_factor", "data_in_mean", "data_in_median", "data_in_max", ), "bias_entries": ( "bias_width", "bias_scaling_factor", "bias_mean", "bias_max", "bias_median", ), }, "minifloat_ieee": { "weight_entries": ( "weight_width", "weight_exponent_width", "weight_exponent_bias", ), "data_in_entries": ( "data_in_width", "data_in_exponent_width", "data_in_exponent_bias", ), "bias_entries": ("bias_width", "bias_exponent_width", "bias_exponent_bias"), }, "minifloat_denorm": { "weight_entries": ( 
"weight_width", "weight_exponent_width", "weight_exponent_bias", ), "data_in_entries": ( "data_in_width", "data_in_exponent_width", "data_in_exponent_bias", ), "bias_entries": ("bias_width", "bias_exponent_width", "bias_exponent_bias"), }, "log": { "weight_entries": ("weight_width", "weight_exponent_bias"), "data_in_entries": ("data_in_width", "data_in_exponent_bias"), "bias_entries": ("bias_width", "bias_exponent_bias"), }, "block_fp": { "weight_entries": ( "weight_width", "weight_exponent_width", "weight_exponent_bias", "weight_block_size", ), "data_in_entries": ( "data_in_width", "data_in_exponent_width", "data_in_exponent_bias", "data_in_block_size", ), "bias_entries": ( "bias_width", "bias_exponent_width", "bias_exponent_bias", "bias_block_size", ), }, "block_minifloat": { "weight_entries": ( "weight_width", "weight_exponent_width", "weight_exponent_bias_width", "weight_block_size", ), "data_in_entries": ( "data_in_width", "data_in_exponent_width", "data_in_exponent_bias_width", "data_in_block_size", ), "bias_entries": ( "bias_width", "bias_exponent_width", "bias_exponent_bias_width", "bias_block_size", ), }, "block_log": { "weight_entries": ( "weight_width", "weight_exponent_bias_width", "weight_block_size", ), "data_in_entries": ( "data_in_width", "data_in_exponent_bias_width", "data_in_block_size", ), "bias_entries": ( "bias_width", "bias_exponent_bias_width", "bias_block_size", ), }, } """ cp_<entry_name> functions A collection of functions to copy values from a src config to a parsed config. """ def cp_name(config: dict, p_config: dict, entries=None, strict: bool = True): cp_multi_values(config, p_config, ("name",), strict=strict) def cp_bypass(config: dict, p_config: dict, entries=None, strict: bool = True): cp_multi_values(config, p_config, ("bypass",), strict=strict) def cp_weight_entries(config: dict, p_config: dict, entries: dict, strict: bool = True): cp_multi_values(config, p_config, entries["weight_entries"], strict=strict) def cp_data_in_entries( config: dict, p_config: dict, entries: dict, strict: bool = True ): cp_multi_values(config, p_config, entries["data_in_entries"], strict=strict) def cp_data_out_entries(config: dict, p_config: dict, entries: dict): cp_multi_values(config, p_config, entries["data_out_entries"]) def cp_bias_entries(config: dict, p_config: dict, entries: dict, strict: bool = True): cp_multi_values(config, p_config, entries["bias_entries"], strict=strict) def cp_weight_entries_to_bias( config: dict, p_config: dict, entries: dict, strict: bool = True ):
if has_multi_keys(config, entries["bias_entries"]):
1
2023-12-18 12:50:53+00:00
4k
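One plausible completion (a sketch, not necessarily the repository's actual code) of cp_weight_entries_to_bias from the mase row above, continuing its gold next line by reusing cp_multi_values and its dst_keys parameter from the row's context:

def cp_weight_entries_to_bias(
    config: dict, p_config: dict, entries: dict, strict: bool = True
):
    if has_multi_keys(config, entries["bias_entries"]):
        # explicit bias quantization entries exist: copy them directly
        cp_multi_values(config, p_config, entries["bias_entries"], strict=strict)
    else:
        # otherwise fall back to the weight spec, renaming weight_* keys to bias_* keys
        cp_multi_values(
            config,
            p_config,
            src_keys=entries["weight_entries"],
            dst_keys=entries["bias_entries"],
            strict=strict,
        )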
yeyt97/AirDropPlus
AirDropPlus.py
[ { "identifier": "Config", "path": "config.py", "snippet": "class Config:\n def __init__(self, config_path):\n self.config = configparser.ConfigParser()\n self.config.read(config_path, encoding='utf-8')\n\n self.config_path = config_path\n self.key = self.config.get('config', 'key')\n self.save_path = self.config.get('config', 'save_path')\n if self.save_path == '' or self.save_path is None:\n self.save_path = os.path.join(os.path.expanduser('~'), 'Downloads')\n\n self.port = int(self.config.get('config', 'port'))\n self.basic_notifier = False if self.config.get('config', 'basic_notifier')=='0' else True\n\n self.version = self.config.get('info', 'version')" }, { "identifier": "create_notifier", "path": "notifier.py", "snippet": "def create_notifier(basic: bool = True):\n return BasicNotifier() if basic else Notifier()" }, { "identifier": "Server", "path": "server.py", "snippet": "class Server:\n def __init__(self, config: Config, notifier: INotifier):\n self.config = config\n self.notifier = notifier\n self.blueprint = Blueprint('server', __name__)\n self.register_routes()\n self.app = Flask(__name__)\n self.app.register_blueprint(self.blueprint)\n\n def run(self, host: str, port: int):\n self.app.run(host=host, port=port)\n\n def register_routes(self):\n \"\"\" ----------- 统一处理 ----------- \"\"\"\n # 统一认证\n @self.blueprint.before_request\n def check_api_key():\n if request.path == '/':\n return\n auth_header = request.headers.get(\"Authorization\")\n if auth_header != self.config.key:\n return Result.error(msg='密钥错误', code=401)\n version = request.headers.get(\"ShortcutVersion\")\n if version != self.config.version:\n msg = f'版本不匹配\\n\\nWindows版本为:{self.config.version}\\n快捷指令版本为:{version}'\n return Result.error(msg=msg, code=400)\n\n # 统一异常处理\n @self.blueprint.errorhandler(Exception)\n def handle_all_exceptions(error):\n msg = str(error)\n self.notifier.notify('错误', '遇到一个错误' + msg)\n return Result.error(msg, 500)\n\n \"\"\" ----------- 测试 ----------- \"\"\"\n @self.blueprint.route('/')\n def test():\n return 'Hello world!'\n\n \"\"\" ----------- 文件 ----------- \"\"\"\n # 手机端发送接下来要发送的文件列表\n @self.blueprint.route('/file/send/list', methods=['POST'])\n def send_file_list():\n filename_list = request.form['file_list'].splitlines()\n self.notifier.show_future_files(self.config.save_path, filename_list, to_mobile=False)\n return Result.success(msg=\"发送成功\")\n\n # 手机端发送文件\n @self.blueprint.route('/file/send', methods=['POST'])\n def send_file():\n if 'file' not in request.files:\n return Result.error(msg=\"文件不存在\")\n file = request.files['file']\n ori_filename = request.form['filename']\n notify_content = request.form['notify_content']\n filename = utils.avoid_duplicate_filename(self.config.save_path, ori_filename)\n file.save(os.path.join(self.config.save_path, filename))\n\n if notify_content != '':\n ori_filename_list = notify_content.splitlines()\n if len(ori_filename_list) == 1:\n self.notifier.show_received_file(self.config.save_path, filename, ori_filename)\n else:\n self.notifier.show_received_files(self.config.save_path, ori_filename_list)\n return Result.success(msg=\"发送成功\")\n\n # 获取电脑端复制的文件的路径列表\n @self.blueprint.route('/file/receive/list')\n def receive_file_list():\n success, res = utils.get_clipboard_files()\n if not success:\n msg = f'未复制文件: {res}'\n self.notifier.notify('错误', msg)\n return Result.error(msg=msg)\n if len(res) > 0:\n file_names = [os.path.basename(path) for path in res]\n self.notifier.show_future_files(None, file_names, to_mobile=True)\n return 
Result.success(data=res)\n return Result.error(msg='Windows未复制文件')\n\n # 获取电脑端文件\n @self.blueprint.route('/file/receive', methods=['POST'])\n def receive_file():\n path = request.form.get('path')\n file_name = os.path.basename(path)\n # self.notifier.notify('文件', f'发送: {file_name}')\n with open(path, 'rb') as f:\n file_content = f.read()\n return flask.send_file(io.BytesIO(file_content), as_attachment=True, download_name=file_name)\n\n \"\"\" ----------- 剪贴板 ----------- \"\"\"\n # 获取电脑端剪贴板\n @self.blueprint.route('/clipboard/receive')\n def receive_clipboard():\n success, res = utils.get_clipboard_content()\n if not success:\n msg = f'获取剪贴板出错: {res}'\n self.notifier.notify('错误', msg)\n return Result.error(msg=msg)\n if res != '':\n self.notifier.notify('剪贴板', f'发送: {res}')\n return Result.success(data=res)\n else:\n self.notifier.notify('剪贴板', '发送失败: Windows剪贴板为空')\n return Result.error(msg='Windows剪贴板为空')\n\n # 接收手机端剪贴板\n @self.blueprint.route('/clipboard/send', methods=['POST'])\n def send_clipboard():\n clipboard = request.form['clipboard']\n if clipboard is None or clipboard == '':\n self.notifier.notify('剪贴板', '接收失败: iPhone剪贴板为空')\n return Result.error(msg='iPhone剪贴板为空')\n success, msg = utils.set_clipboard_content(clipboard)\n if success:\n self.notifier.notify('剪贴板', f'收到剪贴板内容: {clipboard}')\n else:\n self.notifier.notify('错误', f'设置剪贴板出错: {msg}')\n return Result.success(msg='发送成功') if success else Result.error(msg=msg)" } ]
import os
import sys

import utils
from config import Config
from notifier import create_notifier
from server import Server
1,664
if __name__ == '__main__':
    SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
    config_file_path = os.path.join(SCRIPT_DIR, 'config', 'config.ini')
    config = Config(config_file_path)
    notifier = create_notifier(config.basic_notifier)

    if not os.path.exists(config.save_path):
        notifier.notify('启动失败', f'文件保存路径:"{config.save_path}"不存在,请检查配置文件"{config_file_path}"')
        sys.exit()

    if utils.is_program_running():
        notifier.notify('启动失败', '请不要重复启动')
        sys.exit()

    try:
if __name__ == '__main__':
    SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
    config_file_path = os.path.join(SCRIPT_DIR, 'config', 'config.ini')
    config = Config(config_file_path)
    notifier = create_notifier(config.basic_notifier)

    if not os.path.exists(config.save_path):
        notifier.notify('启动失败', f'文件保存路径:"{config.save_path}"不存在,请检查配置文件"{config_file_path}"')
        sys.exit()

    if utils.is_program_running():
        notifier.notify('启动失败', '请不要重复启动')
        sys.exit()

    try:
server = Server(config, notifier)
2
2023-12-19 08:16:21+00:00
4k
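A minimal sketch of how the AirDropPlus __main__ block above typically continues inside its try block, starting from the gold next line and using the Server and notifier APIs quoted in the row's context; the host value and the notification text are assumptions:

    try:
        server = Server(config, notifier)
        server.run(host='0.0.0.0', port=config.port)
    except Exception as e:
        notifier.notify('Error', f'AirDrop Plus stopped: {e}')  # message text is an assumption
        sys.exit()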
byeongjun-park/HarmonyView
ldm/modules/diffusionmodules/openaimodel.py
[ { "identifier": "checkpoint", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def checkpoint(func, inputs, params, flag):\n \"\"\"\n Evaluate a function without caching intermediate activations, allowing for\n reduced memory at the expense of extra compute in the backward pass.\n :param func: the function to evaluate.\n :param inputs: the argument sequence to pass to `func`.\n :param params: a sequence of parameters `func` depends on but does not\n explicitly take as arguments.\n :param flag: if False, disable gradient checkpointing.\n \"\"\"\n if flag:\n args = tuple(inputs) + tuple(params)\n return CheckpointFunction.apply(func, len(inputs), *args)\n else:\n return func(*inputs)" }, { "identifier": "conv_nd", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def conv_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D convolution module.\n \"\"\"\n if dims == 1:\n return nn.Conv1d(*args, **kwargs)\n elif dims == 2:\n return nn.Conv2d(*args, **kwargs)\n elif dims == 3:\n return nn.Conv3d(*args, **kwargs)\n raise ValueError(f\"unsupported dimensions: {dims}\")" }, { "identifier": "linear", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def linear(*args, **kwargs):\n \"\"\"\n Create a linear module.\n \"\"\"\n return nn.Linear(*args, **kwargs)" }, { "identifier": "avg_pool_nd", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def avg_pool_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D average pooling module.\n \"\"\"\n if dims == 1:\n return nn.AvgPool1d(*args, **kwargs)\n elif dims == 2:\n return nn.AvgPool2d(*args, **kwargs)\n elif dims == 3:\n return nn.AvgPool3d(*args, **kwargs)\n raise ValueError(f\"unsupported dimensions: {dims}\")" }, { "identifier": "zero_module", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def zero_module(module):\n \"\"\"\n Zero out the parameters of a module and return it.\n \"\"\"\n for p in module.parameters():\n p.detach().zero_()\n return module" }, { "identifier": "normalization", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def normalization(channels):\n \"\"\"\n Make a standard normalization layer.\n :param channels: number of input channels.\n :return: an nn.Module for normalization.\n \"\"\"\n return GroupNorm32(32, channels)" }, { "identifier": "timestep_embedding", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False):\n \"\"\"\n Create sinusoidal timestep embeddings.\n :param timesteps: a 1-D Tensor of N indices, one per batch element.\n These may be fractional.\n :param dim: the dimension of the output.\n :param max_period: controls the minimum frequency of the embeddings.\n :return: an [N x dim] Tensor of positional embeddings.\n \"\"\"\n if not repeat_only:\n half = dim // 2\n freqs = torch.exp(\n -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half\n ).to(device=timesteps.device)\n args = timesteps[:, None].float() * freqs[None]\n embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)\n if dim % 2:\n embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)\n else:\n embedding = repeat(timesteps, 'b -> b d', d=dim)\n return embedding" }, { "identifier": "SpatialTransformer", "path": "ldm/modules/attention.py", "snippet": "class SpatialTransformer(nn.Module):\n \"\"\"\n Transformer block for image-like data.\n First, project the input (aka embedding)\n and reshape to b, t, d.\n Then apply standard 
transformer action.\n Finally, reshape to image\n \"\"\"\n def __init__(self, in_channels, n_heads, d_head,\n depth=1, dropout=0., context_dim=None,\n disable_self_attn=False):\n super().__init__()\n self.in_channels = in_channels\n inner_dim = n_heads * d_head\n self.norm = Normalize(in_channels)\n\n self.proj_in = nn.Conv2d(in_channels,\n inner_dim,\n kernel_size=1,\n stride=1,\n padding=0)\n\n self.transformer_blocks = nn.ModuleList(\n [BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim,\n disable_self_attn=disable_self_attn)\n for d in range(depth)]\n )\n\n self.proj_out = zero_module(nn.Conv2d(inner_dim,\n in_channels,\n kernel_size=1,\n stride=1,\n padding=0))\n\n def forward(self, x, context=None):\n # note: if no context is given, cross-attention defaults to self-attention\n b, c, h, w = x.shape\n x_in = x\n x = self.norm(x)\n x = self.proj_in(x)\n x = rearrange(x, 'b c h w -> b (h w) c').contiguous()\n for block in self.transformer_blocks:\n x = block(x, context=context)\n x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w).contiguous()\n x = self.proj_out(x)\n return x + x_in" }, { "identifier": "exists", "path": "ldm/util.py", "snippet": "def exists(x):\n return x is not None" } ]
from abc import abstractmethod
from functools import partial
from typing import Iterable

from ldm.modules.diffusionmodules.util import (
    checkpoint,
    conv_nd,
    linear,
    avg_pool_nd,
    zero_module,
    normalization,
    timestep_embedding,
)
from ldm.modules.attention import SpatialTransformer
from ldm.util import exists
from omegaconf.listconfig import ListConfig

import math
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as F
2,531
def __init__( self, spacial_dim: int, embed_dim: int, num_heads_channels: int, output_dim: int = None, ): super().__init__() self.positional_embedding = nn.Parameter(th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5) self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1) self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1) self.num_heads = embed_dim // num_heads_channels self.attention = QKVAttention(self.num_heads) def forward(self, x): b, c, *_spatial = x.shape x = x.reshape(b, c, -1) # NC(HW) x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1) # NC(HW+1) x = x + self.positional_embedding[None, :, :].to(x.dtype) # NC(HW+1) x = self.qkv_proj(x) x = self.attention(x) x = self.c_proj(x) return x[:, :, 0] class TimestepBlock(nn.Module): """ Any module where forward() takes timestep embeddings as a second argument. """ @abstractmethod def forward(self, x, emb): """ Apply the module to `x` given `emb` timestep embeddings. """ class TimestepEmbedSequential(nn.Sequential, TimestepBlock): """ A sequential module that passes timestep embeddings to the children that support it as an extra input. """ def forward(self, x, emb, context=None): for layer in self: if isinstance(layer, TimestepBlock): x = layer(x, emb) elif isinstance(layer, SpatialTransformer): x = layer(x, context) else: x = layer(x) return x class Upsample(nn.Module): """ An upsampling layer with an optional convolution. :param channels: channels in the inputs and outputs. :param use_conv: a bool determining if a convolution is applied. :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then upsampling occurs in the inner-two dimensions. """ def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1): super().__init__() self.channels = channels self.out_channels = out_channels or channels self.use_conv = use_conv self.dims = dims if use_conv: self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=padding) def forward(self, x): assert x.shape[1] == self.channels if self.dims == 3: x = F.interpolate( x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest" ) else: x = F.interpolate(x, scale_factor=2, mode="nearest") if self.use_conv: x = self.conv(x) return x class TransposedUpsample(nn.Module): 'Learned 2x upsampling without padding' def __init__(self, channels, out_channels=None, ks=5): super().__init__() self.channels = channels self.out_channels = out_channels or channels self.up = nn.ConvTranspose2d(self.channels,self.out_channels,kernel_size=ks,stride=2) def forward(self,x): return self.up(x) class Downsample(nn.Module): """ A downsampling layer with an optional convolution. :param channels: channels in the inputs and outputs. :param use_conv: a bool determining if a convolution is applied. :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then downsampling occurs in the inner-two dimensions. """ def __init__(self, channels, use_conv, dims=2, out_channels=None,padding=1): super().__init__() self.channels = channels self.out_channels = out_channels or channels self.use_conv = use_conv self.dims = dims stride = 2 if dims != 3 else (1, 2, 2) if use_conv: self.op = conv_nd( dims, self.channels, self.out_channels, 3, stride=stride, padding=padding ) else: assert self.channels == self.out_channels
# dummy replace def convert_module_to_f16(x): pass def convert_module_to_f32(x): pass ## go class AttentionPool2d(nn.Module): """ Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py """ def __init__( self, spacial_dim: int, embed_dim: int, num_heads_channels: int, output_dim: int = None, ): super().__init__() self.positional_embedding = nn.Parameter(th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5) self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1) self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1) self.num_heads = embed_dim // num_heads_channels self.attention = QKVAttention(self.num_heads) def forward(self, x): b, c, *_spatial = x.shape x = x.reshape(b, c, -1) # NC(HW) x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1) # NC(HW+1) x = x + self.positional_embedding[None, :, :].to(x.dtype) # NC(HW+1) x = self.qkv_proj(x) x = self.attention(x) x = self.c_proj(x) return x[:, :, 0] class TimestepBlock(nn.Module): """ Any module where forward() takes timestep embeddings as a second argument. """ @abstractmethod def forward(self, x, emb): """ Apply the module to `x` given `emb` timestep embeddings. """ class TimestepEmbedSequential(nn.Sequential, TimestepBlock): """ A sequential module that passes timestep embeddings to the children that support it as an extra input. """ def forward(self, x, emb, context=None): for layer in self: if isinstance(layer, TimestepBlock): x = layer(x, emb) elif isinstance(layer, SpatialTransformer): x = layer(x, context) else: x = layer(x) return x class Upsample(nn.Module): """ An upsampling layer with an optional convolution. :param channels: channels in the inputs and outputs. :param use_conv: a bool determining if a convolution is applied. :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then upsampling occurs in the inner-two dimensions. """ def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1): super().__init__() self.channels = channels self.out_channels = out_channels or channels self.use_conv = use_conv self.dims = dims if use_conv: self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=padding) def forward(self, x): assert x.shape[1] == self.channels if self.dims == 3: x = F.interpolate( x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest" ) else: x = F.interpolate(x, scale_factor=2, mode="nearest") if self.use_conv: x = self.conv(x) return x class TransposedUpsample(nn.Module): 'Learned 2x upsampling without padding' def __init__(self, channels, out_channels=None, ks=5): super().__init__() self.channels = channels self.out_channels = out_channels or channels self.up = nn.ConvTranspose2d(self.channels,self.out_channels,kernel_size=ks,stride=2) def forward(self,x): return self.up(x) class Downsample(nn.Module): """ A downsampling layer with an optional convolution. :param channels: channels in the inputs and outputs. :param use_conv: a bool determining if a convolution is applied. :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then downsampling occurs in the inner-two dimensions. """ def __init__(self, channels, use_conv, dims=2, out_channels=None,padding=1): super().__init__() self.channels = channels self.out_channels = out_channels or channels self.use_conv = use_conv self.dims = dims stride = 2 if dims != 3 else (1, 2, 2) if use_conv: self.op = conv_nd( dims, self.channels, self.out_channels, 3, stride=stride, padding=padding ) else: assert self.channels == self.out_channels
self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)
3
2023-12-21 04:44:00+00:00
4k
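For readability, here is a minimal reassembly (with real line breaks) of where this record's next_line lands: it completes the else branch of Downsample.__init__ shown at the end of cropped_code. This is an illustration only, not an extra record field; conv_nd and avg_pool_nd come from the record's own import_statement, and nn is torch.nn.

import torch.nn as nn
from ldm.modules.diffusionmodules.util import conv_nd, avg_pool_nd

class Downsample(nn.Module):
    def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.dims = dims
        stride = 2 if dims != 3 else (1, 2, 2)
        if use_conv:
            # convolutional downsampling branch, exactly as in cropped_code
            self.op = conv_nd(
                dims, self.channels, self.out_channels, 3, stride=stride, padding=padding
            )
        else:
            assert self.channels == self.out_channels
            self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)  # <- next_line (gold completion)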
EntySec/SeaShell
seashell/modules/unhook.py
[ { "identifier": "Loot", "path": "seashell/lib/loot.py", "snippet": "class Loot(String, FS):\n \"\"\" Subclass of seashell.lib module.\n\n This subclass of seashell.lib module is intended for providing\n tools for working with loot collected by SeaShell.\n \"\"\"\n\n def __init__(self) -> None:\n super().__init__()\n\n self.badges = Badges()\n\n self.loot = Config().loot_path\n self.data = Config().data_path\n\n def create_loot(self) -> None:\n \"\"\" Create loot directory in workspace.\n\n :return None: None\n \"\"\"\n\n if not os.path.isdir(self.loot):\n os.mkdir(self.loot)\n\n def specific_loot(self, filename: str) -> str:\n \"\"\" Return full path to the specific file\n from the loot directory.\n\n :param str filename: file name\n :return str: path to the file\n \"\"\"\n\n return self.loot + filename\n\n def random_loot(self, extension: Optional[str] = None) -> str:\n \"\"\" Generate random loot path and add extension (if specified).\n\n :param Optional[str] extension: extension\n :return str: random loot path\n \"\"\"\n\n filename = self.random_string(16)\n\n if extension:\n filename += '.' + extension\n\n return self.loot + filename\n\n def get_file(self, filename: str) -> bytes:\n \"\"\" Get specific file contents.\n\n :param str filename: file name\n :return bytes: file contents\n \"\"\"\n\n self.check_file(filename)\n\n with open(filename, 'rb') as f:\n return f.read()\n\n def save_file(self, location: str, data: bytes, extension: Optional[str] = None,\n filename: Optional[str] = None) -> Union[str, None]:\n \"\"\" Save contents to specific location.\n\n :param str location: location\n :param bytes data: contents to save\n :param Optional[str] extension: file extension\n :param Optional[str] filename: file name\n :return Union[str, None]: path if success else None\n \"\"\"\n\n exists, is_dir = self.exists(location)\n\n if exists:\n if is_dir:\n if location.endswith('/'):\n location += os.path.split(filename)[1] if filename else self.random_string(16)\n else:\n location += '/' + os.path.split(filename)[1] if filename else self.random_string(16)\n\n if extension:\n if not location.endswith('.' + extension):\n location += '.' 
+ extension\n\n with open(location, 'wb') as f:\n f.write(data)\n\n self.badges.print_success(f\"Saved to {location}!\")\n return os.path.abspath(location)\n\n return None\n\n def remove_file(self, filename: str) -> None:\n \"\"\" Remove specific file.\n\n :param str filename: file name\n :return None: None\n \"\"\"\n\n self.check_file(filename)\n os.remove(filename)\n\n self.badges.print_success(f\"Removed {filename}!\")\n\n def get_loot(self, filename: str) -> bytes:\n \"\"\" Get specific loot contents.\n\n :param str filename: file name of loot\n :return bytes data: loot contents\n \"\"\"\n\n filename = os.path.split(filename)[1]\n return self.get_file(self.loot + filename)\n\n def save_loot(self, filename: str, data: bytes) -> Union[str, None]:\n \"\"\" Save contents to loot directory.\n\n :param str filename: file name of loot\n :param bytes data: loot contents\n :return Union[str, None]: path if success else None\n \"\"\"\n\n filename = os.path.split(filename)[1]\n return self.save_file(self.loot + filename, data)\n\n def remove_loot(self, filename: str) -> None:\n \"\"\" Remove specific loot from loot directory.\n\n :param str filename: file name of loot\n :return None: None\n \"\"\"\n\n filename = os.path.split(filename)[1]\n self.remove_file(self.loot + filename)\n\n def get_data(self, filename: str) -> bytes:\n \"\"\" Get contents of file from data directory.\n\n :param str filename: file name\n :return bytes: data contents\n :raises RuntimeError: with trailing error message\n \"\"\"\n\n if os.path.exists(self.data + filename):\n with open(self.data + filename, 'rb') as f:\n return f.read()\n else:\n raise RuntimeError(\"Invalid data given!\")\n\n def list_loot(self) -> list:\n \"\"\" List all loots from loot directory.\n\n :return list: all loots from loot directory\n \"\"\"\n\n loots = []\n\n for loot in os.listdir(self.loot):\n loots.append((loot, self.loot + loot, datetime.datetime.fromtimestamp(\n os.path.getmtime(self.loot + loot)).astimezone().strftime(\n \"%Y-%m-%d %H:%M:%S %Z\")))\n\n return loots" }, { "identifier": "Hook", "path": "seashell/core/hook.py", "snippet": "class Hook(object):\n \"\"\" Subclass of seashell.core module.\n\n This subclass of seashell.core module is intended for providing\n an implementation a persistence for a poor man.\n \"\"\"\n\n def __init__(self, host: Optional[str] = None, port: Optional[int] = None) -> None:\n \"\"\" Initialize device hook.\n\n :param Optional[str] host: host\n :param Optional[int] port: port\n :return None: None\n \"\"\"\n\n self.config = Config()\n\n self.host = host\n self.port = port\n\n if host and port:\n self.hash = String().base64_string(\n f'{host}:{str(port)}', decode=True)\n else:\n self.hash = ''\n\n self.main = self.config.data_path + 'hook'\n self.mussel = self.config.data_path + 'Mussel.app/mussel'\n\n def patch_ipa(self, path: str) -> None:\n \"\"\" Patch existing IPA.\n\n :param str path: path to IPA file\n :return None: None\n \"\"\"\n\n shutil.unpack_archive(path, format='zip')\n app_files = [file for file in os.listdir('Payload') if file.endswith('.app')]\n\n if not app_files:\n return\n\n bundle = '/'.join(('Payload', app_files[0] + '/'))\n executable = self.get_executable(bundle + 'Info.plist')\n\n self.patch_plist(bundle + 'Info.plist')\n\n shutil.move(bundle + executable, bundle + executable + '.hooked')\n shutil.copy(self.main, bundle + executable)\n shutil.copy(self.mussel, bundle + 'mussel')\n\n os.chmod(bundle + executable, 777)\n os.chmod(bundle + 'mussel', 777)\n\n app = path[:-4]\n 
os.remove(path)\n\n os.mkdir(app)\n shutil.move('Payload', app)\n shutil.make_archive(path, 'zip', app)\n shutil.move(path + '.zip', path)\n shutil.rmtree(app)\n\n @staticmethod\n def get_executable(path: str) -> str:\n \"\"\" Get CFBundleExecutable path from plist.\n\n :param str path: path to plist to parse\n :return str: content of CFBundleExecutable\n \"\"\"\n\n with open(path, 'rb') as f:\n plist_data = plistlib.load(f)\n\n if 'CFBundleExecutable' in plist_data:\n return plist_data['CFBundleExecutable']\n\n return ''\n\n def patch_plist(self, path: str, revert: bool = False) -> None:\n \"\"\" Patch plist file and insert object.\n\n :param str path: path to plist to patch\n :param bool revert: revert\n :return None: None\n \"\"\"\n\n with open(path, 'rb') as f:\n plist_data = plistlib.load(f)\n\n if not revert:\n plist_data['CFBundleBase64Hash'] = self.hash\n else:\n del plist_data['CFBundleBase64Hash']\n\n with open(path, 'wb') as f:\n plistlib.dump(plist_data, f)" } ]
from pwny.api import * from pwny.types import * from seashell.lib.loot import Loot from seashell.core.hook import Hook from hatsploit.lib.command import Command
2,595
""" This command requires SeaShell: https://github.com/EntySec/SeaShell Current source: https://github.com/EntySec/SeaShell """ class HatSploitCommand(Command): def __init__(self): super().__init__() self.details = { 'Category': "evasion", 'Name': "unhook", 'Authors': [ 'Ivan Nikolskiy (enty8080) - command developer' ], 'Description': "Remove hook from other app (e.g. Contacts.app).", 'Usage': "unhook <app_name>", 'MinArgs': 1 } self.plist = Loot().specific_loot('Info.plist') def find_app(self, app_name): containers = '/private/var/containers/Bundle/Application' result = self.session.send_command( tag=FS_LIST, args={ TLV_TYPE_PATH: containers } ) if result.get_int(TLV_TYPE_STATUS) != TLV_STATUS_SUCCESS: self.print_error("Failed to access application containers!") return self.print_process(f"Searching for {app_name} in containers...") file = result.get_tlv(TLV_TYPE_GROUP) path = None while file: apps = self.session.send_command( tag=FS_LIST, args={ TLV_TYPE_PATH: file.get_string(TLV_TYPE_PATH) } ) if apps.get_int(TLV_TYPE_STATUS) != TLV_STATUS_SUCCESS: continue app = apps.get_tlv(TLV_TYPE_GROUP) while app: if app.get_string(TLV_TYPE_FILENAME) == app_name: path = app.get_string(TLV_TYPE_PATH) self.print_success(f"Found {app_name} at {path}!") break app = apps.get_tlv(TLV_TYPE_GROUP) if path: break file = result.get_tlv(TLV_TYPE_GROUP) return path def run(self, argc, argv): path = self.find_app(argv[1]) if not path: self.print_error(f"Path for {argv[1]} not found!") return if not self.session.download(path + '/Info.plist', self.plist): self.print_error("Failed to access Info.plist!") return self.print_process("Patching Info.plist...")
""" This command requires SeaShell: https://github.com/EntySec/SeaShell Current source: https://github.com/EntySec/SeaShell """ class HatSploitCommand(Command): def __init__(self): super().__init__() self.details = { 'Category': "evasion", 'Name': "unhook", 'Authors': [ 'Ivan Nikolskiy (enty8080) - command developer' ], 'Description': "Remove hook from other app (e.g. Contacts.app).", 'Usage': "unhook <app_name>", 'MinArgs': 1 } self.plist = Loot().specific_loot('Info.plist') def find_app(self, app_name): containers = '/private/var/containers/Bundle/Application' result = self.session.send_command( tag=FS_LIST, args={ TLV_TYPE_PATH: containers } ) if result.get_int(TLV_TYPE_STATUS) != TLV_STATUS_SUCCESS: self.print_error("Failed to access application containers!") return self.print_process(f"Searching for {app_name} in containers...") file = result.get_tlv(TLV_TYPE_GROUP) path = None while file: apps = self.session.send_command( tag=FS_LIST, args={ TLV_TYPE_PATH: file.get_string(TLV_TYPE_PATH) } ) if apps.get_int(TLV_TYPE_STATUS) != TLV_STATUS_SUCCESS: continue app = apps.get_tlv(TLV_TYPE_GROUP) while app: if app.get_string(TLV_TYPE_FILENAME) == app_name: path = app.get_string(TLV_TYPE_PATH) self.print_success(f"Found {app_name} at {path}!") break app = apps.get_tlv(TLV_TYPE_GROUP) if path: break file = result.get_tlv(TLV_TYPE_GROUP) return path def run(self, argc, argv): path = self.find_app(argv[1]) if not path: self.print_error(f"Path for {argv[1]} not found!") return if not self.session.download(path + '/Info.plist', self.plist): self.print_error("Failed to access Info.plist!") return self.print_process("Patching Info.plist...")
hook = Hook()
1
2023-12-17 04:14:16+00:00
4k
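As an illustration of what this record asks a model to predict, the tail of HatSploitCommand.run() from cropped_code is reassembled below with the gold next_line appended. The imports are the record's own; the rest of the class body is elided.

from seashell.core.hook import Hook
from hatsploit.lib.command import Command

class HatSploitCommand(Command):
    # ... details dict, self.plist, find_app() as shown in cropped_code ...

    def run(self, argc, argv):
        path = self.find_app(argv[1])

        if not path:
            self.print_error(f"Path for {argv[1]} not found!")
            return

        if not self.session.download(path + '/Info.plist', self.plist):
            self.print_error("Failed to access Info.plist!")
            return

        self.print_process("Patching Info.plist...")
        hook = Hook()  # <- next_line (gold completion); Hook patches/reverts Info.plist per the record's context snippet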
FlagOpen/TACO
train.py
[ { "identifier": "Trainer", "path": "train_utils.py", "snippet": "class Trainer(transformers.Trainer):\n \"\"\"Use CosineAnnealingLR from pytorch \n \"\"\"\n \n def create_scheduler(self, num_training_steps: int, optimizer: torch.optim.Optimizer = None):\n \"\"\"\n Setup the scheduler. The optimizer of the trainer must have been set up either before this method is called or\n passed as an argument.\n\n Args:\n num_training_steps (int): The number of training steps to do.\n \"\"\"\n if self.lr_scheduler is None:\n num_warmup_steps=self.args.get_warmup_steps(num_training_steps)\n if getattr(self.args, 'use_cosine_anneal_with_warmup', False):\n lr_max=1\n lr_min=1e-1\n cosine_anneal_with_warmup = lambda cur_iter: max(cur_iter / num_warmup_steps, 1e-9) if cur_iter < num_warmup_steps else \\\n (lr_min + 0.5*(lr_max-lr_min)*(1.0+math.cos((cur_iter-num_warmup_steps)/(num_training_steps-num_warmup_steps)*math.pi)))\n \n self.lr_scheduler = torch.optim.lr_scheduler.LambdaLR(\n optimizer=self.optimizer if optimizer is None else optimizer, \n lr_lambda=cosine_anneal_with_warmup,\n )\n else:\n self.lr_scheduler = get_scheduler(\n self.args.lr_scheduler_type,\n optimizer=self.optimizer if optimizer is None else optimizer,\n num_warmup_steps=num_warmup_steps,\n num_training_steps=num_training_steps,\n )\n self._created_lr_scheduler = True\n return self.lr_scheduler" }, { "identifier": "DEFAULT_PAD_TOKEN", "path": "datamodule/constants.py", "snippet": "DEFAULT_PAD_TOKEN = \"[PAD]\"" }, { "identifier": "DEFAULT_EOS_TOKEN", "path": "datamodule/constants.py", "snippet": "DEFAULT_EOS_TOKEN = \"<|endoftext|>\"" }, { "identifier": "DEFAULT_BOS_TOKEN", "path": "datamodule/constants.py", "snippet": "DEFAULT_BOS_TOKEN = \"<|endoftext|>\"" }, { "identifier": "TacoDataset", "path": "datamodule/taco_dataset.py", "snippet": "class TacoDataset(Dataset):\n \"\"\"Dataset for fine-tune.\"\"\"\n \n def __init__(self, data_path: str, debug: bool=False, learning_skill: int=None):\n super(TacoDataset, self).__init__()\n logging.warning(\"Loading tokenized data...\")\n if os.path.exists(data_path):\n dataset = load_from_disk(data_path).shuffle()\n else:\n raise ValueError(\" The specified data_path does not exist. Please provide a tokenized dataset\")\n \n if not all(key in dataset.column_names for key in ['input_ids', 'source_ids_lens']):\n raise ValueError(\"Data has not been tokenized. Please tokenize the data first.\")\n if debug:\n dataset = dataset.select(range(1000))\n if learning_skill:\n dataset = dataset.filter(lambda entry: entry['labels'][learning_skill])\n \n logging.warning(\"Collect columns of hf dataset... 
This may take some time...\")\n input_ids = dataset['input_ids']\n source_ids_lens = dataset['source_ids_lens']\n \n self.learning_skill = None\n if learning_skill:\n scores = dataset['scores']\n scores = preprocess_scores(scores, source_ids_lens, learning_skill)\n self.scores = scores\n self.learning_skill = learning_skill\n \n logging.warning(\"Processing inputs...\")\n data_dict = preprocess(input_ids, source_ids_lens)\n \n self.input_ids = data_dict[\"input_ids\"]\n self.labels = data_dict[\"labels\"]\n\n def __len__(self):\n return len(self.input_ids)\n\n def __getitem__(self, i) -> Dict[str, torch.Tensor]:\n if self.learning_skill:\n return dict(input_ids=self.input_ids[i], labels=self.labels[i], scores=self.scores[i])\n else:\n return dict(input_ids=self.input_ids[i], labels=self.labels[i])" }, { "identifier": "DataCollatorForTacoDataset", "path": "datamodule/taco_dataset.py", "snippet": "class DataCollatorForTacoDataset(object):\n \"\"\"Collate examples for fine-tune.\"\"\"\n\n tokenizer: transformers.PreTrainedTokenizer\n\n def __call__(self, instances: Sequence[Dict]) -> Dict[str, torch.Tensor]:\n input_ids, labels = tuple([instance[key] for instance in instances] for key in (\"input_ids\", \"labels\"))\n input_ids = torch.nn.utils.rnn.pad_sequence(\n input_ids, batch_first=True, padding_value=self.tokenizer.pad_token_id\n )\n labels = torch.nn.utils.rnn.pad_sequence(labels, batch_first=True, padding_value=IGNORE_INDEX)\n return dict(\n input_ids=input_ids,\n labels=labels,\n )" } ]
from typing import Optional, Dict from dataclasses import dataclass, field from train_utils import Trainer from datamodule import DEFAULT_PAD_TOKEN, DEFAULT_EOS_TOKEN, DEFAULT_BOS_TOKEN, TacoDataset, DataCollatorForTacoDataset import transformers
1,780
""" Finetune models on TACO-Dataset train split """ @dataclass class ModelArguments: model_name_or_path: Optional[str] = field(default="bigcode/tiny_starcoder_py") @dataclass class DataArguments: data_path: str = field(default=None, metadata={"help": "Path to the training data."}) @dataclass class TrainingArguments(transformers.TrainingArguments): cache_dir: Optional[str] = field(default=None) optim: str = field(default="adamw_torch") adam_beta1: float = field(default=0.9) adam_beta2: float = field(default=0.95) use_cosine_anneal_with_warmup: bool = field(default=True) model_max_length: int = field( default=2048, metadata={"help": "Maximum sequence length. Sequences will be right padded (and possibly truncated)."}, ) resume_from_checkpoint: bool = field( default=False, metadata={"help": "load the last checkpoint in args.output_dir as saved by a previous instance of Trainer."} ) def smart_tokenizer_and_embedding_resize( special_tokens_dict: Dict, tokenizer: transformers.PreTrainedTokenizer, model: transformers.PreTrainedModel, ): """Resize tokenizer and embedding. Note: This is the unoptimized version that may make your embedding size not be divisible by 64. """ num_new_tokens = tokenizer.add_special_tokens(special_tokens_dict) model.resize_token_embeddings(len(tokenizer)) if num_new_tokens > 0: input_embeddings = model.get_input_embeddings().weight.data output_embeddings = model.get_output_embeddings().weight.data input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(dim=0, keepdim=True) output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(dim=0, keepdim=True) input_embeddings[-num_new_tokens:] = input_embeddings_avg output_embeddings[-num_new_tokens:] = output_embeddings_avg def make_taco_data_module(tokenizer: transformers.PreTrainedTokenizer, data_args) -> Dict: """Make dataset and collator for fine-tune""" train_dataset = TacoDataset(data_path=data_args.data_path) data_collator = DataCollatorForTacoDataset(tokenizer=tokenizer) return dict(train_dataset=train_dataset, eval_dataset=None, data_collator=data_collator) def train(): parser = transformers.HfArgumentParser((ModelArguments, DataArguments, TrainingArguments)) model_args, data_args, training_args = parser.parse_args_into_dataclasses() model = transformers.AutoModelForCausalLM.from_pretrained( model_args.model_name_or_path, trust_remote_code=True, use_auth_token=True, cache_dir=training_args.cache_dir, ) tokenizer = transformers.AutoTokenizer.from_pretrained( model_args.model_name_or_path, cache_dir=training_args.cache_dir, trust_remote_code=True, use_auth_token=True, model_max_length=training_args.model_max_length, padding_side="right", use_fast=False, ) special_tokens_dict = dict() if tokenizer.pad_token is None:
""" Finetune models on TACO-Dataset train split """ @dataclass class ModelArguments: model_name_or_path: Optional[str] = field(default="bigcode/tiny_starcoder_py") @dataclass class DataArguments: data_path: str = field(default=None, metadata={"help": "Path to the training data."}) @dataclass class TrainingArguments(transformers.TrainingArguments): cache_dir: Optional[str] = field(default=None) optim: str = field(default="adamw_torch") adam_beta1: float = field(default=0.9) adam_beta2: float = field(default=0.95) use_cosine_anneal_with_warmup: bool = field(default=True) model_max_length: int = field( default=2048, metadata={"help": "Maximum sequence length. Sequences will be right padded (and possibly truncated)."}, ) resume_from_checkpoint: bool = field( default=False, metadata={"help": "load the last checkpoint in args.output_dir as saved by a previous instance of Trainer."} ) def smart_tokenizer_and_embedding_resize( special_tokens_dict: Dict, tokenizer: transformers.PreTrainedTokenizer, model: transformers.PreTrainedModel, ): """Resize tokenizer and embedding. Note: This is the unoptimized version that may make your embedding size not be divisible by 64. """ num_new_tokens = tokenizer.add_special_tokens(special_tokens_dict) model.resize_token_embeddings(len(tokenizer)) if num_new_tokens > 0: input_embeddings = model.get_input_embeddings().weight.data output_embeddings = model.get_output_embeddings().weight.data input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(dim=0, keepdim=True) output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(dim=0, keepdim=True) input_embeddings[-num_new_tokens:] = input_embeddings_avg output_embeddings[-num_new_tokens:] = output_embeddings_avg def make_taco_data_module(tokenizer: transformers.PreTrainedTokenizer, data_args) -> Dict: """Make dataset and collator for fine-tune""" train_dataset = TacoDataset(data_path=data_args.data_path) data_collator = DataCollatorForTacoDataset(tokenizer=tokenizer) return dict(train_dataset=train_dataset, eval_dataset=None, data_collator=data_collator) def train(): parser = transformers.HfArgumentParser((ModelArguments, DataArguments, TrainingArguments)) model_args, data_args, training_args = parser.parse_args_into_dataclasses() model = transformers.AutoModelForCausalLM.from_pretrained( model_args.model_name_or_path, trust_remote_code=True, use_auth_token=True, cache_dir=training_args.cache_dir, ) tokenizer = transformers.AutoTokenizer.from_pretrained( model_args.model_name_or_path, cache_dir=training_args.cache_dir, trust_remote_code=True, use_auth_token=True, model_max_length=training_args.model_max_length, padding_side="right", use_fast=False, ) special_tokens_dict = dict() if tokenizer.pad_token is None:
special_tokens_dict["pad_token"] = DEFAULT_PAD_TOKEN
1
2023-12-20 03:12:01+00:00
4k
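The gold completion for this record continues the pad-token check at the end of cropped_code. A minimal reassembly, for illustration only: DEFAULT_PAD_TOKEN is "[PAD]" according to the record's context snippet from datamodule/constants.py.

    # inside train(), after the tokenizer has been loaded (see cropped_code)
    special_tokens_dict = dict()
    if tokenizer.pad_token is None:
        special_tokens_dict["pad_token"] = DEFAULT_PAD_TOKEN  # <- next_line (gold completion), i.e. "[PAD]"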
OPPOMKLab/u-LLaVA
datasets/builders/plain_type_builder.py
[ { "identifier": "registry", "path": "utils/registry.py", "snippet": "class Registry:\n def register_builder(cls, name):\n def wrap(builder_cls):\n def register_model(cls, name):\n def wrap(model_cls):\n def register_processor(cls, name):\n def wrap(processor_cls):\n def register_collator(cls, name):\n def wrap(collator_cls):\n def register_task(cls, name):\n def wrap(task_cls):\n def register(cls, name, obj):\n def get_builder_class(cls, name):\n def get_model_class(cls, name):\n def get_processor_class(cls, name):\n def get_collator_class(cls, name):\n def get_task_class(cls, name):\n def list_models(cls):\n def list_processors(cls):\n def list_collators(cls):\n def list_builders(cls):\n def list_tasks(cls):\n def get_path(cls, name):\n def get(cls, name, default=None, no_warning=False):\n def unregister(cls, name):" }, { "identifier": "TgifDataset", "path": "datasets/datasets/tgif_dataset.py", "snippet": "class TgifDataset(BaseDataset):\n def __init__(self,\n vis_processor,\n tokenizer,\n vis_root,\n ann_root,\n portion=1,\n image_token_len=256,\n data_type='video',\n conv_type='conv_simple'\n ):\n \"\"\"\n vis_root (string): Root directory of images\n ann_root (string): Root directory of annotations\n \"\"\"\n super().__init__(vis_processor=vis_processor,\n tokenizer=tokenizer,\n vis_root=vis_root,\n ann_root=ann_root,\n portion=portion,\n data_type=data_type,\n conv_type=conv_type\n )\n\n self.resize_size = self.vis_processor.image_size\n self.num_frm = self.vis_processor.n_frm\n # temporal token (n_frm) + spatial token (num_patch)\n self.image_token_len = self.num_frm + image_token_len\n\n def __getitem__(self, index):\n num_retries = 10 # skip error videos\n for _ in range(num_retries):\n try:\n sample = self.annotation[index]\n\n conversation_list = sample['conversations']\n\n if 'gif' in sample:\n gif_path = os.path.join(self.vis_root, sample['gif'])\n gif = self.vis_processor(gif_path)\n # add <DEFAULT_IMAGE_PATCH_TOKEN>\n sources = preprocess_video_text(copy.deepcopy(conversation_list),\n cur_token_len=self.image_token_len)\n else:\n gif = None\n sources = [copy.deepcopy(conversation_list)]\n\n data_dict = preprocess(sources, self.tokenizer, self.conv_type)\n\n data_dict = dict(input_ids=data_dict[\"input_ids\"][0],\n labels=data_dict[\"labels\"][0])\n\n # video exist: process by CLIP, non video: zero tensor\n if gif is not None:\n data_dict['video'] = gif\n except:\n video_path = self.annotation[index]['gif'] if 'gif' in self.annotation[index] else str(index)\n print(f\"Failed to load examples with video: {video_path}. 
\"\n f\"Will randomly sample an example as a replacement.\")\n index = random.randint(0, len(self) - 1)\n continue\n break\n else:\n raise RuntimeError(f\"Failed to fetch video after {num_retries} retries.\")\n\n return data_dict" }, { "identifier": "BaseDatasetBuilder", "path": "datasets/builders/base_builder.py", "snippet": "class BaseDatasetBuilder:\n \"\"\"\n builders of preprocessor and dataset\n related to: processors, datasets\n \"\"\"\n dataset_cls = None\n\n def __init__(self, cfg=None):\n \"\"\"\n :param cfg: full config, including models, datasets, etc.\n \"\"\"\n super().__init__()\n\n if isinstance(cfg, str):\n # load from path\n self.config = self.load_dataset_config(cfg)\n else:\n # when called from task.build_dataset()\n self.config = cfg\n\n @staticmethod\n def load_dataset_config(cfg_path):\n cfg = OmegaConf.load(cfg_path).dataset\n cfg = cfg[list(cfg.keys())[0]]\n\n return cfg\n\n def fetch_processor(self, processor_type='vis_processor', processor_dict=None):\n \"\"\"\n\n :param processor_type: 'vis_processor' or 'box_processor'\n :param processor_dict: {'clip_image': CLIPImageProcessor()}\n :return:\n \"\"\"\n name = self.config.get(processor_type, None)\n\n return processor_dict[name] if name is not None else None\n\n def build(self, tokenizer, processors_dict, conv_type='conv_simple'):\n \"\"\"\n Create by split datasets inheriting torch.utils.data.Datasets.\n\n # build() can be dataset-specific. Overwrite to customize.\n \"\"\"\n\n build_info = self.config.build_info\n ann_root = build_info.anno_dir\n vis_root = build_info.image_dir\n\n # processors\n vis_processor = self.fetch_processor('vis_processor', processors_dict)\n\n if not os.path.exists(vis_root) or not os.path.exists(ann_root):\n warnings.warn(\"storage path {0} or {1} does not exist.\".format(vis_root, ann_root))\n\n # create dataset\n dataset_cls = self.dataset_cls\n\n dataset = dataset_cls(\n vis_processor=vis_processor,\n tokenizer=tokenizer,\n vis_root=vis_root,\n ann_root=ann_root,\n conv_type=conv_type\n )\n\n return dataset" }, { "identifier": "LLaVADataset", "path": "datasets/datasets/llava_dataset.py", "snippet": "class LLaVADataset(BaseDataset):\n def __init__(self,\n vis_processor,\n tokenizer,\n vis_root,\n ann_root,\n portion=1,\n image_token_len=256,\n data_type='image',\n conv_type='conv_simple'\n ):\n \"\"\"\n vis_root (string): Root directory of Llava images (e.g. webvid_eval/video/)\n ann_root (string): Root directory of video (e.g. 
webvid_eval/annotations/)\n split (string): val or test\n \"\"\"\n super().__init__(vis_processor=vis_processor,\n tokenizer=tokenizer,\n vis_root=vis_root,\n ann_root=ann_root,\n portion=portion,\n data_type=data_type,\n conv_type=conv_type\n )\n\n self.image_token_len = image_token_len\n self.aspect_ratio = self.vis_processor.aspect_ratio\n\n def __getitem__(self, index):\n \"\"\"\n \"image_id\" is kept to stay compatible with the COCO evaluation format\n some datasets contain mixed sources, such as SQA (w/, w/o image)\n :param index:\n :return:\n \"\"\"\n num_retries = 10 # skip error images\n for _ in range(num_retries):\n try:\n sample = self.annotation[index]\n conversation_list = sample['conversations']\n\n if 'image' in sample:\n image_path = os.path.join(self.vis_root, sample['image'])\n image = Image.open(image_path).convert(\"RGB\")\n\n if self.aspect_ratio == 'pad':\n image = self.vis_processor.expand_image(image)\n\n image = self.vis_processor(image)\n sources = preprocess_image_text(copy.deepcopy(conversation_list),\n cur_token_len=self.image_token_len)\n else:\n image = None\n sources = [copy.deepcopy(conversation_list)]\n\n data_dict = preprocess(sources, self.tokenizer, self.conv_type)\n data_dict = dict(input_ids=data_dict[\"input_ids\"][0],\n labels=data_dict[\"labels\"][0])\n\n # image exist: process by CLIP, non image: no 'image' dict\n if image is not None:\n data_dict['image'] = image\n except:\n image_path = self.annotation[index]['image'] if 'image' in self.annotation[index] else str(index)\n print(f\"Failed to load examples with image: {image_path}. \"\n f\"Will randomly sample an example as a replacement.\")\n index = random.randint(0, len(self) - 1)\n continue\n break\n else:\n raise RuntimeError(f\"Failed to fetch video after {num_retries} retries.\")\n\n return data_dict" }, { "identifier": "LLaVASegDataset", "path": "datasets/datasets/llava_dataset.py", "snippet": "class LLaVASegDataset(LLaVADataset):\n sam_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1, 1, 1)\n sam_std = torch.Tensor([58.395, 57.12, 57.375]).view(-1, 1, 1)\n sam_size = 1024\n ignore_label = 255\n sam_transform = ResizeLongestSide(sam_size)\n\n def preprocess_for_sam(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Normalize pixel values and pad to a square input.\"\"\"\n # Normalize colors\n x = (x - self.sam_mean) / self.sam_std\n\n # Pad\n h, w = x.shape[-2:]\n padh = self.sam_size - h\n padw = self.sam_size - w\n x = F.pad(x, (0, padw, 0, padh))\n return x\n\n def __getitem__(self, index):\n sample = self.annotation[index]\n conversation_list = sample['conversations']\n\n # has image\n image_path = os.path.join(self.vis_root, sample['image'])\n image = cv2.imread(image_path)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n raw_size = image.shape[:2]\n\n if self.aspect_ratio == 'pad':\n image = self.vis_processor.expand_image(image)\n\n # preprocess images for clip\n image_clip = self.vis_processor(image)\n image_sam = self.sam_transform.apply_image(image) # preprocess images for sam\n resize = image_sam.shape[:2]\n image_sam = self.preprocess_for_sam(torch.from_numpy(image_sam).permute(2, 0, 1).contiguous())\n\n sources = preprocess_image_text(copy.deepcopy(conversation_list),\n cur_token_len=self.image_token_len)\n\n data_dict = preprocess(sources, self.tokenizer, self.conv_type)\n data_dict = dict(input_ids=data_dict[\"input_ids\"][0],\n labels=data_dict[\"labels\"][0])\n\n seg_mask = torch.rand(0, *raw_size)\n\n image_dict = {'image': image_clip, 'image_sam': image_sam, 
'seg_mask': seg_mask,\n 'raw_size': raw_size, 'resize': resize}\n\n data_dict.update(image_dict)\n\n return data_dict" } ]
from utils.registry import registry from datasets.datasets.tgif_dataset import TgifDataset from datasets.builders.base_builder import BaseDatasetBuilder from datasets.datasets.llava_dataset import LLaVADataset, LLaVASegDataset
2,978
""" Copyright 2023 OPPO Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ class PlainBuilder(BaseDatasetBuilder): dataset_cls = LLaVADataset def build(self, tokenizer, processor_dict, conv_type='conv_simple'): build_info = self.config.build_info dataset_cls = self.dataset_cls image_token_len = self.config.get('image_token_len', 256) image_dir = build_info.get('image_dir', '') anno_dir = build_info.get('anno_dir', '') portion = float(build_info.get('portion', 1)) data_type = self.config.get('data_type', 'image') vis_processor = self.fetch_processor('vis_processor', processor_dict) dataset = dataset_cls( vis_processor=vis_processor, tokenizer=tokenizer, vis_root=image_dir, ann_root=anno_dir, portion=portion, image_token_len=image_token_len, data_type=data_type, conv_type=conv_type, ) return dataset @registry.register_builder("llava_cc3m") @registry.register_builder("llava_instruct") @registry.register_builder("sqa") class LLaVACc3mBuilder(PlainBuilder): dataset_cls = LLaVADataset @registry.register_builder("llava_seg") class LlaVASegBuilder(PlainBuilder):
""" Copyright 2023 OPPO Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ class PlainBuilder(BaseDatasetBuilder): dataset_cls = LLaVADataset def build(self, tokenizer, processor_dict, conv_type='conv_simple'): build_info = self.config.build_info dataset_cls = self.dataset_cls image_token_len = self.config.get('image_token_len', 256) image_dir = build_info.get('image_dir', '') anno_dir = build_info.get('anno_dir', '') portion = float(build_info.get('portion', 1)) data_type = self.config.get('data_type', 'image') vis_processor = self.fetch_processor('vis_processor', processor_dict) dataset = dataset_cls( vis_processor=vis_processor, tokenizer=tokenizer, vis_root=image_dir, ann_root=anno_dir, portion=portion, image_token_len=image_token_len, data_type=data_type, conv_type=conv_type, ) return dataset @registry.register_builder("llava_cc3m") @registry.register_builder("llava_instruct") @registry.register_builder("sqa") class LLaVACc3mBuilder(PlainBuilder): dataset_cls = LLaVADataset @registry.register_builder("llava_seg") class LlaVASegBuilder(PlainBuilder):
dataset_cls = LLaVASegDataset
4
2023-12-21 08:10:23+00:00
4k
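Here the gold completion is a one-line class attribute: the record's next_line binds the freshly registered llava_seg builder to LLaVASegDataset, mirroring how LLaVACc3mBuilder binds LLaVADataset earlier in cropped_code. Reassembled for illustration only:

@registry.register_builder("llava_seg")
class LlaVASegBuilder(PlainBuilder):
    dataset_cls = LLaVASegDataset  # <- next_line (gold completion)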
shashikg/WhisperS2T
whisper_s2t/data.py
[ { "identifier": "pad_or_trim", "path": "whisper_s2t/audio.py", "snippet": "def pad_or_trim(array, length: int = N_SAMPLES, *, axis: int = -1):\n \"\"\"\n Pad or trim the audio array to N_SAMPLES, as expected by the encoder.\n \"\"\"\n \n if torch.is_tensor(array):\n if array.shape[axis] > length:\n array = array.index_select(\n dim=axis, index=torch.arange(length, device=array.device)\n )\n\n if array.shape[axis] < length:\n pad_widths = [(0, 0)] * array.ndim\n pad_widths[axis] = (0, length - array.shape[axis])\n array = F.pad(array, [pad for sizes in pad_widths[::-1] for pad in sizes])\n else:\n if array.shape[axis] > length:\n array = array.take(indices=range(length), axis=axis)\n\n if array.shape[axis] < length:\n pad_widths = [(0, 0)] * array.ndim\n pad_widths[axis] = (0, length - array.shape[axis])\n array = np.pad(array, pad_widths)\n \n return array" }, { "identifier": "audio_batch_generator", "path": "whisper_s2t/audio.py", "snippet": "def audio_batch_generator(audio_files):\n return THREAD_POOL_AUDIO_LOADER.imap(load_audio, audio_files)" }, { "identifier": "load_audio", "path": "whisper_s2t/audio.py", "snippet": "def load_audio(input_file, sr=16000, return_duration=False):\n \n try:\n with wave.open(input_file, 'rb') as wf:\n if (wf.getframerate() != sr) or (wf.getnchannels() != 1):\n raise Exception(\"Not a 16kHz wav mono channel file!\")\n \n frames = wf.getnframes()\n x = wf.readframes(int(frames))\n except:\n with tempfile.TemporaryDirectory() as tmpdir:\n wav_file = f\"{tmpdir}/tmp.wav\"\n ret_code = os.system(f'ffmpeg -hide_banner -loglevel panic -i {input_file} -threads 1 -acodec pcm_s16le -ac 1 -af aresample=resampler={RESAMPLING_ENGINE} -ar {sr} {wav_file} -y')\n if ret_code != 0: raise RuntimeError(\"ffmpeg failed to resample the input audio file, make sure ffmpeg is compiled properly!\")\n \n with wave.open(wav_file, 'rb') as wf:\n frames = wf.getnframes()\n x = wf.readframes(int(frames))\n \n audio_signal = np.frombuffer(x, np.int16).flatten().astype(np.float32)/32768.0\n audio_duration = len(audio_signal)/sr\n \n if return_duration:\n return audio_signal, audio_duration\n else:\n return audio_signal" } ]
import torch import numpy as np from tqdm import tqdm from .configs import * from .audio import pad_or_trim, audio_batch_generator, load_audio
2,214
if type(audio_files[0]) == str: self.get_audio_signal = self._get_audio_signal_from_file else: self.get_audio_signal = self._get_audio_signal_from_array def _get_audio_signal_from_array(self, item): return self.audio_files[item] def _get_audio_signal_from_file(self, item): return load_audio(self.audio_files[item]) def __len__(self): return len(self.audio_files) def __getitem__(self, item): audio = self.get_audio_signal(item) seq_len = audio.shape[-1] if self.initial_prompts[item]: initial_prompt = " " + self.initial_prompts[item].strip() initial_prompt_tokens = self.tokenizer.encode(initial_prompt)[-self.max_initial_prompt_len:] else: initial_prompt_tokens = [] prompt = self.tokenizer.sot_sequence(task=self.tasks[item], lang=self.lang_codes[item]) if self.without_timestamps: prompt = prompt + [self.tokenizer.no_timestamps] return audio, prompt, initial_prompt_tokens, seq_len class WhisperDataLoader: def __init__(self, device, tokenizer, speech_segmenter, dta_padding=3.0, without_timestamps=True, max_speech_len=29.0, max_initial_prompt_len=223, merge_chunks=True, use_dynamic_time_axis=False): self.device = device self.tokenizer = tokenizer self.speech_segmenter = speech_segmenter self.dta_padding = int(dta_padding*SAMPLE_RATE) self.without_timestamps = without_timestamps self.max_speech_len = max_speech_len self.max_initial_prompt_len = max_initial_prompt_len self.use_dynamic_time_axis = use_dynamic_time_axis self.merge_chunks = merge_chunks def data_collate_fn(self, batch): if self.use_dynamic_time_axis: max_len = min(max([_[3] for _ in batch]) + self.dta_padding, N_SAMPLES) else: max_len = N_SAMPLES signal_batch = torch.stack([torch.from_numpy(pad_or_trim(_[0], length=max_len)).to(self.device) for _ in batch]) seq_len = torch.tensor([_[3] for _ in batch]).to(self.device) prompt_batch = [] initial_prompt_max_len = max([len(_[2]) for _ in batch]) if initial_prompt_max_len: for _ in batch: prompt_batch.append([self.tokenizer.sot_prev] + (initial_prompt_max_len-len(_[2]))*[self.tokenizer.silent_token] + _[2] + _[1]) else: for _ in batch: prompt_batch.append(_[1]) if len(batch[0]) == 5: seg_metadata = [_[4] for _ in batch] return signal_batch, prompt_batch, seq_len, seg_metadata else: return signal_batch, prompt_batch, seq_len def get_segmented_audio_signal(self, audio_signal, file_id, lang, task, initial_prompt, sr=16000): start_ends, audio_signal = self.speech_segmenter(audio_signal=audio_signal) if initial_prompt: initial_prompt = " " + initial_prompt.strip() initial_prompt_tokens = self.tokenizer.encode(initial_prompt)[-self.max_initial_prompt_len:] else: initial_prompt_tokens = [] prompt = self.tokenizer.sot_sequence(task=task, lang=lang) if self.without_timestamps: prompt.append(self.tokenizer.no_timestamps) else: prompt.append(self.tokenizer.timestamp_begin) segmented_audio_signal = [] if self.merge_chunks: stitched_speech_segments = stitch_speech_segments(start_ends, max_len=self.max_speech_len) for stitched_seg in stitched_speech_segments: audio = [] for st, et in stitched_seg: audio.append(audio_signal[int(st*sr):int(et*sr)]) audio = np.concatenate(audio) seq_len = audio.shape[-1] seg_metadata = { 'file_id': file_id, 'start_time': stitched_seg[0][0], 'end_time': stitched_seg[-1][1], 'stitched_seg': stitched_seg, 'lang_code': lang } segmented_audio_signal.append((audio, prompt, initial_prompt_tokens, seq_len, seg_metadata)) else: for st, et in start_ends: audio = audio_signal[int(st*sr):int(et*sr)] seq_len = audio.shape[-1] segmented_audio_signal.append((audio, prompt, 
initial_prompt_tokens, seq_len, {'file_id': file_id, 'start_time': st, 'end_time': et})) return segmented_audio_signal def get_data_loader_with_vad(self, audio_files, lang_codes, tasks, initial_prompts, batch_size=16): segmented_audio_signal = [] pbar_update_len = {}
def stitch_speech_segments(start_ends, max_len=27.0, max_silent_region=None): speech_duration = [end - start for start, end in start_ends] stitched_speech_segments = [] curr_seg = [0] curr_dur = speech_duration[0] idx = 1 while idx < len(start_ends): if curr_dur + speech_duration[idx] > max_len: stitched_speech_segments.append([start_ends[_] for _ in curr_seg]) curr_seg = [idx] curr_dur = speech_duration[idx] else: curr_dur += speech_duration[idx] curr_seg.append(idx) idx += 1 stitched_speech_segments.append([start_ends[_] for _ in curr_seg]) if max_silent_region is None: return stitched_speech_segments stitched_speech_segments_joined = [] for segs in stitched_speech_segments: _segs = [] curr_seg_start_time, curr_seg_end_time = segs[0] for i in range(1, len(segs)): if (segs[i][0] - curr_seg_end_time) >= max_silent_region: _segs.append((curr_seg_start_time, curr_seg_end_time)) curr_seg_start_time = segs[i][0] curr_seg_end_time = segs[i][1] _segs.append((curr_seg_start_time, curr_seg_end_time)) stitched_speech_segments_joined.append(_segs) return stitched_speech_segments_joined class WhisperDataset(torch.utils.data.Dataset): def __init__(self, audio_files, lang_codes, tasks, initial_prompts, tokenizer, max_initial_prompt_len, device="cuda", dta_padding=48000, without_timestamps=True, use_dynamic_time_axis=False): self.audio_files = audio_files self.lang_codes = lang_codes self.tasks = tasks self.initial_prompts = initial_prompts self.tokenizer = tokenizer self.device = device self.dta_padding = dta_padding self.without_timestamps = without_timestamps self.use_dynamic_time_axis = use_dynamic_time_axis self.max_initial_prompt_len = max_initial_prompt_len if type(audio_files[0]) == str: self.get_audio_signal = self._get_audio_signal_from_file else: self.get_audio_signal = self._get_audio_signal_from_array def _get_audio_signal_from_array(self, item): return self.audio_files[item] def _get_audio_signal_from_file(self, item): return load_audio(self.audio_files[item]) def __len__(self): return len(self.audio_files) def __getitem__(self, item): audio = self.get_audio_signal(item) seq_len = audio.shape[-1] if self.initial_prompts[item]: initial_prompt = " " + self.initial_prompts[item].strip() initial_prompt_tokens = self.tokenizer.encode(initial_prompt)[-self.max_initial_prompt_len:] else: initial_prompt_tokens = [] prompt = self.tokenizer.sot_sequence(task=self.tasks[item], lang=self.lang_codes[item]) if self.without_timestamps: prompt = prompt + [self.tokenizer.no_timestamps] return audio, prompt, initial_prompt_tokens, seq_len class WhisperDataLoader: def __init__(self, device, tokenizer, speech_segmenter, dta_padding=3.0, without_timestamps=True, max_speech_len=29.0, max_initial_prompt_len=223, merge_chunks=True, use_dynamic_time_axis=False): self.device = device self.tokenizer = tokenizer self.speech_segmenter = speech_segmenter self.dta_padding = int(dta_padding*SAMPLE_RATE) self.without_timestamps = without_timestamps self.max_speech_len = max_speech_len self.max_initial_prompt_len = max_initial_prompt_len self.use_dynamic_time_axis = use_dynamic_time_axis self.merge_chunks = merge_chunks def data_collate_fn(self, batch): if self.use_dynamic_time_axis: max_len = min(max([_[3] for _ in batch]) + self.dta_padding, N_SAMPLES) else: max_len = N_SAMPLES signal_batch = torch.stack([torch.from_numpy(pad_or_trim(_[0], length=max_len)).to(self.device) for _ in batch]) seq_len = torch.tensor([_[3] for _ in batch]).to(self.device) prompt_batch = [] initial_prompt_max_len = max([len(_[2]) for _ in 
batch]) if initial_prompt_max_len: for _ in batch: prompt_batch.append([self.tokenizer.sot_prev] + (initial_prompt_max_len-len(_[2]))*[self.tokenizer.silent_token] + _[2] + _[1]) else: for _ in batch: prompt_batch.append(_[1]) if len(batch[0]) == 5: seg_metadata = [_[4] for _ in batch] return signal_batch, prompt_batch, seq_len, seg_metadata else: return signal_batch, prompt_batch, seq_len def get_segmented_audio_signal(self, audio_signal, file_id, lang, task, initial_prompt, sr=16000): start_ends, audio_signal = self.speech_segmenter(audio_signal=audio_signal) if initial_prompt: initial_prompt = " " + initial_prompt.strip() initial_prompt_tokens = self.tokenizer.encode(initial_prompt)[-self.max_initial_prompt_len:] else: initial_prompt_tokens = [] prompt = self.tokenizer.sot_sequence(task=task, lang=lang) if self.without_timestamps: prompt.append(self.tokenizer.no_timestamps) else: prompt.append(self.tokenizer.timestamp_begin) segmented_audio_signal = [] if self.merge_chunks: stitched_speech_segments = stitch_speech_segments(start_ends, max_len=self.max_speech_len) for stitched_seg in stitched_speech_segments: audio = [] for st, et in stitched_seg: audio.append(audio_signal[int(st*sr):int(et*sr)]) audio = np.concatenate(audio) seq_len = audio.shape[-1] seg_metadata = { 'file_id': file_id, 'start_time': stitched_seg[0][0], 'end_time': stitched_seg[-1][1], 'stitched_seg': stitched_seg, 'lang_code': lang } segmented_audio_signal.append((audio, prompt, initial_prompt_tokens, seq_len, seg_metadata)) else: for st, et in start_ends: audio = audio_signal[int(st*sr):int(et*sr)] seq_len = audio.shape[-1] segmented_audio_signal.append((audio, prompt, initial_prompt_tokens, seq_len, {'file_id': file_id, 'start_time': st, 'end_time': et})) return segmented_audio_signal def get_data_loader_with_vad(self, audio_files, lang_codes, tasks, initial_prompts, batch_size=16): segmented_audio_signal = [] pbar_update_len = {}
for file_id, (audio_signal, lang, task, initial_prompt) in enumerate(zip(audio_batch_generator(audio_files), lang_codes, tasks, initial_prompts)):
1
2023-12-16 18:09:16+00:00
4k
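For this record the gold completion opens the main loop of WhisperDataLoader.get_data_loader_with_vad(): it iterates audio loaded lazily through a thread pool alongside each file's language code, task, and initial prompt. Reassembled for illustration only; audio_batch_generator comes from the record's import_statement, and the loop body is elided because the record ends here.

    def get_data_loader_with_vad(self, audio_files, lang_codes, tasks, initial_prompts, batch_size=16):
        segmented_audio_signal = []
        pbar_update_len = {}

        for file_id, (audio_signal, lang, task, initial_prompt) in enumerate(
                zip(audio_batch_generator(audio_files), lang_codes, tasks, initial_prompts)):  # <- next_line (gold completion)
            ...  # body not shown in this record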
chinhsuanwu/ifusion
ldm/models/diffusion/ddim.py
[ { "identifier": "make_ddim_sampling_parameters", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True):\n # select alphas for computing the variance schedule\n alphas = alphacums[ddim_timesteps]\n alphas_prev = np.asarray([alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist())\n\n # according the the formula provided in https://arxiv.org/abs/2010.02502\n sigmas = eta * np.sqrt(\n (1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev)\n )\n if verbose:\n print(\n f\"Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}\"\n )\n print(\n f\"For the chosen value of eta, which is {eta}, \"\n f\"this results in the following sigma_t schedule for ddim sampler {sigmas}\"\n )\n return sigmas, alphas, alphas_prev" }, { "identifier": "make_ddim_timesteps", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def make_ddim_timesteps(\n ddim_discr_method, num_ddim_timesteps, num_ddpm_timesteps, verbose=True\n):\n if ddim_discr_method == \"uniform\":\n c = num_ddpm_timesteps // num_ddim_timesteps\n ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, c)))\n elif ddim_discr_method == \"quad\":\n ddim_timesteps = (\n (np.linspace(0, np.sqrt(num_ddpm_timesteps * 0.8), num_ddim_timesteps)) ** 2\n ).astype(int)\n else:\n raise NotImplementedError(\n f'There is no ddim discretization method called \"{ddim_discr_method}\"'\n )\n\n # assert ddim_timesteps.shape[0] == num_ddim_timesteps\n # add one to get the final alpha values right (the ones from first scale to data during sampling)\n steps_out = ddim_timesteps + 1\n if verbose:\n print(f\"Selected timesteps for ddim sampler: {steps_out}\")\n return steps_out" }, { "identifier": "noise_like", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(\n shape[0], *((1,) * (len(shape) - 1))\n )\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "extract_into_tensor", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "norm_thresholding", "path": "ldm/models/diffusion/sampling_util.py", "snippet": "def norm_thresholding(x0, value):\n s = append_dims(x0.pow(2).flatten(1).mean(1).sqrt().clamp(min=value), x0.ndim)\n return x0 * (value / s)" } ]
import torch import numpy as np from tqdm import tqdm from ldm.modules.diffusionmodules.util import ( make_ddim_sampling_parameters, make_ddim_timesteps, noise_like, extract_into_tensor, ) from ldm.models.diffusion.sampling_util import ( norm_thresholding, )
3,234
self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas ) sigmas = ( self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas ) # select parameters corresponding to the currently considered timestep a_t = torch.full((b, 1, 1, 1), alphas[index], device=device) a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device) sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device) sqrt_one_minus_at = torch.full( (b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device ) # current prediction for x_0 pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt() if quantize_denoised: pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0) if dynamic_threshold is not None: pred_x0 = norm_thresholding(pred_x0, dynamic_threshold) # direction pointing to x_t dir_xt = (1.0 - a_prev - sigma_t**2).sqrt() * e_t noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature if noise_dropout > 0.0: noise = torch.nn.functional.dropout(noise, p=noise_dropout) x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise return x_prev, pred_x0 @torch.no_grad() def encode( self, x0, c, t_enc, use_original_steps=False, return_intermediates=None, unconditional_guidance_scale=1.0, unconditional_conditioning=None, ): num_reference_steps = ( self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0] ) assert t_enc <= num_reference_steps num_steps = t_enc if use_original_steps: alphas_next = self.alphas_cumprod[:num_steps] alphas = self.alphas_cumprod_prev[:num_steps] else: alphas_next = self.ddim_alphas[:num_steps] alphas = torch.tensor(self.ddim_alphas_prev[:num_steps]) x_next = x0 intermediates = [] inter_steps = [] for i in tqdm(range(num_steps), desc="Encoding Image"): t = torch.full( (x0.shape[0],), i, device=self.model.device, dtype=torch.long ) if unconditional_guidance_scale == 1.0: noise_pred = self.model.apply_model(x_next, t, c) else: assert unconditional_conditioning is not None e_t_uncond, noise_pred = torch.chunk( self.model.apply_model( torch.cat((x_next, x_next)), torch.cat((t, t)), torch.cat((unconditional_conditioning, c)), ), 2, ) noise_pred = e_t_uncond + unconditional_guidance_scale * ( noise_pred - e_t_uncond ) xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next weighted_noise_pred = ( alphas_next[i].sqrt() * ((1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred ) x_next = xt_weighted + weighted_noise_pred if ( return_intermediates and i % (num_steps // return_intermediates) == 0 and i < num_steps - 1 ): intermediates.append(x_next) inter_steps.append(i) elif return_intermediates and i >= num_steps - 2: intermediates.append(x_next) inter_steps.append(i) out = {"x_encoded": x_next, "intermediate_steps": inter_steps} if return_intermediates: out.update({"intermediates": intermediates}) return x_next, out @torch.no_grad() def stochastic_encode(self, x0, t, use_original_steps=False, noise=None): # fast, but does not allow for exact reconstruction # t serves as an index to gather the correct alphas if use_original_steps: sqrt_alphas_cumprod = self.sqrt_alphas_cumprod sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod else: sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas) sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas if noise is None: noise = torch.randn_like(x0) return (
"""SAMPLING ONLY.""" class DDIMSampler(object): def __init__(self, model, schedule="linear", **kwargs): super().__init__() self.model = model self.ddpm_num_timesteps = model.num_timesteps self.schedule = schedule def to(self, device): """Same as to in torch module Don't really underestand why this isn't a module in the first place""" for k, v in self.__dict__.items(): if isinstance(v, torch.Tensor): new_v = getattr(self, k).to(device) setattr(self, k, new_v) def register_buffer(self, name, attr): if type(attr) == torch.Tensor: if attr.device != torch.device("cuda"): attr = attr.to(torch.device("cuda")) setattr(self, name, attr) def make_schedule( self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0.0, verbose=True ): self.ddim_timesteps = make_ddim_timesteps( ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps, num_ddpm_timesteps=self.ddpm_num_timesteps, verbose=verbose, ) alphas_cumprod = self.model.alphas_cumprod assert ( alphas_cumprod.shape[0] == self.ddpm_num_timesteps ), "alphas have to be defined for each timestep" to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device) self.register_buffer("betas", to_torch(self.model.betas)) self.register_buffer("alphas_cumprod", to_torch(alphas_cumprod)) self.register_buffer( "alphas_cumprod_prev", to_torch(self.model.alphas_cumprod_prev) ) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer( "sqrt_alphas_cumprod", to_torch(np.sqrt(alphas_cumprod.cpu())) ) self.register_buffer( "sqrt_one_minus_alphas_cumprod", to_torch(np.sqrt(1.0 - alphas_cumprod.cpu())), ) self.register_buffer( "log_one_minus_alphas_cumprod", to_torch(np.log(1.0 - alphas_cumprod.cpu())) ) self.register_buffer( "sqrt_recip_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod.cpu())) ) self.register_buffer( "sqrt_recipm1_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod.cpu() - 1)), ) # ddim sampling parameters ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters( alphacums=alphas_cumprod.cpu(), ddim_timesteps=self.ddim_timesteps, eta=ddim_eta, verbose=verbose, ) self.register_buffer("ddim_sigmas", ddim_sigmas) self.register_buffer("ddim_alphas", ddim_alphas) self.register_buffer("ddim_alphas_prev", ddim_alphas_prev) self.register_buffer("ddim_sqrt_one_minus_alphas", np.sqrt(1.0 - ddim_alphas)) sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt( (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (1 - self.alphas_cumprod / self.alphas_cumprod_prev) ) self.register_buffer( "ddim_sigmas_for_original_num_steps", sigmas_for_original_sampling_steps ) @torch.no_grad() def sample( self, S, batch_size, shape, conditioning=None, callback=None, normals_sequence=None, img_callback=None, quantize_x0=False, eta=0.0, mask=None, x0=None, temperature=1.0, noise_dropout=0.0, score_corrector=None, corrector_kwargs=None, verbose=True, x_T=None, log_every_t=100, unconditional_guidance_scale=1.0, unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... 
dynamic_threshold=None, **kwargs, ): if conditioning is not None: if isinstance(conditioning, dict): ctmp = conditioning[list(conditioning.keys())[0]] while isinstance(ctmp, list): ctmp = ctmp[0] cbs = ctmp.shape[0] if cbs != batch_size: print( f"Warning: Got {cbs} conditionings but batch-size is {batch_size}" ) else: if conditioning.shape[0] != batch_size: print( f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}" ) self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose) # sampling C, H, W = shape size = (batch_size, C, H, W) # print(f'Data shape for DDIM sampling is {size}, eta {eta}') samples, intermediates = self.ddim_sampling( conditioning, size, callback=callback, img_callback=img_callback, quantize_denoised=quantize_x0, mask=mask, x0=x0, ddim_use_original_steps=False, noise_dropout=noise_dropout, temperature=temperature, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs, x_T=x_T, log_every_t=log_every_t, unconditional_guidance_scale=unconditional_guidance_scale, unconditional_conditioning=unconditional_conditioning, dynamic_threshold=dynamic_threshold, ) return samples, intermediates @torch.no_grad() def ddim_sampling( self, cond, shape, x_T=None, ddim_use_original_steps=False, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, log_every_t=100, temperature=1.0, noise_dropout=0.0, score_corrector=None, corrector_kwargs=None, unconditional_guidance_scale=1.0, unconditional_conditioning=None, dynamic_threshold=None, t_start=-1, ): device = self.model.betas.device b = shape[0] if x_T is None: img = torch.randn(shape, device=device) else: img = x_T if timesteps is None: timesteps = ( self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps ) elif timesteps is not None and not ddim_use_original_steps: subset_end = ( int( min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0] ) - 1 ) timesteps = self.ddim_timesteps[:subset_end] timesteps = timesteps[:t_start] intermediates = {"x_inter": [img], "pred_x0": [img]} time_range = ( reversed(range(0, timesteps)) if ddim_use_original_steps else np.flip(timesteps) ) total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0] # print(f"Running DDIM Sampling with {total_steps} timesteps") # iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps) # for i, step in enumerate(iterator): for i, step in enumerate(time_range): index = total_steps - i - 1 ts = torch.full((b,), step, device=device, dtype=torch.long) if mask is not None: assert x0 is not None img_orig = self.model.q_sample( x0, ts ) # TODO: deterministic forward pass? 
img = img_orig * mask + (1.0 - mask) * img outs = self.p_sample_ddim( img, cond, ts, index=index, use_original_steps=ddim_use_original_steps, quantize_denoised=quantize_denoised, temperature=temperature, noise_dropout=noise_dropout, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs, unconditional_guidance_scale=unconditional_guidance_scale, unconditional_conditioning=unconditional_conditioning, dynamic_threshold=dynamic_threshold, ) img, pred_x0 = outs if callback: img = callback(i, img, pred_x0) if img_callback: img_callback(pred_x0, i) if index % log_every_t == 0 or index == total_steps - 1: intermediates["x_inter"].append(img) intermediates["pred_x0"].append(pred_x0) return img, intermediates @torch.no_grad() def p_sample_ddim( self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False, temperature=1.0, noise_dropout=0.0, score_corrector=None, corrector_kwargs=None, unconditional_guidance_scale=1.0, unconditional_conditioning=None, dynamic_threshold=None, ): b, *_, device = *x.shape, x.device if unconditional_conditioning is None or unconditional_guidance_scale == 1.0: e_t = self.model.apply_model(x, t, c) else: x_in = torch.cat([x] * 2) t_in = torch.cat([t] * 2) if isinstance(c, dict): assert isinstance(unconditional_conditioning, dict) c_in = dict() for k in c: if isinstance(c[k], list): c_in[k] = [ torch.cat([unconditional_conditioning[k][i], c[k][i]]) for i in range(len(c[k])) ] else: c_in[k] = torch.cat([unconditional_conditioning[k], c[k]]) else: c_in = torch.cat([unconditional_conditioning, c]) e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2) e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond) if score_corrector is not None: assert self.model.parameterization == "eps" e_t = score_corrector.modify_score( self.model, e_t, x, t, c, **corrector_kwargs ) alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas alphas_prev = ( self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev ) sqrt_one_minus_alphas = ( self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas ) sigmas = ( self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas ) # select parameters corresponding to the currently considered timestep a_t = torch.full((b, 1, 1, 1), alphas[index], device=device) a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device) sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device) sqrt_one_minus_at = torch.full( (b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device ) # current prediction for x_0 pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt() if quantize_denoised: pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0) if dynamic_threshold is not None: pred_x0 = norm_thresholding(pred_x0, dynamic_threshold) # direction pointing to x_t dir_xt = (1.0 - a_prev - sigma_t**2).sqrt() * e_t noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature if noise_dropout > 0.0: noise = torch.nn.functional.dropout(noise, p=noise_dropout) x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise return x_prev, pred_x0 @torch.no_grad() def encode( self, x0, c, t_enc, use_original_steps=False, return_intermediates=None, unconditional_guidance_scale=1.0, unconditional_conditioning=None, ): num_reference_steps = ( self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0] ) assert t_enc <= num_reference_steps num_steps = t_enc if 
use_original_steps: alphas_next = self.alphas_cumprod[:num_steps] alphas = self.alphas_cumprod_prev[:num_steps] else: alphas_next = self.ddim_alphas[:num_steps] alphas = torch.tensor(self.ddim_alphas_prev[:num_steps]) x_next = x0 intermediates = [] inter_steps = [] for i in tqdm(range(num_steps), desc="Encoding Image"): t = torch.full( (x0.shape[0],), i, device=self.model.device, dtype=torch.long ) if unconditional_guidance_scale == 1.0: noise_pred = self.model.apply_model(x_next, t, c) else: assert unconditional_conditioning is not None e_t_uncond, noise_pred = torch.chunk( self.model.apply_model( torch.cat((x_next, x_next)), torch.cat((t, t)), torch.cat((unconditional_conditioning, c)), ), 2, ) noise_pred = e_t_uncond + unconditional_guidance_scale * ( noise_pred - e_t_uncond ) xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next weighted_noise_pred = ( alphas_next[i].sqrt() * ((1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred ) x_next = xt_weighted + weighted_noise_pred if ( return_intermediates and i % (num_steps // return_intermediates) == 0 and i < num_steps - 1 ): intermediates.append(x_next) inter_steps.append(i) elif return_intermediates and i >= num_steps - 2: intermediates.append(x_next) inter_steps.append(i) out = {"x_encoded": x_next, "intermediate_steps": inter_steps} if return_intermediates: out.update({"intermediates": intermediates}) return x_next, out @torch.no_grad() def stochastic_encode(self, x0, t, use_original_steps=False, noise=None): # fast, but does not allow for exact reconstruction # t serves as an index to gather the correct alphas if use_original_steps: sqrt_alphas_cumprod = self.sqrt_alphas_cumprod sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod else: sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas) sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas if noise is None: noise = torch.randn_like(x0) return (
extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0
3
2023-12-17 12:45:38+00:00
4k
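A minimal standalone sketch of the weighted sum that stochastic_encode returns in the record above, assuming a toy linear-beta schedule; gather_coeff below is a hypothetical stand-in for the extract_into_tensor helper, which is not shown in this record.

import torch

# Hypothetical helper playing the role of extract_into_tensor: pick the per-sample
# coefficient at timestep t and reshape it so it broadcasts over an image tensor.
def gather_coeff(coeffs: torch.Tensor, t: torch.Tensor, shape) -> torch.Tensor:
    out = coeffs.gather(-1, t)
    return out.reshape(t.shape[0], *((1,) * (len(shape) - 1)))

# Toy schedule: alphas_cumprod for 1000 linear-beta DDPM steps (illustrative values only).
betas = torch.linspace(1e-4, 2e-2, 1000)
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)

x0 = torch.randn(4, 3, 32, 32)            # clean latents
t = torch.randint(0, 1000, (4,))          # one timestep index per sample
noise = torch.randn_like(x0)

# q(x_t | x_0): sqrt(alpha_bar_t) * x0 + sqrt(1 - alpha_bar_t) * noise,
# the same expression the stochastic encoder evaluates.
x_t = (gather_coeff(alphas_cumprod.sqrt(), t, x0.shape) * x0
       + gather_coeff((1.0 - alphas_cumprod).sqrt(), t, x0.shape) * noise)
print(x_t.shape)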
wangzhecheng/SkyScript
src/open_clip/openai.py
[ { "identifier": "OPENAI_DATASET_MEAN", "path": "src/open_clip/constants.py", "snippet": "OPENAI_DATASET_MEAN = (0.48145466, 0.4578275, 0.40821073)" }, { "identifier": "OPENAI_DATASET_STD", "path": "src/open_clip/constants.py", "snippet": "OPENAI_DATASET_STD = (0.26862954, 0.26130258, 0.27577711)" }, { "identifier": "build_model_from_openai_state_dict", "path": "src/open_clip/model.py", "snippet": "def build_model_from_openai_state_dict(\n model_name: str,\n state_dict: dict,\n quick_gelu=True,\n cast_dtype=torch.float16,\n):\n vit = \"visual.proj\" in state_dict\n\n if vit:\n vision_width = state_dict[\"visual.conv1.weight\"].shape[0]\n vision_layers = len(\n [k for k in state_dict.keys() if k.startswith(\"visual.\") and k.endswith(\".attn.in_proj_weight\")])\n vision_patch_size = state_dict[\"visual.conv1.weight\"].shape[-1]\n grid_size = round((state_dict[\"visual.positional_embedding\"].shape[0] - 1) ** 0.5)\n image_size = vision_patch_size * grid_size\n else:\n counts: list = [\n len(set(k.split(\".\")[2] for k in state_dict if k.startswith(f\"visual.layer{b}\"))) for b in [1, 2, 3, 4]]\n vision_layers = tuple(counts)\n vision_width = state_dict[\"visual.layer1.0.conv1.weight\"].shape[0]\n output_width = round((state_dict[\"visual.attnpool.positional_embedding\"].shape[0] - 1) ** 0.5)\n vision_patch_size = None\n assert output_width ** 2 + 1 == state_dict[\"visual.attnpool.positional_embedding\"].shape[0]\n image_size = output_width * 32\n\n embed_dim = state_dict[\"text_projection\"].shape[1]\n context_length = state_dict[\"positional_embedding\"].shape[0]\n vocab_size = state_dict[\"token_embedding.weight\"].shape[0]\n transformer_width = state_dict[\"ln_final.weight\"].shape[0]\n transformer_heads = transformer_width // 64\n transformer_layers = len(set(k.split(\".\")[2] for k in state_dict if k.startswith(f\"transformer.resblocks\")))\n\n vision_cfg = CLIPVisionCfg(\n layers=vision_layers,\n width=vision_width,\n patch_size=vision_patch_size,\n image_size=image_size,\n )\n text_cfg = CLIPTextCfg(\n context_length=context_length,\n vocab_size=vocab_size,\n width=transformer_width,\n heads=transformer_heads,\n layers=transformer_layers,\n )\n model = CLIP(\n embed_dim,\n vision_cfg=vision_cfg,\n text_cfg=text_cfg,\n quick_gelu=quick_gelu, # OpenAI models were trained with QuickGELU\n cast_dtype=cast_dtype,\n )\n\n for key in [\"input_resolution\", \"context_length\", \"vocab_size\"]:\n state_dict.pop(key, None)\n\n convert_weights_to_fp16(model) # OpenAI state dicts are partially converted to float16\n model.load_state_dict(state_dict)\n return model.eval()" }, { "identifier": "convert_weights_to_lp", "path": "src/open_clip/model.py", "snippet": "def convert_weights_to_lp(model: nn.Module, dtype=torch.float16):\n \"\"\"Convert applicable model parameters to low-precision (bf16 or fp16)\"\"\"\n\n def _convert_weights(l):\n if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)):\n l.weight.data = l.weight.data.to(dtype)\n if l.bias is not None:\n l.bias.data = l.bias.data.to(dtype)\n\n if isinstance(l, (nn.MultiheadAttention, Attention)):\n for attr in [*[f\"{s}_proj_weight\" for s in [\"in\", \"q\", \"k\", \"v\"]], \"in_proj_bias\", \"bias_k\", \"bias_v\"]:\n tensor = getattr(l, attr)\n if tensor is not None:\n tensor.data = tensor.data.to(dtype)\n\n if isinstance(l, (CLIP, TextTransformer)):\n # convert text nn.Parameter projections\n attr = getattr(l, \"text_projection\", None)\n if attr is not None:\n attr.data = attr.data.to(dtype)\n\n if isinstance(l, VisionTransformer):\n # 
convert vision nn.Parameter projections\n attr = getattr(l, \"proj\", None)\n if attr is not None:\n attr.data = attr.data.to(dtype)\n\n model.apply(_convert_weights)" }, { "identifier": "get_cast_dtype", "path": "src/open_clip/model.py", "snippet": "def get_cast_dtype(precision: str):\n cast_dtype = None\n if precision == 'bf16':\n cast_dtype = torch.bfloat16\n elif precision == 'fp16':\n cast_dtype = torch.float16\n return cast_dtype" }, { "identifier": "get_pretrained_url", "path": "src/open_clip/pretrained.py", "snippet": "def get_pretrained_url(model: str, tag: str):\n cfg = get_pretrained_cfg(model, _clean_tag(tag))\n return cfg.get('url', '')" }, { "identifier": "list_pretrained_models_by_tag", "path": "src/open_clip/pretrained.py", "snippet": "def list_pretrained_models_by_tag(tag: str):\n \"\"\" return all models having the specified pretrain tag \"\"\"\n models = []\n tag = _clean_tag(tag)\n for k in _PRETRAINED.keys():\n if tag in _PRETRAINED[k]:\n models.append(k)\n return models" }, { "identifier": "download_pretrained_from_url", "path": "src/open_clip/pretrained.py", "snippet": "def download_pretrained_from_url(\n url: str,\n cache_dir: Union[str, None] = None,\n):\n if not cache_dir:\n cache_dir = os.path.expanduser(\"~/.cache/clip\")\n os.makedirs(cache_dir, exist_ok=True)\n filename = os.path.basename(url)\n\n if 'openaipublic' in url:\n expected_sha256 = url.split(\"/\")[-2]\n elif 'mlfoundations' in url:\n expected_sha256 = os.path.splitext(filename)[0].split(\"-\")[-1]\n else:\n expected_sha256 = ''\n\n download_target = os.path.join(cache_dir, filename)\n\n if os.path.exists(download_target) and not os.path.isfile(download_target):\n raise RuntimeError(f\"{download_target} exists and is not a regular file\")\n\n if os.path.isfile(download_target):\n if expected_sha256:\n if hashlib.sha256(open(download_target, \"rb\").read()).hexdigest().startswith(expected_sha256):\n return download_target\n else:\n warnings.warn(f\"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file\")\n else:\n return download_target\n\n with urllib.request.urlopen(url) as source, open(download_target, \"wb\") as output:\n with tqdm(total=int(source.headers.get(\"Content-Length\")), ncols=80, unit='iB', unit_scale=True) as loop:\n while True:\n buffer = source.read(8192)\n if not buffer:\n break\n\n output.write(buffer)\n loop.update(len(buffer))\n\n if expected_sha256 and not hashlib.sha256(open(download_target, \"rb\").read()).hexdigest().startswith(expected_sha256):\n raise RuntimeError(f\"Model has been downloaded but the SHA256 checksum does not not match\")\n\n return download_target" } ]
import os import warnings import torch from typing import List, Optional, Union from .constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD from .model import build_model_from_openai_state_dict, convert_weights_to_lp, get_cast_dtype from .pretrained import get_pretrained_url, list_pretrained_models_by_tag, download_pretrained_from_url
2,467
""" OpenAI pretrained model functions Adapted from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI. """ __all__ = ["list_openai_models", "load_openai_model"] def list_openai_models() -> List[str]: """Returns the names of available CLIP models""" return list_pretrained_models_by_tag('openai') def load_openai_model( name: str, precision: Optional[str] = None, device: Optional[Union[str, torch.device]] = None, cache_dir: Optional[str] = None, ): """Load a CLIP model Parameters ---------- name : str A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict precision: str Model precision, if None defaults to 'fp32' if device == 'cpu' else 'fp16'. device : Union[str, torch.device] The device to put the loaded model cache_dir : Optional[str] The directory to cache the downloaded model weights Returns ------- model : torch.nn.Module The CLIP model preprocess : Callable[[PIL.Image], torch.Tensor] A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input """ if device is None: device = "cuda" if torch.cuda.is_available() else "cpu" if precision is None: precision = 'fp32' if device == 'cpu' else 'fp16' if get_pretrained_url(name, 'openai'): model_path = download_pretrained_from_url(get_pretrained_url(name, 'openai'), cache_dir=cache_dir) elif os.path.isfile(name): model_path = name else: raise RuntimeError(f"Model {name} not found; available models = {list_openai_models()}") try: # loading JIT archive model = torch.jit.load(model_path, map_location="cpu").eval() state_dict = None except RuntimeError: # loading saved state dict state_dict = torch.load(model_path, map_location="cpu") # Build a non-jit model from the OpenAI jitted model state dict cast_dtype = get_cast_dtype(precision) try: model = build_model_from_openai_state_dict(name, state_dict or model.state_dict(), cast_dtype=cast_dtype) except KeyError: sd = {k[7:]: v for k, v in state_dict["state_dict"].items()} model = build_model_from_openai_state_dict(name, sd, cast_dtype=cast_dtype) # model from OpenAI state dict is in manually cast fp16 mode, must be converted for AMP/fp32/bf16 use model = model.to(device) # FIXME support pure fp16/bf16 precision modes if precision != 'fp16': model.float() if precision == 'bf16': # for bf16, convert back to low-precision convert_weights_to_lp(model, dtype=torch.bfloat16) # add mean / std attributes for consistency with OpenCLIP models model.visual.image_mean = OPENAI_DATASET_MEAN
""" OpenAI pretrained model functions Adapted from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI. """ __all__ = ["list_openai_models", "load_openai_model"] def list_openai_models() -> List[str]: """Returns the names of available CLIP models""" return list_pretrained_models_by_tag('openai') def load_openai_model( name: str, precision: Optional[str] = None, device: Optional[Union[str, torch.device]] = None, cache_dir: Optional[str] = None, ): """Load a CLIP model Parameters ---------- name : str A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict precision: str Model precision, if None defaults to 'fp32' if device == 'cpu' else 'fp16'. device : Union[str, torch.device] The device to put the loaded model cache_dir : Optional[str] The directory to cache the downloaded model weights Returns ------- model : torch.nn.Module The CLIP model preprocess : Callable[[PIL.Image], torch.Tensor] A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input """ if device is None: device = "cuda" if torch.cuda.is_available() else "cpu" if precision is None: precision = 'fp32' if device == 'cpu' else 'fp16' if get_pretrained_url(name, 'openai'): model_path = download_pretrained_from_url(get_pretrained_url(name, 'openai'), cache_dir=cache_dir) elif os.path.isfile(name): model_path = name else: raise RuntimeError(f"Model {name} not found; available models = {list_openai_models()}") try: # loading JIT archive model = torch.jit.load(model_path, map_location="cpu").eval() state_dict = None except RuntimeError: # loading saved state dict state_dict = torch.load(model_path, map_location="cpu") # Build a non-jit model from the OpenAI jitted model state dict cast_dtype = get_cast_dtype(precision) try: model = build_model_from_openai_state_dict(name, state_dict or model.state_dict(), cast_dtype=cast_dtype) except KeyError: sd = {k[7:]: v for k, v in state_dict["state_dict"].items()} model = build_model_from_openai_state_dict(name, sd, cast_dtype=cast_dtype) # model from OpenAI state dict is in manually cast fp16 mode, must be converted for AMP/fp32/bf16 use model = model.to(device) # FIXME support pure fp16/bf16 precision modes if precision != 'fp16': model.float() if precision == 'bf16': # for bf16, convert back to low-precision convert_weights_to_lp(model, dtype=torch.bfloat16) # add mean / std attributes for consistency with OpenCLIP models model.visual.image_mean = OPENAI_DATASET_MEAN
model.visual.image_std = OPENAI_DATASET_STD
1
2023-12-19 11:50:56+00:00
4k
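A minimal sketch of the loading fallback described in load_openai_model above: try the checkpoint as a TorchScript archive first, then fall back to a plain state dict, and derive the default precision from the device. The function and variable names here are illustrative, not part of the library.

from typing import Optional
import torch

def pick_precision(device: str, precision: Optional[str] = None) -> str:
    # Same default rule as the loader: fp32 on CPU, fp16 otherwise.
    return precision or ("fp32" if device == "cpu" else "fp16")

def load_state_dict_any(path: str) -> dict:
    # Try a JIT archive first, then fall back to a state dict saved with torch.save,
    # mirroring the try/except in the record above.
    try:
        return torch.jit.load(path, map_location="cpu").eval().state_dict()
    except RuntimeError:
        return torch.load(path, map_location="cpu")

device = "cuda" if torch.cuda.is_available() else "cpu"
print(pick_precision(device))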
JarodMica/ai-voice-cloning
modules/rvc/tools/torchgate/torchgate.py
[ { "identifier": "linspace", "path": "modules/rvc/tools/torchgate/utils.py", "snippet": "@torch.no_grad()\ndef linspace(\n start: Number, stop: Number, num: int = 50, endpoint: bool = True, **kwargs\n) -> torch.Tensor:\n \"\"\"\n Generate a linearly spaced 1-D tensor.\n\n Arguments:\n start {[Number]} -- [The starting value of the sequence.]\n stop {[Number]} -- [The end value of the sequence, unless `endpoint` is set to False.\n In that case, the sequence consists of all but the last of ``num + 1``\n evenly spaced samples, so that `stop` is excluded. Note that the step\n size changes when `endpoint` is False.]\n\n Keyword Arguments:\n num {[int]} -- [Number of samples to generate. Default is 50. Must be non-negative.]\n endpoint {[bool]} -- [If True, `stop` is the last sample. Otherwise, it is not included.\n Default is True.]\n **kwargs -- [Additional arguments to be passed to the underlying PyTorch `linspace` function.]\n\n Returns:\n [torch.Tensor] -- [1-D tensor of `num` equally spaced samples from `start` to `stop`.]\n \"\"\"\n if endpoint:\n return torch.linspace(start, stop, num, **kwargs)\n else:\n return torch.linspace(start, stop, num + 1, **kwargs)[:-1]" }, { "identifier": "temperature_sigmoid", "path": "modules/rvc/tools/torchgate/utils.py", "snippet": "@torch.no_grad()\ndef temperature_sigmoid(x: torch.Tensor, x0: float, temp_coeff: float) -> torch.Tensor:\n \"\"\"\n Apply a sigmoid function with temperature scaling.\n\n Arguments:\n x {[torch.Tensor]} -- [Input tensor.]\n x0 {[float]} -- [Parameter that controls the threshold of the sigmoid.]\n temp_coeff {[float]} -- [Parameter that controls the slope of the sigmoid.]\n\n Returns:\n [torch.Tensor] -- [Output tensor after applying the sigmoid with temperature scaling.]\n \"\"\"\n return torch.sigmoid((x - x0) / temp_coeff)" }, { "identifier": "amp_to_db", "path": "modules/rvc/tools/torchgate/utils.py", "snippet": "@torch.no_grad()\ndef amp_to_db(\n x: torch.Tensor, eps=torch.finfo(torch.float64).eps, top_db=40\n) -> torch.Tensor:\n \"\"\"\n Convert the input tensor from amplitude to decibel scale.\n\n Arguments:\n x {[torch.Tensor]} -- [Input tensor.]\n\n Keyword Arguments:\n eps {[float]} -- [Small value to avoid numerical instability.]\n (default: {torch.finfo(torch.float64).eps})\n top_db {[float]} -- [threshold the output at ``top_db`` below the peak]\n ` (default: {40})\n\n Returns:\n [torch.Tensor] -- [Output tensor in decibel scale.]\n \"\"\"\n x_db = 20 * torch.log10(x.abs() + eps)\n return torch.max(x_db, (x_db.max(-1).values - top_db).unsqueeze(-1))" } ]
import torch from torch.nn.functional import conv1d, conv2d from typing import Union, Optional from .utils import linspace, temperature_sigmoid, amp_to_db
2,577
def _generate_mask_smoothing_filter(self) -> Union[torch.Tensor, None]: """ A PyTorch module that applies a spectral gate to an input signal using the STFT. Returns: smoothing_filter (torch.Tensor): a 2D tensor representing the smoothing filter, with shape (n_grad_freq, n_grad_time), where n_grad_freq is the number of frequency bins to smooth and n_grad_time is the number of time frames to smooth. If both self.freq_mask_smooth_hz and self.time_mask_smooth_ms are None, returns None. """ if self.freq_mask_smooth_hz is None and self.time_mask_smooth_ms is None: return None n_grad_freq = ( 1 if self.freq_mask_smooth_hz is None else int(self.freq_mask_smooth_hz / (self.sr / (self.n_fft / 2))) ) if n_grad_freq < 1: raise ValueError( f"freq_mask_smooth_hz needs to be at least {int((self.sr / (self._n_fft / 2)))} Hz" ) n_grad_time = ( 1 if self.time_mask_smooth_ms is None else int(self.time_mask_smooth_ms / ((self.hop_length / self.sr) * 1000)) ) if n_grad_time < 1: raise ValueError( f"time_mask_smooth_ms needs to be at least {int((self.hop_length / self.sr) * 1000)} ms" ) if n_grad_time == 1 and n_grad_freq == 1: return None v_f = torch.cat( [ linspace(0, 1, n_grad_freq + 1, endpoint=False), linspace(1, 0, n_grad_freq + 2), ] )[1:-1] v_t = torch.cat( [ linspace(0, 1, n_grad_time + 1, endpoint=False), linspace(1, 0, n_grad_time + 2), ] )[1:-1] smoothing_filter = torch.outer(v_f, v_t).unsqueeze(0).unsqueeze(0) return smoothing_filter / smoothing_filter.sum() @torch.no_grad() def _stationary_mask( self, X_db: torch.Tensor, xn: Optional[torch.Tensor] = None ) -> torch.Tensor: """ Computes a stationary binary mask to filter out noise in a log-magnitude spectrogram. Arguments: X_db (torch.Tensor): 2D tensor of shape (frames, freq_bins) containing the log-magnitude spectrogram. xn (torch.Tensor): 1D tensor containing the audio signal corresponding to X_db. Returns: sig_mask (torch.Tensor): Binary mask of the same shape as X_db, where values greater than the threshold are set to 1, and the rest are set to 0. """ if xn is not None: XN = torch.stft( xn, n_fft=self.n_fft, hop_length=self.hop_length, win_length=self.win_length, return_complex=True, pad_mode="constant", center=True, window=torch.hann_window(self.win_length).to(xn.device), ) XN_db = amp_to_db(XN).to(dtype=X_db.dtype) else: XN_db = X_db # calculate mean and standard deviation along the frequency axis std_freq_noise, mean_freq_noise = torch.std_mean(XN_db, dim=-1) # compute noise threshold noise_thresh = mean_freq_noise + std_freq_noise * self.n_std_thresh_stationary # create binary mask by thresholding the spectrogram sig_mask = X_db > noise_thresh.unsqueeze(2) return sig_mask @torch.no_grad() def _nonstationary_mask(self, X_abs: torch.Tensor) -> torch.Tensor: """ Computes a non-stationary binary mask to filter out noise in a log-magnitude spectrogram. Arguments: X_abs (torch.Tensor): 2D tensor of shape (frames, freq_bins) containing the magnitude spectrogram. Returns: sig_mask (torch.Tensor): Binary mask of the same shape as X_abs, where values greater than the threshold are set to 1, and the rest are set to 0. """ X_smoothed = ( conv1d( X_abs.reshape(-1, 1, X_abs.shape[-1]), torch.ones( self.n_movemean_nonstationary, dtype=X_abs.dtype, device=X_abs.device, ).view(1, 1, -1), padding="same", ).view(X_abs.shape) / self.n_movemean_nonstationary ) # Compute slowness ratio and apply temperature sigmoid slowness_ratio = (X_abs - X_smoothed) / (X_smoothed + 1e-6)
class TorchGate(torch.nn.Module): """ A PyTorch module that applies a spectral gate to an input signal. Arguments: sr {int} -- Sample rate of the input signal. nonstationary {bool} -- Whether to use non-stationary or stationary masking (default: {False}). n_std_thresh_stationary {float} -- Number of standard deviations above mean to threshold noise for stationary masking (default: {1.5}). n_thresh_nonstationary {float} -- Number of multiplies above smoothed magnitude spectrogram. for non-stationary masking (default: {1.3}). temp_coeff_nonstationary {float} -- Temperature coefficient for non-stationary masking (default: {0.1}). n_movemean_nonstationary {int} -- Number of samples for moving average smoothing in non-stationary masking (default: {20}). prop_decrease {float} -- Proportion to decrease signal by where the mask is zero (default: {1.0}). n_fft {int} -- Size of FFT for STFT (default: {1024}). win_length {[int]} -- Window length for STFT. If None, defaults to `n_fft` (default: {None}). hop_length {[int]} -- Hop length for STFT. If None, defaults to `win_length` // 4 (default: {None}). freq_mask_smooth_hz {float} -- Frequency smoothing width for mask (in Hz). If None, no smoothing is applied (default: {500}). time_mask_smooth_ms {float} -- Time smoothing width for mask (in ms). If None, no smoothing is applied (default: {50}). """ @torch.no_grad() def __init__( self, sr: int, nonstationary: bool = False, n_std_thresh_stationary: float = 1.5, n_thresh_nonstationary: float = 1.3, temp_coeff_nonstationary: float = 0.1, n_movemean_nonstationary: int = 20, prop_decrease: float = 1.0, n_fft: int = 1024, win_length: bool = None, hop_length: int = None, freq_mask_smooth_hz: float = 500, time_mask_smooth_ms: float = 50, ): super().__init__() # General Params self.sr = sr self.nonstationary = nonstationary assert 0.0 <= prop_decrease <= 1.0 self.prop_decrease = prop_decrease # STFT Params self.n_fft = n_fft self.win_length = self.n_fft if win_length is None else win_length self.hop_length = self.win_length // 4 if hop_length is None else hop_length # Stationary Params self.n_std_thresh_stationary = n_std_thresh_stationary # Non-Stationary Params self.temp_coeff_nonstationary = temp_coeff_nonstationary self.n_movemean_nonstationary = n_movemean_nonstationary self.n_thresh_nonstationary = n_thresh_nonstationary # Smooth Mask Params self.freq_mask_smooth_hz = freq_mask_smooth_hz self.time_mask_smooth_ms = time_mask_smooth_ms self.register_buffer("smoothing_filter", self._generate_mask_smoothing_filter()) @torch.no_grad() def _generate_mask_smoothing_filter(self) -> Union[torch.Tensor, None]: """ A PyTorch module that applies a spectral gate to an input signal using the STFT. Returns: smoothing_filter (torch.Tensor): a 2D tensor representing the smoothing filter, with shape (n_grad_freq, n_grad_time), where n_grad_freq is the number of frequency bins to smooth and n_grad_time is the number of time frames to smooth. If both self.freq_mask_smooth_hz and self.time_mask_smooth_ms are None, returns None. 
""" if self.freq_mask_smooth_hz is None and self.time_mask_smooth_ms is None: return None n_grad_freq = ( 1 if self.freq_mask_smooth_hz is None else int(self.freq_mask_smooth_hz / (self.sr / (self.n_fft / 2))) ) if n_grad_freq < 1: raise ValueError( f"freq_mask_smooth_hz needs to be at least {int((self.sr / (self._n_fft / 2)))} Hz" ) n_grad_time = ( 1 if self.time_mask_smooth_ms is None else int(self.time_mask_smooth_ms / ((self.hop_length / self.sr) * 1000)) ) if n_grad_time < 1: raise ValueError( f"time_mask_smooth_ms needs to be at least {int((self.hop_length / self.sr) * 1000)} ms" ) if n_grad_time == 1 and n_grad_freq == 1: return None v_f = torch.cat( [ linspace(0, 1, n_grad_freq + 1, endpoint=False), linspace(1, 0, n_grad_freq + 2), ] )[1:-1] v_t = torch.cat( [ linspace(0, 1, n_grad_time + 1, endpoint=False), linspace(1, 0, n_grad_time + 2), ] )[1:-1] smoothing_filter = torch.outer(v_f, v_t).unsqueeze(0).unsqueeze(0) return smoothing_filter / smoothing_filter.sum() @torch.no_grad() def _stationary_mask( self, X_db: torch.Tensor, xn: Optional[torch.Tensor] = None ) -> torch.Tensor: """ Computes a stationary binary mask to filter out noise in a log-magnitude spectrogram. Arguments: X_db (torch.Tensor): 2D tensor of shape (frames, freq_bins) containing the log-magnitude spectrogram. xn (torch.Tensor): 1D tensor containing the audio signal corresponding to X_db. Returns: sig_mask (torch.Tensor): Binary mask of the same shape as X_db, where values greater than the threshold are set to 1, and the rest are set to 0. """ if xn is not None: XN = torch.stft( xn, n_fft=self.n_fft, hop_length=self.hop_length, win_length=self.win_length, return_complex=True, pad_mode="constant", center=True, window=torch.hann_window(self.win_length).to(xn.device), ) XN_db = amp_to_db(XN).to(dtype=X_db.dtype) else: XN_db = X_db # calculate mean and standard deviation along the frequency axis std_freq_noise, mean_freq_noise = torch.std_mean(XN_db, dim=-1) # compute noise threshold noise_thresh = mean_freq_noise + std_freq_noise * self.n_std_thresh_stationary # create binary mask by thresholding the spectrogram sig_mask = X_db > noise_thresh.unsqueeze(2) return sig_mask @torch.no_grad() def _nonstationary_mask(self, X_abs: torch.Tensor) -> torch.Tensor: """ Computes a non-stationary binary mask to filter out noise in a log-magnitude spectrogram. Arguments: X_abs (torch.Tensor): 2D tensor of shape (frames, freq_bins) containing the magnitude spectrogram. Returns: sig_mask (torch.Tensor): Binary mask of the same shape as X_abs, where values greater than the threshold are set to 1, and the rest are set to 0. """ X_smoothed = ( conv1d( X_abs.reshape(-1, 1, X_abs.shape[-1]), torch.ones( self.n_movemean_nonstationary, dtype=X_abs.dtype, device=X_abs.device, ).view(1, 1, -1), padding="same", ).view(X_abs.shape) / self.n_movemean_nonstationary ) # Compute slowness ratio and apply temperature sigmoid slowness_ratio = (X_abs - X_smoothed) / (X_smoothed + 1e-6)
sig_mask = temperature_sigmoid(
1
2023-12-18 00:10:23+00:00
4k
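A minimal self-contained sketch of the stationary gating rule documented in _stationary_mask above: threshold each frequency bin of the log-magnitude spectrogram at its mean plus n_std standard deviations over time. Parameter values below are arbitrary and only illustrate the shape of the computation.

import torch

def stationary_mask(x: torch.Tensor, n_fft: int = 1024, n_std: float = 1.5) -> torch.Tensor:
    # STFT -> dB magnitude -> per-frequency noise threshold -> boolean signal mask.
    window = torch.hann_window(n_fft)
    X = torch.stft(x, n_fft=n_fft, hop_length=n_fft // 4, window=window,
                   center=True, pad_mode="constant", return_complex=True)
    X_db = 20.0 * torch.log10(X.abs() + 1e-12)
    std, mean = torch.std_mean(X_db, dim=-1)          # statistics over time frames
    noise_thresh = (mean + n_std * std).unsqueeze(-1)
    return X_db > noise_thresh                        # True where the bin exceeds the noise floor

mask = stationary_mask(torch.randn(2, 16000))         # e.g. a batch of 1 s clips at 16 kHz
print(mask.shape, mask.dtype)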
Lavreniuk/EVP
refer/models_refer/model.py
[ { "identifier": "UNetWrapper", "path": "evp/models.py", "snippet": "class UNetWrapper(nn.Module):\n def __init__(self, unet, use_attn=True, base_size=512, max_attn_size=None, attn_selector='up_cross+down_cross') -> None:\n super().__init__()\n self.unet = unet\n self.attention_store = AttentionStore(base_size=base_size // 8, max_size=max_attn_size)\n self.size16 = base_size // 32\n self.size32 = base_size // 16\n self.size64 = base_size // 8\n self.use_attn = use_attn\n if self.use_attn:\n register_attention_control(unet, self.attention_store)\n register_hier_output(unet)\n self.attn_selector = attn_selector.split('+')\n\n def forward(self, *args, **kwargs):\n if self.use_attn:\n self.attention_store.reset()\n out_list = self.unet(*args, **kwargs)\n if self.use_attn:\n avg_attn = self.attention_store.get_average_attention()\n attn16, attn32, attn64 = self.process_attn(avg_attn)\n out_list[1] = torch.cat([out_list[1], attn16], dim=1)\n out_list[2] = torch.cat([out_list[2], attn32], dim=1)\n if attn64 is not None:\n out_list[3] = torch.cat([out_list[3], attn64], dim=1)\n return out_list[::-1]\n\n def process_attn(self, avg_attn):\n attns = {self.size16: [], self.size32: [], self.size64: []}\n for k in self.attn_selector:\n for up_attn in avg_attn[k]:\n size = int(math.sqrt(up_attn.shape[1]))\n attns[size].append(rearrange(up_attn, 'b (h w) c -> b c h w', h=size))\n attn16 = torch.stack(attns[self.size16]).mean(0)\n attn32 = torch.stack(attns[self.size32]).mean(0)\n if len(attns[self.size64]) > 0:\n attn64 = torch.stack(attns[self.size64]).mean(0)\n else:\n attn64 = None\n return attn16, attn32, attn64" }, { "identifier": "TextAdapterRefer", "path": "evp/models.py", "snippet": "class TextAdapterRefer(nn.Module):\n def __init__(self, text_dim=768):\n super().__init__()\n \n self.fc = nn.Sequential(\n nn.Linear(text_dim, text_dim),\n nn.GELU(),\n nn.Linear(text_dim, text_dim)\n )\n\n def forward(self, latents, texts, gamma):\n texts_after = self.fc(texts)\n texts = texts + gamma * texts_after\n return texts" } ]
import torch import torch.nn as nn import torch.nn.functional as F import os import sys from ldm.util import instantiate_from_config from transformers.models.clip.modeling_clip import CLIPTextModel from omegaconf import OmegaConf from lib.mask_predictor import SimpleDecoding from evp.models import UNetWrapper, TextAdapterRefer
3,071
# ReLU Activation self.relu = nn.ReLU() self.upscale = PixelShuffle(in_channels, 2) def forward(self, x): # Apply spatial attention spatial_attention = self.spatial_attention(x) x = x * spatial_attention # Apply channel attention channel_attention = self.channel_attention(x) x = x * channel_attention # Apply convolutional layers x = self.conv1(x) x = self.group_norm(x) x = self.relu(x) x = self.conv2(x) x = self.group_norm(x) x = self.relu(x) # Upsample x = self.upscale(x) return x class ConvLayer(nn.Module): def __init__(self, in_channels, out_channels): super(ConvLayer, self).__init__() self.conv1 = nn.Sequential( nn.Conv2d(in_channels, out_channels, 1), nn.GroupNorm(20, out_channels), nn.ReLU(), ) def forward(self, x): x = self.conv1(x) return x class InverseMultiAttentiveFeatureRefinement(nn.Module): def __init__(self, in_channels_list): super(InverseMultiAttentiveFeatureRefinement, self).__init__() self.layer1 = AttentionModule(in_channels_list[0], in_channels_list[0]) self.layer2 = AttentionDownsamplingModule(in_channels_list[0], in_channels_list[0]//2, scale_factor = 2) self.layer3 = ConvLayer(in_channels_list[0]//2 + in_channels_list[1], in_channels_list[1]) self.layer4 = AttentionDownsamplingModule(in_channels_list[1], in_channels_list[1]//2, scale_factor = 2) self.layer5 = ConvLayer(in_channels_list[1]//2 + in_channels_list[2], in_channels_list[2]) self.layer6 = AttentionDownsamplingModule(in_channels_list[2], in_channels_list[2]//2, scale_factor = 2) self.layer7 = ConvLayer(in_channels_list[2]//2 + in_channels_list[3], in_channels_list[3]) ''' self.layer8 = AttentionUpsamplingModule(in_channels_list[3], in_channels_list[3]) self.layer9 = ConvLayer(in_channels_list[2] + in_channels_list[3], in_channels_list[2]) self.layer10 = AttentionUpsamplingModule(in_channels_list[2], in_channels_list[2]) self.layer11 = ConvLayer(in_channels_list[1] + in_channels_list[2], in_channels_list[1]) self.layer12 = AttentionUpsamplingModule(in_channels_list[1], in_channels_list[1]) self.layer13 = ConvLayer(in_channels_list[0] + in_channels_list[1], in_channels_list[0]) ''' def forward(self, inputs): x_c4, x_c3, x_c2, x_c1 = inputs x_c4 = self.layer1(x_c4) x_c4_3 = self.layer2(x_c4) x_c3 = torch.cat([x_c4_3, x_c3], dim=1) x_c3 = self.layer3(x_c3) x_c3_2 = self.layer4(x_c3) x_c2 = torch.cat([x_c3_2, x_c2], dim=1) x_c2 = self.layer5(x_c2) x_c2_1 = self.layer6(x_c2) x_c1 = torch.cat([x_c2_1, x_c1], dim=1) x_c1 = self.layer7(x_c1) ''' x_c1_2 = self.layer8(x_c1) x_c2 = torch.cat([x_c1_2, x_c2], dim=1) x_c2 = self.layer9(x_c2) x_c2_3 = self.layer10(x_c2) x_c3 = torch.cat([x_c2_3, x_c3], dim=1) x_c3 = self.layer11(x_c3) x_c3_4 = self.layer12(x_c3) x_c4 = torch.cat([x_c3_4, x_c4], dim=1) x_c4 = self.layer13(x_c4) ''' return [x_c4, x_c3, x_c2, x_c1] class EVPRefer(nn.Module): """Encoder Decoder segmentors. EncoderDecoder typically consists of backbone, decode_head, auxiliary_head. Note that auxiliary_head is only used for deep supervision during training, which could be dumped during inference. 
""" def __init__(self, sd_path=None, base_size=512, token_embed_dim=768, neck_dim=[320,680,1320,1280], **args): super().__init__() config = OmegaConf.load('./v1-inference.yaml') if os.path.exists(f'{sd_path}'): config.model.params.ckpt_path = f'{sd_path}' else: config.model.params.ckpt_path = None sd_model = instantiate_from_config(config.model) self.encoder_vq = sd_model.first_stage_model self.unet = UNetWrapper(sd_model.model, base_size=base_size) del sd_model.cond_stage_model del self.encoder_vq.decoder for param in self.encoder_vq.parameters(): param.requires_grad = True
def icnr(x, scale=2, init=nn.init.kaiming_normal_): """ Checkerboard artifact free sub-pixel convolution https://arxiv.org/abs/1707.02937 """ ni,nf,h,w = x.shape ni2 = int(ni/(scale**2)) k = init(torch.zeros([ni2,nf,h,w])).transpose(0, 1) k = k.contiguous().view(ni2, nf, -1) k = k.repeat(1, 1, scale**2) k = k.contiguous().view([nf,ni,h,w]).transpose(0, 1) x.data.copy_(k) class PixelShuffle(nn.Module): """ Real-Time Single Image and Video Super-Resolution https://arxiv.org/abs/1609.05158 """ def __init__(self, n_channels, scale): super(PixelShuffle, self).__init__() self.conv = nn.Conv2d(n_channels, n_channels*(scale**2), kernel_size=1) icnr(self.conv.weight) self.shuf = nn.PixelShuffle(scale) self.relu = nn.ReLU() def forward(self,x): x = self.shuf(self.relu(self.conv(x))) return x class AttentionModule(nn.Module): def __init__(self, in_channels, out_channels): super(AttentionModule, self).__init__() # Convolutional Layers self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1) # Group Normalization self.group_norm = nn.GroupNorm(20, out_channels) # ReLU Activation self.relu = nn.ReLU() # Spatial Attention self.spatial_attention = nn.Sequential( nn.Conv2d(in_channels, 1, kernel_size=1), nn.Sigmoid() ) def forward(self, x): # Apply spatial attention spatial_attention = self.spatial_attention(x) x = x * spatial_attention # Apply convolutional layer x = self.conv1(x) x = self.group_norm(x) x = self.relu(x) return x class AttentionDownsamplingModule(nn.Module): def __init__(self, in_channels, out_channels, scale_factor=2): super(AttentionDownsamplingModule, self).__init__() # Spatial Attention self.spatial_attention = nn.Sequential( nn.Conv2d(in_channels, 1, kernel_size=1), nn.Sigmoid() ) # Channel Attention self.channel_attention = nn.Sequential( nn.AdaptiveAvgPool2d(1), nn.Conv2d(in_channels, in_channels // 8, kernel_size=1), nn.ReLU(inplace=True), nn.Conv2d(in_channels // 8, in_channels, kernel_size=1), nn.Sigmoid() ) # Convolutional Layers if scale_factor == 2: self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1) elif scale_factor == 4: self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=2, padding=1) self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=2, padding=1) # Group Normalization self.group_norm = nn.GroupNorm(20, out_channels) # ReLU Activation self.relu = nn.ReLU(inplace=True) def forward(self, x): # Apply spatial attention spatial_attention = self.spatial_attention(x) x = x * spatial_attention # Apply channel attention channel_attention = self.channel_attention(x) x = x * channel_attention # Apply convolutional layers x = self.conv1(x) x = self.group_norm(x) x = self.relu(x) x = self.conv2(x) x = self.group_norm(x) x = self.relu(x) return x class AttentionUpsamplingModule(nn.Module): def __init__(self, in_channels, out_channels): super(AttentionUpsamplingModule, self).__init__() # Spatial Attention for outs[2] self.spatial_attention = nn.Sequential( nn.Conv2d(in_channels, 1, kernel_size=1), nn.Sigmoid() ) # Channel Attention for outs[2] self.channel_attention = nn.Sequential( nn.AdaptiveAvgPool2d(1), nn.Conv2d(in_channels, in_channels // 8, kernel_size=1), nn.ReLU(), nn.Conv2d(in_channels // 8, in_channels, kernel_size=1), nn.Sigmoid() ) self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1) self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1) # Group Normalization self.group_norm = nn.GroupNorm(20, out_channels) 
# ReLU Activation self.relu = nn.ReLU() self.upscale = PixelShuffle(in_channels, 2) def forward(self, x): # Apply spatial attention spatial_attention = self.spatial_attention(x) x = x * spatial_attention # Apply channel attention channel_attention = self.channel_attention(x) x = x * channel_attention # Apply convolutional layers x = self.conv1(x) x = self.group_norm(x) x = self.relu(x) x = self.conv2(x) x = self.group_norm(x) x = self.relu(x) # Upsample x = self.upscale(x) return x class ConvLayer(nn.Module): def __init__(self, in_channels, out_channels): super(ConvLayer, self).__init__() self.conv1 = nn.Sequential( nn.Conv2d(in_channels, out_channels, 1), nn.GroupNorm(20, out_channels), nn.ReLU(), ) def forward(self, x): x = self.conv1(x) return x class InverseMultiAttentiveFeatureRefinement(nn.Module): def __init__(self, in_channels_list): super(InverseMultiAttentiveFeatureRefinement, self).__init__() self.layer1 = AttentionModule(in_channels_list[0], in_channels_list[0]) self.layer2 = AttentionDownsamplingModule(in_channels_list[0], in_channels_list[0]//2, scale_factor = 2) self.layer3 = ConvLayer(in_channels_list[0]//2 + in_channels_list[1], in_channels_list[1]) self.layer4 = AttentionDownsamplingModule(in_channels_list[1], in_channels_list[1]//2, scale_factor = 2) self.layer5 = ConvLayer(in_channels_list[1]//2 + in_channels_list[2], in_channels_list[2]) self.layer6 = AttentionDownsamplingModule(in_channels_list[2], in_channels_list[2]//2, scale_factor = 2) self.layer7 = ConvLayer(in_channels_list[2]//2 + in_channels_list[3], in_channels_list[3]) ''' self.layer8 = AttentionUpsamplingModule(in_channels_list[3], in_channels_list[3]) self.layer9 = ConvLayer(in_channels_list[2] + in_channels_list[3], in_channels_list[2]) self.layer10 = AttentionUpsamplingModule(in_channels_list[2], in_channels_list[2]) self.layer11 = ConvLayer(in_channels_list[1] + in_channels_list[2], in_channels_list[1]) self.layer12 = AttentionUpsamplingModule(in_channels_list[1], in_channels_list[1]) self.layer13 = ConvLayer(in_channels_list[0] + in_channels_list[1], in_channels_list[0]) ''' def forward(self, inputs): x_c4, x_c3, x_c2, x_c1 = inputs x_c4 = self.layer1(x_c4) x_c4_3 = self.layer2(x_c4) x_c3 = torch.cat([x_c4_3, x_c3], dim=1) x_c3 = self.layer3(x_c3) x_c3_2 = self.layer4(x_c3) x_c2 = torch.cat([x_c3_2, x_c2], dim=1) x_c2 = self.layer5(x_c2) x_c2_1 = self.layer6(x_c2) x_c1 = torch.cat([x_c2_1, x_c1], dim=1) x_c1 = self.layer7(x_c1) ''' x_c1_2 = self.layer8(x_c1) x_c2 = torch.cat([x_c1_2, x_c2], dim=1) x_c2 = self.layer9(x_c2) x_c2_3 = self.layer10(x_c2) x_c3 = torch.cat([x_c2_3, x_c3], dim=1) x_c3 = self.layer11(x_c3) x_c3_4 = self.layer12(x_c3) x_c4 = torch.cat([x_c3_4, x_c4], dim=1) x_c4 = self.layer13(x_c4) ''' return [x_c4, x_c3, x_c2, x_c1] class EVPRefer(nn.Module): """Encoder Decoder segmentors. EncoderDecoder typically consists of backbone, decode_head, auxiliary_head. Note that auxiliary_head is only used for deep supervision during training, which could be dumped during inference. 
""" def __init__(self, sd_path=None, base_size=512, token_embed_dim=768, neck_dim=[320,680,1320,1280], **args): super().__init__() config = OmegaConf.load('./v1-inference.yaml') if os.path.exists(f'{sd_path}'): config.model.params.ckpt_path = f'{sd_path}' else: config.model.params.ckpt_path = None sd_model = instantiate_from_config(config.model) self.encoder_vq = sd_model.first_stage_model self.unet = UNetWrapper(sd_model.model, base_size=base_size) del sd_model.cond_stage_model del self.encoder_vq.decoder for param in self.encoder_vq.parameters(): param.requires_grad = True
self.text_adapter = TextAdapterRefer(text_dim=token_embed_dim)
1
2023-12-15 14:13:59+00:00
4k
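A minimal sketch of the spatial-plus-channel gating pattern that the attention modules in this record repeat; the channel count and reduction factor are arbitrary choices for illustration, not values taken from the repository.

import torch
import torch.nn as nn

class SimpleAttentionGate(nn.Module):
    """Gate a feature map with a 1-channel spatial mask and a per-channel squeeze-excite mask."""
    def __init__(self, channels: int, reduction: int = 8):
        super().__init__()
        self.spatial = nn.Sequential(nn.Conv2d(channels, 1, kernel_size=1), nn.Sigmoid())
        self.channel = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(channels, channels // reduction, kernel_size=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(channels // reduction, channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = x * self.spatial(x)   # (B, 1, H, W) mask broadcast over channels
        x = x * self.channel(x)   # (B, C, 1, 1) mask broadcast over space
        return x

y = SimpleAttentionGate(64)(torch.randn(1, 64, 32, 32))
print(y.shape)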
penghao-wu/vstar
LLaVA/llava/model/llava_arch.py
[ { "identifier": "build_vision_tower", "path": "LLaVA/llava/model/multimodal_encoder/builder.py", "snippet": "def build_vision_tower(vision_tower_cfg, **kwargs):\n vision_tower = getattr(vision_tower_cfg, 'mm_vision_tower', getattr(vision_tower_cfg, 'vision_tower', None))\n is_absolute_path_exists = os.path.exists(vision_tower)\n if is_absolute_path_exists or vision_tower.startswith(\"openai\") or vision_tower.startswith(\"laion\"):\n return CLIPVisionTower(vision_tower, args=vision_tower_cfg, **kwargs)\n\n raise ValueError(f'Unknown vision tower: {vision_tower}')" }, { "identifier": "build_vision_projector", "path": "LLaVA/llava/model/multimodal_projector/builder.py", "snippet": "def build_vision_projector(config, object_projector=False, delay_load=False, **kwargs):\n\tif not object_projector:\n\t\tprojector_type = getattr(config, 'mm_projector_type', 'linear')\n\telse:\n\t\tprojector_type = getattr(config, 'object_mm_projector_type', 'perceiver')\n\t\n\tif projector_type == 'linear':\n\t\treturn nn.Linear(config.mm_hidden_size, config.hidden_size)\n\n\tmlp_gelu_match = re.match(r'^mlp(\\d+)x_gelu$', projector_type)\n\tif mlp_gelu_match:\n\t\tmlp_depth = int(mlp_gelu_match.group(1))\n\t\tmodules = [nn.Linear(config.mm_hidden_size, config.hidden_size)]\n\t\tfor _ in range(1, mlp_depth):\n\t\t\tmodules.append(nn.GELU())\n\t\t\tmodules.append(nn.Linear(config.hidden_size, config.hidden_size))\n\t\treturn nn.Sequential(*modules)\n\n\tif projector_type == 'identity':\n\t\treturn IdentityMap()\n\n\tif projector_type == \"perceiver\":\n\t\treturn nn.Sequential(\n\t\t\t\t\tnn.LayerNorm(config.mm_hidden_size),\n\t\t\t\t\tPerceiverResampler(\n\t\t\t\t\tdim = config.mm_hidden_size,\n\t\t\t\t\tdim_head = 96,\n\t\t\t\t\tdepth = 6,\n\t\t\t\t\theads = 16,\n\t\t\t\t\tnum_latents = 32,\n\t\t\t\t\tnum_media_embeds = 1\n\t\t\t\t\t),\n\t\t\t\t\tnn.Linear(\n\t\t\t\t\tconfig.mm_hidden_size, config.hidden_size\n\t\t\t\t\t)\n\t\t\t\t\t)\n\n\traise ValueError(f'Unknown projector type: {projector_type}')" }, { "identifier": "IGNORE_INDEX", "path": "LLaVA/llava/constants.py", "snippet": "IGNORE_INDEX = -100" }, { "identifier": "IMAGE_TOKEN_INDEX", "path": "LLaVA/llava/constants.py", "snippet": "IMAGE_TOKEN_INDEX = -200" }, { "identifier": "DEFAULT_IMAGE_PATCH_TOKEN", "path": "LLaVA/llava/constants.py", "snippet": "DEFAULT_IMAGE_PATCH_TOKEN = \"<im_patch>\"" }, { "identifier": "DEFAULT_IM_START_TOKEN", "path": "LLaVA/llava/constants.py", "snippet": "DEFAULT_IM_START_TOKEN = \"<im_start>\"" }, { "identifier": "DEFAULT_IM_END_TOKEN", "path": "LLaVA/llava/constants.py", "snippet": "DEFAULT_IM_END_TOKEN = \"<im_end>\"" } ]
from abc import ABC, abstractmethod from LLaVA.llava.model.multimodal_encoder.builder import build_vision_tower from LLaVA.llava.model.multimodal_projector.builder import build_vision_projector from ..constants import IGNORE_INDEX, IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_PATCH_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN import torch
1,774
# Copyright 2023 Haotian Liu # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. class LlavaMetaModel: def __init__(self, config): super(LlavaMetaModel, self).__init__(config) if hasattr(config, "mm_vision_tower"): self.vision_tower = build_vision_tower(config, delay_load=True) self.mm_projector = build_vision_projector(config) def get_vision_tower(self): vision_tower = getattr(self, 'vision_tower', None) if type(vision_tower) is list: vision_tower = vision_tower[0] return vision_tower def initialize_vision_modules(self, model_args, fsdp=None): vision_tower = model_args.vision_tower mm_vision_select_layer = model_args.mm_vision_select_layer mm_vision_select_feature = model_args.mm_vision_select_feature pretrain_mm_mlp_adapter = model_args.pretrain_mm_mlp_adapter self.config.mm_vision_tower = vision_tower if self.get_vision_tower() is None: vision_tower = build_vision_tower(model_args) if fsdp is not None and len(fsdp) > 0: self.vision_tower = [vision_tower] else: self.vision_tower = vision_tower else: if fsdp is not None and len(fsdp) > 0: vision_tower = self.vision_tower[0] else: vision_tower = self.vision_tower vision_tower.load_model() self.config.use_mm_proj = True self.config.mm_projector_type = getattr(model_args, 'mm_projector_type', 'linear') self.config.mm_hidden_size = vision_tower.hidden_size self.config.mm_vision_select_layer = mm_vision_select_layer self.config.mm_vision_select_feature = mm_vision_select_feature if getattr(self, 'mm_projector', None) is None: self.mm_projector = build_vision_projector(self.config) if pretrain_mm_mlp_adapter is not None: mm_projector_weights = torch.load(pretrain_mm_mlp_adapter, map_location='cpu') def get_w(weights, keyword): return {k.split(keyword + '.')[1]: v for k, v in weights.items() if keyword in k} self.mm_projector.load_state_dict(get_w(mm_projector_weights, 'mm_projector')) class LlavaMetaForCausalLM(ABC): @abstractmethod def get_model(self): pass def get_vision_tower(self): return self.get_model().get_vision_tower() def encode_images(self, images): image_features = self.get_model().get_vision_tower()(images) image_features = self.get_model().mm_projector(image_features) return image_features def prepare_inputs_labels_for_multimodal( self, input_ids, attention_mask, past_key_values, labels, images ): vision_tower = self.get_vision_tower() if vision_tower is None or images is None or input_ids.shape[1] == 1: if past_key_values is not None and vision_tower is not None and images is not None and input_ids.shape[1] == 1: attention_mask = torch.ones((attention_mask.shape[0], past_key_values[-1][-1].shape[-2] + 1), dtype=attention_mask.dtype, device=attention_mask.device) return input_ids, attention_mask, past_key_values, None, labels if type(images) is list or images.ndim == 5: concat_images = torch.cat([image for image in images], dim=0) image_features = self.encode_images(concat_images) split_sizes = [image.shape[0] for image in images] image_features = torch.split(image_features, split_sizes, dim=0) image_features = [x.flatten(0, 1) for x in 
image_features] else: image_features = self.encode_images(images) new_input_embeds = [] new_labels = [] if labels is not None else None cur_image_idx = 0 for batch_idx, cur_input_ids in enumerate(input_ids):
# Copyright 2023 Haotian Liu # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. class LlavaMetaModel: def __init__(self, config): super(LlavaMetaModel, self).__init__(config) if hasattr(config, "mm_vision_tower"): self.vision_tower = build_vision_tower(config, delay_load=True) self.mm_projector = build_vision_projector(config) def get_vision_tower(self): vision_tower = getattr(self, 'vision_tower', None) if type(vision_tower) is list: vision_tower = vision_tower[0] return vision_tower def initialize_vision_modules(self, model_args, fsdp=None): vision_tower = model_args.vision_tower mm_vision_select_layer = model_args.mm_vision_select_layer mm_vision_select_feature = model_args.mm_vision_select_feature pretrain_mm_mlp_adapter = model_args.pretrain_mm_mlp_adapter self.config.mm_vision_tower = vision_tower if self.get_vision_tower() is None: vision_tower = build_vision_tower(model_args) if fsdp is not None and len(fsdp) > 0: self.vision_tower = [vision_tower] else: self.vision_tower = vision_tower else: if fsdp is not None and len(fsdp) > 0: vision_tower = self.vision_tower[0] else: vision_tower = self.vision_tower vision_tower.load_model() self.config.use_mm_proj = True self.config.mm_projector_type = getattr(model_args, 'mm_projector_type', 'linear') self.config.mm_hidden_size = vision_tower.hidden_size self.config.mm_vision_select_layer = mm_vision_select_layer self.config.mm_vision_select_feature = mm_vision_select_feature if getattr(self, 'mm_projector', None) is None: self.mm_projector = build_vision_projector(self.config) if pretrain_mm_mlp_adapter is not None: mm_projector_weights = torch.load(pretrain_mm_mlp_adapter, map_location='cpu') def get_w(weights, keyword): return {k.split(keyword + '.')[1]: v for k, v in weights.items() if keyword in k} self.mm_projector.load_state_dict(get_w(mm_projector_weights, 'mm_projector')) class LlavaMetaForCausalLM(ABC): @abstractmethod def get_model(self): pass def get_vision_tower(self): return self.get_model().get_vision_tower() def encode_images(self, images): image_features = self.get_model().get_vision_tower()(images) image_features = self.get_model().mm_projector(image_features) return image_features def prepare_inputs_labels_for_multimodal( self, input_ids, attention_mask, past_key_values, labels, images ): vision_tower = self.get_vision_tower() if vision_tower is None or images is None or input_ids.shape[1] == 1: if past_key_values is not None and vision_tower is not None and images is not None and input_ids.shape[1] == 1: attention_mask = torch.ones((attention_mask.shape[0], past_key_values[-1][-1].shape[-2] + 1), dtype=attention_mask.dtype, device=attention_mask.device) return input_ids, attention_mask, past_key_values, None, labels if type(images) is list or images.ndim == 5: concat_images = torch.cat([image for image in images], dim=0) image_features = self.encode_images(concat_images) split_sizes = [image.shape[0] for image in images] image_features = torch.split(image_features, split_sizes, dim=0) image_features = [x.flatten(0, 1) for x in 
image_features] else: image_features = self.encode_images(images) new_input_embeds = [] new_labels = [] if labels is not None else None cur_image_idx = 0 for batch_idx, cur_input_ids in enumerate(input_ids):
if (cur_input_ids == IMAGE_TOKEN_INDEX).sum() == 0:
3
2023-12-15 14:58:24+00:00
4k
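A minimal sketch of the token-splicing step that prepare_inputs_labels_for_multimodal performs above: split a sequence of input ids at each IMAGE_TOKEN_INDEX placeholder so image features can be inserted between the text chunks. Only the splitting is shown; embedding lookup and feature insertion are omitted.

import torch

IMAGE_TOKEN_INDEX = -200   # sentinel placeholder id, as defined in the record's constants

def split_at_image_tokens(input_ids: torch.Tensor):
    # Return the text chunks that surround each image placeholder token.
    positions = torch.where(input_ids == IMAGE_TOKEN_INDEX)[0].tolist()
    chunks, start = [], 0
    for pos in positions:
        chunks.append(input_ids[start:pos])
        start = pos + 1
    chunks.append(input_ids[start:])
    return chunks

ids = torch.tensor([1, 5, 9, IMAGE_TOKEN_INDEX, 7, 3, IMAGE_TOKEN_INDEX, 2])
print([c.tolist() for c in split_at_image_tokens(ids)])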
ValdonVitija/crap
crap/cli.py
[ { "identifier": "CrapManager", "path": "crap/crap_manager.py", "snippet": "class CrapManager:\n __slots__ = (\"path_\", \"venv_checker\", \"package_usage_counter\", \"deleted_packages\")\n\n def __init__(self, path_: str):\n self.path_ = pathlib.Path(path_).absolute()\n self.venv_checker = VirtualEnvChecker()\n self.package_usage_counter = PackageUsageCounter()\n self.deleted_packages = set()\n \n def run(self):\n if not self.path_.exists():\n raise FileNotFoundError(\"File/Dir not found\")\n\n total_steps = 4 \n bar_width = 100 \n bar_color = 'red' \n\n with tqdm(total=total_steps, ncols=bar_width, colour=bar_color) as pbar:\n self._process_path()\n pbar.update(1) \n\n initial_packages = get_current_packages()\n pbar.update(1) \n\n self._cleanup_packages()\n pbar.update(1) \n\n freeze_into_requirements()\n reinstall_from_requirements()\n pbar.update(1) \n\n final_packages = get_current_packages()\n self.deleted_packages = initial_packages - final_packages\n\n print()\n self.print_deleted_packages()\n\n def _process_path(self):\n if self.path_.is_file():\n self._analyze_file(self.path_)\n elif self.path_.is_dir():\n self._analyze_directory()\n\n def _analyze_file(self, file_path):\n pre_cleanup_with_ruff(file_path)\n analyzer = PythonFileAnalyzer(file_path)\n analyzer.analyze()\n for package in get_current_packages():\n if package in analyzer.imported_modules:\n self.package_usage_counter.increment_package_count(package)\n\n def _analyze_directory(self):\n \"\"\"\n Analyzes the directory and its subdirectories, excluding certain directories,\n and analyzes each Python file found.\n\n This method walks through the directory tree using os.walk() function,\n excluding directories that are likely to be virtual environments or specified\n in the excluded_dirs list. 
For each file with a \".py\" extension, it calls\n the _analyze_file() method to perform analysis.\n\n Parameters:\n None\n\n Returns:\n None\n \"\"\"\n excluded_dirs = self._get_excluded_dirs()\n\n for dirpath, dirnames, filenames in os.walk(self.path_):\n dirnames[:] = [\n d\n for d in dirnames\n if not self.venv_checker.is_likely_venv(os.path.join(dirpath, d))\n and d not in excluded_dirs\n ]\n\n for filename in filenames:\n if filename.endswith(\".py\"):\n file_path = pathlib.Path(dirpath) / filename\n self._analyze_file(file_path)\n\n def _cleanup_packages(self):\n important_packages = self._get_important_packages()\n unused_packages = self.package_usage_counter.get_unused_packages(\n important_packages\n )\n\n for package in unused_packages:\n uninstall_package(package)\n self.deleted_packages.add(package)\n\n @staticmethod\n def _get_important_packages() -> Set[str]:\n \"\"\"\n Retrieves the important packages from the important_packages.txt file.\n These packages cannot be removed, because they are most likely dev tools that do not\n get imported in the code.\n\n Returns:\n A set of strings representing the important packages.\n \"\"\"\n with open(\n f\"{pathlib.Path(__file__).parent}/data/important_packages.txt\",\n \"r\",\n encoding=\"utf-8\",\n ) as file:\n return {line.strip() for line in file}\n\n def print_deleted_packages(self):\n print(\"🗑️📦Deleted packages:\")\n for package in self.deleted_packages:\n print(f\" - {package}\")\n\n @staticmethod\n def _get_excluded_dirs() -> Set[str]:\n \"\"\"\n These directories are excluded from the analysis.\n \"\"\"\n return {\"__pycache__\", \".git\"}" }, { "identifier": "PackageManagement", "path": "crap/package_management.py", "snippet": "class PackageManagement:\n def __init__(self, filename=\"important_packages.txt\"):\n self.filename = filename\n self.filepath = pathlib.Path(__file__).parent / \"data\" / self.filename\n\n def add_important_package(self, package: str):\n \"\"\"\n Add a package to the list of important packages.\n\n Args:\n package (str): The name of the package to add.\n filename (str, optional): The name of the file to store the list of important packages.\n Defaults to \"important_packages.txt\".\n \"\"\"\n with open(self.filepath, \"r+\", encoding=\"utf-8\") as file:\n existing_packages = {line.strip() for line in file}\n if package not in existing_packages:\n file.write(package + \"\\n\")\n print(f\"✅ Added '{package}' to important packages\")\n else:\n print(f\"'{package}' is already listed as an important package\")\n\n def show_important_packages(self) -> List[str]:\n \"\"\"\n Display the list of important packages.\n\n Args:\n filename (str): The name of the file containing the list of important packages.\n Default is \"important_packages.txt\".\n\n Returns:\n List[str]: The list of important packages.\n\n \"\"\"\n with open(self.filepath, \"r\", encoding=\"utf-8\") as file:\n important_packages = [line.strip() for line in file]\n print(\"📦 Important packages:\")\n for package in important_packages:\n print(f\" - {package}\")\n\n def remove_important_package(self, package: str):\n \"\"\"\n Removes a package from the list of important packages.\n\n Args:\n package (str): The name of the package to be removed.\n filename (str, optional): The name of the file containing the list of important packages.\n Defaults to \"important_packages.txt\".\n \"\"\"\n with open(self.filepath, \"r+\", encoding=\"utf-8\") as file:\n lines = file.readlines()\n file.seek(0)\n file.truncate(0)\n for line in lines:\n if 
line.strip() != package:\n file.write(line)\n\n if package in (line.strip() for line in lines):\n print(f\"❌ Removed '{package}' from important packages\")\n else:\n print(f\"'{package}' was not found in important packages\")\n\n def flush_important_packages(self):\n \"\"\"\n Flushes the important packages by removing the contents of the specified file.\n\n Args:\n filename (str, optional): The name of the file to flush. Defaults to \"important_packages.txt\".\n \"\"\"\n with open(self.filepath, \"w\", encoding=\"utf-8\"):\n pass\n print(\"All important packages have been removed.\")\n\n def factory_reset_important_packages(self):\n \"\"\"\n Reset the list of important packages to the default packages\n \"\"\"\n DEFAULT_PACKAGES_FILE = \"default_important_packages.txt\"\n default_packages_path = (\n pathlib.Path(__file__).parent / \"data\" / DEFAULT_PACKAGES_FILE\n )\n\n try:\n shutil.copyfile(default_packages_path, self.filepath)\n print(\"🔄 Reset important packages to default\")\n except FileNotFoundError:\n print(f\"Default packages file '{DEFAULT_PACKAGES_FILE}' not found\")" } ]
from functools import lru_cache
from typing import List
from typing_extensions import Annotated, Optional

from crap.crap_manager import CrapManager
from crap.package_management import PackageManagement
import typer
2,138
__all__: List[str] = ["get_app"] app = typer.Typer(no_args_is_help=True) @app.command() def crap( path_: Annotated[str, typer.Argument(help="path to file/files")] = ".", important: Optional[str] = typer.Option( None, "--important", "-i", help="Add a package to the list of important packages", ), remove: Optional[str] = typer.Option( None, "--remove", "-r", help="Remove a package from the list of important packages", ), flush: bool = typer.Option( False, "--flush", "-f", help="Remove all packages from the list of important packages", ), show: bool = typer.Option( False, "--show", "-s", help="Show all important packages", ), factory_reset: bool = typer.Option( False, "--factory-reset", "-fr", help="Reset all settings to default", ), ): if ( sum( [ bool(opt) for opt in [path_ != ".", important, remove, flush, show, factory_reset] ] ) > 1 ): print("Error: Options cannot be used together.") raise typer.Exit(code=1) package_management = PackageManagement() if important: package_management.add_important_package(important) elif remove: package_management.remove_important_package(remove) elif flush: package_management.flush_important_packages() elif show: package_management.show_important_packages() elif factory_reset: package_management.factory_reset_important_packages() else:
__all__: List[str] = ["get_app"] app = typer.Typer(no_args_is_help=True) @app.command() def crap( path_: Annotated[str, typer.Argument(help="path to file/files")] = ".", important: Optional[str] = typer.Option( None, "--important", "-i", help="Add a package to the list of important packages", ), remove: Optional[str] = typer.Option( None, "--remove", "-r", help="Remove a package from the list of important packages", ), flush: bool = typer.Option( False, "--flush", "-f", help="Remove all packages from the list of important packages", ), show: bool = typer.Option( False, "--show", "-s", help="Show all important packages", ), factory_reset: bool = typer.Option( False, "--factory-reset", "-fr", help="Reset all settings to default", ), ): if ( sum( [ bool(opt) for opt in [path_ != ".", important, remove, flush, show, factory_reset] ] ) > 1 ): print("Error: Options cannot be used together.") raise typer.Exit(code=1) package_management = PackageManagement() if important: package_management.add_important_package(important) elif remove: package_management.remove_important_package(remove) elif flush: package_management.flush_important_packages() elif show: package_management.show_important_packages() elif factory_reset: package_management.factory_reset_important_packages() else:
manager = CrapManager(path_=path_)
0
2023-12-19 20:22:37+00:00
4k
worm128/AI-YinMei
text-generation-webui/extensions/openai/script.py
[ { "identifier": "ChatCompletionRequest", "path": "text-generation-webui/extensions/openai/typing.py", "snippet": "class ChatCompletionRequest(GenerationOptions, ChatCompletionRequestParams):\n pass" }, { "identifier": "ChatCompletionResponse", "path": "text-generation-webui/extensions/openai/typing.py", "snippet": "class ChatCompletionResponse(BaseModel):\n id: str\n choices: List[dict]\n created: int = int(time.time())\n model: str\n object: str = \"chat.completion\"\n usage: dict" }, { "identifier": "CompletionRequest", "path": "text-generation-webui/extensions/openai/typing.py", "snippet": "class CompletionRequest(GenerationOptions, CompletionRequestParams):\n pass" }, { "identifier": "CompletionResponse", "path": "text-generation-webui/extensions/openai/typing.py", "snippet": "class CompletionResponse(BaseModel):\n id: str\n choices: List[dict]\n created: int = int(time.time())\n model: str\n object: str = \"text_completion\"\n usage: dict" }, { "identifier": "DecodeRequest", "path": "text-generation-webui/extensions/openai/typing.py", "snippet": "class DecodeRequest(BaseModel):\n tokens: List[int]" }, { "identifier": "DecodeResponse", "path": "text-generation-webui/extensions/openai/typing.py", "snippet": "class DecodeResponse(BaseModel):\n text: str" }, { "identifier": "EmbeddingsRequest", "path": "text-generation-webui/extensions/openai/typing.py", "snippet": "class EmbeddingsRequest(BaseModel):\n input: str | List[str]\n model: str | None = Field(default=None, description=\"Unused parameter. To change the model, set the OPENEDAI_EMBEDDING_MODEL and OPENEDAI_EMBEDDING_DEVICE environment variables before starting the server.\")\n encoding_format: str = Field(default=\"float\", description=\"Can be float or base64.\")\n user: str | None = Field(default=None, description=\"Unused parameter.\")" }, { "identifier": "EmbeddingsResponse", "path": "text-generation-webui/extensions/openai/typing.py", "snippet": "class EmbeddingsResponse(BaseModel):\n index: int\n embedding: List[float]\n object: str = \"embedding\"" }, { "identifier": "EncodeRequest", "path": "text-generation-webui/extensions/openai/typing.py", "snippet": "class EncodeRequest(BaseModel):\n text: str" }, { "identifier": "EncodeResponse", "path": "text-generation-webui/extensions/openai/typing.py", "snippet": "class EncodeResponse(BaseModel):\n tokens: List[int]\n length: int" }, { "identifier": "LoadLorasRequest", "path": "text-generation-webui/extensions/openai/typing.py", "snippet": "class LoadLorasRequest(BaseModel):\n lora_names: List[str]" }, { "identifier": "LoadModelRequest", "path": "text-generation-webui/extensions/openai/typing.py", "snippet": "class LoadModelRequest(BaseModel):\n model_name: str\n args: dict | None = None\n settings: dict | None = None" }, { "identifier": "LogitsRequest", "path": "text-generation-webui/extensions/openai/typing.py", "snippet": "class LogitsRequest(GenerationOptions, LogitsRequestParams):\n pass" }, { "identifier": "LogitsResponse", "path": "text-generation-webui/extensions/openai/typing.py", "snippet": "class LogitsResponse(BaseModel):\n logits: dict" }, { "identifier": "LoraListResponse", "path": "text-generation-webui/extensions/openai/typing.py", "snippet": "class LoraListResponse(BaseModel):\n lora_names: List[str]" }, { "identifier": "ModelInfoResponse", "path": "text-generation-webui/extensions/openai/typing.py", "snippet": "class ModelInfoResponse(BaseModel):\n model_name: str\n lora_names: List[str]" }, { "identifier": "ModelListResponse", "path": 
"text-generation-webui/extensions/openai/typing.py", "snippet": "class ModelListResponse(BaseModel):\n model_names: List[str]" }, { "identifier": "TokenCountResponse", "path": "text-generation-webui/extensions/openai/typing.py", "snippet": "class TokenCountResponse(BaseModel):\n length: int" }, { "identifier": "to_dict", "path": "text-generation-webui/extensions/openai/typing.py", "snippet": "def to_dict(obj):\n return obj.__dict__" } ]
import asyncio
import json
import os
import traceback
import speech_recognition as sr
import uvicorn
import extensions.openai.completions as OAIcompletions
import extensions.openai.embeddings as OAIembeddings
import extensions.openai.images as OAIimages
import extensions.openai.logits as OAIlogits
import extensions.openai.models as OAImodels
import extensions.openai.moderations as OAImoderations
from threading import Thread
from fastapi import Depends, FastAPI, Header, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from fastapi.requests import Request
from fastapi.responses import JSONResponse
from pydub import AudioSegment
from sse_starlette import EventSourceResponse
from extensions.openai.errors import ServiceUnavailableError
from extensions.openai.tokens import token_count, token_decode, token_encode
from extensions.openai.utils import _start_cloudflared
from modules import shared
from modules.logging_colors import logger
from modules.models import unload_model
from modules.text_generation import stop_everything_event
from .typing import (
    ChatCompletionRequest,
    ChatCompletionResponse,
    CompletionRequest,
    CompletionResponse,
    DecodeRequest,
    DecodeResponse,
    EmbeddingsRequest,
    EmbeddingsResponse,
    EncodeRequest,
    EncodeResponse,
    LoadLorasRequest,
    LoadModelRequest,
    LogitsRequest,
    LogitsResponse,
    LoraListResponse,
    ModelInfoResponse,
    ModelListResponse,
    TokenCountResponse,
    to_dict
)
2,821
@app.get('/v1/billing/usage', dependencies=check_key) def handle_billing_usage(): ''' Ex. /v1/dashboard/billing/usage?start_date=2023-05-01&end_date=2023-05-31 ''' return JSONResponse(content={"total_usage": 0}) @app.post('/v1/audio/transcriptions', dependencies=check_key) async def handle_audio_transcription(request: Request): r = sr.Recognizer() form = await request.form() audio_file = await form["file"].read() audio_data = AudioSegment.from_file(audio_file) # Convert AudioSegment to raw data raw_data = audio_data.raw_data # Create AudioData object audio_data = sr.AudioData(raw_data, audio_data.frame_rate, audio_data.sample_width) whipser_language = form.getvalue('language', None) whipser_model = form.getvalue('model', 'tiny') # Use the model from the form data if it exists, otherwise default to tiny transcription = {"text": ""} try: transcription["text"] = r.recognize_whisper(audio_data, language=whipser_language, model=whipser_model) except sr.UnknownValueError: print("Whisper could not understand audio") transcription["text"] = "Whisper could not understand audio UnknownValueError" except sr.RequestError as e: print("Could not request results from Whisper", e) transcription["text"] = "Whisper could not understand audio RequestError" return JSONResponse(content=transcription) @app.post('/v1/images/generations', dependencies=check_key) async def handle_image_generation(request: Request): if not os.environ.get('SD_WEBUI_URL', params.get('sd_webui_url', '')): raise ServiceUnavailableError("Stable Diffusion not available. SD_WEBUI_URL not set.") body = await request.json() prompt = body['prompt'] size = body.get('size', '1024x1024') response_format = body.get('response_format', 'url') # or b64_json n = body.get('n', 1) # ignore the batch limits of max 10 response = await OAIimages.generations(prompt=prompt, size=size, response_format=response_format, n=n) return JSONResponse(response) @app.post("/v1/embeddings", response_model=EmbeddingsResponse, dependencies=check_key) async def handle_embeddings(request: Request, request_data: EmbeddingsRequest): input = request_data.input if not input: raise HTTPException(status_code=400, detail="Missing required argument input") if type(input) is str: input = [input] response = OAIembeddings.embeddings(input, request_data.encoding_format) return JSONResponse(response) @app.post("/v1/moderations", dependencies=check_key) async def handle_moderations(request: Request): body = await request.json() input = body["input"] if not input: raise HTTPException(status_code=400, detail="Missing required argument input") response = OAImoderations.moderations(input) return JSONResponse(response) @app.post("/v1/internal/encode", response_model=EncodeResponse, dependencies=check_key) async def handle_token_encode(request_data: EncodeRequest): response = token_encode(request_data.text) return JSONResponse(response) @app.post("/v1/internal/decode", response_model=DecodeResponse, dependencies=check_key) async def handle_token_decode(request_data: DecodeRequest): response = token_decode(request_data.tokens) return JSONResponse(response) @app.post("/v1/internal/token-count", response_model=TokenCountResponse, dependencies=check_key) async def handle_token_count(request_data: EncodeRequest): response = token_count(request_data.text) return JSONResponse(response) @app.post("/v1/internal/logits", response_model=LogitsResponse, dependencies=check_key) async def handle_logits(request_data: LogitsRequest): ''' Given a prompt, returns the top 50 most likely logits as a dict. 
The keys are the tokens, and the values are the probabilities. ''' response = OAIlogits._get_next_logits(to_dict(request_data)) return JSONResponse(response) @app.post("/v1/internal/stop-generation", dependencies=check_key) async def handle_stop_generation(request: Request): stop_everything_event() return JSONResponse(content="OK") @app.get("/v1/internal/model/info", response_model=ModelInfoResponse, dependencies=check_key) async def handle_model_info(): payload = OAImodels.get_current_model_info() return JSONResponse(content=payload)
params = { 'embedding_device': 'cpu', 'embedding_model': 'sentence-transformers/all-mpnet-base-v2', 'sd_webui_url': '', 'debug': 0 } streaming_semaphore = asyncio.Semaphore(1) def verify_api_key(authorization: str = Header(None)) -> None: expected_api_key = shared.args.api_key if expected_api_key and (authorization is None or authorization != f"Bearer {expected_api_key}"): raise HTTPException(status_code=401, detail="Unauthorized") def verify_admin_key(authorization: str = Header(None)) -> None: expected_api_key = shared.args.admin_key if expected_api_key and (authorization is None or authorization != f"Bearer {expected_api_key}"): raise HTTPException(status_code=401, detail="Unauthorized") app = FastAPI() check_key = [Depends(verify_api_key)] check_admin_key = [Depends(verify_admin_key)] # Configure CORS settings to allow all origins, methods, and headers app.add_middleware( CORSMiddleware, allow_origins=["*"], allow_credentials=True, allow_methods=["*"], allow_headers=["*"] ) @app.options("/", dependencies=check_key) async def options_route(): return JSONResponse(content="OK") @app.post('/v1/completions', response_model=CompletionResponse, dependencies=check_key) async def openai_completions(request: Request, request_data: CompletionRequest): path = request.url.path is_legacy = "/generate" in path if request_data.stream: async def generator(): async with streaming_semaphore: response = OAIcompletions.stream_completions(to_dict(request_data), is_legacy=is_legacy) for resp in response: disconnected = await request.is_disconnected() if disconnected: break yield {"data": json.dumps(resp)} return EventSourceResponse(generator()) # SSE streaming else: response = OAIcompletions.completions(to_dict(request_data), is_legacy=is_legacy) return JSONResponse(response) @app.post('/v1/chat/completions', response_model=ChatCompletionResponse, dependencies=check_key) async def openai_chat_completions(request: Request, request_data: ChatCompletionRequest): path = request.url.path is_legacy = "/generate" in path if request_data.stream: async def generator(): async with streaming_semaphore: response = OAIcompletions.stream_chat_completions(to_dict(request_data), is_legacy=is_legacy) for resp in response: disconnected = await request.is_disconnected() if disconnected: break yield {"data": json.dumps(resp)} return EventSourceResponse(generator()) # SSE streaming else: response = OAIcompletions.chat_completions(to_dict(request_data), is_legacy=is_legacy) return JSONResponse(response) @app.get("/v1/models", dependencies=check_key) @app.get("/v1/models/{model}", dependencies=check_key) async def handle_models(request: Request): path = request.url.path is_list = request.url.path.split('?')[0].split('#')[0] == '/v1/models' if is_list: response = OAImodels.list_dummy_models() else: model_name = path[len('/v1/models/'):] response = OAImodels.model_info_dict(model_name) return JSONResponse(response) @app.get('/v1/billing/usage', dependencies=check_key) def handle_billing_usage(): ''' Ex. 
/v1/dashboard/billing/usage?start_date=2023-05-01&end_date=2023-05-31 ''' return JSONResponse(content={"total_usage": 0}) @app.post('/v1/audio/transcriptions', dependencies=check_key) async def handle_audio_transcription(request: Request): r = sr.Recognizer() form = await request.form() audio_file = await form["file"].read() audio_data = AudioSegment.from_file(audio_file) # Convert AudioSegment to raw data raw_data = audio_data.raw_data # Create AudioData object audio_data = sr.AudioData(raw_data, audio_data.frame_rate, audio_data.sample_width) whipser_language = form.getvalue('language', None) whipser_model = form.getvalue('model', 'tiny') # Use the model from the form data if it exists, otherwise default to tiny transcription = {"text": ""} try: transcription["text"] = r.recognize_whisper(audio_data, language=whipser_language, model=whipser_model) except sr.UnknownValueError: print("Whisper could not understand audio") transcription["text"] = "Whisper could not understand audio UnknownValueError" except sr.RequestError as e: print("Could not request results from Whisper", e) transcription["text"] = "Whisper could not understand audio RequestError" return JSONResponse(content=transcription) @app.post('/v1/images/generations', dependencies=check_key) async def handle_image_generation(request: Request): if not os.environ.get('SD_WEBUI_URL', params.get('sd_webui_url', '')): raise ServiceUnavailableError("Stable Diffusion not available. SD_WEBUI_URL not set.") body = await request.json() prompt = body['prompt'] size = body.get('size', '1024x1024') response_format = body.get('response_format', 'url') # or b64_json n = body.get('n', 1) # ignore the batch limits of max 10 response = await OAIimages.generations(prompt=prompt, size=size, response_format=response_format, n=n) return JSONResponse(response) @app.post("/v1/embeddings", response_model=EmbeddingsResponse, dependencies=check_key) async def handle_embeddings(request: Request, request_data: EmbeddingsRequest): input = request_data.input if not input: raise HTTPException(status_code=400, detail="Missing required argument input") if type(input) is str: input = [input] response = OAIembeddings.embeddings(input, request_data.encoding_format) return JSONResponse(response) @app.post("/v1/moderations", dependencies=check_key) async def handle_moderations(request: Request): body = await request.json() input = body["input"] if not input: raise HTTPException(status_code=400, detail="Missing required argument input") response = OAImoderations.moderations(input) return JSONResponse(response) @app.post("/v1/internal/encode", response_model=EncodeResponse, dependencies=check_key) async def handle_token_encode(request_data: EncodeRequest): response = token_encode(request_data.text) return JSONResponse(response) @app.post("/v1/internal/decode", response_model=DecodeResponse, dependencies=check_key) async def handle_token_decode(request_data: DecodeRequest): response = token_decode(request_data.tokens) return JSONResponse(response) @app.post("/v1/internal/token-count", response_model=TokenCountResponse, dependencies=check_key) async def handle_token_count(request_data: EncodeRequest): response = token_count(request_data.text) return JSONResponse(response) @app.post("/v1/internal/logits", response_model=LogitsResponse, dependencies=check_key) async def handle_logits(request_data: LogitsRequest): ''' Given a prompt, returns the top 50 most likely logits as a dict. The keys are the tokens, and the values are the probabilities. 
''' response = OAIlogits._get_next_logits(to_dict(request_data)) return JSONResponse(response) @app.post("/v1/internal/stop-generation", dependencies=check_key) async def handle_stop_generation(request: Request): stop_everything_event() return JSONResponse(content="OK") @app.get("/v1/internal/model/info", response_model=ModelInfoResponse, dependencies=check_key) async def handle_model_info(): payload = OAImodels.get_current_model_info() return JSONResponse(content=payload)
@app.get("/v1/internal/model/list", response_model=ModelListResponse, dependencies=check_admin_key)
16
2023-12-20 14:13:38+00:00
4k
foocker/Bert-VITS2-Faster
modules.py
[ { "identifier": "init_weights", "path": "commons.py", "snippet": "def init_weights(m, mean=0.0, std=0.01):\n classname = m.__class__.__name__\n if classname.find(\"Conv\") != -1:\n m.weight.data.normal_(mean, std)" }, { "identifier": "get_padding", "path": "commons.py", "snippet": "def get_padding(kernel_size, dilation=1):\n return int((kernel_size * dilation - dilation) / 2)" }, { "identifier": "piecewise_rational_quadratic_transform", "path": "transforms.py", "snippet": "def piecewise_rational_quadratic_transform(\n inputs,\n unnormalized_widths,\n unnormalized_heights,\n unnormalized_derivatives,\n inverse=False,\n tails=None,\n tail_bound=1.0,\n min_bin_width=DEFAULT_MIN_BIN_WIDTH,\n min_bin_height=DEFAULT_MIN_BIN_HEIGHT,\n min_derivative=DEFAULT_MIN_DERIVATIVE,\n):\n\n spline_fn = unconstrained_rational_quadratic_spline\n spline_kwargs = {\"tails\": tails, \"tail_bound\": tail_bound}\n\n outputs, logabsdet = spline_fn(\n inputs=inputs,\n unnormalized_widths=unnormalized_widths,\n unnormalized_heights=unnormalized_heights,\n unnormalized_derivatives=unnormalized_derivatives,\n inverse=inverse,\n min_bin_width=min_bin_width,\n min_bin_height=min_bin_height,\n min_derivative=min_derivative,\n **spline_kwargs\n )\n return outputs, logabsdet" }, { "identifier": "Encoder", "path": "attentions.py", "snippet": "class Encoder(nn.Module):\n def __init__(\n self,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size=1,\n p_dropout=0.0,\n window_size=4,\n isflow=True,\n **kwargs\n ):\n super().__init__()\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n \n self.window_size = window_size\n\n self.cond_layer_idx = self.n_layers\n \n self.gin_channels = kwargs[\"gin_channels\"] \n self.spk_emb_linear = nn.Linear(self.gin_channels, self.hidden_channels)\n self.cond_layer_idx = (\n kwargs[\"cond_layer_idx\"] if \"cond_layer_idx\" in kwargs else 2\n )\n \n self.drop = nn.Dropout(p_dropout)\n self.attn_layers = nn.ModuleList()\n self.norm_layers_1 = nn.ModuleList()\n self.ffn_layers = nn.ModuleList()\n self.norm_layers_2 = nn.ModuleList()\n for i in range(self.n_layers):\n self.attn_layers.append(\n MultiHeadAttention(\n hidden_channels,\n hidden_channels,\n n_heads,\n p_dropout=p_dropout,\n window_size=window_size,\n )\n )\n self.norm_layers_1.append(LayerNorm(hidden_channels))\n self.ffn_layers.append(\n FFN(\n hidden_channels,\n hidden_channels,\n filter_channels,\n kernel_size,\n p_dropout=p_dropout,\n )\n )\n self.norm_layers_2.append(LayerNorm(hidden_channels))\n\n def forward(self, x, x_mask, g=None):\n attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)\n x = x * x_mask\n for i in range(self.n_layers):\n if i == self.cond_layer_idx and g is not None:\n g = self.spk_emb_linear(g.transpose(1, 2))\n g = g.transpose(1, 2)\n x = x + g\n x = x * x_mask\n y = self.attn_layers[i](x, x, attn_mask)\n y = self.drop(y)\n x = self.norm_layers_1[i](x + y)\n\n y = self.ffn_layers[i](x, x_mask)\n y = self.drop(y)\n x = self.norm_layers_2[i](x + y)\n x = x * x_mask\n return x" } ]
import math
import torch
import commons
from torch import nn
from torch.nn import functional as F
from torch.nn import Conv1d
from torch.nn.utils import weight_norm, remove_weight_norm
from commons import init_weights, get_padding
from transforms import piecewise_rational_quadratic_transform
from attentions import Encoder
2,653
) self.convs_1x1.append(nn.Conv1d(channels, channels, 1)) self.norms_1.append(LayerNorm(channels)) self.norms_2.append(LayerNorm(channels)) def forward(self, x, x_mask, g=None): if g is not None: x = x + g for i in range(self.n_layers): y = self.convs_sep[i](x * x_mask) y = self.norms_1[i](y) y = F.gelu(y) y = self.convs_1x1[i](y) y = self.norms_2[i](y) y = F.gelu(y) y = self.drop(y) x = x + y return x * x_mask class WN(torch.nn.Module): def __init__( self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0, ): super(WN, self).__init__() assert kernel_size % 2 == 1 self.hidden_channels = hidden_channels self.kernel_size = (kernel_size,) self.dilation_rate = dilation_rate self.n_layers = n_layers self.gin_channels = gin_channels self.p_dropout = p_dropout self.in_layers = torch.nn.ModuleList() self.res_skip_layers = torch.nn.ModuleList() self.drop = nn.Dropout(p_dropout) if gin_channels != 0: cond_layer = torch.nn.Conv1d( gin_channels, 2 * hidden_channels * n_layers, 1 ) self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight") for i in range(n_layers): dilation = dilation_rate**i padding = int((kernel_size * dilation - dilation) / 2) in_layer = torch.nn.Conv1d( hidden_channels, 2 * hidden_channels, kernel_size, dilation=dilation, padding=padding, ) in_layer = torch.nn.utils.weight_norm(in_layer, name="weight") self.in_layers.append(in_layer) # last one is not necessary if i < n_layers - 1: res_skip_channels = 2 * hidden_channels else: res_skip_channels = hidden_channels res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1) res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight") self.res_skip_layers.append(res_skip_layer) def forward(self, x, x_mask, g=None, **kwargs): output = torch.zeros_like(x) n_channels_tensor = torch.IntTensor([self.hidden_channels]) if g is not None: g = self.cond_layer(g) for i in range(self.n_layers): x_in = self.in_layers[i](x) if g is not None: cond_offset = i * 2 * self.hidden_channels g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :] else: g_l = torch.zeros_like(x_in) acts = commons.fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor) acts = self.drop(acts) res_skip_acts = self.res_skip_layers[i](acts) if i < self.n_layers - 1: res_acts = res_skip_acts[:, : self.hidden_channels, :] x = (x + res_acts) * x_mask output = output + res_skip_acts[:, self.hidden_channels :, :] else: output = output + res_skip_acts return output * x_mask def remove_weight_norm(self): if self.gin_channels != 0: torch.nn.utils.remove_weight_norm(self.cond_layer) for l in self.in_layers: torch.nn.utils.remove_weight_norm(l) for l in self.res_skip_layers: torch.nn.utils.remove_weight_norm(l) class ResBlock1(torch.nn.Module): def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): super(ResBlock1, self).__init__() self.convs1 = nn.ModuleList( [ weight_norm( Conv1d( channels, channels, kernel_size, 1, dilation=dilation[0],
LRELU_SLOPE = 0.1 class LayerNorm(nn.Module): def __init__(self, channels, eps=1e-5): super().__init__() self.channels = channels self.eps = eps self.gamma = nn.Parameter(torch.ones(channels)) self.beta = nn.Parameter(torch.zeros(channels)) def forward(self, x): x = x.transpose(1, -1) x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps) return x.transpose(1, -1) class ConvReluNorm(nn.Module): def __init__( self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout, ): super().__init__() self.in_channels = in_channels self.hidden_channels = hidden_channels self.out_channels = out_channels self.kernel_size = kernel_size self.n_layers = n_layers self.p_dropout = p_dropout assert n_layers > 1, "Number of layers should be larger than 0." self.conv_layers = nn.ModuleList() self.norm_layers = nn.ModuleList() self.conv_layers.append( nn.Conv1d( in_channels, hidden_channels, kernel_size, padding=kernel_size // 2 ) ) self.norm_layers.append(LayerNorm(hidden_channels)) self.relu_drop = nn.Sequential(nn.ReLU(), nn.Dropout(p_dropout)) for _ in range(n_layers - 1): self.conv_layers.append( nn.Conv1d( hidden_channels, hidden_channels, kernel_size, padding=kernel_size // 2, ) ) self.norm_layers.append(LayerNorm(hidden_channels)) self.proj = nn.Conv1d(hidden_channels, out_channels, 1) self.proj.weight.data.zero_() self.proj.bias.data.zero_() def forward(self, x, x_mask): x_org = x for i in range(self.n_layers): x = self.conv_layers[i](x * x_mask) x = self.norm_layers[i](x) x = self.relu_drop(x) x = x_org + self.proj(x) return x * x_mask class DDSConv(nn.Module): """ Dialted and Depth-Separable Convolution """ def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0): super().__init__() self.channels = channels self.kernel_size = kernel_size self.n_layers = n_layers self.p_dropout = p_dropout self.drop = nn.Dropout(p_dropout) self.convs_sep = nn.ModuleList() self.convs_1x1 = nn.ModuleList() self.norms_1 = nn.ModuleList() self.norms_2 = nn.ModuleList() for i in range(n_layers): dilation = kernel_size**i padding = (kernel_size * dilation - dilation) // 2 self.convs_sep.append( nn.Conv1d( channels, channels, kernel_size, groups=channels, dilation=dilation, padding=padding, ) ) self.convs_1x1.append(nn.Conv1d(channels, channels, 1)) self.norms_1.append(LayerNorm(channels)) self.norms_2.append(LayerNorm(channels)) def forward(self, x, x_mask, g=None): if g is not None: x = x + g for i in range(self.n_layers): y = self.convs_sep[i](x * x_mask) y = self.norms_1[i](y) y = F.gelu(y) y = self.convs_1x1[i](y) y = self.norms_2[i](y) y = F.gelu(y) y = self.drop(y) x = x + y return x * x_mask class WN(torch.nn.Module): def __init__( self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0, ): super(WN, self).__init__() assert kernel_size % 2 == 1 self.hidden_channels = hidden_channels self.kernel_size = (kernel_size,) self.dilation_rate = dilation_rate self.n_layers = n_layers self.gin_channels = gin_channels self.p_dropout = p_dropout self.in_layers = torch.nn.ModuleList() self.res_skip_layers = torch.nn.ModuleList() self.drop = nn.Dropout(p_dropout) if gin_channels != 0: cond_layer = torch.nn.Conv1d( gin_channels, 2 * hidden_channels * n_layers, 1 ) self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight") for i in range(n_layers): dilation = dilation_rate**i padding = int((kernel_size * dilation - dilation) / 2) in_layer = torch.nn.Conv1d( hidden_channels, 2 * hidden_channels, kernel_size, dilation=dilation, 
padding=padding, ) in_layer = torch.nn.utils.weight_norm(in_layer, name="weight") self.in_layers.append(in_layer) # last one is not necessary if i < n_layers - 1: res_skip_channels = 2 * hidden_channels else: res_skip_channels = hidden_channels res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1) res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight") self.res_skip_layers.append(res_skip_layer) def forward(self, x, x_mask, g=None, **kwargs): output = torch.zeros_like(x) n_channels_tensor = torch.IntTensor([self.hidden_channels]) if g is not None: g = self.cond_layer(g) for i in range(self.n_layers): x_in = self.in_layers[i](x) if g is not None: cond_offset = i * 2 * self.hidden_channels g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :] else: g_l = torch.zeros_like(x_in) acts = commons.fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor) acts = self.drop(acts) res_skip_acts = self.res_skip_layers[i](acts) if i < self.n_layers - 1: res_acts = res_skip_acts[:, : self.hidden_channels, :] x = (x + res_acts) * x_mask output = output + res_skip_acts[:, self.hidden_channels :, :] else: output = output + res_skip_acts return output * x_mask def remove_weight_norm(self): if self.gin_channels != 0: torch.nn.utils.remove_weight_norm(self.cond_layer) for l in self.in_layers: torch.nn.utils.remove_weight_norm(l) for l in self.res_skip_layers: torch.nn.utils.remove_weight_norm(l) class ResBlock1(torch.nn.Module): def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): super(ResBlock1, self).__init__() self.convs1 = nn.ModuleList( [ weight_norm( Conv1d( channels, channels, kernel_size, 1, dilation=dilation[0],
padding=get_padding(kernel_size, dilation[0]),
1
2023-12-18 09:53:41+00:00
4k
sinoyou/nelf-pro
nerfstudio/configs/base_config.py
[ { "identifier": "to_immutable_dict", "path": "nerfstudio/configs/config_utils.py", "snippet": "def to_immutable_dict(d: Dict[str, Any]):\n \"\"\"Method to convert mutable dict to default factory dict\n\n Args:\n d: dictionary to convert into default factory dict for dataclass\n \"\"\"\n return field(default_factory=lambda: dict(d))" }, { "identifier": "writer", "path": "nerfstudio/utils/writer.py", "snippet": "CONSOLE = Console(width=120)\nEVENT_WRITERS = []\nEVENT_STORAGE = []\nGLOBAL_BUFFER = {}\n ITER_TRAIN_TIME = \"Train Iter (time)\"\n TOTAL_TRAIN_TIME = \"Train Total (time)\"\n ITER_VIS_TIME = \"Viewer Rendering (time)\"\n ETA = \"ETA (time)\"\n TRAIN_RAYS_PER_SEC = \"Train Rays / Sec\"\n TEST_RAYS_PER_SEC = \"Test Rays / Sec\"\n VIS_RAYS_PER_SEC = \"Vis Rays / Sec\"\n CURR_TEST_PSNR = \"Test PSNR\"\n IMAGE = \"write_image\"\n PLOTLY = \"write_plotly\"\n SCALAR = \"write_scalar\"\n DICT = \"write_scalar_dict\"\n CONFIG = \"write_config\"\nclass EventName(enum.Enum):\nclass EventType(enum.Enum):\nclass Writer:\nclass TimeWriter:\nclass WandbWriter(Writer):\nclass TensorboardWriter(Writer):\nclass LocalWriter:\ndef put_image(name, image: TensorType[\"H\", \"W\", \"C\"], step: int):\ndef put_plotly(name: str, figure: Any, step: int = 0):\ndef put_scalar(name: str, scalar: Any, step: int):\ndef put_dict(name: str, scalar_dict: Dict[str, Any], step: int):\ndef put_config(name: str, config_dict: Dict[str, Any], step: int):\ndef put_time(name: str, duration: float, step: int, avg_over_steps: bool = True, update_eta: bool = False):\ndef write_out_storage():\ndef setup_local_writer(config: cfg.LoggingConfig, max_iter: int, banner_messages: Optional[List[str]] = None) -> None:\ndef setup_event_writer(config: cfg.Config, log_dir: Path) -> None:\n def write_image(self, name: str, image: TensorType[\"H\", \"W\", \"C\"], step: int) -> None:\n def write_plotly(self, name: str, figure: Any, step: int) -> None:\n def write_scalar(self, name: str, scalar: Union[float, torch.Tensor], step: int) -> None:\n def write_scalar_dict(self, name: str, scalar_dict: Dict[str, Any], step: int) -> None:\n def __init__(self, writer, name, step=None, write=True):\n def __enter__(self):\n def __exit__(self, *args):\n def __init__(self, log_dir: Path, experiment_name: str):\n def write_image(self, name: str, image: TensorType[\"H\", \"W\", \"C\"], step: int) -> None:\n def write_plotly(self, name: str, figure: Any, step: int) -> None:\n def write_scalar(self, name: str, scalar: Union[float, torch.Tensor], step: int) -> None:\n def write_config(self, name: str, config_dict: Dict[str, Any], step: int):\n def __init__(self, log_dir: Path):\n def write_image(self, name: str, image: TensorType[\"H\", \"W\", \"C\"], step: int) -> None:\n def write_plotly(self, name: str, figure: Any, step: int) -> None:\n def write_scalar(self, name: str, scalar: Union[float, torch.Tensor], step: int) -> None:\n def write_config(self, name: str, config_dict: Dict[str, Any], step: int): # pylint: disable=unused-argument\ndef _cursorup(x: int):\ndef _format_time(seconds):\n def __init__(self, config: cfg.LocalWriterConfig, banner_messages: Optional[List[str]] = None):\n def write_stats_log(self, step: int) -> None:\n def write_config(self, name: str, config_dict: Dict[str, Any], step: int):\n def _consolidate_events(self):\n def _update_header(self, latest_map, new_key):\n def _print_stats(self, latest_map, padding=\" \"):" }, { "identifier": "OptimizerConfig", "path": "nerfstudio/engine/optimizers.py", "snippet": "class 
OptimizerConfig(base_config.PrintableConfig):\n \"\"\"Basic optimizer config with RAdam\"\"\"\n\n _target: Type = AdamTensorMask\n lr: float = 0.0005\n eps: float = 1e-08\n gradmask: bool = False\n\n # TODO: somehow make this more generic. i dont like the idea of overriding the setup function\n # but also not sure how to go about passing things into predefined torch objects.\n def setup(self, params) -> Any:\n \"\"\"Returns the instantiated object using the config.\"\"\"\n kwargs = vars(self).copy()\n kwargs.pop(\"_target\")\n return self._target(params, **kwargs)" }, { "identifier": "SchedulerConfig", "path": "nerfstudio/engine/schedulers.py", "snippet": "class SchedulerConfig(InstantiateConfig):\n \"\"\"Basic scheduler config with self-defined exponential decay schedule\"\"\"\n\n _target: Type = field(default_factory=lambda: ExponentialDecaySchedule)\n lr_final: float = 0.000005\n max_steps: int = 1000000\n\n # TODO: somehow make this more generic. i dont like the idea of overriding the setup function\n # but also not sure how to go about passing things into predefined torch objects.\n def setup(self, optimizer=None, lr_init=None, **kwargs) -> Any:\n \"\"\"Returns the instantiated object using the config.\"\"\"\n return self._target(optimizer, lr_init, self.lr_final, self.max_steps)" }, { "identifier": "VanillaPipelineConfig", "path": "nerfstudio/pipelines/base_pipeline.py", "snippet": "class VanillaPipelineConfig(cfg.InstantiateConfig):\n \"\"\"Configuration for pipeline instantiation\"\"\"\n\n _target: Type = field(default_factory=lambda: VanillaPipeline)\n \"\"\"target class to instantiate\"\"\"\n datamanager: VanillaDataManagerConfig = VanillaDataManagerConfig()\n \"\"\"specifies the datamanager config\"\"\"\n model: ModelConfig = ModelConfig()\n \"\"\"specifies the model config\"\"\"" } ]
import warnings
import yaml
from dataclasses import dataclass
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Type
from rich.console import Console
from typing_extensions import Literal

from nerfstudio.configs.config_utils import to_immutable_dict
from nerfstudio.utils import writer
from nerfstudio.engine.optimizers import OptimizerConfig
from nerfstudio.engine.schedulers import SchedulerConfig
from nerfstudio.pipelines.base_pipeline import VanillaPipelineConfig
3,250
"""maximum number of rows to print before wrapping. if 0, will print everything.""" def setup(self, banner_messages: Optional[List[str]] = None, **kwargs) -> Any: """Instantiate local writer Args: banner_messages: List of strings that always print at the bottom of screen. """ return self._target(self, banner_messages=banner_messages, **kwargs) @dataclass class LoggingConfig(PrintableConfig): """Configuration of loggers and profilers""" relative_log_dir: Path = Path("./") """relative path to save all logged events""" steps_per_log: int = 10 """number of steps between logging stats""" max_buffer_size: int = 20 """maximum history size to keep for computing running averages of stats. e.g. if 20, averages will be computed over past 20 occurances.""" local_writer: LocalWriterConfig = LocalWriterConfig(enable=True) """if provided, will print stats locally. if None, will disable printing""" enable_profiler: bool = True """whether to enable profiling code; prints speed of functions at the end of a program. profiler logs run times of functions and prints at end of training""" # Trainer related configs @dataclass class TrainerConfig(PrintableConfig): """Configuration for training regimen""" steps_per_save: int = 1000 """Number of steps between saves.""" steps_per_eval_batch: int = 500 """Number of steps between randomly sampled batches of rays.""" steps_per_eval_image: int = 500 """Number of steps between single eval images.""" steps_per_eval_all_images: int = 25000 """Number of steps between eval all images.""" max_num_iterations: int = 1000000 """Maximum number of iterations to run.""" mixed_precision: bool = False """Whether or not to use mixed precision for training.""" relative_model_dir: Path = Path("models/") """Relative path to save all checkpoints.""" save_only_latest_checkpoint: bool = True """Whether to only save the latest checkpoint or all checkpoints.""" # optional parameters if we want to resume training load_dir: Optional[Path] = None """Optionally specify a pre-trained model directory to load from.""" load_step: Optional[int] = None """Optionally specify model step to load from; if none, will find most recent model in load_dir.""" load_config: Optional[Path] = None """Optionally specify model config to load from; if none, will use the default config?""" load_scheduler: bool = True """Whether to load the lr scheduler state_dict if exists""" visualize_scene: bool = False """Whether to visualize the scene by plotly on the wandb.""" visualize_seperate_eval_images: bool = False """Whether to visualize the eval images seperately. (cloud storage is huge)""" # Viewer related configs @dataclass class ViewerConfig(PrintableConfig): """Configuration for viewer instantiation""" relative_log_filename: str = "viewer_log_filename.txt" """Filename to use for the log file.""" start_train: bool = False """whether to immediately start training upon loading viewer if False, will just visualize dataset but you can toggle training in viewer""" zmq_port: Optional[int] = None """The zmq port to connect to for communication. If None, find an available port.""" launch_bridge_server: bool = True """whether or not to launch the bridge server""" websocket_port: Optional[int] = 7007 """the default websocket port to connect to""" ip_address: str = "127.0.0.1" """the ip address where the bridge server is running""" num_rays_per_chunk: int = 32768 """number of rays per chunk to render with viewer""" max_num_display_images: int = 512 """Maximum number of training images to display in the viewer, to avoid lag. 
This does not change which images are actually used in training/evaluation. If -1, display all.""" quit_on_train_completion: bool = False """Whether to kill the training job when it has completed. Note this will stop rendering in the viewer.""" @dataclass class Config(PrintableConfig): """Full config contents""" output_dir: Path = Path("outputs") """relative or absolute output directory to save all checkpoints and logging""" method_name: Optional[str] = None """Method name. Required to set in python or via cli""" experiment_name: Optional[str] = None """Experiment name. If None, will automatically be set to dataset name""" timestamp: str = "{timestamp}" """Experiment timestamp.""" machine: MachineConfig = MachineConfig() """Machine configuration""" logging: LoggingConfig = LoggingConfig() """Logging configuration""" viewer: ViewerConfig = ViewerConfig() """Viewer configuration""" trainer: TrainerConfig = TrainerConfig() """Trainer configuration""" pipeline: VanillaPipelineConfig = VanillaPipelineConfig() """Pipeline configuration""" optimizers: Dict[str, Any] = to_immutable_dict( { "fields": { "optimizer": OptimizerConfig(),
# Copyright 2022 The Nerfstudio Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Base Configs""" # pylint: disable=wrong-import-position from __future__ import annotations # model instances warnings.filterwarnings("ignore", module="torchvision") CONSOLE = Console(width=120) # Pretty printing class class PrintableConfig: # pylint: disable=too-few-public-methods """Printable Config defining str function""" def __str__(self): lines = [self.__class__.__name__ + ":"] for key, val in vars(self).items(): if isinstance(val, Tuple): flattened_val = "[" for item in val: flattened_val += str(item) + "\n" flattened_val = flattened_val.rstrip("\n") val = flattened_val + "]" lines += f"{key}: {str(val)}".split("\n") return "\n ".join(lines) # Base instantiate configs @dataclass class InstantiateConfig(PrintableConfig): # pylint: disable=too-few-public-methods """Config class for instantiating an the class specified in the _target attribute.""" _target: Type def setup(self, **kwargs) -> Any: """Returns the instantiated object using the config.""" return self._target(self, **kwargs) # Machine related configs @dataclass class MachineConfig(PrintableConfig): """Configuration of machine setup""" seed: int = 42 """random seed initilization""" @dataclass class LocalWriterConfig(InstantiateConfig): """Local Writer config""" _target: Type = writer.LocalWriter """target class to instantiate""" enable: bool = False """if True enables local logging, else disables""" stats_to_track: Tuple[writer.EventName, ...] = ( writer.EventName.ITER_TRAIN_TIME, writer.EventName.TRAIN_RAYS_PER_SEC, writer.EventName.CURR_TEST_PSNR, writer.EventName.VIS_RAYS_PER_SEC, writer.EventName.TEST_RAYS_PER_SEC, ) """specifies which stats will be logged/printed to terminal""" max_log_size: int = 10 """maximum number of rows to print before wrapping. if 0, will print everything.""" def setup(self, banner_messages: Optional[List[str]] = None, **kwargs) -> Any: """Instantiate local writer Args: banner_messages: List of strings that always print at the bottom of screen. """ return self._target(self, banner_messages=banner_messages, **kwargs) @dataclass class LoggingConfig(PrintableConfig): """Configuration of loggers and profilers""" relative_log_dir: Path = Path("./") """relative path to save all logged events""" steps_per_log: int = 10 """number of steps between logging stats""" max_buffer_size: int = 20 """maximum history size to keep for computing running averages of stats. e.g. if 20, averages will be computed over past 20 occurances.""" local_writer: LocalWriterConfig = LocalWriterConfig(enable=True) """if provided, will print stats locally. if None, will disable printing""" enable_profiler: bool = True """whether to enable profiling code; prints speed of functions at the end of a program. 
profiler logs run times of functions and prints at end of training""" # Trainer related configs @dataclass class TrainerConfig(PrintableConfig): """Configuration for training regimen""" steps_per_save: int = 1000 """Number of steps between saves.""" steps_per_eval_batch: int = 500 """Number of steps between randomly sampled batches of rays.""" steps_per_eval_image: int = 500 """Number of steps between single eval images.""" steps_per_eval_all_images: int = 25000 """Number of steps between eval all images.""" max_num_iterations: int = 1000000 """Maximum number of iterations to run.""" mixed_precision: bool = False """Whether or not to use mixed precision for training.""" relative_model_dir: Path = Path("models/") """Relative path to save all checkpoints.""" save_only_latest_checkpoint: bool = True """Whether to only save the latest checkpoint or all checkpoints.""" # optional parameters if we want to resume training load_dir: Optional[Path] = None """Optionally specify a pre-trained model directory to load from.""" load_step: Optional[int] = None """Optionally specify model step to load from; if none, will find most recent model in load_dir.""" load_config: Optional[Path] = None """Optionally specify model config to load from; if none, will use the default config?""" load_scheduler: bool = True """Whether to load the lr scheduler state_dict if exists""" visualize_scene: bool = False """Whether to visualize the scene by plotly on the wandb.""" visualize_seperate_eval_images: bool = False """Whether to visualize the eval images seperately. (cloud storage is huge)""" # Viewer related configs @dataclass class ViewerConfig(PrintableConfig): """Configuration for viewer instantiation""" relative_log_filename: str = "viewer_log_filename.txt" """Filename to use for the log file.""" start_train: bool = False """whether to immediately start training upon loading viewer if False, will just visualize dataset but you can toggle training in viewer""" zmq_port: Optional[int] = None """The zmq port to connect to for communication. If None, find an available port.""" launch_bridge_server: bool = True """whether or not to launch the bridge server""" websocket_port: Optional[int] = 7007 """the default websocket port to connect to""" ip_address: str = "127.0.0.1" """the ip address where the bridge server is running""" num_rays_per_chunk: int = 32768 """number of rays per chunk to render with viewer""" max_num_display_images: int = 512 """Maximum number of training images to display in the viewer, to avoid lag. This does not change which images are actually used in training/evaluation. If -1, display all.""" quit_on_train_completion: bool = False """Whether to kill the training job when it has completed. Note this will stop rendering in the viewer.""" @dataclass class Config(PrintableConfig): """Full config contents""" output_dir: Path = Path("outputs") """relative or absolute output directory to save all checkpoints and logging""" method_name: Optional[str] = None """Method name. Required to set in python or via cli""" experiment_name: Optional[str] = None """Experiment name. 
If None, will automatically be set to dataset name""" timestamp: str = "{timestamp}" """Experiment timestamp.""" machine: MachineConfig = MachineConfig() """Machine configuration""" logging: LoggingConfig = LoggingConfig() """Logging configuration""" viewer: ViewerConfig = ViewerConfig() """Viewer configuration""" trainer: TrainerConfig = TrainerConfig() """Trainer configuration""" pipeline: VanillaPipelineConfig = VanillaPipelineConfig() """Pipeline configuration""" optimizers: Dict[str, Any] = to_immutable_dict( { "fields": { "optimizer": OptimizerConfig(),
"scheduler": SchedulerConfig(),
3
2023-12-15 20:07:22+00:00
4k
wuc9521/rep-flow
app.py
[ { "identifier": "read_keywords_from_file", "path": "utils/loader.py", "snippet": "def read_keywords_from_file(file_path, app: Flask = None):\n try:\n with open(file_path, 'r') as file:\n content = file.read()\n keywords_list = [keyword.strip() for keyword in re.split(',|\\n', content) if keyword.strip()]\n app.logger.info(f\"Keywords loaded: {keywords_list}\")\n return keywords_list\n\n except FileNotFoundError:\n app.logger.info(f\"Error: File '{file_path}' not found.\")\n return []" }, { "identifier": "HELP", "path": "utils/hints.py", "snippet": "HELP = get_HELP_HINT()" }, { "identifier": "get_NUMBER_EMBD_HINT", "path": "utils/hints.py", "snippet": "def get_NUMBER_EMBD_HINT(id):\n return f\"\"\"\n <ul class=\"hint-font\" onclick='handleHintClick(event)' style=\"list-style-type: none;\">\n <li><span>Monitoring Screen...</span></li>\n <li><span>Test:</span><span class='u-like'> [{id}] </span><span>launching...</span></li> \n <li><span>Test:</span><span class='u-like'> [{id}] </span><span>launched...</span></li> \n </ul>\n \"\"\"" }, { "identifier": "get_CURRENT_STATE_HINT", "path": "utils/hints.py", "snippet": "def get_CURRENT_STATE_HINT(id):\n return \\\n f\"\"\"\n <ul class=\"hint-font\" onclick='handleHintClick(event)' style=\"list-style-type: none;\">\n <li><span>Monitoring Screen...</span></li>\n <li><span>Test:</span><span class='u-like'> [{id}] </span><span>ongoing...</span></li> \n </ul>\n \"\"\" if int(id) >= 0 else \\\n f\"\"\"\n <ul class=\"hint-font\" onclick='handleHintClick(event)' style=\"list-style-type: none;\">\n <li><span>Monitoring Screen...</span></li>\n <li><span>No test launched</span></li> \n </ul>\n \"\"\"" }, { "identifier": "get_NEXT_STEP_HINT", "path": "utils/hints.py", "snippet": "def get_NEXT_STEP_HINT(id):\n return \\\n f\"\"\"\n <ul class=\"hint-font\" onclick='handleHintClick(event)' style=\"list-style-type: none;\">\n <li><span>Monitoring Screen...</span></li>\n <li><span>Test:</span><span class='u-like'> [{id}] </span><span>ongoing...</span></li> \n </ul>\n \"\"\"" }, { "identifier": "extract_and_validate_test_number", "path": "utils/test.py", "snippet": "def extract_and_validate_test_number(query_text, app):\n \"\"\"\n refer to: https://regex101.com/r/x609CD/1\n \"\"\"\n match = re.match(r'\\/?test (\\d+)$', query_text)\n app.logger.info(f\"query_text: {query_text}\")\n if match:\n test_number = match.group(1)\n if test_number.isdigit():\n return test_number\n return None" }, { "identifier": "log_", "path": "utils/log.py", "snippet": "def log_(logger, level, message):\n cf = inspect.currentframe()\n caller_frame = cf.f_back\n caller_info = inspect.getframeinfo(caller_frame)\n log_message = f\"{caller_info.filename}:{caller_info.lineno} - {message}\"\n if level == 'info':\n logger.info(log_message)\n elif level == 'error':\n logger.error(log_message)\n elif level == 'warning':\n logger.warning(log_message)\n elif level == 'debug':\n logger.debug(log_message)\n else:\n raise ValueError(f\"Unsupported log level: {level}\")" }, { "identifier": "get_i", "path": "utils/file.py", "snippet": "def get_i(id, i):\n LIST_DIR = os.path.join(os.path.dirname(__file__), '../data/list')\n i = int(i)\n try:\n with open(os.path.join(LIST_DIR, str(id)+'.json'),'r') as f:\n data = json.load(f)\n if 0 <= i < len(data):\n return data[i]['guidance']+'.png', i==len(data)-1\n else:\n return f\"Index {i} is out of range.\"\n except Exception as e:\n return str(e)" }, { "identifier": "imgs", "path": "model/common.py", "snippet": "TEST_DIR = []\nDATA_DIR = 
os.path.join(os.path.dirname(__file__), '../data')\nLIST_DIR = os.path.join(DATA_DIR, 'list')" }, { "identifier": "image_process", "path": "model/process.py", "snippet": "def image_process(image_user_path, image_list, app=None):\n \"\"\"\n img_user_path: absolute path of user image\n img_list: list of guidance img\n \"\"\"\n print(TEST_DIR)\n image_user = io.imread(image_user_path)\n max_score = 0\n max_similar = 0\n for i in range(len(image_list)):\n if app: app.logger.info(f\"Calculating Similarity: image {i}\")\n score = classify_hist_with_split(image_user, image_list[i])\n if score > max_score:\n max_score = score\n max_similar = i\n if max_score < 0.7:\n return None\n\n return max_similar, max_score" } ]
import os import spacy import logging import pandas as pd from logging.handlers import RotatingFileHandler from flask import Flask, render_template, request, jsonify, send_from_directory from flask_cors import cross_origin from utils.loader import read_keywords_from_file from utils.hints import HELP, get_NUMBER_EMBD_HINT, get_CURRENT_STATE_HINT, get_NEXT_STEP_HINT from utils.test import extract_and_validate_test_number from utils.log import log_ from utils.file import get_i from model.common import imgs from model.process import image_process
2,125
DEFAULT_RESPONSE_FLAG = "*" NUMBER_EMBD_HINT = None CURRENT_BUG_ID = -1 # Load spaCy English model nlp = spacy.load("en_core_web_sm") app = Flask(__name__, template_folder='') # Configure LOG_DIR = os.path.join(app.root_path, 'log') DATA_DIR = os.path.join(app.root_path, 'data') MODEL_DIR = os.path.join(app.root_path, 'model') CORPUS_DIR = os.path.join(DATA_DIR, 'corpus') GUIDANCE_DIR = os.path.join(DATA_DIR, 'guidance') STATE_DIR = os.path.join(DATA_DIR, 'state') std = pd.read_csv(os.path.join(CORPUS_DIR, 'std.csv')) df = pd.merge( pd.read_csv(os.path.join(CORPUS_DIR, 'qa.csv')), std, on='ID', how='left' ) qa = dict(zip(df['Q'], df['A'])) at = dict(zip(std['A'], std['TYPE'])) ta = dict(zip(std['TYPE'], std['A'])) key_words = read_keywords_from_file( os.path.join(CORPUS_DIR, 'kw.txt'), app=app) if not os.path.exists(LOG_DIR): os.makedirs(LOG_DIR) log_file_path = os.path.join(LOG_DIR, f"app.log") formatter = logging.Formatter( "[%(asctime)s] [%(levelname)s] [%(module)s] - %(message)s") handler = RotatingFileHandler(log_file_path, maxBytes=10000, backupCount=1) handler.setFormatter(formatter) app.logger.addHandler(handler) app.logger.setLevel(logging.INFO) @app.route('/') def home(): return render_template('index.html'), 200 @app.route('/states/<filename>') def serve_image(filename): return send_from_directory(STATE_DIR, filename), 200 @app.route('/guidance/<filename>') def serve_guidance(filename): return send_from_directory(os.path.join(GUIDANCE_DIR, CURRENT_BUG_ID), filename), 200 @app.route('/ask', methods=['POST']) @cross_origin(supports_credentials=True) def ask(): try: data = request.get_json() query_text = data['query'] rgx_num = extract_and_validate_test_number(query_text, app) if rgx_num is not None and rgx_num != "": # "/test $BUG" global NUMBER_EMBD_HINT NUMBER_EMBD_HINT = get_NUMBER_EMBD_HINT(rgx_num) global CURRENT_BUG_ID CURRENT_BUG_ID = rgx_num return jsonify({ "type": "TEST", "answer": ta.get("TEST"), "img": None, "hint": NUMBER_EMBD_HINT }), 200 response = qa.get(DEFAULT_RESPONSE_FLAG) doc = nlp(query_text) nouns = [token.text for token in doc if token.pos_ == "NOUN"] question = DEFAULT_RESPONSE_FLAG for question_, answer in qa.items(): if doc.similarity(nlp(question_)) > doc.similarity(nlp(question)): response = answer question = question_ if response == qa.get(DEFAULT_RESPONSE_FLAG) or doc.similarity(nlp(question)) < 0.7: app.logger.warning( f"User query: \"{query_text}\" - No answer found") if set(key_words).intersection(set(nouns)): return jsonify({ "type": "SORRY", "answer": ta.get("SORRY") }), 200 else: return jsonify({ "type": at.get(qa.get(DEFAULT_RESPONSE_FLAG)), "answer": qa.get(DEFAULT_RESPONSE_FLAG) }), 200 app.logger.info(f"User query: \"{query_text}\" - Answer: {response}") app.logger.info("Current State: {}".format(monitor_current_state())) if at.get(response) == "HELP": return jsonify({ "type": at.get(response), "answer": response, "img": monitor_current_state(),
DEFAULT_RESPONSE_FLAG = "*" NUMBER_EMBD_HINT = None CURRENT_BUG_ID = -1 # Load spaCy English model nlp = spacy.load("en_core_web_sm") app = Flask(__name__, template_folder='') # Configure LOG_DIR = os.path.join(app.root_path, 'log') DATA_DIR = os.path.join(app.root_path, 'data') MODEL_DIR = os.path.join(app.root_path, 'model') CORPUS_DIR = os.path.join(DATA_DIR, 'corpus') GUIDANCE_DIR = os.path.join(DATA_DIR, 'guidance') STATE_DIR = os.path.join(DATA_DIR, 'state') std = pd.read_csv(os.path.join(CORPUS_DIR, 'std.csv')) df = pd.merge( pd.read_csv(os.path.join(CORPUS_DIR, 'qa.csv')), std, on='ID', how='left' ) qa = dict(zip(df['Q'], df['A'])) at = dict(zip(std['A'], std['TYPE'])) ta = dict(zip(std['TYPE'], std['A'])) key_words = read_keywords_from_file( os.path.join(CORPUS_DIR, 'kw.txt'), app=app) if not os.path.exists(LOG_DIR): os.makedirs(LOG_DIR) log_file_path = os.path.join(LOG_DIR, f"app.log") formatter = logging.Formatter( "[%(asctime)s] [%(levelname)s] [%(module)s] - %(message)s") handler = RotatingFileHandler(log_file_path, maxBytes=10000, backupCount=1) handler.setFormatter(formatter) app.logger.addHandler(handler) app.logger.setLevel(logging.INFO) @app.route('/') def home(): return render_template('index.html'), 200 @app.route('/states/<filename>') def serve_image(filename): return send_from_directory(STATE_DIR, filename), 200 @app.route('/guidance/<filename>') def serve_guidance(filename): return send_from_directory(os.path.join(GUIDANCE_DIR, CURRENT_BUG_ID), filename), 200 @app.route('/ask', methods=['POST']) @cross_origin(supports_credentials=True) def ask(): try: data = request.get_json() query_text = data['query'] rgx_num = extract_and_validate_test_number(query_text, app) if rgx_num is not None and rgx_num != "": # "/test $BUG" global NUMBER_EMBD_HINT NUMBER_EMBD_HINT = get_NUMBER_EMBD_HINT(rgx_num) global CURRENT_BUG_ID CURRENT_BUG_ID = rgx_num return jsonify({ "type": "TEST", "answer": ta.get("TEST"), "img": None, "hint": NUMBER_EMBD_HINT }), 200 response = qa.get(DEFAULT_RESPONSE_FLAG) doc = nlp(query_text) nouns = [token.text for token in doc if token.pos_ == "NOUN"] question = DEFAULT_RESPONSE_FLAG for question_, answer in qa.items(): if doc.similarity(nlp(question_)) > doc.similarity(nlp(question)): response = answer question = question_ if response == qa.get(DEFAULT_RESPONSE_FLAG) or doc.similarity(nlp(question)) < 0.7: app.logger.warning( f"User query: \"{query_text}\" - No answer found") if set(key_words).intersection(set(nouns)): return jsonify({ "type": "SORRY", "answer": ta.get("SORRY") }), 200 else: return jsonify({ "type": at.get(qa.get(DEFAULT_RESPONSE_FLAG)), "answer": qa.get(DEFAULT_RESPONSE_FLAG) }), 200 app.logger.info(f"User query: \"{query_text}\" - Answer: {response}") app.logger.info("Current State: {}".format(monitor_current_state())) if at.get(response) == "HELP": return jsonify({ "type": at.get(response), "answer": response, "img": monitor_current_state(),
"hint": HELP
1
2023-12-20 09:44:09+00:00
4k
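The ask() route in the record above picks its reply by ranking every stored question with spaCy's doc.similarity against the user query and falling back to the default answer (keyed by "*") when the best score stays under 0.7. A minimal, self-contained sketch of that retrieval pattern follows; the model name and the 0.7 threshold come from the record, while the helper name and the sample questions and answers are purely illustrative.

import spacy

nlp = spacy.load("en_core_web_sm")  # same model name the record loads

# Illustrative QA store; "*" plays the role of DEFAULT_RESPONSE_FLAG.
qa = {
    "*": "Sorry, I do not understand the question.",
    "how do I launch a test": "Send /test <id> to launch a test.",
    "where are guidance images served from": "From the /guidance/<filename> route.",
}

def answer(query: str, threshold: float = 0.7) -> str:
    doc = nlp(query)
    best_score, best_answer = 0.0, qa["*"]
    for question, reply in qa.items():
        if question == "*":
            continue
        score = doc.similarity(nlp(question))
        if score > best_score:
            best_score, best_answer = score, reply
    return best_answer if best_score >= threshold else qa["*"]

print(answer("how can I start a test?"))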
yash-srivastava19/verizon
class_utils.py
[ { "identifier": "VerizonIndex", "path": "classes.py", "snippet": "class VerizonIndex:\n version = None \n entries = []\n\n def __init__(self, version = 2, entries=None) -> None:\n if not entries:\n entries = list()\n \n self.version = version \n self.entries = entries" }, { "identifier": "VerizonIndexEntry", "path": "classes.py", "snippet": "class VerizonIndexEntry:\n def __init__(self, ctime=None, mtime=None, dev=None, ino=None, \n mode_type=None, mode_perms=None, uid=None, gid=None,\n fsize=None, sha=None, flag_assume_valid=None, flag_stage=None,\n name=None) -> None:\n \n self.ctime = ctime # the last time the file's metadata changed.\n self.mtime = mtime # the last time the file's data changed.\n self.dev = dev # the ID of the device containing this file.\n self.ino = ino # the file's inode number.\n self.mode_type = mode_type # the object type - b1000(regular), b1010(symlink), b1110(verlink)\n self.mode_perms = mode_perms # the object's permission(an integer)\n self.uid = uid # the user id of the owner.\n self.gid = gid # the group id of owner\n self.fsize = fsize # the size of this object(in bytes)\n self.sha = sha # the object's sha\n self.flag_assume_valid = flag_assume_valid\n self.flag_stage = flag_stage\n self.name = name # the name of the object(full path)" }, { "identifier": "VerizonCommit", "path": "classes.py", "snippet": "class VerizonCommit(VerizonObject):\n fmt = b'commit'\n\n def deserialize(self, data):\n self.kvlm = kvlm_parse(data)\n \n def serialize(self, repo):\n return kvlm_serialize(self.kvlm)\n \n def init(self):\n self.kvlm = dict()" }, { "identifier": "VerizonBlob", "path": "classes.py", "snippet": "class VerizonBlob(VerizonObject):\n fmt = b'blob'\n\n def serialize(self):\n return self.blobdata\n \n def deserialize(self, data):\n self.blobdata = data" }, { "identifier": "VerizonIgnore", "path": "classes.py", "snippet": "class VerizonIgnore:\n absolute = None \n scoped = None \n\n def __init__(self, absolute, scoped) -> None:\n self.absolute = absolute\n self.scoped = scoped" }, { "identifier": "VerizonTag", "path": "classes.py", "snippet": "class VerizonTag(VerizonCommit):\n fmt = b'tag'" }, { "identifier": "VerizonTree", "path": "classes.py", "snippet": "class VerizonTree(VerizonObject):\n fmt = b'tree'\n\n def deserialize(self, data):\n self.items = tree_parse(data)\n \n def serialize(self):\n return tree_serialize(self)\n \n def init(self):\n self.items = list()" }, { "identifier": "VerizonTreeLeaf", "path": "classes.py", "snippet": "class VerizonTreeLeaf:\n def __init__(self, mode, path, sha) -> None:\n self.mode = mode \n self.path = path\n self.sha = sha " }, { "identifier": "repo_file", "path": "utils.py", "snippet": "def repo_file(repo, *path, mkdir=False):\n if repo_dir(repo, *path[:-1], mkdir=mkdir):\n return repo_path(repo, *path)" }, { "identifier": "repo_dir", "path": "utils.py", "snippet": "def repo_dir(repo, *path, mkdir=False):\n path = repo_path(repo, *path)\n\n if os.path.exists(path):\n if (os.path.isdir(path)):\n return path\n raise Exception(f\"Not a directory : {path}\")\n \n if mkdir:\n os.mkdir(path)\n return path\n return None" }, { "identifier": "ref_resolve", "path": "other_utils.py", "snippet": "def ref_resolve(repo, ref):\n path = repo_file(repo, ref)\n\n if not os.path.isfile(path):\n return None\n \n with open(path, 'r') as fp:\n data = fp.read()[:-1] # For dropping final \\n\n \n if data.startswith('ref: '):\n return ref_resolve(repo, data[5:])\n \n return data" } ]
from imports import * from classes import VerizonIndex, VerizonIndexEntry, VerizonCommit, VerizonBlob, VerizonIgnore, VerizonTag, VerizonTree, VerizonTreeLeaf from utils import repo_file, repo_dir from other_utils import ref_resolve
3,283
obj.items.sort(key=tree_leaf_sort_key) ret = b'' for i in obj.items: ret += i.mode ret += b'' ret += i.path.encode('utf8') ret += b'\x00' sha = int(i.sha, 16) ret += sha.to_bytes(20, byteorder="big") return ret def object_read(repo, sha): path = repo_file(repo, "objects", sha[0:2], sha[2:]) if not os.path.isfile(path): return None with open(path, "rb") as f: raw = zlib.decompress(f.read()) # Read the object type x = raw.find(b'') fmt = raw[0:x] # Read and Validate the object size y = raw.find(b'\x00', x) size = int(raw[x:y].decode('ascii')) if size != len(raw)-y-1: raise Exception(f"Malformed object {sha}: bad length") match fmt: case b'commit' : c=VerizonCommit case b'tree' : c=VerizonTree case b'tag' : c=VerizonTag case b'blob' : c=VerizonBlob case _ : raise Exception(f"Unknown type {fmt.decode('ascii')} for object {sha}") # Call constructor and return object. return c(raw[y+1]) def object_write(obj, repo=None): data = obj.serialize() result = obj.fmt + b' ' + str(len(data)).encode() + b'\x00' + data sha = hashlib.sha1(result).hexdigest() if repo: path = repo_file(repo, "objects", sha[0:2], sha[2:], mkdir=True) if not os.path.exists(path): with open(path, "wb") as f: f.write(zlib.compress(result)) return sha def object_find(repo, name, fmt=None, follow=True): sha = object_resolve(repo, name) if not sha: raise Exception(f"No such reference : {name}") if len(sha) > 1: raise Exception("Ambigious Reference - {0}. Candidates are :\n - {1}".format(name, '\n - '.join(sha))) sha = sha[0] if not fmt: return sha while True: obj = object_read(repo, sha) if obj.fmt == fmt : return sha if not follow: return None # Follow tags if obj.fmt == b'tag': sha = obj.kvlm[b'object'].decode('ascii') elif obj.fmt == b'commit': sha = obj.kvlm[b'tree'].decode('ascii') else: return None def object_hash(fd, fmt, repo=None): data = fd.read() match fmt: case b'commit': obj=VerizonCommit(data) case b'tree' : obj=VerizonTree(data) case b'tag' : obj=VerizonTag(data) case b'blob' : obj=VerizonBlob(data) case _ : raise Exception(f"Unknown Type : {fmt}") return object_write(obj, repo) def object_resolve(repo, name): """Resolve names to an object has in repo.""" candidates = list() hashRE = re.compile(r"^[0-9A-Fa-f]{4,40}$") if not name.strip(): return None # If it's head, then it is non-ambigious. if name == "HEAD": return [ref_resolve(repo, "HEAD")] if hashRE.match(name): name = name.lower() prefix = name[0:2]
def index_read(repo): index_file = repo_file(repo, "index") if not os.path.exists(index_file): return VerizonIndex() with open(index_file, 'rb') as f: raw = f.read() header = raw[:12] signature = header[:4] assert signature == b'DIRC' version = int.from_bytes(header[4:8], 'big') assert version == 2, "Verizon supports only index file version 2" count = int.from_bytes(header[8:12], 'big') entries = list() content = raw[12:] idx = 0 for i in range(0, count): ctime_s = int.from_bytes(content[idx:idx+4], 'big') ctime_ns = int.from_bytes(content[idx+4:idx+8], 'big') mtime_s = int.from_bytes(content[idx+8:idx+12], 'big') mtime_ns = int.from_bytes(content[idx+12:idx+16], 'big') dev = int.from_bytes(content[idx+16:idx+20], 'big') ino = int.from_bytes(content[idx+20:idx+24], 'big') unused = int.from_bytes(content[idx+24:idx+26], 'big') assert 0 == unused mode = int.from_bytes(content[idx+26:idx+28], 'big') mode_type = mode >> 12 assert mode_type in [0b1000, 0b1010, 0b1110] mode_perms = mode & 0b0000000111111111 uid = int.from_bytes(content[idx+28:idx+32], 'big') gid = int.from_bytes(content[idx+32:idx+36], 'big') fsize = int.from_bytes(content[idx+36:idx+40], 'big') sha = format(int.from_bytes(content[idx+40:idx+60], 'big'), '040x') flags = int.from_bytes(content[idx+60:idx+62], 'big') flag_assume_valid = (flags & 0b1000000000000000) != 0 flag_extended = (flags & 0b0100000000000000) != 0 assert not flag_extended flag_stage = flags & 0b0011000000000000 name_length = flags & 0b0000111111111111 idx += 62 if name_length < 0xFFF: assert content[idx + name_length] == 0x00 raw_name = content[idx: idx+name_length] idx += name_length + 1 else: print("Notice that Name is 0x{:X} bytes long".format(name_length)) null_idx = content.find(b'\x00', idx + 0xFFF) raw_name = content[idx:null_idx] idx = null_idx + 1 name = raw_name.decode('utf8') idx = 8*ceil(idx/8) entries.append(VerizonIndexEntry( ctime = (ctime_s, ctime_ns), mtime = (mtime_s, mtime_ns), dev = dev, ino = ino, mode_type= mode_type, mode_perms= mode_perms, uid = uid, gid = gid, fsize = fsize, sha=sha, flag_assume_valid=flag_assume_valid, flag_stage=flag_stage, name=name)) return VerizonIndex(version = version, entries=entries) def index_write(repo, index): with open(repo_file(repo, "index"), "wb") as f: f.write(b'DIRC') f.write(index.version.to_bytes(4, "big")) f.write(len(index.entries).to_bytes(4, "big")) idx = 0 # Entries for e in index.entries: f.write(e.ctime[0].to_bytes(4, "big")) f.write(e.ctime[1].to_bytes(4, "big")) f.write(e.mtime[0].to_bytes(4, "big")) f.write(e.mtime[1].to_bytes(4, "big")) f.write(e.dev.to_bytes(4, "big")) f.write(e.ino.to_bytes(4, "big")) # Mode mode = (e.mode_type << 12) | e.mode_perms f.write(mode.to_bytes(4, "big")) f.write(e.uid.to_bytes(4, "big")) f.write(e.gid.to_bytes(4, "big")) f.write(e.fsize.to_bytes(4, "big")) f.write(int(e.sha, 16).to_bytes(20, "big")) flag_assume_valid = 0x1 << 15 if e.flag_assume_valid else 0 name_bytes = e.name.encode('utf8') bytes_len = len(name_bytes) if bytes_len >= 0xFFF : name_length = 0xFFF else: name_length = bytes_len f.write((flag_assume_valid | e.flag_stage | name_length).to_bytes(2, "big")) f.write(name_bytes) f.write((0).to_bytes(1, "big")) idx += 62 + len(name_bytes) + 1 if idx % 8 != 0: pad = 8-(idx % 8) f.write((0).to_bytes(pad, "big")) idx += pad def tree_parse_one(raw, start=0): x = raw.find(b' ', start) assert x-start == 5 or x-start == 6 mode = raw[start:x] if len(mode) == 5: mode = b' ' + mode y = raw.find(b'\x00', x) path = raw[x+1:y] sha = 
format(int.from_bytes(raw[y+1:y+21], "big"), "040x") return y+21, VerizonTreeLeaf(mode, path.decode('utf8'), sha) def tree_parse(raw): pos = 0 max = len(raw) ret = list() while pos<max: pos, data = tree_parse_one(raw, pos) ret.append(data) return ret # This is the ordering function. Entries are sorted by name, alphabetically, but directories are sorted with a final / added. def tree_leaf_sort_key(leaf): if leaf.mode.startswith(b'10'): return leaf.path return leaf.path + '/' def tree_serialize(obj): obj.items.sort(key=tree_leaf_sort_key) ret = b'' for i in obj.items: ret += i.mode ret += b'' ret += i.path.encode('utf8') ret += b'\x00' sha = int(i.sha, 16) ret += sha.to_bytes(20, byteorder="big") return ret def object_read(repo, sha): path = repo_file(repo, "objects", sha[0:2], sha[2:]) if not os.path.isfile(path): return None with open(path, "rb") as f: raw = zlib.decompress(f.read()) # Read the object type x = raw.find(b'') fmt = raw[0:x] # Read and Validate the object size y = raw.find(b'\x00', x) size = int(raw[x:y].decode('ascii')) if size != len(raw)-y-1: raise Exception(f"Malformed object {sha}: bad length") match fmt: case b'commit' : c=VerizonCommit case b'tree' : c=VerizonTree case b'tag' : c=VerizonTag case b'blob' : c=VerizonBlob case _ : raise Exception(f"Unknown type {fmt.decode('ascii')} for object {sha}") # Call constructor and return object. return c(raw[y+1]) def object_write(obj, repo=None): data = obj.serialize() result = obj.fmt + b' ' + str(len(data)).encode() + b'\x00' + data sha = hashlib.sha1(result).hexdigest() if repo: path = repo_file(repo, "objects", sha[0:2], sha[2:], mkdir=True) if not os.path.exists(path): with open(path, "wb") as f: f.write(zlib.compress(result)) return sha def object_find(repo, name, fmt=None, follow=True): sha = object_resolve(repo, name) if not sha: raise Exception(f"No such reference : {name}") if len(sha) > 1: raise Exception("Ambigious Reference - {0}. Candidates are :\n - {1}".format(name, '\n - '.join(sha))) sha = sha[0] if not fmt: return sha while True: obj = object_read(repo, sha) if obj.fmt == fmt : return sha if not follow: return None # Follow tags if obj.fmt == b'tag': sha = obj.kvlm[b'object'].decode('ascii') elif obj.fmt == b'commit': sha = obj.kvlm[b'tree'].decode('ascii') else: return None def object_hash(fd, fmt, repo=None): data = fd.read() match fmt: case b'commit': obj=VerizonCommit(data) case b'tree' : obj=VerizonTree(data) case b'tag' : obj=VerizonTag(data) case b'blob' : obj=VerizonBlob(data) case _ : raise Exception(f"Unknown Type : {fmt}") return object_write(obj, repo) def object_resolve(repo, name): """Resolve names to an object has in repo.""" candidates = list() hashRE = re.compile(r"^[0-9A-Fa-f]{4,40}$") if not name.strip(): return None # If it's head, then it is non-ambigious. if name == "HEAD": return [ref_resolve(repo, "HEAD")] if hashRE.match(name): name = name.lower() prefix = name[0:2]
path = repo_dir(repo, "objects", prefix, mkdir=False)
9
2023-12-18 18:53:26+00:00
4k
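object_write() in the record above uses the standard git object layout, so the hashes it produces are interchangeable with git's. A short sketch of just that layout, assuming nothing beyond what the record shows: prepend a "<type> <size>\0" header to the payload, name the object by the SHA-1 of header plus payload, and store the zlib-compressed bytes under objects/<first two hex digits>/<rest>.

import hashlib
import zlib

def hash_object(data: bytes, fmt: bytes = b"blob") -> tuple[str, bytes]:
    # git-style header: b"<type> <size>\x00" prepended to the raw payload
    result = fmt + b" " + str(len(data)).encode() + b"\x00" + data
    sha = hashlib.sha1(result).hexdigest()
    return sha, zlib.compress(result)

sha, stored = hash_object(b"hello world\n")
# on disk this would live at objects/<sha[0:2]>/<sha[2:]>, matching repo_file(repo, "objects", sha[0:2], sha[2:])
print(sha[:2], sha[2:])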
Infleqtion/qLDPC
qldpc/objects_test.py
[ { "identifier": "abstract", "path": "qldpc/abstract.py", "snippet": "DEFAULT_FIELD_ORDER = 2\nclass GroupMember(comb.Permutation):\nclass Group:\nclass Element:\nclass Protograph:\nclass TrivialGroup(Group):\nclass CyclicGroup(Group):\nclass DihedralGroup(Group):\nclass QuaternionGroup(Group):\n def __mul__(self, other: UnknownType) -> UnknownType:\n def __add__(self, other: UnknownType) -> UnknownType:\n def __lt__(self, other: GroupMember) -> bool:\n def __matmul__(self, other: GroupMember) -> GroupMember:\ndef default_lift(member: GroupMember) -> IntegerArray:\n def __init__(\n self, group: PermutationGroup, field: int | None = None, lift: Lift | None = None\n ) -> None:\n def __eq__(self, other: object) -> bool:\n def __mul__(self, other: Group) -> Group:\n def lift(member: GroupMember) -> galois.FieldArray:\n def __contains__(self, member: GroupMember) -> bool:\n def field(self) -> type[galois.FieldArray]:\n def order(self) -> int:\n def generators(self) -> Sequence[GroupMember]:\n def generate(self) -> Iterator[GroupMember]:\n def identity(self) -> GroupMember:\n def product(cls, *groups: Group, repeat: int = 1) -> Group:\n def lift(self, member: GroupMember) -> galois.FieldArray:\n def lift_dim(self) -> int:\n def table(self) -> IntegerArray:\n def from_table(\n cls,\n table: IntegerArray | Sequence[Sequence[int]],\n field: int | None = None,\n integer_lift: IntegerLift | None = None,\n ) -> Group:\n def lift(member: GroupMember) -> IntegerArray:\n def from_generators(\n cls, *generators: GroupMember, field: int | None = None, lift: Lift | None = None\n ) -> Group:\n def __init__(self, group: Group, *members: GroupMember):\n def __eq__(self, other: object) -> bool:\n def __iter__(self) -> Iterator[tuple[GroupMember, galois.FieldArray]]:\n def __add__(self, other: GroupMember | Element) -> Element:\n def __radd__(self, other: GroupMember) -> Element:\n def __mul__(self, other: int | GroupMember | Element) -> Element:\n def __rmul__(self, other: int | GroupMember) -> Element:\n def __neg__(self) -> Element:\n def __pow__(self, power: int) -> Element:\n def copy(self) -> Element:\n def field(self) -> type[galois.FieldArray]:\n def group(self) -> Group:\n def lift(self) -> galois.FieldArray:\n def zero(self) -> Element:\n def one(self) -> Element:\n def T(self) -> Element:\n def __init__(self, matrix: Protograph | ObjectMatrix) -> None:\n def __eq__(self, other: object) -> bool:\n def __rmul__(self, val: int) -> Protograph:\n def __mul__(self, val: int) -> Protograph:\n def matrix(self) -> npt.NDArray[np.object_]:\n def shape(self) -> tuple[int, ...]:\n def group(self) -> Group:\n def field(self) -> type[galois.FieldArray]:\n def lift(self) -> galois.FieldArray:\n def T(self) -> Protograph:\n def build(cls, group: Group, matrix: ObjectMatrix, *, field: int = 2) -> Protograph:\n def __init__(self, field: int | None = None) -> None:\n def to_protograph(\n cls, matrix: IntegerArray | Sequence[Sequence[int]], field: int | None = None\n ) -> Protograph:\n def __init__(self, order: int) -> None:\n def __init__(self, order: int) -> None:\n def __init__(self) -> None:\n def lift(member: int) -> IntegerArray:" }, { "identifier": "objects", "path": "qldpc/objects.py", "snippet": "class Pauli(enum.Enum):\nclass QuditOperator:\nclass Node:\nclass CayleyComplex:\n I = (0, 0) # noqa: E741\n Z = (0, 1)\n X = (1, 0)\n Y = (1, 1)\n def __mul__(self, other: Pauli) -> Pauli:\n def __invert__(self) -> Pauli:\n def __str__(self) -> str:\n def from_string(cls, string: str) -> Pauli:\n def index(self) -> 
int:\n def __init__(self, value: tuple[int, int] = (0, 0)) -> None:\n def __eq__(self, other: object) -> bool:\n def __invert__(self) -> QuditOperator:\n def __str__(self) -> str:\n def from_string(cls, string: str) -> QuditOperator:\n def __hash__(self) -> int:\n def __lt__(self, other: Node) -> bool:\n def __str__(self) -> str:\n def __init__(\n self,\n subset_a: Collection[abstract.GroupMember],\n subset_b: Collection[abstract.GroupMember] | None = None,\n *,\n rank: int | None = None,\n ) -> None:\n def get_min_rank(\n cls,\n group: abstract.Group,\n subset_a: Collection[abstract.GroupMember],\n subset_b: Collection[abstract.GroupMember],\n ) -> Literal[0, 1, 2]:\n def satisfies_total_no_conjugacy(\n cls,\n group: abstract.Group,\n subset_a: Collection[abstract.GroupMember],\n subset_b: Collection[abstract.GroupMember],\n ) -> bool:\n def get_cayley_graphs(\n cls,\n group: abstract.Group,\n subset_a: Collection[abstract.GroupMember],\n subset_b: Collection[abstract.GroupMember],\n ) -> tuple[nx.Graph, nx.Graph]:" } ]
import numpy as np import pytest from qldpc import abstract, objects
1,648
"""Unit tests for objects.py Copyright 2023 The qLDPC Authors and Infleqtion Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ def test_pauli() -> None: """Pauli operator capabilities.""" for string in ["I", "X", "Y", "Z"]:
"""Unit tests for objects.py Copyright 2023 The qLDPC Authors and Infleqtion Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ def test_pauli() -> None: """Pauli operator capabilities.""" for string in ["I", "X", "Y", "Z"]:
assert str(objects.Pauli.from_string(string)) == string
1
2023-12-19 22:29:42+00:00
4k
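The tests in the record above exercise a Pauli enum that encodes operators as (x, z) bit pairs: I=(0,0), Z=(0,1), X=(1,0), Y=(1,1). The sketch below illustrates that binary-symplectic convention independently of the qldpc implementation; in this encoding the product of two Paulis, up to a global phase, is just addition of the bit pairs mod 2.

import enum

class Pauli(enum.Enum):
    I = (0, 0)  # noqa: E741
    Z = (0, 1)
    X = (1, 0)
    Y = (1, 1)

    def __mul__(self, other: "Pauli") -> "Pauli":
        # product up to a global phase: XOR the (x, z) bits
        x = (self.value[0] + other.value[0]) % 2
        z = (self.value[1] + other.value[1]) % 2
        return Pauli((x, z))

assert Pauli.X * Pauli.Z == Pauli.Y  # XZ equals Y up to a global phase
assert Pauli.Y * Pauli.Y == Pauli.I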
CosmicLaca/ComfyUI_Primere_Nodes
Nodes/Inputs.py
[ { "identifier": "ImageExifReader", "path": "Nodes/modules/image_meta_reader.py", "snippet": "class ImageExifReader:\n def __init__(self, file):\n self._raw = \"\"\n self._parser = {}\n self._parameter = {}\n self._tool = \"\"\n self.read_data(file)\n\n def read_data(self, file):\n def is_json(jsoninput):\n try:\n json.loads(jsoninput)\n except ValueError as e:\n return False\n return True\n\n with Image.open(file) as f:\n p2metadata = pyexiv2.Image(file)\n is_primere = p2metadata.read_exif()\n if 'Exif.Image.ImageDescription' in is_primere:\n primere_exif_string = is_primere.get('Exif.Image.ImageDescription').strip()\n if is_json(primere_exif_string) == True:\n json_object = json.loads(primere_exif_string)\n # keysList = {'positive', 'negative', 'positive_l', 'negative_l', 'positive_r', 'negative_r', 'seed', 'model_hash', 'model_name', 'sampler_name'}\n # if not (keysList - json_object.keys()):\n self._tool = \"Primere\"\n self._parser = Primere(info=json_object)\n else:\n if f.format == \"PNG\":\n if \"parameters\" in f.info:\n print('A11')\n self._tool = \"Automatic1111\"\n self._parser = Automatic1111(info=f.info)\n elif \"prompt\" in f.info:\n print('Comfy')\n self._tool = \"ComfyUI\"\n self._parser = ComfyUI(info=f.info)\n\n elif f.format == \"JPEG\" or f.format == \"WEBP\":\n exif = piexif.load(f.info.get(\"exif\")) or {}\n self._raw = piexif.helper.UserComment.load(\n exif.get(\"Exif\").get(piexif.ExifIFD.UserComment)\n )\n if is_json(self._raw) != True:\n self._tool = \"Automatic1111\"\n self._parser = Automatic1111(raw=self._raw)\n\n @property\n def parser(self):\n return self._parser\n\n @property\n def tool(self):\n return self._tool" }, { "identifier": "exif_data_checker", "path": "Nodes/modules/exif_data_checker.py", "snippet": "def get_model_hash(filename):\ndef check_model_from_exif(model_hash_exif, model_name_exif, model_name, model_hash_check):\ndef change_exif_samplers(sampler_name_exif, comfy_schedulers):\ndef check_sampler_from_exif(sampler_name_exif, sampler_name, scheduler_name):\ndef check_vae_exif(vae_name_exif, vae_name):" }, { "identifier": "advanced_encode", "path": "Nodes/modules/adv_encode.py", "snippet": "def advanced_encode(clip, text, token_normalization, weight_interpretation, w_max=1.0, clip_balance=.5, apply_to_pooled=True):\n tokenized = clip.tokenize(text, return_word_ids=True)\n if isinstance(clip.cond_stage_model, (SDXLClipModel, SDXLRefinerClipModel, SDXLClipG)):\n embs_l = None\n embs_g = None\n pooled = None\n if 'l' in tokenized and isinstance(clip.cond_stage_model, SDXLClipModel):\n embs_l, _ = advanced_encode_from_tokens(tokenized['l'], \n token_normalization, \n weight_interpretation, \n lambda x: encode_token_weights(clip, x, encode_token_weights_l),\n w_max=w_max, \n return_pooled=False)\n if 'g' in tokenized:\n embs_g, pooled = advanced_encode_from_tokens(tokenized['g'], \n token_normalization, \n weight_interpretation,\n lambda x: encode_token_weights(clip, x, encode_token_weights_g),\n w_max=w_max, \n return_pooled=True,\n apply_to_pooled=apply_to_pooled)\n return prepareXL(embs_l, embs_g, pooled, clip_balance)\n else:\n return advanced_encode_from_tokens(tokenized['l'],\n token_normalization, \n weight_interpretation, \n lambda x: (clip.encode_from_tokens({'l': x}), None),\n w_max=w_max)" } ]
from ..components.tree import TREE_INPUTS from ..components.tree import PRIMERE_ROOT from dynamicprompts.parser.parse import ParserConfig from dynamicprompts.wildcards.wildcard_manager import WildcardManager from .modules.image_meta_reader import ImageExifReader from .modules import exif_data_checker from ..components import utility from pathlib import Path from .modules.adv_encode import advanced_encode import os import re import chardet import pandas import comfy.samplers import folder_paths import hashlib import nodes import random import string
2,718
}, } def get_prompt(self, positive_prompt, negative_prompt, extra_pnginfo, id, subpath="", model="", orientation=""): def debug_state(self, extra_pnginfo, id): workflow = extra_pnginfo["workflow"] for node in workflow["nodes"]: node_id = str(node["id"]) name = node["type"] if node_id == id and name == 'PrimerePrompt': if "Debug" in name or "Show" in name or "Function" in name or "Evaluate" in name: continue return node['widgets_values'] rawResult = debug_state(self, extra_pnginfo, id) if not rawResult: rawResult = (positive_prompt, negative_prompt) if len(subpath.strip()) < 1 or subpath.strip() == 'None': subpath = None if len(model.strip()) < 1 or model.strip() == 'None': model = None if len(orientation.strip()) < 1 or orientation.strip() == 'None': orientation = None if orientation == 'Random': orientations = ["Horizontal", "Vertical"] orientation = random.choice(orientations) return (rawResult[0].replace('\n', ' '), rawResult[1].replace('\n', ' '), subpath, model, orientation) class PrimereRefinerPrompt: RETURN_TYPES = ("STRING", "STRING", "CONDITIONING", "CONDITIONING") RETURN_NAMES = ("PROMPT+", "PROMPT-", "COND+", "COND-") FUNCTION = "refiner_prompt" CATEGORY = TREE_INPUTS @classmethod def INPUT_TYPES(cls): return { "required": { "positive_refiner": ("STRING", {"default": "", "multiline": True}), "negative_refiner": ("STRING", {"default": "", "multiline": True}), "positive_refiner_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), "negative_refiner_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), "positive_original_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), "negative_original_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), "clip": ("CLIP",), "seed": ("INT", {"default": 0, "min": -1, "max": 0xffffffffffffffff, "forceInput": True}), "token_normalization": (["none", "mean", "length", "length+mean"],), "weight_interpretation": (["comfy", "A1111", "compel", "comfy++", "down_weight"],), }, "optional": { "positive_original": ("STRING", {"default": None, "forceInput": True}), "negative_original": ("STRING", {"default": None, "forceInput": True}), }, "hidden": { "extra_pnginfo": "EXTRA_PNGINFO", "id": "UNIQUE_ID", }, } def __init__(self): wildcard_dir = os.path.join(PRIMERE_ROOT, 'wildcards') self._wildcard_manager = WildcardManager(wildcard_dir) self._parser_config = ParserConfig( variant_start = "{", variant_end = "}", wildcard_wrap = "__" ) def refiner_prompt(self, extra_pnginfo, id, clip, seed, token_normalization, weight_interpretation, positive_refiner = "", negative_refiner = "", positive_original = None, negative_original = None, positive_refiner_strength = 1, negative_refiner_strength = 1, positive_original_strength = 1, negative_original_strength = 1): def refiner_debug_state(self, extra_pnginfo, id): workflow = extra_pnginfo["workflow"] for node in workflow["nodes"]: node_id = str(node["id"]) name = node["type"] if node_id == id and name == 'PrimereRefinerPrompt': if "Debug" in name or "Show" in name or "Function" in name or "Evaluate" in name: continue return node['widgets_values'] rawResult = refiner_debug_state(self, extra_pnginfo, id) if not rawResult: rawResult = (positive_refiner, negative_refiner) output_positive = rawResult[0].replace('\n', ' ') output_negative = rawResult[1].replace('\n', ' ') final_positive = "" final_negative = "" if positive_refiner_strength != 0: if positive_refiner_strength != 1: final_positive = 
f'({output_positive}:{positive_refiner_strength:.2f})' if output_positive is not None and output_positive != '' else '' else: final_positive = f'{output_positive}' if output_positive is not None and output_positive != '' else '' if negative_refiner_strength != 0: if negative_refiner_strength != 1: final_negative = f'({output_negative}:{negative_refiner_strength:.2f})' if output_negative is not None and output_negative != '' else '' else: final_negative = f'{output_negative}' if output_negative is not None and output_negative != '' else '' if positive_original is not None and positive_original != "" and positive_original_strength != 0: if positive_original_strength != 1: final_positive = f'{final_positive} ({positive_original}:{positive_original_strength:.2f})' else: final_positive = f'{final_positive} {positive_original}' if negative_original is not None and negative_original != "" and negative_original_strength != 0: if negative_original_strength != 1: final_negative = f'{final_negative} ({negative_original}:{negative_original_strength:.2f})' else: final_negative = f'{final_negative} {negative_original}' final_positive = utility.DynPromptDecoder(self, final_positive.strip(' ,;'), seed) final_negative = utility.DynPromptDecoder(self, final_negative.strip(' ,;'), seed)
class PrimereDoublePrompt: RETURN_TYPES = ("STRING", "STRING", "STRING", "STRING", "STRING") RETURN_NAMES = ("PROMPT+", "PROMPT-", "SUBPATH", "MODEL", "ORIENTATION") FUNCTION = "get_prompt" CATEGORY = TREE_INPUTS @classmethod def INPUT_TYPES(cls): return { "required": { "positive_prompt": ("STRING", {"default": "", "multiline": True}), "negative_prompt": ("STRING", {"default": "", "multiline": True}), }, "optional": { "subpath": ("STRING", {"default": "", "multiline": False}), "model": (["None"] + folder_paths.get_filename_list("checkpoints"), {"default": "None"}), "orientation": (["None", "Random", "Horizontal", "Vertical"], {"default": "None"}), }, "hidden": { "extra_pnginfo": "EXTRA_PNGINFO", "id": "UNIQUE_ID", }, } def get_prompt(self, positive_prompt, negative_prompt, extra_pnginfo, id, subpath="", model="", orientation=""): def debug_state(self, extra_pnginfo, id): workflow = extra_pnginfo["workflow"] for node in workflow["nodes"]: node_id = str(node["id"]) name = node["type"] if node_id == id and name == 'PrimerePrompt': if "Debug" in name or "Show" in name or "Function" in name or "Evaluate" in name: continue return node['widgets_values'] rawResult = debug_state(self, extra_pnginfo, id) if not rawResult: rawResult = (positive_prompt, negative_prompt) if len(subpath.strip()) < 1 or subpath.strip() == 'None': subpath = None if len(model.strip()) < 1 or model.strip() == 'None': model = None if len(orientation.strip()) < 1 or orientation.strip() == 'None': orientation = None if orientation == 'Random': orientations = ["Horizontal", "Vertical"] orientation = random.choice(orientations) return (rawResult[0].replace('\n', ' '), rawResult[1].replace('\n', ' '), subpath, model, orientation) class PrimereRefinerPrompt: RETURN_TYPES = ("STRING", "STRING", "CONDITIONING", "CONDITIONING") RETURN_NAMES = ("PROMPT+", "PROMPT-", "COND+", "COND-") FUNCTION = "refiner_prompt" CATEGORY = TREE_INPUTS @classmethod def INPUT_TYPES(cls): return { "required": { "positive_refiner": ("STRING", {"default": "", "multiline": True}), "negative_refiner": ("STRING", {"default": "", "multiline": True}), "positive_refiner_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), "negative_refiner_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), "positive_original_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), "negative_original_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), "clip": ("CLIP",), "seed": ("INT", {"default": 0, "min": -1, "max": 0xffffffffffffffff, "forceInput": True}), "token_normalization": (["none", "mean", "length", "length+mean"],), "weight_interpretation": (["comfy", "A1111", "compel", "comfy++", "down_weight"],), }, "optional": { "positive_original": ("STRING", {"default": None, "forceInput": True}), "negative_original": ("STRING", {"default": None, "forceInput": True}), }, "hidden": { "extra_pnginfo": "EXTRA_PNGINFO", "id": "UNIQUE_ID", }, } def __init__(self): wildcard_dir = os.path.join(PRIMERE_ROOT, 'wildcards') self._wildcard_manager = WildcardManager(wildcard_dir) self._parser_config = ParserConfig( variant_start = "{", variant_end = "}", wildcard_wrap = "__" ) def refiner_prompt(self, extra_pnginfo, id, clip, seed, token_normalization, weight_interpretation, positive_refiner = "", negative_refiner = "", positive_original = None, negative_original = None, positive_refiner_strength = 1, negative_refiner_strength = 1, positive_original_strength = 1, negative_original_strength = 
1): def refiner_debug_state(self, extra_pnginfo, id): workflow = extra_pnginfo["workflow"] for node in workflow["nodes"]: node_id = str(node["id"]) name = node["type"] if node_id == id and name == 'PrimereRefinerPrompt': if "Debug" in name or "Show" in name or "Function" in name or "Evaluate" in name: continue return node['widgets_values'] rawResult = refiner_debug_state(self, extra_pnginfo, id) if not rawResult: rawResult = (positive_refiner, negative_refiner) output_positive = rawResult[0].replace('\n', ' ') output_negative = rawResult[1].replace('\n', ' ') final_positive = "" final_negative = "" if positive_refiner_strength != 0: if positive_refiner_strength != 1: final_positive = f'({output_positive}:{positive_refiner_strength:.2f})' if output_positive is not None and output_positive != '' else '' else: final_positive = f'{output_positive}' if output_positive is not None and output_positive != '' else '' if negative_refiner_strength != 0: if negative_refiner_strength != 1: final_negative = f'({output_negative}:{negative_refiner_strength:.2f})' if output_negative is not None and output_negative != '' else '' else: final_negative = f'{output_negative}' if output_negative is not None and output_negative != '' else '' if positive_original is not None and positive_original != "" and positive_original_strength != 0: if positive_original_strength != 1: final_positive = f'{final_positive} ({positive_original}:{positive_original_strength:.2f})' else: final_positive = f'{final_positive} {positive_original}' if negative_original is not None and negative_original != "" and negative_original_strength != 0: if negative_original_strength != 1: final_negative = f'{final_negative} ({negative_original}:{negative_original_strength:.2f})' else: final_negative = f'{final_negative} {negative_original}' final_positive = utility.DynPromptDecoder(self, final_positive.strip(' ,;'), seed) final_negative = utility.DynPromptDecoder(self, final_negative.strip(' ,;'), seed)
embeddings_final_pos, pooled_pos = advanced_encode(clip, final_positive, token_normalization, weight_interpretation, w_max=1.0, apply_to_pooled=True)
2
2023-12-17 20:42:27+00:00
4k
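PrimereRefinerPrompt in the record above assembles Automatic1111-style weighted prompt fragments before decoding dynamic prompts. A hedged sketch of just that string format (the helper name is illustrative and not part of the node pack's API): a strength of 1.0 keeps the text unchanged, any other non-zero strength wraps it as "(text:strength)" with two decimals, and empty text or zero strength contributes nothing.

def weight_prompt(text: str, strength: float) -> str:
    if not text or strength == 0:
        return ""
    if strength == 1:
        return text
    return f"({text}:{strength:.2f})"

print(weight_prompt("cinematic lighting, sharp focus", 1.2))  # -> (cinematic lighting, sharp focus:1.20)
print(weight_prompt("blurry, lowres", 1.0))                   # -> blurry, lowres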
amazon-science/c2f-seg
test_c2f_seg.py
[ { "identifier": "load_dataset", "path": "data/dataloader_transformer.py", "snippet": "def load_dataset(config, args, mode):\n if mode==\"train\":\n if args.dataset==\"KINS\":\n train_dataset = Kins_Fusion_dataset(config, mode='train')\n test_dataset = Kins_Fusion_dataset(config, mode='test')\n elif args.dataset==\"COCOA\":\n train_dataset = COCOA_Fusion_dataset(config, mode='train')\n test_dataset = COCOA_Fusion_dataset(config, mode='test')\n elif args.dataset==\"Fishbowl\":\n train_dataset = FishBowl(config, mode='train')\n test_dataset = FishBowl(config, mode='test')\n elif args.dataset==\"MOViD_A\":\n train_dataset = MOViD_A(config, mode='train')\n test_dataset = MOViD_A(config, mode='test')\n return train_dataset, test_dataset \n else:\n if args.dataset==\"KINS\":\n test_dataset = KINS_Aisformer_VRSP_Intersection(config, mode='test')\n elif args.dataset==\"COCOA\":\n test_dataset = COCOA_Fusion_dataset(config, mode='test')\n elif args.dataset==\"Fishbowl\":\n test_dataset = FishBowl(config, mode='test')\n elif args.dataset==\"MOViD_A\":\n test_dataset = MOViD_A(config, mode='test')\n return test_dataset" }, { "identifier": "setup_logger", "path": "utils/logger.py", "snippet": "def setup_logger(work_dir=None, logfile_name='log.txt', logger_name='log'):\n \"\"\"Sets up logger from target work directory.\n\n The function will sets up a logger with `DEBUG` log level. Two handlers will\n be added to the logger automatically. One is the `sys.stdout` stream, with\n `INFO` log level, which will print improtant messages on the screen. The other\n is used to save all messages to file `$WORK_DIR/$LOGFILE_NAME`. Messages will\n be added time stamp and log level before logged.\n\n NOTE: If `work_dir` or `logfile_name` is empty, the file stream will be\n skipped.\n\n Args:\n work_dir: The work directory. All intermediate files will be saved here.\n (default: None)\n logfile_name: Name of the file to save log message. (default: `log.txt`)\n logger_name: Unique name for the logger. 
(default: `logger`)\n\n Returns:\n A `logging.Logger` object.\n\n Raises:\n SystemExit: If the work directory has already existed, of the logger with\n specified name `logger_name` has already existed.\n \"\"\"\n logger = logging.getLogger(logger_name)\n formatter = logging.Formatter(\"[%(asctime)s][%(levelname)s] %(message)s\")\n if not logger.handlers:\n logger.setLevel(logging.DEBUG)\n # Print log message with `INFO` level or above onto the screen.\n sh = logging.StreamHandler(stream=sys.stdout)\n # sh.setLevel(logging.INFO)\n sh.setLevel(logging.INFO)\n sh.setFormatter(formatter)\n logger.addHandler(sh)\n logger.propagate = False\n\n if not work_dir or not logfile_name:\n return logger\n\n if os.path.exists(work_dir):\n print(f'Work directory `{work_dir}` has already existed!')\n os.makedirs(work_dir, exist_ok=True)\n\n # Save log message with all levels in log file.\n fh = logging.FileHandler(os.path.join(work_dir, logfile_name))\n fh.setLevel(logging.DEBUG)\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n\n return logger" }, { "identifier": "Config", "path": "utils/utils.py", "snippet": "class Config(object):\n def __init__(self, config_path):\n with open(config_path, 'r') as f:\n self._yaml = f.read()\n self._dict = yaml.load(self._yaml, Loader=yaml.SafeLoader)\n self._dict['path'] = os.path.dirname(config_path)\n\n def __getattr__(self, name):\n if self._dict.get(name) is not None:\n return self._dict[name]\n\n return None\n\n def print(self):\n print('Model configurations:')\n print('---------------------------------')\n print(self._yaml)\n print('')\n print('---------------------------------')\n print('')" }, { "identifier": "to_cuda", "path": "utils/utils.py", "snippet": "def to_cuda(meta, device):\n for k in meta:\n if meta[k] is not None:\n meta[k] = meta[k].to(device)\n return meta" } ]
import os import cv2 import time import random import argparse import numpy as np import torch import torch.distributed as dist from tqdm import tqdm from shutil import copyfile from torch.utils.data import DataLoader from data.dataloader_transformer import load_dataset from utils.logger import setup_logger from utils.utils import Config, to_cuda from src.image_model import C2F_Seg from src.video_model import C2F_Seg
2,031
if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--seed', type=int, default=42) # path parser.add_argument('--path', type=str, required=True, help='model checkpoints path') parser.add_argument('--check_point_path', type=str, default="../check_points", ) parser.add_argument('--vq_path', type=str, required=True, default='KINS_vqgan') # dataset parser.add_argument('--dataset', type=str, default="MOViD_A", help = "select dataset") parser.add_argument('--data_type', type=str, default="image", help = "select image or video model") parser.add_argument('--batch', type=int, default=1) parser.add_argument("--local_rank", default=-1, type=int, help="node rank for distributed training") args = parser.parse_args() if args.data_type=="image": elif args.data_type=="video": dist.init_process_group(backend="nccl") torch.cuda.set_device(args.local_rank) rank = dist.get_rank() args.path = os.path.join(args.check_point_path, args.path) vq_model_path = os.path.join(args.check_point_path, args.vq_path) os.makedirs(args.path, exist_ok=True) config_path = os.path.join(args.path, 'c2f_seg_{}.yml'.format(args.dataset)) # copy config template if does't exist if not os.path.exists(config_path): copyfile('./configs/c2f_seg_{}.yml'.format(args.dataset), config_path) # load config file config = Config(config_path) config.path = args.path config.batch_size = args.batch config.dataset = args.dataset log_file = 'log-{}.txt'.format(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())) logger = setup_logger(os.path.join(args.path, 'logs'), logfile_name=log_file) if rank==0: # copy config template if does't exist if not os.path.exists(config_path): copyfile('./configs/c2f_seg_{}.yml'.format(args.dataset), config_path) # save samples and eval pictures os.makedirs(os.path.join(args.path, 'test_samples'), exist_ok=True) for k in config._dict: logger.info("{}:{}".format(k, config._dict[k])) # init device if torch.cuda.is_available(): config.device = torch.device("cuda") torch.backends.cudnn.benchmark = True # cudnn auto-tuner else: config.device = torch.device("cpu") n_gpu = torch.cuda.device_count() # set cv2 running threads to 1 (prevents deadlocks with pytorch dataloader) cv2.setNumThreads(0) # initialize random seed torch.manual_seed(config.seed) np.random.seed(config.seed) random.seed(config.seed) torch.cuda.manual_seed_all(config.seed) test_dataset = load_dataset(config, args, "test") test_sampler = torch.utils.data.distributed.DistributedSampler(test_dataset) test_loader = DataLoader( dataset=test_dataset, sampler=test_sampler, batch_size=config.batch_size, num_workers=8, drop_last=False ) sample_iterator = test_dataset.create_iterator(config.sample_size) model = C2F_Seg(config, vq_model_path, mode='test', logger=logger) model.load(is_test=True ,prefix = config.stage2_iteration) model.restore_from_stage1(prefix = config.stage1_iteration) model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank]) iter = 0 iou = 0 iou_count = 0 invisible_iou_ = 0 occ_count = 0 iou_post = 0 iou_count_post = 0 invisible_iou_post = 0 occ_count_post = 0 model.eval() with torch.no_grad(): if rank==0: test_loader = tqdm(test_loader) for items in test_loader:
if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--seed', type=int, default=42) # path parser.add_argument('--path', type=str, required=True, help='model checkpoints path') parser.add_argument('--check_point_path', type=str, default="../check_points", ) parser.add_argument('--vq_path', type=str, required=True, default='KINS_vqgan') # dataset parser.add_argument('--dataset', type=str, default="MOViD_A", help = "select dataset") parser.add_argument('--data_type', type=str, default="image", help = "select image or video model") parser.add_argument('--batch', type=int, default=1) parser.add_argument("--local_rank", default=-1, type=int, help="node rank for distributed training") args = parser.parse_args() if args.data_type=="image": elif args.data_type=="video": dist.init_process_group(backend="nccl") torch.cuda.set_device(args.local_rank) rank = dist.get_rank() args.path = os.path.join(args.check_point_path, args.path) vq_model_path = os.path.join(args.check_point_path, args.vq_path) os.makedirs(args.path, exist_ok=True) config_path = os.path.join(args.path, 'c2f_seg_{}.yml'.format(args.dataset)) # copy config template if does't exist if not os.path.exists(config_path): copyfile('./configs/c2f_seg_{}.yml'.format(args.dataset), config_path) # load config file config = Config(config_path) config.path = args.path config.batch_size = args.batch config.dataset = args.dataset log_file = 'log-{}.txt'.format(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())) logger = setup_logger(os.path.join(args.path, 'logs'), logfile_name=log_file) if rank==0: # copy config template if does't exist if not os.path.exists(config_path): copyfile('./configs/c2f_seg_{}.yml'.format(args.dataset), config_path) # save samples and eval pictures os.makedirs(os.path.join(args.path, 'test_samples'), exist_ok=True) for k in config._dict: logger.info("{}:{}".format(k, config._dict[k])) # init device if torch.cuda.is_available(): config.device = torch.device("cuda") torch.backends.cudnn.benchmark = True # cudnn auto-tuner else: config.device = torch.device("cpu") n_gpu = torch.cuda.device_count() # set cv2 running threads to 1 (prevents deadlocks with pytorch dataloader) cv2.setNumThreads(0) # initialize random seed torch.manual_seed(config.seed) np.random.seed(config.seed) random.seed(config.seed) torch.cuda.manual_seed_all(config.seed) test_dataset = load_dataset(config, args, "test") test_sampler = torch.utils.data.distributed.DistributedSampler(test_dataset) test_loader = DataLoader( dataset=test_dataset, sampler=test_sampler, batch_size=config.batch_size, num_workers=8, drop_last=False ) sample_iterator = test_dataset.create_iterator(config.sample_size) model = C2F_Seg(config, vq_model_path, mode='test', logger=logger) model.load(is_test=True ,prefix = config.stage2_iteration) model.restore_from_stage1(prefix = config.stage1_iteration) model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank]) iter = 0 iou = 0 iou_count = 0 invisible_iou_ = 0 occ_count = 0 iou_post = 0 iou_count_post = 0 invisible_iou_post = 0 occ_count_post = 0 model.eval() with torch.no_grad(): if rank==0: test_loader = tqdm(test_loader) for items in test_loader:
items = to_cuda(items, config.device)
3
2023-12-21 04:25:47+00:00
4k
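The test script in the record above follows the usual torch.distributed evaluation recipe: an NCCL process group, one CUDA device per local rank, a DistributedSampler so each rank evaluates a disjoint shard, the model wrapped in DistributedDataParallel, and eval mode with no_grad around the loop. The sketch below condenses that recipe; the function name and its arguments are placeholders, and it assumes launch under torchrun (or torch.distributed.launch) with CUDA available.

import torch
import torch.distributed as dist
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler

def setup_distributed_eval(local_rank: int, dataset, model, batch_size: int = 1):
    dist.init_process_group(backend="nccl")
    torch.cuda.set_device(local_rank)
    sampler = DistributedSampler(dataset, shuffle=False)
    loader = DataLoader(dataset, sampler=sampler, batch_size=batch_size,
                        num_workers=8, drop_last=False)
    model = model.cuda(local_rank)
    model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank])
    model.eval()
    return loader, model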
alipay/PainlessInferenceAcceleration
pia/lookahead/common/pretrained_model.py
[ { "identifier": "LookaheadCache", "path": "pia/lookahead/common/lookahead_cache.py", "snippet": "class LookaheadCache():\n def __init__(self, debug=False, eos=2, stop_words=None, max_node=512, max_output_node=256):\n self.debug = debug\n self.eos = eos\n self.max_node = max_node\n self.max_output_node = max_output_node\n self.mem = {}\n self._output_ids = defaultdict(list)\n self._update_trees = set()\n self._update_input_trees = set()\n self.stop_words = stop_words if stop_words is not None else {}\n self.default_mask = np.ones((1, 1), dtype=np.int64)\n\n def put(self, token_ids, branch_length=8, final=False, mode='output', idx=-1):\n if self.eos in token_ids:\n token_ids = token_ids[:token_ids.index(self.eos)]\n if len(token_ids) >= 2:\n ts = len(token_ids) # ts: token_ids size\n\n for i in range(ts - 1):\n token_id = token_ids[i]\n tup = token_ids[i + 1:i + branch_length + 1]\n if self.debug:\n print(f'input token:{token_id} tokens:{tup}')\n tree = self.mem.get(token_id, None)\n if tree is not None:\n tree.put(tup, mode=mode, idx=idx)\n else:\n tree = Tree(token_id, max_node=self.max_node, max_output_node=self.max_output_node)\n tree.put(tup, mode=mode, idx=idx)\n self.mem[token_id] = tree\n self._update_trees.add(tree)\n if mode == 'input':\n self._update_input_trees.add(tree)\n\n if final:\n self.reset_input_freqs()\n self.squeeze_branch_counts()\n\n def stream_put(self, token_ids, branch_length=8, final=False, mode='output', idx=0):\n # idx is only used for caching output_ids\n assert mode == 'output' and idx >= 0\n if self.eos in token_ids:\n token_ids = token_ids[:token_ids.index(self.eos)]\n self._output_ids[idx].extend(token_ids)\n output_ids = self._output_ids[idx]\n ts = len(output_ids)\n min_branch_length = 1 if final else branch_length\n if ts > min_branch_length:\n for i in range(ts - min_branch_length):\n token_id = output_ids[i]\n tup = output_ids[i + 1:i + branch_length + 1]\n if self.debug:\n print(f'input token:{token_id} tokens:{tup}')\n tree = self.mem.get(token_id, None)\n if tree:\n tree.put(tup, mode='output', idx=-1)\n else:\n tree = Tree(token_id, max_node=self.max_node, max_output_node=self.max_output_node)\n tree.put(tup, mode='output', idx=-1)\n self.mem[token_id] = tree\n self._update_trees.add(tree)\n if not final:\n self._output_ids[idx] = output_ids[ts - branch_length:]\n if final:\n self._output_ids[idx] = []\n self.reset_input_freqs()\n self.squeeze_branch_counts()\n\n def hier_get(self, token_ids, decoding_length=64, branch_length=8, min_input_size=0, min_output_size=0, mode='mix',\n idx=0):\n assert mode in ('input', 'output', 'mix')\n\n decoding_masks = self.default_mask\n if decoding_length <= 1 or branch_length == 0:\n return token_ids[-1:], decoding_masks, []\n\n decoding_ids = None\n sizes = [0, 0]\n match_count = len(token_ids)\n for i, t in enumerate(token_ids):\n tree = self.mem.get(t, None)\n if tree is not None:\n ids = token_ids[i + 1:]\n if t in self.stop_words and len(ids) == 0:\n continue\n decoding_ids, decoding_masks, sizes = tree.get(ids,\n max_size=decoding_length,\n max_length=branch_length,\n min_input_size=min_input_size,\n min_output_size=min_output_size,\n mode=mode,\n idx=idx)\n s = len(decoding_ids)\n match_count = len(token_ids) - i\n # token count is enough, not need retrieve again\n if s >= branch_length:\n break\n\n if decoding_ids is None:\n decoding_ids = token_ids[-1:]\n\n return decoding_ids, decoding_masks, sizes\n\n def par_get(self, token_ids, decoding_length=16, branch_length=8, min_input_size=0, 
min_output_size=0, mode='mix',\n idx=0):\n\n output_ids, decoding_masks, decoding_lengths = self.trie_get(token_ids,\n decoding_length=decoding_length,\n branch_length=branch_length,\n min_input_size=min_input_size,\n min_output_size=min_output_size,\n mode=mode,\n idx=idx)\n sets = []\n true_decoding_length = len(output_ids) - 1\n for i in range(true_decoding_length, 0, -1):\n indices, = np.nonzero(decoding_masks[i, 1:])\n indices = set(indices)\n flag = True\n for ss in sets:\n if len(indices - ss) == 0:\n flag = False\n break\n if flag:\n sets.append(indices)\n\n sets.reverse()\n count = 0\n max_decoding_length = true_decoding_length\n branches = []\n for indices in sets:\n indices = sorted(list(indices))\n rest_count = max_decoding_length - count\n indices = indices[:rest_count]\n count += len(indices)\n branch = []\n for i in indices:\n branch.append(output_ids[i + 1])\n branches.append(branch)\n if count >= max_decoding_length:\n break\n ids = [output_ids[0]]\n masks = np.tril(np.ones((count + 1, count + 1)), 0)\n count = 1\n for branch in branches:\n ids.extend(branch)\n length = len(branch)\n masks[count:count + length, 1:count] = 0\n count += length\n\n return ids, masks, [count - 1]\n\n def one_get(self, token_ids, decoding_length=64, branch_length=8, min_input_size=0, min_output_size=0, mode='mix',\n idx=0):\n assert mode in ('input', 'output', 'mix')\n\n decoding_masks = self.default_mask\n if decoding_length <= 1 or branch_length == 0:\n return token_ids[-1:], decoding_masks, []\n\n decoding_ids = None\n sizes = [0, 0]\n for i, t in enumerate(token_ids):\n tree = self.mem.get(t, None)\n if tree is not None:\n ids = token_ids[i + 1:]\n if t in self.stop_words and len(ids) == 0:\n continue\n decoding_ids, decoding_masks, sizes = tree.get_one_branch(ids,\n max_length=branch_length,\n mode=mode,\n idx=idx)\n s = len(decoding_ids)\n # token count is enough, not need retrieve again\n if s >= branch_length // 2:\n break\n print(f'{decoding_ids=}')\n if decoding_ids is None:\n decoding_ids = token_ids[-1:]\n\n return decoding_ids, decoding_masks, sizes\n\n def bat_get(self, token_id_list, decoding_length=64, branch_length=8, decoding_cursors=None, mode='output',\n indices=None, decoding_mode='hier'):\n assert mode in ('input', 'output', 'mix')\n assert decoding_mode in ('hier', 'one')\n bs = len(token_id_list)\n assert bs == len(decoding_cursors) and bs == len(indices), f'{bs=} {len(decoding_cursors)=} {len(indices)=}'\n\n decoding_id_list = []\n decoding_mask_list = []\n size_list = []\n\n min_cur = min(decoding_cursors)\n max_cur = max(decoding_cursors)\n bs = len(decoding_cursors)\n for sub_idx, token_ids in enumerate(token_id_list):\n update_decoding_length = decoding_length // bs\n min_input_size = 0\n min_output_size = max(update_decoding_length // 2, 1)\n method_name = decoding_mode + '_get'\n decoding_ids, decoding_masks, sizes = getattr(self, method_name)(token_ids,\n decoding_length=update_decoding_length,\n branch_length=branch_length,\n min_input_size=min_input_size,\n min_output_size=min_output_size,\n mode=mode,\n idx=indices[sub_idx])\n decoding_id_list.append(decoding_ids)\n decoding_mask_list.append(decoding_masks)\n size_list.append(sizes)\n\n bs = len(token_id_list)\n max_size = max([len(x) for x in decoding_id_list])\n\n decoding_masks = np.zeros((bs, max_size, max_cur - min_cur + max_size), dtype=np.int64)\n for i, decoding_ids in enumerate(decoding_id_list):\n org_size = len(decoding_ids)\n gap = max_size - org_size\n if gap > 0:\n decoding_ids.extend([self.eos] 
* gap)\n cur = decoding_cursors[i]\n decoding_masks[i, :org_size, cur - min_cur:cur - min_cur + org_size] = decoding_mask_list[i]\n decoding_masks[i, :, :cur - min_cur + 1] = 1\n return decoding_id_list, decoding_masks, size_list\n\n def fresh(self):\n self.mem = {}\n\n def reset_input_freqs(self):\n if len(self._update_input_trees) > 0:\n for c in self._update_input_trees:\n c.reset_input_freq()\n self._update_input_trees.clear()\n\n def squeeze_branch_counts(self):\n if len(self._update_trees) >= 1024:\n for c in self._update_trees:\n c.squeeze()\n self._update_trees.clear()\n\n def save_mem(self, save_mem_dir):\n cache_mem = self.mem\n serialized_object = pickle.dumps(cache_mem)\n json_string = json.dumps(serialized_object.decode('latin-1'))\n with open(save_mem_dir, 'w') as f:\n json.dump(json_string, f)\n\n def load_mem(self, load_mem_dir):\n with open(load_mem_dir, 'r') as f:\n json_string = json.load(f)\n deserialized_object = pickle.loads(json.loads(json_string).encode('latin-1'))\n cache_mem = deserialized_object\n self.mem = cache_mem" }, { "identifier": "GenerationMode", "path": "pia/lookahead/common/lookahead_generation_utils.py", "snippet": "class GenerationMode(ExplicitEnum):\n \"\"\"\n Possible generation modes, downstream of the [`~generation.GenerationMixin.generate`] method.\n \"\"\"\n\n # Non-beam methods\n CONTRASTIVE_SEARCH = \"contrastive_search\"\n GREEDY_SEARCH = \"greedy_search\"\n LOOKAHEAD_GENERATION = \"lookahead_generation\"\n SAMPLE = \"sample\"\n ASSISTED_GENERATION = \"assisted_generation\"\n # Beam methods\n BEAM_SEARCH = \"beam_search\"\n BEAM_SAMPLE = \"beam_sample\"\n CONSTRAINED_BEAM_SEARCH = \"constrained_beam_search\"\n GROUP_BEAM_SEARCH = \"group_beam_search\"" }, { "identifier": "LookaheadDecoderOnlyOutput", "path": "pia/lookahead/common/lookahead_generation_utils.py", "snippet": "class LookaheadDecoderOnlyOutput(ModelOutput):\n \"\"\"\n Base class for outputs of decoder-only generation models using greedy search.\n\n\n Args:\n sequences (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter\n if all batches finished early due to the `eos_token_id`.\n scores (`tuple(torch.FloatTensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):\n Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax)\n at each generation step. 
Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for\n each generated token), with each tensor of shape `(batch_size, config.vocab_size)`.\n attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):\n Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of\n `torch.FloatTensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`.\n hidden_states (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of\n `torch.FloatTensor` of shape `(batch_size, generated_length, hidden_size)`.\n \"\"\"\n\n sequences: torch.LongTensor = None\n scores: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None\n hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None\n kwargs: Optional[Dict] = None" } ]
import copy import inspect import time import warnings import numpy as np import torch import torch.distributed as dist from typing import Any, Callable, Dict, List, Optional, Tuple, Union from torch import nn from transformers import PreTrainedModel from transformers.generation.beam_constraints import DisjunctiveConstraint, PhrasalConstraint from transformers.generation.beam_search import BeamSearchScorer, ConstrainedBeamSearchScorer from transformers.generation.logits_process import ( LogitsProcessorList, MinLengthLogitsProcessor, ) from transformers.generation.stopping_criteria import ( MaxLengthCriteria, StoppingCriteriaList, validate_stopping_criteria, ) from transformers.generation.utils import ( GreedySearchEncoderDecoderOutput, GreedySearchDecoderOnlyOutput) from transformers.generation.utils import ( GreedySearchOutput, GenerateOutput) from transformers.utils import ModelOutput, logging from transformers.generation.configuration_utils import GenerationConfig from pia.lookahead.common.lookahead_cache import LookaheadCache from pia.lookahead.common.lookahead_generation_utils import GenerationMode, LookaheadDecoderOnlyOutput
token_num: 3,574
# -*- coding: utf-8 -*- """ Copyright (c) Ant Financial Service Group and its affiliates. """ from __future__ import print_function # from transformers.integrations.deepspeed import is_deepspeed_zero3_enabled logger = logging.get_logger(__name__) class LookaheadPreTrainedModel(PreTrainedModel): _batch_generation = False _stream_generation = False def __init__(self, config): super().__init__(config=config) def _get_generation_mode( self, generation_config: GenerationConfig, assistant_model: Optional["PreTrainedModel"]
# -*- coding: utf-8 -*- """ Copyright (c) Ant Financial Service Group and its affiliates. """ from __future__ import print_function # from transformers.integrations.deepspeed import is_deepspeed_zero3_enabled logger = logging.get_logger(__name__) class LookaheadPreTrainedModel(PreTrainedModel): _batch_generation = False _stream_generation = False def __init__(self, config): super().__init__(config=config) def _get_generation_mode( self, generation_config: GenerationConfig, assistant_model: Optional["PreTrainedModel"]
next_line: ) -> GenerationMode:
gold_snippet_index: 1
created_at: 2023-12-19 13:11:38+00:00
level: 4k
repo_name: Hammour-steak/GOUB
file_path: codes/models/modules/DenoisingUNet_arch.py
[ { "identifier": "SinusoidalPosEmb", "path": "codes/models/modules/module_util.py", "snippet": "class SinusoidalPosEmb(nn.Module):\n def __init__(self, dim):\n super().__init__()\n self.dim = dim\n\n def forward(self, x):\n device = x.device\n half_dim = self.dim // 2\n emb = math.log(10000) / (half_dim - 1)\n emb = torch.exp(torch.arange(half_dim, device=device) * -emb)\n emb = x[:, None] * emb[None, :]\n emb = torch.cat((emb.sin(), emb.cos()), dim=-1)\n return emb" }, { "identifier": "RandomOrLearnedSinusoidalPosEmb", "path": "codes/models/modules/module_util.py", "snippet": "class RandomOrLearnedSinusoidalPosEmb(nn.Module):\n \"\"\" following @crowsonkb 's lead with random (learned optional) sinusoidal pos emb \"\"\"\n \"\"\" https://github.com/crowsonkb/v-diffusion-jax/blob/master/diffusion/models/danbooru_128.py#L8 \"\"\"\n\n def __init__(self, dim, is_random = False):\n super().__init__()\n assert (dim % 2) == 0\n half_dim = dim // 2\n self.weights = nn.Parameter(torch.randn(half_dim), requires_grad = not is_random)\n\n def forward(self, x):\n x = rearrange(x, 'b -> b 1')\n freqs = x * rearrange(self.weights, 'd -> 1 d') * 2 * math.pi\n fouriered = torch.cat((freqs.sin(), freqs.cos()), dim = -1)\n fouriered = torch.cat((x, fouriered), dim = -1)\n return fouriered" }, { "identifier": "NonLinearity", "path": "codes/models/modules/module_util.py", "snippet": "def NonLinearity(inplace=False):\n return nn.SiLU(inplace)" }, { "identifier": "Upsample", "path": "codes/models/modules/module_util.py", "snippet": "def Upsample(dim, dim_out=None):\n return nn.Sequential(\n nn.Upsample(scale_factor=2, mode='nearest'),\n nn.Conv2d(dim, default(dim_out, dim), 3, 1, 1)\n )" }, { "identifier": "Downsample", "path": "codes/models/modules/module_util.py", "snippet": "def Downsample(dim, dim_out=None):\n return nn.Conv2d(dim, default(dim_out, dim), 4, 2, 1)" }, { "identifier": "default_conv", "path": "codes/models/modules/module_util.py", "snippet": "def default_conv(dim_in, dim_out, kernel_size=3, bias=False):\n return nn.Conv2d(dim_in, dim_out, kernel_size, padding=(kernel_size//2), bias=bias)" }, { "identifier": "ResBlock", "path": "codes/models/modules/module_util.py", "snippet": "class ResBlock(nn.Module):\n def __init__(self, conv, dim_in, dim_out, time_emb_dim=None, act=NonLinearity()):\n super(ResBlock, self).__init__()\n self.mlp = nn.Sequential(\n act, nn.Linear(time_emb_dim, dim_out * 2)\n ) if time_emb_dim else None\n\n self.block1 = Block(conv, dim_in, dim_out, act)\n self.block2 = Block(conv, dim_out, dim_out, act)\n self.res_conv = conv(dim_in, dim_out, 1) if dim_in != dim_out else nn.Identity()\n\n def forward(self, x, time_emb=None):\n scale_shift = None\n if exists(self.mlp) and exists(time_emb):\n time_emb = self.mlp(time_emb)\n time_emb = rearrange(time_emb, 'b c -> b c 1 1')\n scale_shift = time_emb.chunk(2, dim=1)\n\n h = self.block1(x, scale_shift=scale_shift)\n h = self.block2(h)\n\n return h + self.res_conv(x)" }, { "identifier": "Upsampler", "path": "codes/models/modules/module_util.py", "snippet": "class Upsampler(nn.Sequential):\n def __init__(self, conv, scale, n_feats, bn=False, act=False, bias=True):\n\n m = []\n if (scale & (scale - 1)) == 0: # Is scale = 2^n?\n for _ in range(int(math.log(scale, 2))):\n m.append(conv(n_feats, 4 * n_feats, 3, bias))\n m.append(nn.PixelShuffle(2))\n if bn:\n m.append(nn.BatchNorm2d(n_feats))\n if act == 'relu':\n m.append(nn.ReLU(True))\n elif act == 'prelu':\n m.append(nn.PReLU(n_feats))\n\n elif scale == 3:\n m.append(conv(n_feats, 9 * 
n_feats, 3, bias))\n m.append(nn.PixelShuffle(3))\n if bn:\n m.append(nn.BatchNorm2d(n_feats))\n if act == 'relu':\n m.append(nn.ReLU(True))\n elif act == 'prelu':\n m.append(nn.PReLU(n_feats))\n else:\n raise NotImplementedError\n\n super(Upsampler, self).__init__(*m)" }, { "identifier": "LinearAttention", "path": "codes/models/modules/module_util.py", "snippet": "class LinearAttention(nn.Module):\n def __init__(self, dim, heads=4, dim_head=32):\n super().__init__()\n self.scale = dim_head ** -0.5\n self.heads = heads\n hidden_dim = dim_head * heads\n\n self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias=False)\n self.to_out = nn.Sequential(\n nn.Conv2d(hidden_dim, dim, 1),\n LayerNorm(dim)\n )\n\n def forward(self, x):\n b, c, h, w = x.shape\n qkv = self.to_qkv(x).chunk(3, dim = 1)\n q, k, v = map(lambda t: rearrange(t, 'b (h c) x y -> b h c (x y)', h=self.heads), qkv)\n\n q = q.softmax(dim=-2)\n k = k.softmax(dim=-1)\n\n q = q * self.scale\n v = v / (h * w)\n\n context = torch.einsum('b h d n, b h e n -> b h d e', k, v)\n\n out = torch.einsum('b h d e, b h d n -> b h e n', context, q)\n out = rearrange(out, 'b h c (x y) -> b (h c) x y', h=self.heads, x=h, y=w)\n return self.to_out(out)" }, { "identifier": "Attention", "path": "codes/models/modules/module_util.py", "snippet": "class Attention(nn.Module):\n def __init__(self, dim, heads=4, dim_head=32):\n super().__init__()\n self.scale = dim_head ** -0.5\n self.heads = heads\n hidden_dim = dim_head * heads\n\n self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias=False)\n self.to_out = nn.Conv2d(hidden_dim, dim, 1)\n\n def forward(self, x):\n b, c, h, w = x.shape\n qkv = self.to_qkv(x).chunk(3, dim=1)\n q, k, v = map(lambda t: rearrange(t, 'b (h c) x y -> b h c (x y)', h=self.heads), qkv)\n\n q = q * self.scale\n\n sim = torch.einsum('b h d i, b h d j -> b h i j', q, k)\n attn = sim.softmax(dim=-1)\n out = torch.einsum('b h i j, b h d j -> b h i d', attn, v)\n\n out = rearrange(out, 'b h (x y) d -> b (h d) x y', x=h, y=w)\n return self.to_out(out)" }, { "identifier": "PreNorm", "path": "codes/models/modules/module_util.py", "snippet": "class PreNorm(nn.Module):\n def __init__(self, dim, fn):\n super().__init__()\n self.fn = fn\n self.norm = LayerNorm(dim)\n\n def forward(self, x):\n x = self.norm(x)\n return self.fn(x)" }, { "identifier": "Residual", "path": "codes/models/modules/module_util.py", "snippet": "class Residual(nn.Module):\n def __init__(self, fn):\n super().__init__()\n self.fn = fn\n\n def forward(self, x, *args, **kwargs):\n return self.fn(x, *args, **kwargs) + x" } ]
import torch import torch.nn as nn import torch.nn.functional as F import math import functools from .module_util import ( SinusoidalPosEmb, RandomOrLearnedSinusoidalPosEmb, NonLinearity, Upsample, Downsample, default_conv, ResBlock, Upsampler, LinearAttention, Attention, PreNorm, Residual)
token_num: 2,537
class ConditionalUNet(nn.Module): def __init__(self, in_nc, out_nc, nf, depth=4, upscale=1): super().__init__() self.depth = depth self.upscale = upscale # not used block_class = functools.partial(ResBlock, conv=default_conv, act=NonLinearity()) self.init_conv = default_conv(in_nc*2, nf, 7) # time embeddings time_dim = nf * 4 self.random_or_learned_sinusoidal_cond = False if self.random_or_learned_sinusoidal_cond: learned_sinusoidal_dim = 16 sinu_pos_emb = RandomOrLearnedSinusoidalPosEmb(learned_sinusoidal_dim, False) fourier_dim = learned_sinusoidal_dim + 1 else: sinu_pos_emb = SinusoidalPosEmb(nf) fourier_dim = nf self.time_mlp = nn.Sequential( sinu_pos_emb, nn.Linear(fourier_dim, time_dim), nn.GELU(), nn.Linear(time_dim, time_dim) ) # layers self.downs = nn.ModuleList([]) self.ups = nn.ModuleList([]) for i in range(depth): dim_in = nf * int(math.pow(2, i)) dim_out = nf * int(math.pow(2, i+1)) self.downs.append(nn.ModuleList([ block_class(dim_in=dim_in, dim_out=dim_in, time_emb_dim=time_dim), block_class(dim_in=dim_in, dim_out=dim_in, time_emb_dim=time_dim), Residual(PreNorm(dim_in, LinearAttention(dim_in))), Downsample(dim_in, dim_out) if i != (depth-1) else default_conv(dim_in, dim_out) ])) self.ups.insert(0, nn.ModuleList([ block_class(dim_in=dim_out + dim_in, dim_out=dim_out, time_emb_dim=time_dim), block_class(dim_in=dim_out + dim_in, dim_out=dim_out, time_emb_dim=time_dim), Residual(PreNorm(dim_out, LinearAttention(dim_out))),
class ConditionalUNet(nn.Module): def __init__(self, in_nc, out_nc, nf, depth=4, upscale=1): super().__init__() self.depth = depth self.upscale = upscale # not used block_class = functools.partial(ResBlock, conv=default_conv, act=NonLinearity()) self.init_conv = default_conv(in_nc*2, nf, 7) # time embeddings time_dim = nf * 4 self.random_or_learned_sinusoidal_cond = False if self.random_or_learned_sinusoidal_cond: learned_sinusoidal_dim = 16 sinu_pos_emb = RandomOrLearnedSinusoidalPosEmb(learned_sinusoidal_dim, False) fourier_dim = learned_sinusoidal_dim + 1 else: sinu_pos_emb = SinusoidalPosEmb(nf) fourier_dim = nf self.time_mlp = nn.Sequential( sinu_pos_emb, nn.Linear(fourier_dim, time_dim), nn.GELU(), nn.Linear(time_dim, time_dim) ) # layers self.downs = nn.ModuleList([]) self.ups = nn.ModuleList([]) for i in range(depth): dim_in = nf * int(math.pow(2, i)) dim_out = nf * int(math.pow(2, i+1)) self.downs.append(nn.ModuleList([ block_class(dim_in=dim_in, dim_out=dim_in, time_emb_dim=time_dim), block_class(dim_in=dim_in, dim_out=dim_in, time_emb_dim=time_dim), Residual(PreNorm(dim_in, LinearAttention(dim_in))), Downsample(dim_in, dim_out) if i != (depth-1) else default_conv(dim_in, dim_out) ])) self.ups.insert(0, nn.ModuleList([ block_class(dim_in=dim_out + dim_in, dim_out=dim_out, time_emb_dim=time_dim), block_class(dim_in=dim_out + dim_in, dim_out=dim_out, time_emb_dim=time_dim), Residual(PreNorm(dim_out, LinearAttention(dim_out))),
next_line: Upsample(dim_out, dim_in) if i!=0 else default_conv(dim_out, dim_in)
gold_snippet_index: 3
created_at: 2023-12-15 09:40:18+00:00
level: 4k
repo_name: eldar-eln-bigabid/airflow-aerospike-provider
file_path: tests/operators/test_aerospike.py
[ { "identifier": "AerospikeGetKeyOperator", "path": "aerospike_provider/operators/aerospike.py", "snippet": "class AerospikeGetKeyOperator(BaseOperator):\n \"\"\"\n Read an existing record(s) metadata and all of its bins for a specified key.\n\n :param namespace: namespace to use in aerospike db\n :param set: set name in the namespace\n :param key: key to get and return. can be a single key or a list of keys\n :param policy: which policy the key should be saved with. default `POLICY_KEY_SEND`\n :param aerospike_conn_id: aerospike connection to use, defaults to 'aerospike_default'\n \"\"\"\n\n template_fields: Sequence[str] = (\"key\",)\n template_ext: Sequence[str] = ()\n ui_color = \"#66c3ff\"\n\n def __init__(\n self,\n namespace: str,\n set: str,\n key: Union[List[str], str],\n policy: dict = {'key': aerospike.POLICY_KEY_SEND},\n aerospike_conn_id: str = \"aerospike_default\",\n **kwargs: Any,\n ) -> None:\n super().__init__(**kwargs)\n self.key = key\n self.namespace = namespace\n self.set = set\n self.key = key\n self.policy = policy\n self.aerospike_conn_id = aerospike_conn_id\n\n def execute(self, context: Context) -> list:\n with AerospikeHook(self.aerospike_conn_id) as hook:\n self.log.info('Fetching key')\n records = hook.get_record(key=self.key, namespace=self.namespace, set=self.set, policy=self.policy)\n parsed_records = self.parse_records(records=records)\n self.log.info('Got %s records', len(parsed_records))\n return parsed_records\n\n def parse_records(self, records: Union[List, tuple]) -> list:\n # Removing the `bytearray` object from records since object of type bytearray is not JSON serializable for Xcom.\n if isinstance(records, list):\n data = list(map(self.create_dict_from_record, records))\n elif isinstance(records, tuple):\n data = [self.create_dict_from_record(record=records)]\n else:\n raise ValueError(f\"Expecting 'list' or 'tuple', got: {type(records)}\")\n return data\n\n @staticmethod\n def create_dict_from_record(record: tuple) -> dict:\n try:\n return {\n \"namespace\": record[0][0],\n \"set\": record[0][1],\n \"key\": record[0][2],\n \"metadata\": record[1],\n \"bins\": record[2]\n }\n except IndexError:\n # Handling an error when there are no 'bins' the data\n return {\n \"namespace\": record[0][0],\n \"set\": record[0][1],\n \"key\": record[0][2],\n \"metadata\": record[1]\n }" }, { "identifier": "AerospikePutKeyOperator", "path": "aerospike_provider/operators/aerospike.py", "snippet": "class AerospikePutKeyOperator(BaseOperator):\n \"\"\"\n Create a new record, add or remove bins.\n\n This can also remove a record (if exists) using ` `{\"bin\": aerospuke.null() }`` if it's the last bin.\n\n :param key: key to save in the db.\n :param namespace: namespace to use in aerospike db\n :param set: set name in the namespace\n :param bins: bins name and data saved along with a key as key values. For example: `{\"bin\": value}`\n :param metadata: metadata about the key eg. ttl. For example: `{\"ttl\": 0}`\n :param policy: which policy the key should be saved with. default `POLICY_EXISTS_IGNORE`. 
ref: https://developer.aerospike.com/client/usage/atomic/update#policies\n :param aerospike_conn_id: aerospike connection to use, defaults to 'aerospike_default'\n \"\"\"\n\n template_fields: Sequence[str] = (\"key\", \"bins\", \"metadata\", )\n template_ext: Sequence[str] = ()\n ui_color = \"#66c3ff\"\n\n def __init__(\n self,\n namespace: str,\n set: str,\n key: str,\n bins: dict,\n metadata: Union[dict, Any] = None,\n policy: Dict[str, Any] = {'key': aerospike.POLICY_EXISTS_IGNORE},\n aerospike_conn_id: str = \"aerospike_default\",\n **kwargs: Any,\n ) -> None:\n super().__init__(**kwargs)\n self.key = key\n self.namespace = namespace\n self.set = set\n self.key = key\n self.bins = bins\n self.metadata = metadata\n self.policy = policy\n self.aerospike_conn_id = aerospike_conn_id\n\n def execute(self, context: Context) -> None:\n with AerospikeHook(self.aerospike_conn_id) as hook:\n self.log.info('Storing %s as key', self.key)\n hook.put(key=self.key, bins=self.bins, metadata=self.metadata, namespace=self.namespace, set=self.set, policy=self.policy)\n self.log.info('Stored key successfully')" } ]
import unittest import aerospike from unittest.mock import patch, Mock from aerospike_provider.operators.aerospike import AerospikeGetKeyOperator, AerospikePutKeyOperator
token_num: 2,118
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. class TestAerospikeGetKeyOperator(unittest.TestCase): def setUp(self): self.namespace = 'test_namespace' self.set = 'test_set' self.key = 'test_key' self.policy = { aerospike.POLICY_KEY_SEND } self.task_id = 'test_task' self.metadata = {'ttl': 1000, 'gen': 4} self.bins = {'name': 'Aerospike Test', 'version': "1.0.0"} self.operator = AerospikeGetKeyOperator( namespace=self.namespace, set=self.set, key=self.key, policy=self.policy, task_id=self.task_id ) @patch('aerospike_provider.hooks.aerospike.AerospikeHook.get_conn') def test_execute(self, mock_hock_conn): mock_hock_conn.return_value = Mock() self.operator.parse_records = Mock() self.operator.parse_records.return_value = [1] self.operator.execute({}) mock_hock_conn.return_value.get_record.assert_called_once_with( namespace='test_namespace', set='test_set', key='test_key', policy={ aerospike.POLICY_KEY_SEND } ) def test_parse_records_as_tuple(self): mock = ( (self.namespace, self.set, self.key), self.metadata, self.bins) mock_parsed = self.operator.parse_records(records=mock) expected = [{"namespace": self.namespace, "set": self.set, "key": self.key, "metadata": self.metadata, "bins": self.bins}] assert mock_parsed == expected def test_parse_records_as_list(self): mock = [( (self.namespace, self.set, self.key), self.metadata, self.bins), ( (self.namespace, self.set, self.key), self.metadata, self.bins)] mock_parsed = self.operator.parse_records(records=mock) expected = [ {"namespace": self.namespace, "set": self.set, "key": self.key, "metadata": self.metadata, "bins": self.bins}, {"namespace": self.namespace, "set": self.set, "key": self.key, "metadata": self.metadata, "bins": self.bins} ] assert mock_parsed == expected def test_parse_records_as_exception(self): mock = {} with self.assertRaises(ValueError): self.operator.parse_records(records=mock) def test_create_dict_from_record_with_bins(self): mock = ( (self.namespace, self.set, self.key), self.metadata, self.bins) mock_result = self.operator.create_dict_from_record(record=mock) expected = {"namespace": self.namespace, "set": self.set, "key": self.key, "metadata": self.metadata, "bins": self.bins} assert mock_result == expected def test_create_dict_from_record_no_bins(self): mock = ( (self.namespace, self.set, self.key), self.metadata) mock_result = self.operator.create_dict_from_record(record=mock) expected = {"namespace": self.namespace, "set": self.set, "key": self.key, "metadata": self.metadata} assert mock_result == expected class TestAerospikePutKeyOperator(unittest.TestCase): def setUp(self):
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. class TestAerospikeGetKeyOperator(unittest.TestCase): def setUp(self): self.namespace = 'test_namespace' self.set = 'test_set' self.key = 'test_key' self.policy = { aerospike.POLICY_KEY_SEND } self.task_id = 'test_task' self.metadata = {'ttl': 1000, 'gen': 4} self.bins = {'name': 'Aerospike Test', 'version': "1.0.0"} self.operator = AerospikeGetKeyOperator( namespace=self.namespace, set=self.set, key=self.key, policy=self.policy, task_id=self.task_id ) @patch('aerospike_provider.hooks.aerospike.AerospikeHook.get_conn') def test_execute(self, mock_hock_conn): mock_hock_conn.return_value = Mock() self.operator.parse_records = Mock() self.operator.parse_records.return_value = [1] self.operator.execute({}) mock_hock_conn.return_value.get_record.assert_called_once_with( namespace='test_namespace', set='test_set', key='test_key', policy={ aerospike.POLICY_KEY_SEND } ) def test_parse_records_as_tuple(self): mock = ( (self.namespace, self.set, self.key), self.metadata, self.bins) mock_parsed = self.operator.parse_records(records=mock) expected = [{"namespace": self.namespace, "set": self.set, "key": self.key, "metadata": self.metadata, "bins": self.bins}] assert mock_parsed == expected def test_parse_records_as_list(self): mock = [( (self.namespace, self.set, self.key), self.metadata, self.bins), ( (self.namespace, self.set, self.key), self.metadata, self.bins)] mock_parsed = self.operator.parse_records(records=mock) expected = [ {"namespace": self.namespace, "set": self.set, "key": self.key, "metadata": self.metadata, "bins": self.bins}, {"namespace": self.namespace, "set": self.set, "key": self.key, "metadata": self.metadata, "bins": self.bins} ] assert mock_parsed == expected def test_parse_records_as_exception(self): mock = {} with self.assertRaises(ValueError): self.operator.parse_records(records=mock) def test_create_dict_from_record_with_bins(self): mock = ( (self.namespace, self.set, self.key), self.metadata, self.bins) mock_result = self.operator.create_dict_from_record(record=mock) expected = {"namespace": self.namespace, "set": self.set, "key": self.key, "metadata": self.metadata, "bins": self.bins} assert mock_result == expected def test_create_dict_from_record_no_bins(self): mock = ( (self.namespace, self.set, self.key), self.metadata) mock_result = self.operator.create_dict_from_record(record=mock) expected = {"namespace": self.namespace, "set": self.set, "key": self.key, "metadata": self.metadata} assert mock_result == expected class TestAerospikePutKeyOperator(unittest.TestCase): def setUp(self):
next_line: self.operator = AerospikePutKeyOperator(
gold_snippet_index: 1
created_at: 2023-12-17 18:35:36+00:00
level: 4k
repo_name: Its-Haze/league-rpc-linux
file_path: league_rpc_linux/champion.py
[ { "identifier": "Colors", "path": "league_rpc_linux/colors.py", "snippet": "class Colors:\n \"\"\"\n Dataclass, storing the different colors that is used in the program.\n \"\"\"\n\n dred = \"\\033[31m\"\n dgreen = \"\\033[32m\"\n yellow = \"\\033[33m\"\n dblue = \"\\033[34m\"\n dmagenta = \"\\033[35m\"\n dcyan = \"\\033[36m\"\n lgrey = \"\\033[37m\"\n dgray = \"\\033[90m\"\n red = \"\\033[91m\"\n green = \"\\033[92m\"\n orange = \"\\033[93m\"\n blue = \"\\033[94m\"\n magenta = \"\\033[95m\"\n cyan = \"\\033[96m\"\n white = \"\\033[97m\"\n reset = \"\\033[0m\"\n\n @property\n def logo(self) -> str:\n \"\"\"Just prints the LEAGUE rpc logo, in your favorite Terminal Emulator.\"\"\"\n\n return rf\"\"\"\n {self.yellow} _ {self.dblue} _____ _____ _____ {self.reset}\n {self.yellow} | | {self.dblue}| __ \\| __ \\ / ____|{self.reset}\n {self.yellow} | | ___ __ _ __ _ _ _ ___ {self.dblue}| |__) | |__) | | {self.reset}\n {self.yellow} | | / _ \\/ _` |/ _` | | | |/ _ \\ {self.dblue}| _ /| ___/| | {self.reset}\n {self.yellow} | |___| __/ (_| | (_| | |_| | __/ {self.dblue}| | \\ \\| | | |____ {self.reset}\n {self.yellow} |______\\___|\\__,_|\\__, |\\__,_|\\___| {self.dblue}|_| \\_\\_| \\_____|{self.reset}\n {self.yellow} __/ | {self.reset}\n {self.yellow} |___/ {self.reset}\n \"\"\"" }, { "identifier": "ALL_GAME_DATA_URL", "path": "league_rpc_linux/const.py", "snippet": "ALL_GAME_DATA_URL = \"https://127.0.0.1:2999/liveclientdata/allgamedata\"" }, { "identifier": "BASE_SKIN_URL", "path": "league_rpc_linux/const.py", "snippet": "BASE_SKIN_URL = \"https://ddragon.leagueoflegends.com/cdn/img/champion/tiles/\"" }, { "identifier": "CHAMPION_NAME_CONVERT_MAP", "path": "league_rpc_linux/const.py", "snippet": "CHAMPION_NAME_CONVERT_MAP = {\n \"AurelionSol\": \"Aurelion Sol\",\n \"Belveth\": \"Bel'Veth\",\n \"Chogath\": \"Cho'Gath\",\n \"DrMundo\": \"Dr. 
Mundo\",\n \"JarvanIV\": \"Jarvan IV\",\n \"Kaisa\": \"Kai'Sa\",\n \"Khazix\": \"Kha'Zix\",\n \"KogMaw\": \"Kog'Maw\",\n \"KSante\": \"K'Sante\",\n \"LeeSin\": \"Lee Sin\",\n \"MasterYi\": \"Master Yi\",\n \"MissFortune\": \"Miss Fortune\",\n \"Nunu\": \"Nunu & Willump\",\n \"RekSai\": \"Rek'Sai\",\n \"Renata\": \"Renata Glasc\",\n \"TahmKench\": \"Tahm Kench\",\n \"TwistedFate\": \"Twisted Fate\",\n \"Velkoz\": \"Vel'Koz\",\n \"MonkeyKing\": \"Wukong\",\n \"XinZhao\": \"Xin Zhao\",\n}" }, { "identifier": "DDRAGON_CHAMPION_DATA", "path": "league_rpc_linux/const.py", "snippet": "DDRAGON_CHAMPION_DATA = (\n \"https://ddragon.leagueoflegends.com/cdn/{version}/data/en_US/champion/{name}.json\"\n)" }, { "identifier": "GAME_MODE_CONVERT_MAP", "path": "league_rpc_linux/const.py", "snippet": "GAME_MODE_CONVERT_MAP = {\n \"PRACTICETOOL\": \"Summoner's Rift (Custom)\",\n \"ARAM\": \"Howling Abyss (ARAM)\",\n \"CLASSIC\": \"Summoner's Rift\",\n \"TUTORIAL\": \"Summoner's Rift (Tutorial)\",\n \"URF\": \"Summoner's Rift (URF)\",\n \"NEXUSBLITZ\": \"Nexux Blitz\",\n \"CHERRY\": \"Arena\",\n \"TUTORIAL_MODULE_3\": \"Summoner's Rift (Tutorial)\",\n \"TUTORIAL_MODULE_2\": \"Summoner's Rift (Tutorial)\",\n \"TUTORIAL_MODULE_1\": \"Summoner's Rift (Tutorial)\",\n}" }, { "identifier": "get_gold", "path": "league_rpc_linux/kda.py", "snippet": "def get_gold() -> int:\n \"\"\"\n Get the current gold of your game.\n \"\"\"\n response = get_current_active_player_stats()\n\n if isinstance(response, Response):\n parsed_data = response.json()\n gold = int(parsed_data[\"currentGold\"])\n\n return gold\n return 0" }, { "identifier": "get_level", "path": "league_rpc_linux/kda.py", "snippet": "def get_level() -> int:\n \"\"\"\n Get the current Level of your game.\n \"\"\"\n response = get_current_active_player_stats()\n\n if isinstance(response, Response):\n parsed_data = response.json()\n level = int(parsed_data[\"level\"])\n\n return level\n return 0" }, { "identifier": "get_latest_version", "path": "league_rpc_linux/latest_version.py", "snippet": "def get_latest_version() -> str:\n response = requests.get(url=DDRAGON_API_VERSIONS, timeout=15)\n\n data = response.json()\n latest_version = data[0]\n return latest_version" }, { "identifier": "wait_until_exists", "path": "league_rpc_linux/polling.py", "snippet": "def wait_until_exists(\n url: str,\n custom_message: str = \"\",\n expected_response_code: int = 200,\n timeout: int = 30,\n n_sleep: float | int = 5, # Not needed, but good to have.\n n_total_amount: int = 20,\n startup: int = False, # Set to True on the first time it tries to poll the local api. 
(onGameStart)\n) -> requests.Response | None:\n \"\"\"\n Polling on the local riot api until success is returned.\n \"\"\"\n\n for _ in range(n_total_amount):\n try:\n response = requests.get(url, timeout=timeout, verify=False)\n if response.status_code != expected_response_code:\n time.sleep(n_sleep)\n continue\n break\n except (\n NewConnectionError,\n ConnectionError,\n requests.exceptions.ConnectionError,\n ):\n # These errors occur either before the api has started..\n # Or when the game has ended\n if startup:\n # Make sure we continue to poll the api during the start of a game.\n time.sleep(n_sleep)\n continue\n\n # When game ends, we don't care about polling the api.\n return None\n else:\n print(custom_message)\n return None\n return response" }, { "identifier": "get_summoner_name", "path": "league_rpc_linux/username.py", "snippet": "def get_summoner_name(with_discriminator: bool = False) -> str:\n \"\"\"\n Gets the current summoner name.\n\n if with_discriminator is True, the function will return a summoners name with #EUW / #EUNE etc\n Defaults to not include it.\n\n \"\"\"\n url = \"https://127.0.0.1:2999/liveclientdata/activeplayername\"\n if response := wait_until_exists(\n url=url,\n custom_message=\"\"\"\n Summoner name could not be found.\n Contact @haze.dev on discord, or submit a ticket on Github.\n \"\"\",\n ):\n name = str(response.json())\n return name if with_discriminator else name.split(\"#\", maxsplit=1)[0]\n\n return \"\"" } ]
from http import HTTPStatus from typing import Any, Optional from league_rpc_linux.colors import Colors from league_rpc_linux.const import ( ALL_GAME_DATA_URL, BASE_SKIN_URL, CHAMPION_NAME_CONVERT_MAP, DDRAGON_CHAMPION_DATA, GAME_MODE_CONVERT_MAP, ) from league_rpc_linux.kda import get_gold, get_level from league_rpc_linux.latest_version import get_latest_version from league_rpc_linux.polling import wait_until_exists from league_rpc_linux.username import get_summoner_name import requests import urllib3
token_num: 2,322
urllib3.disable_warnings() def get_specific_champion_data(name: str) -> dict[str, Any]: response = requests.get( url=DDRAGON_CHAMPION_DATA.format_map( {"version": get_latest_version(), "name": name} ), timeout=15, ) return response.json() def gather_ingame_information() -> tuple[str, str, int, str, int, int]: """ Get the current playing champion name. """ all_game_data_url = ALL_GAME_DATA_URL your_summoner_name = get_summoner_name() champion_name: str | None = None skin_id: int | None = None skin_name: str | None = None game_mode: str | None = None # Set if the game mode was never found.. Maybe you are playing something new? level: int | None = None gold: int | None = None if response := wait_until_exists( url=all_game_data_url, custom_message="Did not find game data.. Will try again in 5 seconds", ): parsed_data = response.json() game_mode = GAME_MODE_CONVERT_MAP.get( parsed_data["gameData"]["gameMode"], parsed_data["gameData"]["gameMode"], ) if game_mode == "TFT": # If the currentGame is TFT.. gather the relevant information level = get_level() else: # If the gamemode is LEAGUE gather the relevant information. champion_name, skin_id, skin_name = gather_league_data( parsed_data=parsed_data, summoners_name=your_summoner_name ) if game_mode == "Arena": level, gold = get_level(), get_gold() print("-" * 50) if champion_name: print(
urllib3.disable_warnings() def get_specific_champion_data(name: str) -> dict[str, Any]: response = requests.get( url=DDRAGON_CHAMPION_DATA.format_map( {"version": get_latest_version(), "name": name} ), timeout=15, ) return response.json() def gather_ingame_information() -> tuple[str, str, int, str, int, int]: """ Get the current playing champion name. """ all_game_data_url = ALL_GAME_DATA_URL your_summoner_name = get_summoner_name() champion_name: str | None = None skin_id: int | None = None skin_name: str | None = None game_mode: str | None = None # Set if the game mode was never found.. Maybe you are playing something new? level: int | None = None gold: int | None = None if response := wait_until_exists( url=all_game_data_url, custom_message="Did not find game data.. Will try again in 5 seconds", ): parsed_data = response.json() game_mode = GAME_MODE_CONVERT_MAP.get( parsed_data["gameData"]["gameMode"], parsed_data["gameData"]["gameMode"], ) if game_mode == "TFT": # If the currentGame is TFT.. gather the relevant information level = get_level() else: # If the gamemode is LEAGUE gather the relevant information. champion_name, skin_id, skin_name = gather_league_data( parsed_data=parsed_data, summoners_name=your_summoner_name ) if game_mode == "Arena": level, gold = get_level(), get_gold() print("-" * 50) if champion_name: print(
next_line: f"{Colors.yellow}Champion name found {Colors.green}({CHAMPION_NAME_CONVERT_MAP.get(champion_name, champion_name)}),{Colors.yellow} continuing..{Colors.reset}"
gold_snippet_index: 3
created_at: 2023-12-15 22:21:53+00:00
level: 4k
repo_name: huahuahuage/Bert-VITS2-Speech
file_path: onnx_infer/text/cleaner.py
[ { "identifier": "symbol_to_id", "path": "onnx_infer/text/symbols.py", "snippet": "" }, { "identifier": "text_normalize", "path": "onnx_infer/text/chinese.py", "snippet": "def text_normalize(text: str):\r\n \"\"\"\r\n 替换所有阿拉伯数字为中文,同时将中文符号替换为英文符号\r\n \"\"\"\r\n # 提取文本中所有的阿拉伯数字\r\n numbers = re.findall(r\"\\d+(?:\\.?\\d+)?\", text)\r\n for number in numbers:\r\n # 将阿拉伯数字转中文小写数字一百二十三\r\n text = text.replace(number, cn2an.an2cn(number), 1)\r\n # 替换所有中文标点符号为指定的英文符号: [\"!\", \"?\", \"…\", \",\", \".\", \"'\", \"-\"]\r\n text = replace_punctuation(text)\r\n return text\r" }, { "identifier": "text_normalize", "path": "onnx_infer/text/japanese.py", "snippet": "def text_normalize(text):\r\n res = unicodedata.normalize(\"NFKC\", text)\r\n res = japanese_convert_numbers_to_words(res)\r\n # res = \"\".join([i for i in res if is_japanese_character(i)])\r\n res = replace_punctuation(res)\r\n res = res.replace(\"゙\", \"\")\r\n return res\r" }, { "identifier": "text_normalize", "path": "onnx_infer/text/english.py", "snippet": "def text_normalize(text):\r\n text = normalize_numbers(text)\r\n text = replace_punctuation(text)\r\n text = re.sub(r\"([,;.\\?\\!])([\\w])\", r\"\\1 \\2\", text)\r\n return text\r" }, { "identifier": "g2p", "path": "onnx_infer/text/chinese.py", "snippet": "def g2p(self, segments_list: List[str]):\r\n phones_list = []\r\n tones_list = []\r\n word2ph = []\r\n for seg in segments_list:\r\n seg_cut = psg.lcut(seg)\r\n initials = []\r\n finals = []\r\n seg_cut = self.tone_modifier.pre_merge_for_modify(seg_cut)\r\n for word, pos in seg_cut:\r\n if pos == \"eng\":\r\n continue\r\n sub_initials, sub_finals = self.__get_initials_finals(word)\r\n sub_finals = self.tone_modifier.modified_tone(word, pos, sub_finals)\r\n initials.append(sub_initials)\r\n finals.append(sub_finals)\r\n\r\n # assert len(sub_initials) == len(sub_finals) == len(word)\r\n initials = sum(initials, [])\r\n finals = sum(finals, [])\r\n #\r\n for c, v in zip(initials, finals):\r\n raw_pinyin = c + v\r\n # NOTE: post process for pypinyin outputs\r\n # we discriminate i, ii and iii\r\n if c == v:\r\n assert c in punctuation\r\n phone = [c]\r\n tone = \"0\"\r\n word2ph.append(1)\r\n else:\r\n v_without_tone = v[:-1]\r\n tone = v[-1]\r\n\r\n pinyin = c + v_without_tone\r\n assert tone in \"12345\"\r\n\r\n if c:\r\n # 多音节\r\n v_rep_map = {\r\n \"uei\": \"ui\",\r\n \"iou\": \"iu\",\r\n \"uen\": \"un\",\r\n }\r\n if v_without_tone in v_rep_map.keys():\r\n pinyin = c + v_rep_map[v_without_tone]\r\n else:\r\n # 单音节\r\n pinyin_rep_map = {\r\n \"ing\": \"ying\",\r\n \"i\": \"yi\",\r\n \"in\": \"yin\",\r\n \"u\": \"wu\",\r\n }\r\n if pinyin in pinyin_rep_map.keys():\r\n pinyin = pinyin_rep_map[pinyin]\r\n else:\r\n single_rep_map = {\r\n \"v\": \"yu\",\r\n \"e\": \"e\",\r\n \"i\": \"y\",\r\n \"u\": \"w\",\r\n }\r\n if pinyin[0] in single_rep_map.keys():\r\n pinyin = single_rep_map[pinyin[0]] + pinyin[1:]\r\n\r\n assert pinyin in self.pinyin_to_symbol_map.keys(), (\r\n pinyin,\r\n seg,\r\n raw_pinyin,\r\n )\r\n phone = self.pinyin_to_symbol_map[pinyin].split(\" \")\r\n word2ph.append(len(phone))\r\n\r\n phones_list += phone\r\n tones_list += [int(tone)] * len(phone)\r\n return phones_list, tones_list, word2ph\r" }, { "identifier": "g2p", "path": "onnx_infer/text/japanese.py", "snippet": "def g2p(norm_text):\r\n sep_text, sep_kata, acc = text2sep_kata(norm_text)\r\n sep_tokenized = []\r\n for i in sep_text:\r\n if i not in punctuation:\r\n # print('aaaa',tokenizer.tokenize(i))\r\n # sep_tokenized.append([f\"▁{i}\"])\r\n 
sep_tokenized.append(tokenizer.tokenize(i))\r\n else:\r\n sep_tokenized.append([i])\r\n\r\n sep_phonemes = handle_long([kata2phoneme(i) for i in sep_kata])\r\n # 异常处理,MeCab不认识的词的话会一路传到这里来,然后炸掉。目前来看只有那些超级稀有的生僻词会出现这种情况\r\n for i in sep_phonemes:\r\n for j in i:\r\n assert j in symbols, (sep_text, sep_kata, sep_phonemes)\r\n tones = align_tones(sep_phonemes, acc)\r\n\r\n word2ph = []\r\n for token, phoneme in zip(sep_tokenized, sep_phonemes):\r\n phone_len = len(phoneme)\r\n word_len = len(token)\r\n\r\n aaa = distribute_phone(phone_len, word_len)\r\n word2ph += aaa\r\n phones = [\"_\"] + [j for i in sep_phonemes for j in i] + [\"_\"]\r\n # tones = [0] + rearrange_tones(tones, phones[1:-1]) + [0]\r\n tones = [0] + tones + [0]\r\n word2ph = [1] + word2ph + [1]\r\n assert len(phones) == len(tones)\r\n return phones, tones, word2ph\r" }, { "identifier": "g2p", "path": "onnx_infer/text/english.py", "snippet": "def g2p(text):\r\n phones = []\r\n tones = []\r\n # word2ph = []\r\n words = sep_text(text)\r\n # print(words)\r\n # tokens = [f\"▁{i}\" for i in words]\r\n tokens = [tokenizer.tokenize(i) for i in words]\r\n # print(tokens)\r\n for word in words:\r\n if word.upper() in eng_dict:\r\n phns, tns = refine_syllables(eng_dict[word.upper()])\r\n phones.append([post_replace_ph(i) for i in phns])\r\n tones.append(tns)\r\n # word2ph.append(len(phns))\r\n else:\r\n phone_list = list(filter(lambda p: p != \" \", _g2p(word)))\r\n phns = []\r\n tns = []\r\n for ph in phone_list:\r\n if ph in arpa:\r\n ph, tn = refine_ph(ph)\r\n phns.append(ph)\r\n tns.append(tn)\r\n else:\r\n phns.append(ph)\r\n tns.append(0)\r\n phones.append([post_replace_ph(i) for i in phns])\r\n tones.append(tns)\r\n # word2ph.append(len(phns))\r\n # phones = [post_replace_ph(i) for i in phones]\r\n\r\n word2ph = []\r\n for token, phoneme in zip(tokens, phones):\r\n phone_len = len(phoneme)\r\n word_len = len(token)\r\n\r\n aaa = distribute_phone(phone_len, word_len)\r\n word2ph += aaa\r\n\r\n phones = [\"_\"] + [j for i in phones for j in i] + [\"_\"]\r\n tones = [0] + [j for i in tones for j in i] + [0]\r\n word2ph = [1] + word2ph + [1]\r\n assert len(phones) == len(tones), text\r\n assert len(phones) == sum(word2ph), text\r\n\r\n return phones, tones, word2ph" } ]
from .symbols import symbol_to_id, language_tone_start_map, language_id_map from typing import Callable from dataclasses import dataclass from .chinese import text_normalize as zh_text_normalize from .japanese import text_normalize as jp_text_normalize from .english import text_normalize as en_text_normalize from .chinese import g2p as zh_g2p from .japanese import g2p as jp_g2p from .english import g2p as en_g2p
token_num: 2,181
# from text import cleaned_text_to_sequence @dataclass class TextNormalizeDict: """ 文本序列化 替换所有阿拉伯数字为对应语言,同时将符号替换为指定列表内的英文符号 """ ZH: Callable = zh_text_normalize JP: Callable = jp_text_normalize EN: Callable = en_text_normalize @dataclass class G2PDict: """ 文本序列化 """ ZH: Callable = zh_g2p
# from text import cleaned_text_to_sequence @dataclass class TextNormalizeDict: """ 文本序列化 替换所有阿拉伯数字为对应语言,同时将符号替换为指定列表内的英文符号 """ ZH: Callable = zh_text_normalize JP: Callable = jp_text_normalize EN: Callable = en_text_normalize @dataclass class G2PDict: """ 文本序列化 """ ZH: Callable = zh_g2p
next_line: JP: Callable = jp_g2p
gold_snippet_index: 0
created_at: 2023-12-21 13:50:50+00:00
level: 4k
repo_name: haseeb-heaven/Gemini-Vision-Pro
file_path: script.py
[ { "identifier": "Logger", "path": "libs/logger.py", "snippet": "class Logger:\n _logger = None\n\n @staticmethod\n def get_logger(file_name):\n if Logger._logger is None:\n Logger._logger = Logger._setup_logger(file_name)\n return Logger._logger\n\n @staticmethod\n def _setup_logger(file_name):\n logger = logging.getLogger(__name__)\n logger.setLevel(logging.INFO)\n\n formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(module)s - %(message)s')\n\n # Create a file handler\n file_handler = logging.FileHandler(file_name)\n file_handler.setLevel(logging.INFO)\n file_handler.setFormatter(formatter)\n\n # Create a stream handler to print log messages on the console\n stream_handler = logging.StreamHandler()\n stream_handler.setLevel(logging.INFO)\n\n # Add both handlers to the logger\n logger.addHandler(file_handler)\n logger.addHandler(stream_handler)\n\n return logger" }, { "identifier": "GeminiVision", "path": "libs/gemini_vision.py", "snippet": "class GeminiVision:\n def __init__(self,api_key=None,temperature=0.1,top_p=1,top_k=32,max_output_tokens=4096) -> None:\n self.logger = Logger.get_logger('gemini_vision_pro.log')\n self.logger.info(f\"Initializing Gemini Vision\")\n self.model = None\n self.api_key = api_key\n self.temperature = temperature\n self.top_p = top_p\n self.top_k = top_k\n self.max_output_tokens = max_output_tokens\n \n self.logger.info(f\"temperature: {self.temperature}\")\n self.logger.info(f\"top_p: {self.top_p}\")\n self.logger.info(f\"top_k: {self.top_k}\")\n self.logger.info(f\"max_output_tokens: {self.max_output_tokens}\")\n \n if self.api_key is None:\n self.logger.error(\"API key is not initialized\")\n\n # load the key from the .env file\n load_dotenv()\n api_key = os.getenv(\"GEMINI_API_KEY\")\n if not api_key:\n self.logger.error(\"No API key found in the .env file\")\n raise ValueError(\"No API key found in the .env file\")\n \n self.logger.info(f\"Gemini Vision configured success\")\n genai.configure(api_key=api_key)\n \n self.logger.info(f\"Setting up model\")\n self.setup_model()\n self.logger.info(f\"Model setup success\")\n\n def setup_model(self):\n try:\n # Set up the model\n generation_config = {\n \"temperature\": self.temperature,\n \"top_p\": self.top_p,\n \"top_k\": self.top_k,\n \"max_output_tokens\": self.max_output_tokens,\n }\n\n self.model = genai.GenerativeModel(model_name=\"gemini-pro-vision\",generation_config=generation_config)\n except Exception as e:\n self.logger.error(f\"Error setting up model: {e}\")\n raise\n\n def generate_content(self, contents):\n self.logger.info(f\"Generating contents\")\n \n # Check model and contents for errors.\n if self.model is None:\n self.logger.error(\"Model is not initialized\")\n raise ValueError(\"Model is not initialized\")\n\n if contents is None:\n self.logger.error(\"Contents is not initialized\")\n raise ValueError(\"Contents is not initialized\")\n \n # Print out the contents list for debugging\n self.logger.info(f\"Contents: {contents}\")\n \n return self.model.generate_content(contents=contents)" }, { "identifier": "SpeechToText", "path": "libs/speech.py", "snippet": "class SpeechToText:\n \"\"\"\n A class that represents a speech-to-text converter.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Initialize the recognizer and the microphone.\n \"\"\"\n self.recognizer = sr.Recognizer()\n self.microphone = sr.Microphone()\n self.logger = logging.getLogger(__name__)\n logging.basicConfig(level=logging.INFO)\n\n def listen_and_convert(self):\n \"\"\"\n Listen to the microphone and convert 
the speech to text.\n \"\"\"\n try:\n self.logger.info(\"Listening to the microphone...\")\n with self.microphone as source:\n audio = self.recognizer.listen(source)\n self.logger.info(\"Converting speech to text...\")\n text = self.recognizer.recognize_google(audio)\n self.logger.info(f\"Converted text: {text}\")\n return text\n except sr.UnknownValueError:\n self.logger.error(\"Google Speech Recognition could not understand the audio\")\n except sr.RequestError as e:\n self.logger.error(f\"Could not request results from Google Speech Recognition service: {str(e)}\")" }, { "identifier": "TextToSpeech", "path": "libs/voice.py", "snippet": "class TextToSpeech:\n \"\"\"\n A class that represents a text-to-speech converter.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Initialize the logger.\n \"\"\"\n self.logger = logging.getLogger(__name__)\n logging.basicConfig(level=logging.INFO)\n\n def speak(self, text):\n \"\"\"\n Convert the given text to speech.\n \"\"\"\n try:\n self.logger.info(f\"Speaking the text: {text}\")\n tts = gTTS(text=text, lang='en')\n tts.save(\"speech.mp3\")\n os.system(\"mpg321 speech.mp3\")\n os.remove(\"speech.mp3\")\n except Exception as exception:\n self.logger.error(f\"An error occurred while trying to speak the text: {str(exception)}\")\n raise" }, { "identifier": "ImageCV2", "path": "libs/image_cv2.py", "snippet": "class ImageCV2:\n \n def __init__(self) -> None:\n # Set up logging\n self.logger = Logger.get_logger('gemini_vision.log')\n \n def open_webcam(self):\n cap = cv2.VideoCapture(0)\n if not cap.isOpened():\n self.logger.error(\"Cannot open webcam\")\n return None\n return cap\n\n def capture_image(self, cap):\n ret, frame = cap.read()\n self.logger.info(f\"Capturing image from webcam\")\n \n if not ret:\n self.logger.error(\"Cannot capture image\")\n return None\n\n self.logger.info(f\"Converting image PIL.Image\")\n # Convert the numpy.ndarray to a PIL.Image.Image\n image = Image.fromarray(frame)\n \n self.logger.info(f\"Converting image success\")\n return image\n \n def save_image(self, image, filename):\n self.logger.info(f\"Saving image to: {filename}\")\n \n # Convert the PIL.Image.Image back to a numpy.ndarray\n frame = np.array(image)\n \n # Save the image\n cv2.imwrite(filename, frame)\n \n def capture_image_from_webcam(self,image_name):\n self.logger.info(f\"Capturing image from webcam\")\n #time.sleep(5)\n \n cap = self.open_webcam()\n time.sleep(1)\n \n if cap is None:\n self.logger.error(\"Cannot open webcam\")\n return None\n\n image = self.capture_image(cap)\n \n # Check if frame is None\n if image is None:\n self.logger.error(\"Cannot capture image\")\n return None\n \n time.sleep(1)\n \n # Save the image\n self.save_image(image, image_name)\n self.logger.info(f\"Saved image to: {image_name}\")\n\n return image\n \n def show_webcam_feed(self):\n # Open the webcam (0 is the default webcam)\n cap = cv2.VideoCapture(0)\n\n while True:\n # Capture frame-by-frame\n ret, frame = cap.read()\n\n # Display the resulting frame\n cv2.imshow('Webcam Feed', frame)\n\n # Break the loop on 'q' key press\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n # When everything is done, release the capture and destroy the window\n cap.release()\n cv2.destroyAllWindows()\n \n def stop_webcam_feed(self,interval):\n time.sleep(interval)" } ]
import streamlit as st import cv2 import io import traceback import traceback from streamlit_webrtc import webrtc_streamer, WebRtcMode, RTCConfiguration from PIL import Image from io import BytesIO from pathlib import Path from libs.logger import Logger from libs.gemini_vision import GeminiVision from libs.speech import SpeechToText from libs.voice import TextToSpeech from libs.image_cv2 import ImageCV2
token_num: 3,088
""" Description: This is the amazing Google Gemini Vision Pro. This scans the image and using Gemini AI pro vision API it generates the descrption of the image. It also uses the speech to text and text to speech to speak the prompt and display the description of the image. It also uses the webcam to capture the image and display it. Features: 1. Webcam detection using WebRTC, OpenCV and PIL 2. Speech to text using Google Cloud Speech to Text API 3. Text to speech using Google Cloud Text to Speech API 4. Image processing using Gemini AI Pro Vision API 5. Logging using Python logging module 6. Error handling using Python exception handling Modules used: 1. Streamlit - Is is the Web App framework used to build the app 2. Streamlit Webrtc - It is used to capture the image from the webcam 3. OpenCV - It is used to capture the image from the webcam 4. PIL - It is image processing library used to convert the image. 5. gTTS - It is used to convert the text to speech 6. SpeechRecognition - It is used to convert the speech to text 7. google.cloud.speech - It is used to convert the speech to text Author: HeavenHM Date: 17-12-2023 Version: 1.0 """ # Initialize session state def init_session_state(): if 'api_key' not in st.session_state: st.session_state['api_key'] = '' if 'temperature' not in st.session_state: st.session_state['temperature'] = 0.1 if 'top_k' not in st.session_state: st.session_state['top_k'] = 32 if 'top_p' not in st.session_state: st.session_state['top_p'] = 1.0 if 'captured_image' not in st.session_state: st.session_state['captured_image'] = None if 'prompt' not in st.session_state: st.session_state['prompt'] = '' if 'api_key' not in st.session_state: st.session_state['api_key'] = '' if 'captured_image' not in st.session_state: st.session_state['captured_image'] = None if 'prompt' not in st.session_state: st.session_state['prompt'] = '' if "logger" not in st.session_state: st.session_state["logger"] = None if "tts" not in st.session_state: st.session_state["tts"] = None if "stt" not in st.session_state: st.session_state["stt"] = None if "gemini_vision" not in st.session_state: st.session_state["gemini_vision"] = None if "webrtc_ctx" not in st.session_state: st.session_state["webrtc_ctx"] = None if "response" not in st.session_state: st.session_state["response"] = None # Exception handling decorator def exception_handler(func): def wrapper(*args, **kwargs): try: return func(*args, **kwargs) except Exception as exception: st.session_state.logger.error(f"An error occurred in {func.__name__}: {exception}") st.error(f"An error occurred: {exception}") st.session_state.logger.error(traceback.format_exc()) st.stop() return wrapper @exception_handler def validate_image(image_path): if not image_path.exists(): st.session_state.logger.error(f"Could not find image: {image_path}") raise FileNotFoundError(f"Could not find image: {image_path}") @exception_handler def process_image(): image_contents = [st.session_state['prompt'], st.session_state['captured_image']] st.session_state.logger.info(f"Image data is: {st.session_state['captured_image']}") response = st.session_state.gemini_vision.generate_content(image_contents) if 'error' in response: raise ValueError(f"An error occurred: {response}") else: if response.text: st.session_state.tts.speak(response.text) st.session_state.logger.info(f"Response: {response.text}") st.session_state.response = response.text @exception_handler def get_prompt_from_mic(): prompt = st.session_state.stt.listen_and_convert() return prompt @exception_handler def 
log_webrtc_context_states(webrtc_ctx): if webrtc_ctx is not None: # Log the state of the WebRTC context st.session_state.logger.info(f"WebRTC context: {webrtc_ctx}") st.session_state.logger.info(f"Is WebRTC playing: {webrtc_ctx.state.playing}") st.session_state.logger.info(f"Is audio receiver ready: {webrtc_ctx.audio_receiver}") st.session_state.logger.info(f"Is video receiver ready: {webrtc_ctx.video_receiver}") else: st.error("WebRTC context is None.") @exception_handler def capture_image(): st.session_state.logger.info("Attempting to capture image from webcam with ImageCV2...") # Capture the image from the webcam web_image = None
""" Description: This is the amazing Google Gemini Vision Pro. This scans the image and using Gemini AI pro vision API it generates the descrption of the image. It also uses the speech to text and text to speech to speak the prompt and display the description of the image. It also uses the webcam to capture the image and display it. Features: 1. Webcam detection using WebRTC, OpenCV and PIL 2. Speech to text using Google Cloud Speech to Text API 3. Text to speech using Google Cloud Text to Speech API 4. Image processing using Gemini AI Pro Vision API 5. Logging using Python logging module 6. Error handling using Python exception handling Modules used: 1. Streamlit - Is is the Web App framework used to build the app 2. Streamlit Webrtc - It is used to capture the image from the webcam 3. OpenCV - It is used to capture the image from the webcam 4. PIL - It is image processing library used to convert the image. 5. gTTS - It is used to convert the text to speech 6. SpeechRecognition - It is used to convert the speech to text 7. google.cloud.speech - It is used to convert the speech to text Author: HeavenHM Date: 17-12-2023 Version: 1.0 """ # Initialize session state def init_session_state(): if 'api_key' not in st.session_state: st.session_state['api_key'] = '' if 'temperature' not in st.session_state: st.session_state['temperature'] = 0.1 if 'top_k' not in st.session_state: st.session_state['top_k'] = 32 if 'top_p' not in st.session_state: st.session_state['top_p'] = 1.0 if 'captured_image' not in st.session_state: st.session_state['captured_image'] = None if 'prompt' not in st.session_state: st.session_state['prompt'] = '' if 'api_key' not in st.session_state: st.session_state['api_key'] = '' if 'captured_image' not in st.session_state: st.session_state['captured_image'] = None if 'prompt' not in st.session_state: st.session_state['prompt'] = '' if "logger" not in st.session_state: st.session_state["logger"] = None if "tts" not in st.session_state: st.session_state["tts"] = None if "stt" not in st.session_state: st.session_state["stt"] = None if "gemini_vision" not in st.session_state: st.session_state["gemini_vision"] = None if "webrtc_ctx" not in st.session_state: st.session_state["webrtc_ctx"] = None if "response" not in st.session_state: st.session_state["response"] = None # Exception handling decorator def exception_handler(func): def wrapper(*args, **kwargs): try: return func(*args, **kwargs) except Exception as exception: st.session_state.logger.error(f"An error occurred in {func.__name__}: {exception}") st.error(f"An error occurred: {exception}") st.session_state.logger.error(traceback.format_exc()) st.stop() return wrapper @exception_handler def validate_image(image_path): if not image_path.exists(): st.session_state.logger.error(f"Could not find image: {image_path}") raise FileNotFoundError(f"Could not find image: {image_path}") @exception_handler def process_image(): image_contents = [st.session_state['prompt'], st.session_state['captured_image']] st.session_state.logger.info(f"Image data is: {st.session_state['captured_image']}") response = st.session_state.gemini_vision.generate_content(image_contents) if 'error' in response: raise ValueError(f"An error occurred: {response}") else: if response.text: st.session_state.tts.speak(response.text) st.session_state.logger.info(f"Response: {response.text}") st.session_state.response = response.text @exception_handler def get_prompt_from_mic(): prompt = st.session_state.stt.listen_and_convert() return prompt @exception_handler def 
log_webrtc_context_states(webrtc_ctx): if webrtc_ctx is not None: # Log the state of the WebRTC context st.session_state.logger.info(f"WebRTC context: {webrtc_ctx}") st.session_state.logger.info(f"Is WebRTC playing: {webrtc_ctx.state.playing}") st.session_state.logger.info(f"Is audio receiver ready: {webrtc_ctx.audio_receiver}") st.session_state.logger.info(f"Is video receiver ready: {webrtc_ctx.video_receiver}") else: st.error("WebRTC context is None.") @exception_handler def capture_image(): st.session_state.logger.info("Attempting to capture image from webcam with ImageCV2...") # Capture the image from the webcam web_image = None
web_cam = ImageCV2()
4
2023-12-16 23:24:46+00:00
4k
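The record above pairs a Streamlit/WebRTC front end with an OpenCV-based capture step. As a hedged illustration only (the repository's ImageCV2 wrapper is not reproduced here, and the device index 0 is an assumption), a minimal sketch of grabbing a single webcam frame with OpenCV and converting it to a PIL image:

```python
# Minimal sketch: grab one frame from the default webcam and convert it to a
# PIL image, roughly the step a capture_image() helper performs before handing
# the picture to a vision model. Hypothetical; not the repository's ImageCV2.
import cv2
from PIL import Image


def capture_single_frame(device_index: int = 0) -> Image.Image:
    cap = cv2.VideoCapture(device_index)   # open the webcam (index 0 assumed)
    try:
        ok, frame = cap.read()              # frame is a BGR numpy array
        if not ok:
            raise RuntimeError("Could not read a frame from the webcam")
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)   # OpenCV is BGR, PIL wants RGB
        return Image.fromarray(rgb)
    finally:
        cap.release()


# Usage: capture_single_frame().save("web_image.jpg")
```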
jaypyles/obsidian-to-bookstack
obsidian_to_bookstack/bookstack/bookstack.py
[ { "identifier": "console", "path": "obsidian_to_bookstack/console.py", "snippet": "" }, { "identifier": "con_hash", "path": "obsidian_to_bookstack/utils.py", "snippet": "def con_hash(key: str) -> int:\n \"\"\"Get a consistent hash of a key\"\"\"\n hash_obj = hashlib.md5(key.encode())\n hex_digest = hash_obj.hexdigest()\n return int(hex_digest, 16)" }, { "identifier": "Book", "path": "obsidian_to_bookstack/bookstack/artifacts.py", "snippet": "class Book:\n def __init__(\n self,\n name: str,\n shelf: Shelf | None = None,\n client: Client | None = None,\n chapters: List = [],\n path: str = \"\",\n details: Dict = {},\n from_client: bool = True,\n ) -> None:\n self.path = path\n self.name = name\n self.client = client\n self.shelf = shelf\n self.chapters = chapters\n self.details = details\n if from_client:\n self.pages = []\n else:\n self._set_pages()\n\n def __str__(self) -> str:\n return self.name\n\n def _set_pages(self):\n pages = []\n chapters = []\n\n for item in os.listdir(self.path):\n item_path = os.path.join(self.path, item)\n if os.path.isdir(item_path):\n chapters.append(\n Chapter(\n path=os.path.join(self.path, item),\n name=item,\n client=self.client,\n shelf=self.shelf,\n book=self,\n from_client=False,\n )\n )\n else:\n if os.path.splitext(item)[1] == \".md\":\n pages.append(\n Page(\n path=os.path.join(self.path, item),\n name=item,\n client=self.client,\n shelf=self.shelf,\n book=self,\n )\n )\n\n self.pages = pages\n self.chapters = chapters" }, { "identifier": "Chapter", "path": "obsidian_to_bookstack/bookstack/artifacts.py", "snippet": "class Chapter:\n def __init__(\n self,\n name: str,\n shelf: Shelf | None = None,\n book: Book | None = None,\n client: Client | None = None,\n path: str = \"\",\n details: Dict = {},\n from_client: bool = True,\n ) -> None:\n self.path = path\n self.name = name\n self.client = client\n self.shelf = shelf\n self.book = book\n self.details = details\n if from_client:\n self.pages = []\n else:\n self._set_pages()\n\n def __str__(self) -> str:\n return self.name\n\n def _set_pages(self):\n pages = []\n for page in os.listdir(self.path):\n if os.path.splitext(page)[1] == \".md\":\n p = Page(\n path=os.path.join(self.path, page),\n name=page,\n client=self.client,\n book=self.book,\n chapter=self,\n )\n pages.append(p)\n\n self.pages = pages" }, { "identifier": "Page", "path": "obsidian_to_bookstack/bookstack/artifacts.py", "snippet": "class Page:\n def __init__(\n self,\n name: str,\n path: str = \"\",\n client: Client | None = None,\n shelf: Shelf | None = None,\n book: Book | None = None,\n chapter: Chapter | None = None,\n details: Dict = {},\n ) -> None:\n self.path = path\n self.name = name\n self.client = client\n self.content = self._get_content() if self.path else \"\"\n self.shelf = shelf\n self.book = book\n self.chapter = chapter\n self.details = details\n\n def __str__(self) -> str:\n return self.name\n\n def _get_content(self):\n with open(self.path, \"r\") as f:\n return f.read()" }, { "identifier": "Shelf", "path": "obsidian_to_bookstack/bookstack/artifacts.py", "snippet": "class Shelf:\n def __init__(\n self,\n name: str,\n client: Client | None = None,\n from_client: bool = True,\n path: str = \"\",\n details: Dict = {},\n ) -> None:\n self.path = path\n self.name = name\n self.client = client\n if from_client:\n self.books = []\n else:\n self.books = self._set_books()\n self.client_books: list[dict] = []\n self.details = details\n\n def __str__(self) -> str:\n return self.name\n\n def _set_books(self):\n books = []\n for 
book in os.listdir(self.path):\n if os.path.isdir(os.path.join(self.path, book)) and not book.startswith(\n \".\"\n ):\n b = Book(\n path=os.path.join(self.path, book),\n name=book,\n client=self.client,\n shelf=self,\n from_client=False,\n )\n books.append(b)\n\n return books" }, { "identifier": "LocalClient", "path": "obsidian_to_bookstack/bookstack/client.py", "snippet": "class LocalClient(Client):\n ..." }, { "identifier": "RemoteClient", "path": "obsidian_to_bookstack/bookstack/client.py", "snippet": "class RemoteClient(Client):\n @abstractmethod\n def __init__(self) -> None:\n super().__init__()\n self.id = os.getenv(\"BOOKSTACK_TOKEN_ID\")\n self.secret = os.getenv(\"BOOKSTACK_TOKEN_SECRET\")\n self.base_url = os.getenv(\"BOOKSTACK_BASE_URL\")\n self.headers = {\"Authorization\": f\"Token {self.id}:{self.secret}\"}\n self.http = urllib3.PoolManager()\n\n def _make_request(\n self,\n request_type: RequestType,\n endpoint: BookstackAPIEndpoints | DetailedBookstackLink,\n body=None,\n json=None,\n ) -> urllib3.BaseHTTPResponse:\n \"\"\"Make a HTTP request to a Bookstack API Endpoint\"\"\"\n\n assert self.base_url\n\n request_url = self.base_url + endpoint.value\n resp = self.http.request(\n request_type.value, request_url, headers=self.headers, body=body, json=json\n )\n return resp\n\n def _get_from_client(self, endpoint: BookstackAPIEndpoints):\n \"\"\"Make a GET request to a Bookstack API Endpoint\"\"\"\n resp = self._make_request(RequestType.GET, endpoint)\n assert resp\n\n data = json.loads(resp.data.decode())\n return data[\"data\"]" } ]
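The context above documents con_hash, an MD5-based helper that turns a string into a stable integer, and the client code in this record keys its lookup maps by hashing an item name concatenated with its parent's name. A small self-contained check of that pattern (the book and page names are made up for illustration):

```python
# Self-contained check of the consistent-hash keying used by the client maps:
# hashing name + parent name lets the same page title live under two books
# without the keys colliding. Example names are invented.
import hashlib


def con_hash(key: str) -> int:
    """Get a consistent hash of a key (same logic as the utils helper above)."""
    return int(hashlib.md5(key.encode()).hexdigest(), 16)


page_map = {
    con_hash("index.md" + "Recipes"): "page under the 'Recipes' book",
    con_hash("index.md" + "Travel"): "page under the 'Travel' book",
}

assert con_hash("index.md" + "Recipes") != con_hash("index.md" + "Travel")
assert con_hash("index.md" + "Recipes") == con_hash("index.md" + "Recipes")  # stable across runs
```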
import os
import shutil
import urllib3
from datetime import datetime, timedelta
from typing import List
from ..console import console
from ..utils import con_hash
from .artifacts import Book, Chapter, Page, Shelf
from .client import LocalClient, RemoteClient
from .collectors.local import *
from .collectors.remote import *
from .constants import *
2,469
class BookstackClient(RemoteClient): """Represents the remote Bookstack instance""" def __init__(self, verbose: bool) -> None: # if verbose is set, will issue logs super().__init__() self.verbose = verbose if self.verbose: console.log("Building remote client...") self.__set_collectors() self.__set_artifacts() self.__set_maps() def __set_collectors(self): self.shelf_collector = RemoteShelfCollector(self.verbose, self) self.book_collector = RemoteBookCollector(self.verbose, self) self.page_collector = RemotePageCollector(self.verbose, self) self.chapter_collector = RemoteChapterCollector(self.verbose, self) def __set_artifacts(self): self.shelves: List[Shelf] = self.shelf_collector.get_shelves() self.books: List[Book] = self.book_collector.get_books(self.shelves) self.pages: List[Page] = self.page_collector.get_pages(self.books) self.chapters: List[Chapter] = self.chapter_collector.get_chapters(self.books) def __set_maps(self): self.shelf_map = self._build_shelf_map() self.book_map = self._build_book_map() self.page_map = self._build_page_map() self.chapter_map = self._build_chapter_map() def _refresh(self): """Simply update the client""" self.http = urllib3.PoolManager() self.__set_collectors() self.__set_artifacts() self.__set_maps() def _build_shelf_map(self): """Build a map of all client shelves""" return {con_hash(shelf.name): shelf for shelf in self.shelves} def _build_book_map(self): """Build a map of all client books""" book_map = {} for book in self.books: if book.shelf: book_map[con_hash(book.name + book.shelf.name)] = book else: book_map[con_hash(book.name)] = book return book_map def _build_page_map(self): """Build a map of all client pages""" page_map = {} for page in self.pages: if page.chapter and page.book: page_map[ con_hash(page.name + page.book.name + page.chapter.name) ] = page elif page.book: page_map[con_hash(page.name + page.book.name)] = page else: page_map[con_hash(page.name)] = page return page_map def _build_chapter_map(self): """Build a map of all client chapters""" page_map = {} for chapter in self.chapters: if chapter.book: page_map[con_hash(chapter.name + chapter.book.name)] = chapter return page_map def _get_temp_book_map(self): """Get books from the client, but don't add to the client""" books = self._get_from_client(BookstackAPIEndpoints.BOOKS) return {book["name"]: book["id"] for book in books} def _retrieve_from_client_map(self, obj: Page | Shelf | Book | Chapter): """Retrieve the client version of the local object""" if isinstance(obj, Page): name = os.path.splitext(obj.name)[0] if obj.chapter and obj.book: return self.page_map[con_hash(name + obj.book.name + obj.chapter.name)] return ( self.page_map[con_hash(name + obj.book.name)] if obj.book else self.page_map[con_hash(name)] ) if isinstance(obj, Book): return ( self.book_map[con_hash(obj.name + obj.shelf.name)] if obj.shelf else self.book_map[con_hash(obj.name)] ) if isinstance(obj, Shelf): return self.shelf_map[con_hash(obj.name)] if isinstance(obj, Chapter): return self.chapter_map[con_hash(obj.name + obj.book.name)]
class BookstackClient(RemoteClient): """Represents the remote Bookstack instance""" def __init__(self, verbose: bool) -> None: # if verbose is set, will issue logs super().__init__() self.verbose = verbose if self.verbose: console.log("Building remote client...") self.__set_collectors() self.__set_artifacts() self.__set_maps() def __set_collectors(self): self.shelf_collector = RemoteShelfCollector(self.verbose, self) self.book_collector = RemoteBookCollector(self.verbose, self) self.page_collector = RemotePageCollector(self.verbose, self) self.chapter_collector = RemoteChapterCollector(self.verbose, self) def __set_artifacts(self): self.shelves: List[Shelf] = self.shelf_collector.get_shelves() self.books: List[Book] = self.book_collector.get_books(self.shelves) self.pages: List[Page] = self.page_collector.get_pages(self.books) self.chapters: List[Chapter] = self.chapter_collector.get_chapters(self.books) def __set_maps(self): self.shelf_map = self._build_shelf_map() self.book_map = self._build_book_map() self.page_map = self._build_page_map() self.chapter_map = self._build_chapter_map() def _refresh(self): """Simply update the client""" self.http = urllib3.PoolManager() self.__set_collectors() self.__set_artifacts() self.__set_maps() def _build_shelf_map(self): """Build a map of all client shelves""" return {con_hash(shelf.name): shelf for shelf in self.shelves} def _build_book_map(self): """Build a map of all client books""" book_map = {} for book in self.books: if book.shelf: book_map[con_hash(book.name + book.shelf.name)] = book else: book_map[con_hash(book.name)] = book return book_map def _build_page_map(self): """Build a map of all client pages""" page_map = {} for page in self.pages: if page.chapter and page.book: page_map[ con_hash(page.name + page.book.name + page.chapter.name) ] = page elif page.book: page_map[con_hash(page.name + page.book.name)] = page else: page_map[con_hash(page.name)] = page return page_map def _build_chapter_map(self): """Build a map of all client chapters""" page_map = {} for chapter in self.chapters: if chapter.book: page_map[con_hash(chapter.name + chapter.book.name)] = chapter return page_map def _get_temp_book_map(self): """Get books from the client, but don't add to the client""" books = self._get_from_client(BookstackAPIEndpoints.BOOKS) return {book["name"]: book["id"] for book in books} def _retrieve_from_client_map(self, obj: Page | Shelf | Book | Chapter): """Retrieve the client version of the local object""" if isinstance(obj, Page): name = os.path.splitext(obj.name)[0] if obj.chapter and obj.book: return self.page_map[con_hash(name + obj.book.name + obj.chapter.name)] return ( self.page_map[con_hash(name + obj.book.name)] if obj.book else self.page_map[con_hash(name)] ) if isinstance(obj, Book): return ( self.book_map[con_hash(obj.name + obj.shelf.name)] if obj.shelf else self.book_map[con_hash(obj.name)] ) if isinstance(obj, Shelf): return self.shelf_map[con_hash(obj.name)] if isinstance(obj, Chapter): return self.chapter_map[con_hash(obj.name + obj.book.name)]
class Bookstack(LocalClient):
6
2023-12-20 02:22:33+00:00
4k
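Before the next record, a stripped-down sketch of the RemoteClient request pattern shown above: a urllib3 PoolManager plus a "Token id:secret" header, decoding the "data" array from the JSON response. The "/api/books" path and the environment variable names follow the snippet's conventions and should be treated as assumptions about the Bookstack instance:

```python
# Sketch of the RemoteClient HTTP pattern above. Assumes the BOOKSTACK_* env
# vars are set and that the instance exposes the usual /api/books endpoint.
import json
import os

import urllib3

base_url = os.getenv("BOOKSTACK_BASE_URL")        # e.g. "https://wiki.example.com"
token_id = os.getenv("BOOKSTACK_TOKEN_ID")
token_secret = os.getenv("BOOKSTACK_TOKEN_SECRET")

http = urllib3.PoolManager()
headers = {"Authorization": f"Token {token_id}:{token_secret}"}

resp = http.request("GET", base_url + "/api/books", headers=headers)
books = json.loads(resp.data.decode())["data"]    # Bookstack wraps list results in "data"
print([book["name"] for book in books])
```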
lipku/metahuman-stream
nerf_triplane/renderer.py
[ { "identifier": "custom_meshgrid", "path": "nerf_triplane/utils.py", "snippet": "def custom_meshgrid(*args):\n # ref: https://pytorch.org/docs/stable/generated/torch.meshgrid.html?highlight=meshgrid#torch.meshgrid\n if pver.parse(torch.__version__) < pver.parse('1.10'):\n return torch.meshgrid(*args)\n else:\n return torch.meshgrid(*args, indexing='ij')" }, { "identifier": "get_audio_features", "path": "nerf_triplane/utils.py", "snippet": "def get_audio_features(features, att_mode, index):\n if att_mode == 0:\n return features[[index]]\n elif att_mode == 1:\n left = index - 8\n pad_left = 0\n if left < 0:\n pad_left = -left\n left = 0\n auds = features[left:index]\n if pad_left > 0:\n # pad may be longer than auds, so do not use zeros_like\n auds = torch.cat([torch.zeros(pad_left, *auds.shape[1:], device=auds.device, dtype=auds.dtype), auds], dim=0)\n return auds\n elif att_mode == 2:\n left = index - 4\n right = index + 4\n pad_left = 0\n pad_right = 0\n if left < 0:\n pad_left = -left\n left = 0\n if right > features.shape[0]:\n pad_right = right - features.shape[0]\n right = features.shape[0]\n auds = features[left:right]\n if pad_left > 0:\n auds = torch.cat([torch.zeros_like(auds[:pad_left]), auds], dim=0)\n if pad_right > 0:\n auds = torch.cat([auds, torch.zeros_like(auds[:pad_right])], dim=0) # [8, 16]\n return auds\n else:\n raise NotImplementedError(f'wrong att_mode: {att_mode}')" }, { "identifier": "euler_angles_to_matrix", "path": "nerf_triplane/utils.py", "snippet": "@torch.cuda.amp.autocast(enabled=False)\ndef euler_angles_to_matrix(euler_angles: torch.Tensor, convention: str='XYZ') -> torch.Tensor:\n \"\"\"\n Convert rotations given as Euler angles in radians to rotation matrices.\n Args:\n euler_angles: Euler angles in radians as tensor of shape (..., 3).\n convention: Convention string of three uppercase letters from\n {\"X\", \"Y\", and \"Z\"}.\n Returns:\n Rotation matrices as tensor of shape (..., 3, 3).\n \"\"\"\n\n # print(euler_angles, euler_angles.dtype)\n\n if euler_angles.dim() == 0 or euler_angles.shape[-1] != 3:\n raise ValueError(\"Invalid input euler angles.\")\n if len(convention) != 3:\n raise ValueError(\"Convention must have 3 letters.\")\n if convention[1] in (convention[0], convention[2]):\n raise ValueError(f\"Invalid convention {convention}.\")\n for letter in convention:\n if letter not in (\"X\", \"Y\", \"Z\"):\n raise ValueError(f\"Invalid letter {letter} in convention string.\")\n matrices = [\n _axis_angle_rotation(c, e)\n for c, e in zip(convention, torch.unbind(euler_angles, -1))\n ]\n \n return torch.matmul(torch.matmul(matrices[0], matrices[1]), matrices[2])" }, { "identifier": "convert_poses", "path": "nerf_triplane/utils.py", "snippet": "@torch.cuda.amp.autocast(enabled=False)\ndef convert_poses(poses):\n # poses: [B, 4, 4]\n # return [B, 3], 4 rot, 3 trans\n out = torch.empty(poses.shape[0], 6, dtype=torch.float32, device=poses.device)\n out[:, :3] = matrix_to_euler_angles(poses[:, :3, :3])\n out[:, 3:] = poses[:, :3, 3]\n return out" } ]
import math
import trimesh
import numpy as np
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
import raymarching
from .utils import custom_meshgrid, get_audio_features, euler_angles_to_matrix, convert_poses
2,708
# color: [N, 3/4] print('[visualize points]', pc.shape, pc.dtype, pc.min(0), pc.max(0)) pc = trimesh.PointCloud(pc, color) # axis axes = trimesh.creation.axis(axis_length=4) # sphere sphere = trimesh.creation.icosphere(radius=1) trimesh.Scene([pc, axes, sphere]).show() class NeRFRenderer(nn.Module): def __init__(self, opt): super().__init__() self.opt = opt self.bound = opt.bound self.cascade = 1 + math.ceil(math.log2(opt.bound)) self.grid_size = 128 self.density_scale = 1 self.min_near = opt.min_near self.density_thresh = opt.density_thresh self.density_thresh_torso = opt.density_thresh_torso self.exp_eye = opt.exp_eye self.test_train = opt.test_train self.smooth_lips = opt.smooth_lips self.torso = opt.torso self.cuda_ray = opt.cuda_ray # prepare aabb with a 6D tensor (xmin, ymin, zmin, xmax, ymax, zmax) # NOTE: aabb (can be rectangular) is only used to generate points, we still rely on bound (always cubic) to calculate density grid and hashing. aabb_train = torch.FloatTensor([-opt.bound, -opt.bound/2, -opt.bound, opt.bound, opt.bound/2, opt.bound]) aabb_infer = aabb_train.clone() self.register_buffer('aabb_train', aabb_train) self.register_buffer('aabb_infer', aabb_infer) # individual codes self.individual_num = opt.ind_num self.individual_dim = opt.ind_dim if self.individual_dim > 0: self.individual_codes = nn.Parameter(torch.randn(self.individual_num, self.individual_dim) * 0.1) if self.torso: self.individual_dim_torso = opt.ind_dim_torso if self.individual_dim_torso > 0: self.individual_codes_torso = nn.Parameter(torch.randn(self.individual_num, self.individual_dim_torso) * 0.1) # optimize camera pose self.train_camera = self.opt.train_camera if self.train_camera: self.camera_dR = nn.Parameter(torch.zeros(self.individual_num, 3)) # euler angle self.camera_dT = nn.Parameter(torch.zeros(self.individual_num, 3)) # xyz offset # extra state for cuda raymarching # 3D head density grid density_grid = torch.zeros([self.cascade, self.grid_size ** 3]) # [CAS, H * H * H] density_bitfield = torch.zeros(self.cascade * self.grid_size ** 3 // 8, dtype=torch.uint8) # [CAS * H * H * H // 8] self.register_buffer('density_grid', density_grid) self.register_buffer('density_bitfield', density_bitfield) self.mean_density = 0 self.iter_density = 0 # 2D torso density grid if self.torso: density_grid_torso = torch.zeros([self.grid_size ** 2]) # [H * H] self.register_buffer('density_grid_torso', density_grid_torso) self.mean_density_torso = 0 # step counter step_counter = torch.zeros(16, 2, dtype=torch.int32) # 16 is hardcoded for averaging... self.register_buffer('step_counter', step_counter) self.mean_count = 0 self.local_step = 0 # decay for enc_a if self.smooth_lips: self.enc_a = None def forward(self, x, d): raise NotImplementedError() # separated density and color query (can accelerate non-cuda-ray mode.) 
def density(self, x): raise NotImplementedError() def color(self, x, d, mask=None, **kwargs): raise NotImplementedError() def reset_extra_state(self): if not self.cuda_ray: return # density grid self.density_grid.zero_() self.mean_density = 0 self.iter_density = 0 # step counter self.step_counter.zero_() self.mean_count = 0 self.local_step = 0 def run_cuda(self, rays_o, rays_d, auds, bg_coords, poses, eye=None, index=0, dt_gamma=0, bg_color=None, perturb=False, force_all_rays=False, max_steps=1024, T_thresh=1e-4, **kwargs): # rays_o, rays_d: [B, N, 3], assumes B == 1 # auds: [B, 16] # index: [B] # return: image: [B, N, 3], depth: [B, N] prefix = rays_o.shape[:-1] rays_o = rays_o.contiguous().view(-1, 3) rays_d = rays_d.contiguous().view(-1, 3) bg_coords = bg_coords.contiguous().view(-1, 2) # only add camera offset at training! if self.train_camera and (self.training or self.test_train): dT = self.camera_dT[index] # [1, 3]
def sample_pdf(bins, weights, n_samples, det=False): # This implementation is from NeRF # bins: [B, T], old_z_vals # weights: [B, T - 1], bin weights. # return: [B, n_samples], new_z_vals # Get pdf weights = weights + 1e-5 # prevent nans pdf = weights / torch.sum(weights, -1, keepdim=True) cdf = torch.cumsum(pdf, -1) cdf = torch.cat([torch.zeros_like(cdf[..., :1]), cdf], -1) # Take uniform samples if det: u = torch.linspace(0. + 0.5 / n_samples, 1. - 0.5 / n_samples, steps=n_samples).to(weights.device) u = u.expand(list(cdf.shape[:-1]) + [n_samples]) else: u = torch.rand(list(cdf.shape[:-1]) + [n_samples]).to(weights.device) # Invert CDF u = u.contiguous() inds = torch.searchsorted(cdf, u, right=True) below = torch.max(torch.zeros_like(inds - 1), inds - 1) above = torch.min((cdf.shape[-1] - 1) * torch.ones_like(inds), inds) inds_g = torch.stack([below, above], -1) # (B, n_samples, 2) matched_shape = [inds_g.shape[0], inds_g.shape[1], cdf.shape[-1]] cdf_g = torch.gather(cdf.unsqueeze(1).expand(matched_shape), 2, inds_g) bins_g = torch.gather(bins.unsqueeze(1).expand(matched_shape), 2, inds_g) denom = (cdf_g[..., 1] - cdf_g[..., 0]) denom = torch.where(denom < 1e-5, torch.ones_like(denom), denom) t = (u - cdf_g[..., 0]) / denom samples = bins_g[..., 0] + t * (bins_g[..., 1] - bins_g[..., 0]) return samples def plot_pointcloud(pc, color=None): # pc: [N, 3] # color: [N, 3/4] print('[visualize points]', pc.shape, pc.dtype, pc.min(0), pc.max(0)) pc = trimesh.PointCloud(pc, color) # axis axes = trimesh.creation.axis(axis_length=4) # sphere sphere = trimesh.creation.icosphere(radius=1) trimesh.Scene([pc, axes, sphere]).show() class NeRFRenderer(nn.Module): def __init__(self, opt): super().__init__() self.opt = opt self.bound = opt.bound self.cascade = 1 + math.ceil(math.log2(opt.bound)) self.grid_size = 128 self.density_scale = 1 self.min_near = opt.min_near self.density_thresh = opt.density_thresh self.density_thresh_torso = opt.density_thresh_torso self.exp_eye = opt.exp_eye self.test_train = opt.test_train self.smooth_lips = opt.smooth_lips self.torso = opt.torso self.cuda_ray = opt.cuda_ray # prepare aabb with a 6D tensor (xmin, ymin, zmin, xmax, ymax, zmax) # NOTE: aabb (can be rectangular) is only used to generate points, we still rely on bound (always cubic) to calculate density grid and hashing. 
aabb_train = torch.FloatTensor([-opt.bound, -opt.bound/2, -opt.bound, opt.bound, opt.bound/2, opt.bound]) aabb_infer = aabb_train.clone() self.register_buffer('aabb_train', aabb_train) self.register_buffer('aabb_infer', aabb_infer) # individual codes self.individual_num = opt.ind_num self.individual_dim = opt.ind_dim if self.individual_dim > 0: self.individual_codes = nn.Parameter(torch.randn(self.individual_num, self.individual_dim) * 0.1) if self.torso: self.individual_dim_torso = opt.ind_dim_torso if self.individual_dim_torso > 0: self.individual_codes_torso = nn.Parameter(torch.randn(self.individual_num, self.individual_dim_torso) * 0.1) # optimize camera pose self.train_camera = self.opt.train_camera if self.train_camera: self.camera_dR = nn.Parameter(torch.zeros(self.individual_num, 3)) # euler angle self.camera_dT = nn.Parameter(torch.zeros(self.individual_num, 3)) # xyz offset # extra state for cuda raymarching # 3D head density grid density_grid = torch.zeros([self.cascade, self.grid_size ** 3]) # [CAS, H * H * H] density_bitfield = torch.zeros(self.cascade * self.grid_size ** 3 // 8, dtype=torch.uint8) # [CAS * H * H * H // 8] self.register_buffer('density_grid', density_grid) self.register_buffer('density_bitfield', density_bitfield) self.mean_density = 0 self.iter_density = 0 # 2D torso density grid if self.torso: density_grid_torso = torch.zeros([self.grid_size ** 2]) # [H * H] self.register_buffer('density_grid_torso', density_grid_torso) self.mean_density_torso = 0 # step counter step_counter = torch.zeros(16, 2, dtype=torch.int32) # 16 is hardcoded for averaging... self.register_buffer('step_counter', step_counter) self.mean_count = 0 self.local_step = 0 # decay for enc_a if self.smooth_lips: self.enc_a = None def forward(self, x, d): raise NotImplementedError() # separated density and color query (can accelerate non-cuda-ray mode.) def density(self, x): raise NotImplementedError() def color(self, x, d, mask=None, **kwargs): raise NotImplementedError() def reset_extra_state(self): if not self.cuda_ray: return # density grid self.density_grid.zero_() self.mean_density = 0 self.iter_density = 0 # step counter self.step_counter.zero_() self.mean_count = 0 self.local_step = 0 def run_cuda(self, rays_o, rays_d, auds, bg_coords, poses, eye=None, index=0, dt_gamma=0, bg_color=None, perturb=False, force_all_rays=False, max_steps=1024, T_thresh=1e-4, **kwargs): # rays_o, rays_d: [B, N, 3], assumes B == 1 # auds: [B, 16] # index: [B] # return: image: [B, N, 3], depth: [B, N] prefix = rays_o.shape[:-1] rays_o = rays_o.contiguous().view(-1, 3) rays_d = rays_d.contiguous().view(-1, 3) bg_coords = bg_coords.contiguous().view(-1, 2) # only add camera offset at training! if self.train_camera and (self.training or self.test_train): dT = self.camera_dT[index] # [1, 3]
dR = euler_angles_to_matrix(self.camera_dR[index] / 180 * np.pi + 1e-8).squeeze(0) # [1, 3] --> [3, 3]
2
2023-12-19 01:32:46+00:00
4k
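Closing out this record, a tiny illustration of the inverse-CDF resampling implemented by sample_pdf in the renderer above: coarse depth bins whose weights peak near the middle produce fine samples concentrated in that interval. The import assumes the repository's package layout shown in this record:

```python
# Toy illustration of hierarchical sampling via sample_pdf: 16 deterministic
# fine samples drawn from 8 coarse bins whose weights peak around z ~ 0.5.
# Assumes the repository's nerf_triplane package is importable.
import torch

from nerf_triplane.renderer import sample_pdf

bins = torch.linspace(0.0, 1.0, steps=9).unsqueeze(0)                       # [1, 9] coarse z values
weights = torch.tensor([[0.01, 0.02, 0.05, 0.40, 0.40, 0.07, 0.03, 0.02]])  # [1, 8] bin weights

new_z = sample_pdf(bins, weights, n_samples=16, det=True)                   # [1, 16] fine z values
print(new_z)  # most samples fall in [0.375, 0.625], where the weights peak
```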
MingtaoGuo/AnimateAnyone_unofficial
aldm/ddim_hacked.py
[ { "identifier": "make_ddim_sampling_parameters", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True):\n # select alphas for computing the variance schedule\n alphas = alphacums[ddim_timesteps]\n alphas_prev = np.asarray([alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist())\n\n # according the the formula provided in https://arxiv.org/abs/2010.02502\n sigmas = eta * np.sqrt((1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev))\n if verbose:\n print(f'Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}')\n print(f'For the chosen value of eta, which is {eta}, '\n f'this results in the following sigma_t schedule for ddim sampler {sigmas}')\n return sigmas, alphas, alphas_prev" }, { "identifier": "make_ddim_timesteps", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def make_ddim_timesteps(ddim_discr_method, num_ddim_timesteps, num_ddpm_timesteps, verbose=True):\n if ddim_discr_method == 'uniform':\n c = num_ddpm_timesteps // num_ddim_timesteps\n ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, c)))\n elif ddim_discr_method == 'quad':\n ddim_timesteps = ((np.linspace(0, np.sqrt(num_ddpm_timesteps * .8), num_ddim_timesteps)) ** 2).astype(int)\n else:\n raise NotImplementedError(f'There is no ddim discretization method called \"{ddim_discr_method}\"')\n\n # assert ddim_timesteps.shape[0] == num_ddim_timesteps\n # add one to get the final alpha values right (the ones from first scale to data during sampling)\n steps_out = ddim_timesteps + 1\n if verbose:\n print(f'Selected timesteps for ddim sampler: {steps_out}')\n return steps_out" }, { "identifier": "noise_like", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "extract_into_tensor", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" } ]
import torch
import numpy as np
from tqdm import tqdm
from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like, extract_into_tensor
3,145
mask=mask, x0=x0, ddim_use_original_steps=False, noise_dropout=noise_dropout, temperature=temperature, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs, x_T=x_T, log_every_t=log_every_t, unconditional_guidance_scale=unconditional_guidance_scale, unconditional_conditioning=unconditional_conditioning, dynamic_threshold=dynamic_threshold, ucg_schedule=ucg_schedule ) return samples, intermediates @torch.no_grad() def ddim_sampling(self, cond, shape, x_T=None, ddim_use_original_steps=False, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, log_every_t=100, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None, ucg_schedule=None): device = self.model.betas.device b = shape[0] if x_T is None: img = torch.randn(shape, device=device) else: img = x_T if timesteps is None: timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps elif timesteps is not None and not ddim_use_original_steps: subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1 timesteps = self.ddim_timesteps[:subset_end] intermediates = {'x_inter': [img], 'pred_x0': [img]} time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps) total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0] print(f"Running DDIM Sampling with {total_steps} timesteps") iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps) for i, step in enumerate(iterator): index = total_steps - i - 1 ts = torch.full((b,), step, device=device, dtype=torch.long) if mask is not None: assert x0 is not None img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass? img = img_orig * mask + (1. 
- mask) * img if ucg_schedule is not None: assert len(ucg_schedule) == len(time_range) unconditional_guidance_scale = ucg_schedule[i] outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps, quantize_denoised=quantize_denoised, temperature=temperature, noise_dropout=noise_dropout, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs, unconditional_guidance_scale=unconditional_guidance_scale, unconditional_conditioning=unconditional_conditioning, dynamic_threshold=dynamic_threshold) img, pred_x0 = outs if callback: callback(i) if img_callback: img_callback(pred_x0, i) if index % log_every_t == 0 or index == total_steps - 1: intermediates['x_inter'].append(img) intermediates['pred_x0'].append(pred_x0) return img, intermediates @torch.no_grad() def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None): b, *_, device = *x.shape, x.device if unconditional_conditioning is None or unconditional_guidance_scale == 1.: model_output = self.model.apply_model(x, t, c) else: model_t = self.model.apply_model(x, t, c) model_uncond = self.model.apply_model(x, t, unconditional_conditioning) model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond) if self.model.parameterization == "v": e_t = self.model.predict_eps_from_z_and_v(x, t, model_output) else: e_t = model_output if score_corrector is not None: assert self.model.parameterization == "eps", 'not implemented' e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs) alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas # select parameters corresponding to the currently considered timestep a_t = torch.full((b, 1, 1, 1), alphas[index], device=device) a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device) sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device) sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device) # current prediction for x_0 if self.model.parameterization != "v": pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt() else: pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output) if quantize_denoised: pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0) if dynamic_threshold is not None: raise NotImplementedError() # direction pointing to x_t dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t
"""SAMPLING ONLY.""" class DDIMSampler(object): def __init__(self, model, schedule="linear", **kwargs): super().__init__() self.model = model self.ddpm_num_timesteps = model.num_timesteps self.schedule = schedule def register_buffer(self, name, attr): if type(attr) == torch.Tensor: if attr.device != torch.device("cuda"): attr = attr.to(torch.device("cuda")) setattr(self, name, attr) def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True): self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps, num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose) alphas_cumprod = self.model.alphas_cumprod assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep' to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device) self.register_buffer('betas', to_torch(self.model.betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu()))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu()))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu()))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu()))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1))) # ddim sampling parameters ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(), ddim_timesteps=self.ddim_timesteps, eta=ddim_eta,verbose=verbose) self.register_buffer('ddim_sigmas', ddim_sigmas) self.register_buffer('ddim_alphas', ddim_alphas) self.register_buffer('ddim_alphas_prev', ddim_alphas_prev) self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas)) sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt( (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * ( 1 - self.alphas_cumprod / self.alphas_cumprod_prev)) self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps) @torch.no_grad() def sample(self, S, batch_size, shape, conditioning=None, callback=None, normals_sequence=None, img_callback=None, quantize_x0=False, eta=0., mask=None, x0=None, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, verbose=True, x_T=None, log_every_t=100, unconditional_guidance_scale=1., unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... 
dynamic_threshold=None, ucg_schedule=None, **kwargs ): if conditioning is not None: if isinstance(conditioning, dict): ctmp = conditioning[list(conditioning.keys())[0]] while isinstance(ctmp, list): ctmp = ctmp[0] cbs = ctmp.shape[0] if cbs != batch_size: print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") elif isinstance(conditioning, list): for ctmp in conditioning: if ctmp.shape[0] != batch_size: print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") else: if conditioning.shape[0] != batch_size: print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}") self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose) # sampling C, H, W = shape size = (batch_size, C, H, W) print(f'Data shape for DDIM sampling is {size}, eta {eta}') samples, intermediates = self.ddim_sampling(conditioning, size, callback=callback, img_callback=img_callback, quantize_denoised=quantize_x0, mask=mask, x0=x0, ddim_use_original_steps=False, noise_dropout=noise_dropout, temperature=temperature, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs, x_T=x_T, log_every_t=log_every_t, unconditional_guidance_scale=unconditional_guidance_scale, unconditional_conditioning=unconditional_conditioning, dynamic_threshold=dynamic_threshold, ucg_schedule=ucg_schedule ) return samples, intermediates @torch.no_grad() def ddim_sampling(self, cond, shape, x_T=None, ddim_use_original_steps=False, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, log_every_t=100, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None, ucg_schedule=None): device = self.model.betas.device b = shape[0] if x_T is None: img = torch.randn(shape, device=device) else: img = x_T if timesteps is None: timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps elif timesteps is not None and not ddim_use_original_steps: subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1 timesteps = self.ddim_timesteps[:subset_end] intermediates = {'x_inter': [img], 'pred_x0': [img]} time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps) total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0] print(f"Running DDIM Sampling with {total_steps} timesteps") iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps) for i, step in enumerate(iterator): index = total_steps - i - 1 ts = torch.full((b,), step, device=device, dtype=torch.long) if mask is not None: assert x0 is not None img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass? img = img_orig * mask + (1. 
- mask) * img if ucg_schedule is not None: assert len(ucg_schedule) == len(time_range) unconditional_guidance_scale = ucg_schedule[i] outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps, quantize_denoised=quantize_denoised, temperature=temperature, noise_dropout=noise_dropout, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs, unconditional_guidance_scale=unconditional_guidance_scale, unconditional_conditioning=unconditional_conditioning, dynamic_threshold=dynamic_threshold) img, pred_x0 = outs if callback: callback(i) if img_callback: img_callback(pred_x0, i) if index % log_every_t == 0 or index == total_steps - 1: intermediates['x_inter'].append(img) intermediates['pred_x0'].append(pred_x0) return img, intermediates @torch.no_grad() def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None): b, *_, device = *x.shape, x.device if unconditional_conditioning is None or unconditional_guidance_scale == 1.: model_output = self.model.apply_model(x, t, c) else: model_t = self.model.apply_model(x, t, c) model_uncond = self.model.apply_model(x, t, unconditional_conditioning) model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond) if self.model.parameterization == "v": e_t = self.model.predict_eps_from_z_and_v(x, t, model_output) else: e_t = model_output if score_corrector is not None: assert self.model.parameterization == "eps", 'not implemented' e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs) alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas # select parameters corresponding to the currently considered timestep a_t = torch.full((b, 1, 1, 1), alphas[index], device=device) a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device) sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device) sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device) # current prediction for x_0 if self.model.parameterization != "v": pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt() else: pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output) if quantize_denoised: pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0) if dynamic_threshold is not None: raise NotImplementedError() # direction pointing to x_t dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t
noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature
2
2023-12-16 03:31:33+00:00
4k
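To round off this record, a small numeric check of the DDIM variance schedule documented above, sigma_t = eta * sqrt((1 - a_prev) / (1 - a_t) * (1 - a_t / a_prev)). The linear-beta schedule here is a stand-in for illustration, not the model's actual alphas_cumprod, and eta = 0 should make every sigma vanish (deterministic DDIM):

```python
# Numeric check of the DDIM sigma schedule from make_ddim_sampling_parameters:
# with eta = 0 every sigma is zero, i.e. fully deterministic sampling.
# The linear-beta schedule is a toy stand-in for the model's alphas_cumprod.
import numpy as np

num_ddpm_steps = 1000
betas = np.linspace(1e-4, 2e-2, num_ddpm_steps)
alphacums = np.cumprod(1.0 - betas)

c = num_ddpm_steps // 50                         # 'uniform' discretisation, 50 DDIM steps
ddim_timesteps = np.arange(0, num_ddpm_steps, c) + 1

alphas = alphacums[ddim_timesteps]
alphas_prev = np.asarray([alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist())

eta = 0.0
sigmas = eta * np.sqrt((1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev))
assert np.allclose(sigmas, 0.0)                  # eta = 0 -> deterministic DDIM
```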
yasserben/CLOUDS
clouds/modeling/pixel_decoder/msdeformattn.py
[ { "identifier": "PositionEmbeddingSine", "path": "clouds/modeling/transformer_decoder/position_encoding.py", "snippet": "class PositionEmbeddingSine(nn.Module):\n \"\"\"\n This is a more standard version of the position embedding, very similar to the one\n used by the Attention is all you need paper, generalized to work on images.\n \"\"\"\n\n def __init__(\n self, num_pos_feats=64, temperature=10000, normalize=False, scale=None\n ):\n super().__init__()\n self.num_pos_feats = num_pos_feats\n self.temperature = temperature\n self.normalize = normalize\n if scale is not None and normalize is False:\n raise ValueError(\"normalize should be True if scale is passed\")\n if scale is None:\n scale = 2 * math.pi\n self.scale = scale\n\n def forward(self, x, mask=None):\n if mask is None:\n mask = torch.zeros(\n (x.size(0), x.size(2), x.size(3)), device=x.device, dtype=torch.bool\n )\n not_mask = ~mask\n y_embed = not_mask.cumsum(1, dtype=torch.float32)\n x_embed = not_mask.cumsum(2, dtype=torch.float32)\n if self.normalize:\n eps = 1e-6\n y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale\n x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale\n\n dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)\n dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)\n\n pos_x = x_embed[:, :, :, None] / dim_t\n pos_y = y_embed[:, :, :, None] / dim_t\n pos_x = torch.stack(\n (pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4\n ).flatten(3)\n pos_y = torch.stack(\n (pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4\n ).flatten(3)\n pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)\n return pos\n\n def __repr__(self, _repr_indent=4):\n head = \"Positional encoding \" + self.__class__.__name__\n body = [\n \"num_pos_feats: {}\".format(self.num_pos_feats),\n \"temperature: {}\".format(self.temperature),\n \"normalize: {}\".format(self.normalize),\n \"scale: {}\".format(self.scale),\n ]\n # _repr_indent = 4\n lines = [head] + [\" \" * _repr_indent + line for line in body]\n return \"\\n\".join(lines)" }, { "identifier": "MSDeformAttn", "path": "clouds/modeling/pixel_decoder/ops/modules/ms_deform_attn.py", "snippet": "class MSDeformAttn(nn.Module):\n def __init__(self, d_model=256, n_levels=4, n_heads=8, n_points=4):\n \"\"\"\n Multi-Scale Deformable Attention Module\n :param d_model hidden dimension\n :param n_levels number of feature levels\n :param n_heads number of attention heads\n :param n_points number of sampling points per attention head per feature level\n \"\"\"\n super().__init__()\n if d_model % n_heads != 0:\n raise ValueError(\n \"d_model must be divisible by n_heads, but got {} and {}\".format(\n d_model, n_heads\n )\n )\n _d_per_head = d_model // n_heads\n # you'd better set _d_per_head to a power of 2 which is more efficient in our CUDA implementation\n if not _is_power_of_2(_d_per_head):\n warnings.warn(\n \"You'd better set d_model in MSDeformAttn to make the dimension of each attention head a power of 2 \"\n \"which is more efficient in our CUDA implementation.\"\n )\n\n self.im2col_step = 128\n\n self.d_model = d_model\n self.n_levels = n_levels\n self.n_heads = n_heads\n self.n_points = n_points\n\n self.sampling_offsets = nn.Linear(d_model, n_heads * n_levels * n_points * 2)\n self.attention_weights = nn.Linear(d_model, n_heads * n_levels * n_points)\n self.value_proj = nn.Linear(d_model, d_model)\n self.output_proj = nn.Linear(d_model, d_model)\n\n self._reset_parameters()\n\n def 
_reset_parameters(self):\n constant_(self.sampling_offsets.weight.data, 0.0)\n thetas = torch.arange(self.n_heads, dtype=torch.float32) * (\n 2.0 * math.pi / self.n_heads\n )\n grid_init = torch.stack([thetas.cos(), thetas.sin()], -1)\n grid_init = (\n (grid_init / grid_init.abs().max(-1, keepdim=True)[0])\n .view(self.n_heads, 1, 1, 2)\n .repeat(1, self.n_levels, self.n_points, 1)\n )\n for i in range(self.n_points):\n grid_init[:, :, i, :] *= i + 1\n with torch.no_grad():\n self.sampling_offsets.bias = nn.Parameter(grid_init.view(-1))\n constant_(self.attention_weights.weight.data, 0.0)\n constant_(self.attention_weights.bias.data, 0.0)\n xavier_uniform_(self.value_proj.weight.data)\n constant_(self.value_proj.bias.data, 0.0)\n xavier_uniform_(self.output_proj.weight.data)\n constant_(self.output_proj.bias.data, 0.0)\n\n def forward(\n self,\n query,\n reference_points,\n input_flatten,\n input_spatial_shapes,\n input_level_start_index,\n input_padding_mask=None,\n ):\n \"\"\"\n :param query (N, Length_{query}, C)\n :param reference_points (N, Length_{query}, n_levels, 2), range in [0, 1], top-left (0,0), bottom-right (1, 1), including padding area\n or (N, Length_{query}, n_levels, 4), add additional (w, h) to form reference boxes\n :param input_flatten (N, \\sum_{l=0}^{L-1} H_l \\cdot W_l, C)\n :param input_spatial_shapes (n_levels, 2), [(H_0, W_0), (H_1, W_1), ..., (H_{L-1}, W_{L-1})]\n :param input_level_start_index (n_levels, ), [0, H_0*W_0, H_0*W_0+H_1*W_1, H_0*W_0+H_1*W_1+H_2*W_2, ..., H_0*W_0+H_1*W_1+...+H_{L-1}*W_{L-1}]\n :param input_padding_mask (N, \\sum_{l=0}^{L-1} H_l \\cdot W_l), True for padding elements, False for non-padding elements\n\n :return output (N, Length_{query}, C)\n \"\"\"\n N, Len_q, _ = query.shape\n N, Len_in, _ = input_flatten.shape\n assert (input_spatial_shapes[:, 0] * input_spatial_shapes[:, 1]).sum() == Len_in\n\n value = self.value_proj(input_flatten)\n if input_padding_mask is not None:\n value = value.masked_fill(input_padding_mask[..., None], float(0))\n value = value.view(N, Len_in, self.n_heads, self.d_model // self.n_heads)\n sampling_offsets = self.sampling_offsets(query).view(\n N, Len_q, self.n_heads, self.n_levels, self.n_points, 2\n )\n attention_weights = self.attention_weights(query).view(\n N, Len_q, self.n_heads, self.n_levels * self.n_points\n )\n attention_weights = F.softmax(attention_weights, -1).view(\n N, Len_q, self.n_heads, self.n_levels, self.n_points\n )\n # N, Len_q, n_heads, n_levels, n_points, 2\n if reference_points.shape[-1] == 2:\n offset_normalizer = torch.stack(\n [input_spatial_shapes[..., 1], input_spatial_shapes[..., 0]], -1\n )\n sampling_locations = (\n reference_points[:, :, None, :, None, :]\n + sampling_offsets / offset_normalizer[None, None, None, :, None, :]\n )\n elif reference_points.shape[-1] == 4:\n sampling_locations = (\n reference_points[:, :, None, :, None, :2]\n + sampling_offsets\n / self.n_points\n * reference_points[:, :, None, :, None, 2:]\n * 0.5\n )\n else:\n raise ValueError(\n \"Last dim of reference_points must be 2 or 4, but get {} instead.\".format(\n reference_points.shape[-1]\n )\n )\n try:\n output = MSDeformAttnFunction.apply(\n value,\n input_spatial_shapes,\n input_level_start_index,\n sampling_locations,\n attention_weights,\n self.im2col_step,\n )\n except:\n # CPU\n output = ms_deform_attn_core_pytorch(\n value, input_spatial_shapes, sampling_locations, attention_weights\n )\n # # For FLOPs calculation only\n # output = ms_deform_attn_core_pytorch(value, input_spatial_shapes, 
sampling_locations, attention_weights)\n output = self.output_proj(output)\n return output" } ]
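The context above closes with the sinusoidal position embedding used by the pixel decoder. A minimal usage sketch, assuming the clouds package from this record is importable: for a [B, C, H, W] feature map it returns a [B, 2*num_pos_feats, H, W] positional tensor.

```python
# Minimal usage sketch of PositionEmbeddingSine from the context above.
# Assumes the repository's `clouds` package is importable.
import torch

from clouds.modeling.transformer_decoder.position_encoding import PositionEmbeddingSine

features = torch.randn(2, 256, 32, 32)                       # [B, C, H, W] backbone features
pos_embed = PositionEmbeddingSine(num_pos_feats=128, normalize=True)

pos = pos_embed(features)                                    # mask defaults to "all pixels valid"
print(pos.shape)                                             # torch.Size([2, 256, 32, 32])
```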
import logging
import numpy as np
import fvcore.nn.weight_init as weight_init
import torch
import copy
from typing import Callable, Dict, List, Optional, Tuple, Union
from torch import nn
from torch.nn import functional as F
from torch.nn.init import xavier_uniform_, constant_, uniform_, normal_
from torch.cuda.amp import autocast
from detectron2.config import configurable
from detectron2.layers import Conv2d, ShapeSpec, get_norm
from detectron2.modeling import SEM_SEG_HEADS_REGISTRY
from ..transformer_decoder.position_encoding import PositionEmbeddingSine
from .ops.modules import MSDeformAttn
3,030
""" Copyright 2023 Telecom Paris, Yasser BENIGMIM. All rights reserved. Licensed under the Apache License, Version 2.0 Reference: https://github.com/facebookresearch/Mask2Former/blob/main/mask2former/modeling/pixel_decoder/msdeformattn.py """ def _get_clones(module, N): return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) def _get_activation_fn(activation): """Return an activation function given a string""" if activation == "relu": return F.relu if activation == "gelu": return F.gelu if activation == "glu": return F.glu raise RuntimeError(f"activation should be relu/gelu, not {activation}.") def build_pixel_decoder(cfg, input_shape): """ Build a pixel decoder from `cfg.MODEL.ONE_FORMER.PIXEL_DECODER_NAME`. """ name = cfg.MODEL.SEM_SEG_HEAD.PIXEL_DECODER_NAME model = SEM_SEG_HEADS_REGISTRY.get(name)(cfg, input_shape) forward_features = getattr(model, "forward_features", None) if not callable(forward_features): raise ValueError( "Only SEM_SEG_HEADS with forward_features method can be used as pixel decoder. " f"Please implement forward_features for {name} to only return mask features." ) return model # MSDeformAttn Transformer encoder in deformable detr class MSDeformAttnTransformerEncoderOnly(nn.Module): def __init__( self, d_model=256, nhead=8, num_encoder_layers=6, dim_feedforward=1024, dropout=0.1, activation="relu", num_feature_levels=4, enc_n_points=4, ): super().__init__() self.d_model = d_model self.nhead = nhead encoder_layer = MSDeformAttnTransformerEncoderLayer( d_model, dim_feedforward, dropout, activation, num_feature_levels, nhead, enc_n_points, ) self.encoder = MSDeformAttnTransformerEncoder(encoder_layer, num_encoder_layers) self.level_embed = nn.Parameter(torch.Tensor(num_feature_levels, d_model)) self._reset_parameters() def _reset_parameters(self): for p in self.parameters(): if p.dim() > 1: nn.init.xavier_uniform_(p) for m in self.modules():
""" Copyright 2023 Telecom Paris, Yasser BENIGMIM. All rights reserved. Licensed under the Apache License, Version 2.0 Reference: https://github.com/facebookresearch/Mask2Former/blob/main/mask2former/modeling/pixel_decoder/msdeformattn.py """ def _get_clones(module, N): return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) def _get_activation_fn(activation): """Return an activation function given a string""" if activation == "relu": return F.relu if activation == "gelu": return F.gelu if activation == "glu": return F.glu raise RuntimeError(f"activation should be relu/gelu, not {activation}.") def build_pixel_decoder(cfg, input_shape): """ Build a pixel decoder from `cfg.MODEL.ONE_FORMER.PIXEL_DECODER_NAME`. """ name = cfg.MODEL.SEM_SEG_HEAD.PIXEL_DECODER_NAME model = SEM_SEG_HEADS_REGISTRY.get(name)(cfg, input_shape) forward_features = getattr(model, "forward_features", None) if not callable(forward_features): raise ValueError( "Only SEM_SEG_HEADS with forward_features method can be used as pixel decoder. " f"Please implement forward_features for {name} to only return mask features." ) return model # MSDeformAttn Transformer encoder in deformable detr class MSDeformAttnTransformerEncoderOnly(nn.Module): def __init__( self, d_model=256, nhead=8, num_encoder_layers=6, dim_feedforward=1024, dropout=0.1, activation="relu", num_feature_levels=4, enc_n_points=4, ): super().__init__() self.d_model = d_model self.nhead = nhead encoder_layer = MSDeformAttnTransformerEncoderLayer( d_model, dim_feedforward, dropout, activation, num_feature_levels, nhead, enc_n_points, ) self.encoder = MSDeformAttnTransformerEncoder(encoder_layer, num_encoder_layers) self.level_embed = nn.Parameter(torch.Tensor(num_feature_levels, d_model)) self._reset_parameters() def _reset_parameters(self): for p in self.parameters(): if p.dim() > 1: nn.init.xavier_uniform_(p) for m in self.modules():
if isinstance(m, MSDeformAttn):
1
2023-12-15 15:40:58+00:00
4k
ASK-03/Reverse-Chain
main.py
[ { "identifier": "FinalAPISelector", "path": "modules.py", "snippet": "class FinalAPISelector(ReverseChainBaseClass):\n def __init__(self, model: str, temperature: float) -> None:\n super(FinalAPISelector, self).__init__(model, temperature)\n self.template = \"\"\"\n We have below APIs that are similar to query:\n =====\n {context}\n =====\n If someone is saying: \"{query}\"\n Search for words like summarize, prioritize, my id, current sprint and select the api according to that.\n Try to divide the query in smaller tasks just like a human would do,\n now which final API should we use for this instruction? if no api can be used just return None in answer\n Only return API name in answer, donot return anything else.\n return the answer as a json object where key is api_name and key is the api name and a key data_source and value as the source of the file.\n Never give argument_name as the api name. \n \"\"\"\n\n def select_api_from_query(self, query: str, db) -> str:\n context = self.get_context_from_retriver(query, db)\n prompt = self.get_prompt(query, context=context)\n response = self.llm(prompt)\n return response" }, { "identifier": "ArgumentExtractor", "path": "modules.py", "snippet": "class ArgumentExtractor(ReverseChainBaseClass):\n def __init__(self, model: str, temperature: float) -> None:\n super(ArgumentExtractor, self).__init__(model, temperature)\n self.template = \"\"\"\n You are an argument extractor. For each argument, you need to\n determine whether you can extract the value from user input\n directly or you need to use an API to get the value. The output\n should be in Json format, key is the argument, and value is the\n value of argument. Importantly, return None if you cannot get\n value.\n Give arguments that can be given to the API in context, if not found an \n arguments value in the query return None.\n The api documentation is as below, use the context of the API to extract\n arguments that can be extracted from the user input and feeded in the API.\n if API doesnot use any arguments then, just return an empty json object:\n \n Context:\n {context}\n ......\n Now, Let's start.\n =>\n If someone is saying: \"{query}\"\n IMPORTANT:\n Donot try to make arguments if they are not present in the query, just return Null in place of the value.\n if the query contains key words like current sprint then use get_sprint_id\n if the query contains key words like my id them use who_am_is\n Arguments :\n \"\"\"\n\n def get_arguments_from_query(self, query: str, db, api_documentation):\n prompt = self.get_prompt(query, api_documentation)\n response = self.llm(prompt)\n return response" }, { "identifier": "SubAPISelector", "path": "modules.py", "snippet": "class SubAPISelector(ReverseChainBaseClass):\n def __init__(self, model: str, temperature: float) -> None:\n super().__init__(model, temperature)\n self.template = \"\"\"\n Required argument: {required_argument} \n context: {context}\n Given the context above, give the API name that can give the reqiured_argument as output.\n if no api can be used just return None in answer\n Only return API name in answer, donot return anything else.\n return the answer as a json object where key is api_name and key is the api name and a key data_source and value as the source of the file.\n \"\"\"\n\n def get_api_from_argument(self, db, required_argument: str) -> str:\n context = self.get_context_from_retriver(required_argument, db)\n prompt = self.get_prompt(context=context, required_argument=required_argument)\n response = self.llm(prompt)\n 
return response\n \n def get_context_from_retriver(self, query: str, db):\n return db.retrieve_using_similarity_search(query, top_k=5)\n\n def get_prompt(self, context: str, required_argument: str) -> str:\n prompt = PromptTemplate(input_variables=[\"context\", \"required_argument\"], template=self.template)\n return prompt.format(context=context, required_argument=required_argument) " }, { "identifier": "VectorDataBase", "path": "retriever.py", "snippet": "class VectorDataBase(FAISS):\n def __init__(self) -> None:\n self.embeddings_model = HuggingFaceEmbeddings(\n model_name=EMBEDDING_MODEL, model_kwargs={\"device\": \"cpu\"}\n )\n self.data_directory = os.path.join(DATA_PATH, \"api_documentation\")\n self.db = None\n \n def load_db(self):\n self.db = FAISS.load_local(FAISS_DATA_PATH, self.embeddings_model)\n\n def txt_loader(self) -> DirectoryLoader:\n loader = DirectoryLoader(\n path=self.data_directory, glob=\"*.txt\", loader_cls=TextLoader\n )\n\n return loader\n\n def create_vector_db(self):\n loader = self.txt_loader()\n documents = loader.load()\n splitter = RecursiveCharacterTextSplitter(\n chunk_size=CHUNK_SIZE,\n chunk_overlap=CHUNK_OVERLAP,\n )\n text = splitter.split_documents(documents)\n\n self.db = FAISS.from_documents(documents=text, embedding=self.embeddings_model)\n\n self.db.save_local(FAISS_DATA_PATH)\n\n return self.db\n\n def retrieve_using_similarity_search(self, query: str, top_k: int = 5):\n if self.db is not None:\n return self.db.similarity_search(query, k=top_k)" }, { "identifier": "Executor", "path": "executor.py", "snippet": "class Executor:\n def __init__(self):\n pass\n\n def run(self, function_json) -> Dict[str, Any]:\n function_name = function_json.get(\"api_name\")\n function_args = function_json.get(\"arguments\")\n\n if function_name == \"get_sprint_id\":\n return server.get_sprint_id()\n elif function_name == \"get_similar_work_items\":\n work_id = function_args.get(\"work_id\")\n return server.get_similar_work_items(work_id)\n elif function_name == \"add_work_items_to_sprint\":\n work_ids = function_args.get(\"work_ids\")\n sprint_id = function_args.get(\"sprint_id\")\n return server.add_work_items_to_sprint(work_ids, sprint_id)\n elif function_name == \"create_actionable_tasks_from_text\":\n text = function_args.get(\"text\")\n return server.create_actionable_tasks_from_text(text)\n elif function_name == \"prioritize_objects\":\n objects = function_args.get(\"objects\")\n return server.prioritize_objects(objects)\n elif function_name == \"search_object_by_name\":\n object_name = function_args.get(\"object_name\")\n return server.search_object_by_name(object_name)\n elif function_name == \"summarize_objects\":\n objects = function_args.get(\"objects\")\n return server.summarize_objects(objects)\n elif function_name == \"who_am_i\":\n return server.who_am_i()\n elif function_name == \"work_list\":\n return server.work_list()\n else:\n return {\n \"status\": 200,\n \"message\": \"Successfully ran function: \" + function_name,\n }" }, { "identifier": "ResultFormatter", "path": "result_formatter.py", "snippet": "class ResultFormatter(ResultFormatterBaseClass):\n def __init__(self, model, temperature):\n super(ResultFormatter, self).__init__(model, temperature)\n self.template = \"\"\"\n on being given the context, use the given context of the tools and arguments and output the response.\n The output should be a list of JSONs conforming following jsonschema:\n\n Unset\n {{\n \"type\": \"array\",\n \"items\": {{\n \"type\": \"object\",\n \"properties\": 
{{\n \"tool_name\": {{ \"type\": \"string\" }},\n \"arguments\": {{\n \"type\": \"array\",\n \"items\": {{\n \"type\": \"object\",\n \"properties\": {{\n \"argument_name\": {{ \"type\": \"string\" }},\n \"argument_value\": {{ \"type\": \"string\" }}\n }},\n \"required\": [\"argument_name\", \"argument_value\"]\n }} \n }}\n }},\n \"required\": [\"tool_name\", \"arguments\"]\n }}\n }}\n\n In the above json schema, replace these values accordingly.\n Donot use those arguments whose value is RequiredFalse\n Context:\n {context}\n\n Result: \n \"\"\"\n\n def get_prompt(self, context: str) -> str:\n prompt = PromptTemplate(input_variables=[\"context\"], template=self.template)\n return prompt.format(context=context)\n\n def _format(self, context):\n prompt = self.get_prompt(context=context)\n response = self.llm(prompt)\n return response\n\n def run(self, context):\n formatted_result = self._format(context)\n return formatted_result" } ]
from modules import FinalAPISelector, ArgumentExtractor, SubAPISelector from retriever import VectorDataBase from executor import Executor from result_formatter import ResultFormatter from configparser import ConfigParser from collections import deque import os import json import logging import warnings
token_num: 2,223
warnings.filterwarnings("ignore") config = ConfigParser() config.read("config.ini") DATA_PATH = config["faiss"]["data"] OPENAI_SECRET_KEY = config["openai"]["secret_key"] MODEL = config["openai"]["model"] TEMPERATURE = float(config["openai"]["temperature"]) QUERY = config["query"]["query"] os.environ["OPENAI_API_KEY"] = OPENAI_SECRET_KEY if __name__ == "__main__":
warnings.filterwarnings("ignore") config = ConfigParser() config.read("config.ini") DATA_PATH = config["faiss"]["data"] OPENAI_SECRET_KEY = config["openai"]["secret_key"] MODEL = config["openai"]["model"] TEMPERATURE = float(config["openai"]["temperature"]) QUERY = config["query"]["query"] os.environ["OPENAI_API_KEY"] = OPENAI_SECRET_KEY if __name__ == "__main__":
next_line: vector_db = VectorDataBase()
gold_snippet_index: 3
created_at: 2023-12-15 19:19:01+00:00
level: 4k
repo_name: linyq2117/TagCLIP
file_path: classify.py
[ { "identifier": "scoremap2bbox", "path": "utils.py", "snippet": "def scoremap2bbox(scoremap, threshold, multi_contour_eval=False):\n height, width = scoremap.shape\n scoremap_image = np.expand_dims((scoremap * 255).astype(np.uint8), 2)\n _, thr_gray_heatmap = cv2.threshold(\n src=scoremap_image,\n thresh=int(threshold * np.max(scoremap_image)),\n maxval=255,\n type=cv2.THRESH_BINARY)\n contours = cv2.findContours(\n image=thr_gray_heatmap,\n mode=cv2.RETR_EXTERNAL,\n method=cv2.CHAIN_APPROX_SIMPLE)[_CONTOUR_INDEX]\n\n if len(contours) == 0:\n return np.asarray([[0, 0, 0, 0]]), 1\n\n if not multi_contour_eval:\n contours = [max(contours, key=cv2.contourArea)]\n\n estimated_boxes = []\n for contour in contours:\n x, y, w, h = cv2.boundingRect(contour)\n x0, y0, x1, y1 = x, y, x + w, y + h\n x1 = min(x1, width - 1)\n y1 = min(y1, height - 1)\n estimated_boxes.append([x0, y0, x1, y1])\n\n return np.asarray(estimated_boxes), len(contours)" }, { "identifier": "parse_xml_to_dict", "path": "utils.py", "snippet": "def parse_xml_to_dict(xml):\n \"\"\"\n Args:\n xml: xml tree obtained by parsing XML file contents using lxml.etree\n\n Returns:\n Python dictionary holding XML contents.\n \"\"\"\n\n if len(xml) == 0:\n return {xml.tag: xml.text}\n\n result = {}\n for child in xml:\n child_result = parse_xml_to_dict(child)\n if child.tag != 'object':\n result[child.tag] = child_result[child.tag]\n else:\n if child.tag not in result:\n result[child.tag] = []\n result[child.tag].append(child_result[child.tag])\n return {xml.tag: result}" }, { "identifier": "_convert_image_to_rgb", "path": "utils.py", "snippet": "def _convert_image_to_rgb(image):\n return image.convert(\"RGB\")" }, { "identifier": "compute_AP", "path": "utils.py", "snippet": "def compute_AP(predictions, labels):\n num_class = predictions.size(1)\n ap = torch.zeros(num_class).to(predictions.device)\n empty_class = 0\n for idx_cls in range(num_class):\n prediction = predictions[:, idx_cls]\n label = labels[:, idx_cls]\n #mask = label.abs() == 1\n if (label > 0).sum() == 0:\n empty_class += 1\n continue\n binary_label = torch.clamp(label, min=0, max=1)\n sorted_pred, sort_idx = prediction.sort(descending=True)\n sorted_label = binary_label[sort_idx]\n tmp = (sorted_label == 1).float()\n tp = tmp.cumsum(0)\n fp = (sorted_label != 1).float().cumsum(0)\n num_pos = binary_label.sum()\n rec = tp/num_pos\n prec = tp/(tp+fp)\n ap_cls = (tmp*prec).sum()/num_pos\n ap[idx_cls].copy_(ap_cls)\n return ap" }, { "identifier": "compute_F1", "path": "utils.py", "snippet": "def compute_F1(predictions, labels, mode_F1, k_val, use_relative=False):\n if k_val >= 1:\n idx = predictions.topk(dim=1, k=k_val)[1]\n predictions.fill_(0)\n predictions.scatter_(dim=1, index=idx, src=torch.ones(predictions.size(0), k_val, dtype=predictions.dtype).to(predictions.device))\n else:\n if use_relative:\n ma = predictions.max(dim=1)[0]\n mi = predictions.min(dim=1)[0]\n step = ma - mi\n thres = mi + k_val * step\n \n for i in range(predictions.shape[0]):\n predictions[i][predictions[i] > thres[i]] = 1\n predictions[i][predictions[i] <= thres[i]] = 0\n else:\n predictions[predictions > k_val] = 1\n predictions[predictions <= k_val] = 0\n \n if mode_F1 == 'overall':\n predictions = predictions.bool()\n labels = labels.bool()\n TPs = ( predictions & labels).sum()\n FPs = ( predictions & ~labels).sum()\n FNs = (~predictions & labels).sum()\n eps = 1.e-9\n Ps = TPs / (TPs + FPs + eps)\n Rs = TPs / (TPs + FNs + eps)\n p = Ps.mean()\n r = Rs.mean()\n f1 = 2*p*r/(p+r)\n \n \n elif 
mode_F1 == 'category':\n # calculate P and R\n predictions = predictions.bool()\n labels = labels.bool()\n TPs = ( predictions & labels).sum(axis=0)\n FPs = ( predictions & ~labels).sum(axis=0)\n FNs = (~predictions & labels).sum(axis=0)\n eps = 1.e-9\n Ps = TPs / (TPs + FPs + eps)\n Rs = TPs / (TPs + FNs + eps)\n p = Ps.mean()\n r = Rs.mean()\n f1 = 2*p*r/(p+r)\n \n elif mode_F1 == 'sample':\n # calculate P and R\n predictions = predictions.bool()\n labels = labels.bool()\n TPs = ( predictions & labels).sum(axis=1)\n FPs = ( predictions & ~labels).sum(axis=1)\n FNs = (~predictions & labels).sum(axis=1)\n eps = 1.e-9\n Ps = TPs / (TPs + FPs + eps)\n Rs = TPs / (TPs + FNs + eps)\n p = Ps.mean()\n r = Rs.mean()\n f1 = 2*p*r/(p+r)\n\n return f1, p, r" }, { "identifier": "_transform_resize", "path": "utils.py", "snippet": "def _transform_resize(h, w):\n return Compose([\n #Resize(n_px, interpolation=BICUBIC),\n Resize((h,w), interpolation=BICUBIC),\n #CenterCrop(n_px),\n #RandomHorizontalFlip(1.0),\n _convert_image_to_rgb,\n ToTensor(),\n Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),\n ])" }, { "identifier": "class_names_voc", "path": "clip_text.py", "snippet": "BACKGROUND_CATEGORY_VOC = ['ground','land','grass','tree','building','wall','sky','lake','water','river','sea','railway','railroad','keyboard','helmet',\n 'cloud','house','mountain','ocean','road','rock','street','valley','bridge','sign',\n ]\nBACKGROUND_CATEGORY_COCO = ['ground','land','grass','tree','building','wall','sky','lake','water','river','sea','railway','railroad','helmet',\n 'cloud','house','mountain','ocean','road','rock','street','valley','bridge',\n ]" } ]
import clip import torch import cv2 import numpy as np import pickle import os import math import torch.nn.functional as F import os import argparse import warnings from PIL import Image from tqdm import tqdm from lxml import etree from utils import scoremap2bbox, parse_xml_to_dict, _convert_image_to_rgb, compute_AP, compute_F1, _transform_resize from clip_text import class_names_voc, BACKGROUND_CATEGORY_VOC, class_names_coco, BACKGROUND_CATEGORY_COCO, class_names_coco_stuff182_dict, coco_stuff_182_to_27
token_num: 3,150
candidate_cls_list = [] logits_refined = logits.clone() logits_max = torch.max(logits, dim=0)[0] for tempid,tempv in enumerate(logits_max): if tempv > 0: candidate_cls_list.append(tempid) for ccls in candidate_cls_list: temp_logits = logits[:,ccls] temp_logits = temp_logits - temp_logits.min() temp_logits = temp_logits / temp_logits.max() mask = temp_logits mask = mask.reshape(h // patch_size, w // patch_size) box, cnt = scoremap2bbox(mask.detach().cpu().numpy(), threshold=temp_logits.mean(), multi_contour_eval=True) aff_mask = torch.zeros((mask.shape[0],mask.shape[1])).to(device) for i_ in range(cnt): x0_, y0_, x1_, y1_ = box[i_] aff_mask[y0_:y1_, x0_:x1_] = 1 aff_mask = aff_mask.view(1,mask.shape[0] * mask.shape[1]) trans_mat = attn_weight * aff_mask logits_refined_ccls = torch.matmul(trans_mat, logits_coarse[:,ccls:ccls+1]) logits_refined[:, ccls] = logits_refined_ccls.squeeze() return logits_refined def cwr(logits, logits_max, h, w, image, text_features): patch_size = 16 input_size = 224 stride = input_size // patch_size candidate_cls_list = [] ma = logits.max() mi = logits.min() step = ma - mi if args.dataset == 'cocostuff': thres_abs = 0.1 else: thres_abs = 0.5 thres = mi + thres_abs*step for tempid,tempv in enumerate(logits_max): if tempv > thres: candidate_cls_list.append(tempid) for ccls in candidate_cls_list: temp_logits = logits[:,ccls] temp_logits = temp_logits - temp_logits.min() temp_logits = temp_logits / temp_logits.max() mask = temp_logits > 0.5 mask = mask.reshape(h // patch_size, w // patch_size) horizontal_indicies = np.where(np.any(mask.cpu().numpy(), axis=0))[0] vertical_indicies = np.where(np.any(mask.cpu().numpy(), axis=1))[0] if horizontal_indicies.shape[0]: x1, x2 = horizontal_indicies[[0, -1]] y1, y2 = vertical_indicies[[0, -1]] x2 += 1 y2 += 1 else: x1, x2, y1, y2 = 0, 0, 0, 0 y1 = max(y1, 0) x1 = max(x1, 0) y2 = min(y2, mask.shape[-2] - 1) x2 = min(x2, mask.shape[-1] - 1) if x1 == x2 or y1 == y2: return logits_max mask = mask[y1:y2, x1:x2] mask = mask.float() mask = mask[None, None, :, :] mask = F.interpolate(mask, size=(stride, stride), mode="nearest") mask = mask.squeeze() mask = mask.reshape(-1).bool() image_cut = image[:, :, int(y1*patch_size):int(y2*patch_size), int(x1*patch_size):int(x2*patch_size)] image_cut = F.interpolate(image_cut, size=(input_size, input_size), mode="bilinear", align_corners=False) cls_attn = 1 - torch.ones((stride*stride+1, stride*stride+1)) for j in range(1, cls_attn.shape[1]): if not mask[j - 1]: cls_attn[0, j] = -1000 image_features = model.encode_image_tagclip(image_cut, input_size, input_size, attn_mask=cls_attn)[0] image_features = image_features / image_features.norm(dim=-1, keepdim=True) logit_scale = model.logit_scale.exp() cur_logits = logit_scale * image_features @ text_features.t() cur_logits = cur_logits[:, 0, :] cur_logits = cur_logits.softmax(dim=-1).squeeze() cur_logits_norm = cur_logits[ccls] logits_max[ccls] = 0.5 * logits_max[ccls] + (1 - 0.5) * cur_logits_norm return logits_max def classify(): pred_label_id = [] gt_label_id = [] with torch.no_grad(): text_features = clip.encode_text_with_prompt_ensemble(model, class_names, device) text_features = text_features / text_features.norm(dim=-1, keepdim=True) for im_idx, im in enumerate(tqdm(image_list)): image_path = os.path.join(args.img_root, im) label_id_list = all_label_list[im_idx] label_id_list = [int(lid) for lid in label_id_list] if args.dataset == 'cocostuff': label_id_list = [coco_stuff_182_to_171[int(lid)] for lid in label_id_list] 
gt_label_id.append(label_id_list) pil_img = Image.open(image_path) array_img = np.array(pil_img) ori_height, ori_width = array_img.shape[:2] if len(array_img.shape) == 2: array_img = np.stack([array_img, array_img, array_img], axis=2) pil_img = Image.fromarray(np.uint8(array_img)) if model_type == 'clip': patch_size = 16
warnings.filterwarnings("ignore") def mask_attn(logits_coarse, logits, h, w, attn_weight): patch_size = 16 candidate_cls_list = [] logits_refined = logits.clone() logits_max = torch.max(logits, dim=0)[0] for tempid,tempv in enumerate(logits_max): if tempv > 0: candidate_cls_list.append(tempid) for ccls in candidate_cls_list: temp_logits = logits[:,ccls] temp_logits = temp_logits - temp_logits.min() temp_logits = temp_logits / temp_logits.max() mask = temp_logits mask = mask.reshape(h // patch_size, w // patch_size) box, cnt = scoremap2bbox(mask.detach().cpu().numpy(), threshold=temp_logits.mean(), multi_contour_eval=True) aff_mask = torch.zeros((mask.shape[0],mask.shape[1])).to(device) for i_ in range(cnt): x0_, y0_, x1_, y1_ = box[i_] aff_mask[y0_:y1_, x0_:x1_] = 1 aff_mask = aff_mask.view(1,mask.shape[0] * mask.shape[1]) trans_mat = attn_weight * aff_mask logits_refined_ccls = torch.matmul(trans_mat, logits_coarse[:,ccls:ccls+1]) logits_refined[:, ccls] = logits_refined_ccls.squeeze() return logits_refined def cwr(logits, logits_max, h, w, image, text_features): patch_size = 16 input_size = 224 stride = input_size // patch_size candidate_cls_list = [] ma = logits.max() mi = logits.min() step = ma - mi if args.dataset == 'cocostuff': thres_abs = 0.1 else: thres_abs = 0.5 thres = mi + thres_abs*step for tempid,tempv in enumerate(logits_max): if tempv > thres: candidate_cls_list.append(tempid) for ccls in candidate_cls_list: temp_logits = logits[:,ccls] temp_logits = temp_logits - temp_logits.min() temp_logits = temp_logits / temp_logits.max() mask = temp_logits > 0.5 mask = mask.reshape(h // patch_size, w // patch_size) horizontal_indicies = np.where(np.any(mask.cpu().numpy(), axis=0))[0] vertical_indicies = np.where(np.any(mask.cpu().numpy(), axis=1))[0] if horizontal_indicies.shape[0]: x1, x2 = horizontal_indicies[[0, -1]] y1, y2 = vertical_indicies[[0, -1]] x2 += 1 y2 += 1 else: x1, x2, y1, y2 = 0, 0, 0, 0 y1 = max(y1, 0) x1 = max(x1, 0) y2 = min(y2, mask.shape[-2] - 1) x2 = min(x2, mask.shape[-1] - 1) if x1 == x2 or y1 == y2: return logits_max mask = mask[y1:y2, x1:x2] mask = mask.float() mask = mask[None, None, :, :] mask = F.interpolate(mask, size=(stride, stride), mode="nearest") mask = mask.squeeze() mask = mask.reshape(-1).bool() image_cut = image[:, :, int(y1*patch_size):int(y2*patch_size), int(x1*patch_size):int(x2*patch_size)] image_cut = F.interpolate(image_cut, size=(input_size, input_size), mode="bilinear", align_corners=False) cls_attn = 1 - torch.ones((stride*stride+1, stride*stride+1)) for j in range(1, cls_attn.shape[1]): if not mask[j - 1]: cls_attn[0, j] = -1000 image_features = model.encode_image_tagclip(image_cut, input_size, input_size, attn_mask=cls_attn)[0] image_features = image_features / image_features.norm(dim=-1, keepdim=True) logit_scale = model.logit_scale.exp() cur_logits = logit_scale * image_features @ text_features.t() cur_logits = cur_logits[:, 0, :] cur_logits = cur_logits.softmax(dim=-1).squeeze() cur_logits_norm = cur_logits[ccls] logits_max[ccls] = 0.5 * logits_max[ccls] + (1 - 0.5) * cur_logits_norm return logits_max def classify(): pred_label_id = [] gt_label_id = [] with torch.no_grad(): text_features = clip.encode_text_with_prompt_ensemble(model, class_names, device) text_features = text_features / text_features.norm(dim=-1, keepdim=True) for im_idx, im in enumerate(tqdm(image_list)): image_path = os.path.join(args.img_root, im) label_id_list = all_label_list[im_idx] label_id_list = [int(lid) for lid in label_id_list] if args.dataset == 
'cocostuff': label_id_list = [coco_stuff_182_to_171[int(lid)] for lid in label_id_list] gt_label_id.append(label_id_list) pil_img = Image.open(image_path) array_img = np.array(pil_img) ori_height, ori_width = array_img.shape[:2] if len(array_img.shape) == 2: array_img = np.stack([array_img, array_img, array_img], axis=2) pil_img = Image.fromarray(np.uint8(array_img)) if model_type == 'clip': patch_size = 16
next_line: preprocess = _transform_resize(int(np.ceil(int(ori_height) / patch_size) * patch_size), int(np.ceil(int(ori_width) / patch_size) * patch_size))
gold_snippet_index: 5
created_at: 2023-12-21 03:20:47+00:00
level: 4k
repo_name: cypypccpy/dynamic_handover
file_path: dexteroushandenvs/algorithms/utils/mani_skill_learn/networks/backbones/mlp.py
[ { "identifier": "BACKBONES", "path": "dexteroushandenvs/algorithms/utils/mani_skill_learn/networks/builder.py", "snippet": "BACKBONES = Registry('backbone')" }, { "identifier": "ConvModule", "path": "dexteroushandenvs/algorithms/utils/mani_skill_learn/networks/modules/conv_module.py", "snippet": "class ConvModule(nn.Module):\n \"\"\"A conv block that bundles conv/norm/activation layers.\n\n This block simplifies the usage of convolution layers, which are commonly\n used with a norm layer (e.g., BatchNorm) and activation layer (e.g., ReLU).\n It is based upon three build methods: `build_conv_layer()`,\n `build_norm_layer()` and `build_activation_layer()`.\n\n Besides, we add some additional features in this module.\n 1. Automatically set `bias` of the conv layer.\n 2. Spectral norm is supported.\n 3. More padding modes are supported. Before PyTorch 1.5, nn.Conv2d only\n supports zero and circular padding, and we add \"reflect\" padding mode.\n\n Args:\n in_channels (int): Same as nn.Conv2d.\n out_channels (int): Same as nn.Conv2d.\n kernel_size (int | tuple[int]): Same as nn.Conv2d.\n stride (int | tuple[int]): Same as nn.Conv2d.\n padding (int | tuple[int]): Same as nn.Conv2d.\n dilation (int | tuple[int]): Same as nn.Conv2d.\n groups (int): Same as nn.Conv2d.\n bias (bool | str): If specified as `auto`, it will be decided by the norm_cfg. Bias will be set as True if\n `norm_cfg` is None, otherwise False. Default: \"auto\".\n conv_cfg (dict): Config dict for convolution layer. Default: None, which means using conv2d.\n norm_cfg (dict): Config dict for normalization layer. Default: None.\n act_cfg (dict): Config dict for activation layer. Default: dict(type='ReLU').\n inplace (bool): Whether to use inplace mode for activation. Default: True.\n with_spectral_norm (bool): Whether use spectral norm in conv module. Default: False.\n padding_mode (str): If the `padding_mode` has not been supported by current `Conv2d` in PyTorch, we will use our\n own padding layer instead. Default: 'zeros'.\n order (tuple[str]): The order of conv/norm/activation layers. It is a sequence of \"conv\", \"norm\" and \"act\".\n Common examples are (\"conv\", \"norm\", \"act\") and (\"act\", \"conv\", \"norm\"). 
Default: ('conv', 'norm', 'act').\n \"\"\"\n\n _abbr_ = 'conv_block'\n\n def __init__(self,\n in_channels,\n out_channels,\n kernel_size,\n stride=1,\n padding=0,\n dilation=1,\n groups=1,\n bias='auto',\n conv_cfg=None,\n norm_cfg=None,\n act_cfg=dict(type='ReLU'),\n inplace=True,\n with_spectral_norm=False,\n padding_mode='zeros',\n order=('conv', 'norm', 'act')):\n super(ConvModule, self).__init__()\n assert conv_cfg is None or isinstance(conv_cfg, dict)\n assert norm_cfg is None or isinstance(norm_cfg, dict)\n assert act_cfg is None or isinstance(act_cfg, dict)\n assert isinstance(order, tuple) and len(order) == 3\n assert set(order) == set(['conv', 'norm', 'act'])\n\n official_padding_mode = ['zeros', 'reflect', 'replicate', 'circular'] # Pytorch >= 1.7.1\n self.conv_cfg = conv_cfg\n self.norm_cfg = norm_cfg\n self.act_cfg = act_cfg\n self.inplace = inplace\n self.with_spectral_norm = with_spectral_norm\n self.with_explicit_padding = padding_mode not in official_padding_mode\n self.order = order\n\n self.with_norm = norm_cfg is not None\n self.with_activation = act_cfg is not None\n # if the conv layer is before a norm layer, bias is unnecessary.\n if bias == 'auto':\n bias = not self.with_norm\n self.with_bias = bias\n\n if self.with_norm and self.with_bias:\n warnings.warn('ConvModule has norm and bias at the same time')\n\n if self.with_explicit_padding:\n pad_cfg = dict(type=padding_mode)\n self.padding_layer = build_padding_layer(pad_cfg, padding)\n\n # reset padding to 0 for conv module\n conv_padding = 0 if self.with_explicit_padding else padding\n\n # build convolution layer\n self.conv = build_conv_layer(\n conv_cfg,\n in_channels,\n out_channels,\n kernel_size,\n stride=stride,\n padding=conv_padding,\n dilation=dilation,\n groups=groups,\n bias=bias)\n # export the attributes of self.conv to a higher level for convenience\n self.in_channels = self.conv.in_channels\n self.out_channels = self.conv.out_channels\n self.kernel_size = self.conv.kernel_size\n self.stride = self.conv.stride\n self.padding = padding\n self.dilation = self.conv.dilation\n self.transposed = self.conv.transposed\n self.output_padding = self.conv.output_padding\n self.groups = self.conv.groups\n\n if self.with_spectral_norm:\n self.conv = nn.utils.spectral_norm(self.conv)\n\n # build normalization layers\n if self.with_norm:\n # norm layer is after conv layer\n if order.index('norm') > order.index('conv'):\n norm_channels = out_channels\n else:\n norm_channels = in_channels\n self.norm_name, norm = build_norm_layer(norm_cfg, norm_channels)\n self.add_module(self.norm_name, norm)\n\n # build activation layer\n if self.with_activation:\n act_cfg_ = act_cfg.copy()\n # nn.Tanh has no 'inplace' argument\n if act_cfg_['type'] in ['ELU', 'Hardsigmoid', 'Hardtanh', 'Hardswish', 'ReLU', 'LeakyReLU', 'ReLU6',\n 'RReLU', 'SELU', 'CELU', 'SiLU', 'Threshold']:\n act_cfg_.setdefault('inplace', inplace)\n self.activate = build_activation_layer(act_cfg_)\n # Use msra init by default\n self.init_weights()\n\n @property\n def norm(self):\n return getattr(self, self.norm_name)\n\n def init_weights(self):\n # 1. It is mainly for customized conv layers with their own initialization manners by calling their own\n # ``init_weights()``, and we do not want ConvModule to override the initialization.\n # 2. 
For customized conv layers without their own initialization manners (that is, they don't have their own\n # ``init_weights()``) and PyTorch's conv layers, they will be initialized by this method with default\n # ``kaiming_init``.\n # Note: For PyTorch's conv layers, they will be overwritten by our initialization implementation using\n # default ``kaiming_init``.\n if not hasattr(self.conv, 'init_weights'):\n if self.with_activation and self.act_cfg['type'] == 'LeakyReLU':\n nonlinearity = 'leaky_relu'\n a = self.act_cfg.get('negative_slope', 0.01)\n else:\n nonlinearity = 'relu'\n a = 0\n kaiming_init(self.conv, a=a, nonlinearity=nonlinearity)\n if self.with_norm:\n constant_init(self.norm, 1, bias=0)\n\n def forward(self, x, activate=True, norm=True):\n for layer in self.order:\n if layer == 'conv':\n if self.with_explicit_padding:\n x = self.padding_layer(x)\n x = self.conv(x)\n elif layer == 'norm' and norm and self.with_norm:\n x = self.norm(x)\n elif layer == 'act' and activate and self.with_activation:\n x = self.activate(x)\n return x" }, { "identifier": "build_init", "path": "dexteroushandenvs/algorithms/utils/mani_skill_learn/networks/modules/weight_init.py", "snippet": "def build_init(cfg, *args, **kwargs):\n if not isinstance(cfg, dict):\n raise TypeError('cfg must be a dict')\n if 'type' not in cfg:\n raise KeyError('the cfg dict must contain the key \"type\"')\n cfg_ = cfg.copy()\n init_type = cfg_.pop('type')\n if init_type not in INIT:\n raise KeyError(f'Unrecognized norm type {init_type}')\n else:\n init_func = INIT.get(init_type)\n kwargs.update(cfg_)\n init_func_ret = lambda _: init_func(_, *args, **kwargs)\n return init_func_ret" }, { "identifier": "build_activation_layer", "path": "dexteroushandenvs/algorithms/utils/mani_skill_learn/networks/modules/activation.py", "snippet": "def build_activation_layer(cfg):\n \"\"\"Build activation layer.\n Args:\n cfg (dict): The activation layer config, which should contain:\n - type (str): Layer type.\n - layer args: Args needed to instantiate an activation layer.\n Returns:\n nn.Module: Created activation layer.\n \"\"\"\n return build_from_cfg(cfg, ACTIVATION_LAYERS)" }, { "identifier": "build_norm_layer", "path": "dexteroushandenvs/algorithms/utils/mani_skill_learn/networks/modules/norm.py", "snippet": "def build_norm_layer(cfg, num_features, postfix=''):\n \"\"\"Build normalization layer.\n\n Args:\n cfg (dict): The norm layer config, which should contain:\n - type (str): Layer type.\n - layer args: Args needed to instantiate a norm layer.\n - requires_grad (bool, optional): Whether stop gradient updates.\n num_features (int): Number of input channels.\n postfix (int | str): The postfix to be appended into norm abbreviation\n to create named layer.\n\n Returns:\n (str, nn.Module): The first element is the layer name consisting of abbreviation and postfix, e.g., bn1, gn.\n The second element is the created norm layer.\n \"\"\"\n if not isinstance(cfg, dict):\n raise TypeError('cfg must be a dict')\n if 'type' not in cfg:\n raise KeyError('the cfg dict must contain the key \"type\"')\n cfg_ = cfg.copy()\n\n layer_type = cfg_.pop('type')\n if layer_type not in NORM_LAYERS:\n raise KeyError(f'Unrecognized norm type {layer_type}')\n\n norm_layer = NORM_LAYERS.get(layer_type)\n abbr = infer_abbr(norm_layer)\n\n assert isinstance(postfix, (int, str))\n name = abbr + str(postfix)\n\n requires_grad = cfg_.pop('requires_grad', True)\n cfg_.setdefault('eps', 1e-5)\n if layer_type != 'GN':\n layer = norm_layer(num_features, **cfg_)\n if 
layer_type == 'SyncBN':\n layer._specify_ddp_gpu_num(1)\n else:\n assert 'num_groups' in cfg_\n layer = norm_layer(num_channels=num_features, **cfg_)\n\n for param in layer.parameters():\n param.requires_grad = requires_grad\n\n return name, layer" } ]
import torch.nn as nn from torch.nn.modules.batchnorm import _BatchNorm from algorithms.utils.mani_skill_learn.utils.meta import get_root_logger from algorithms.utils.mani_skill_learn.utils.torch import load_checkpoint from ..builder import BACKBONES from ..modules import ConvModule, build_init from ..modules import build_activation_layer, build_norm_layer
token_num: 3,334
@BACKBONES.register_module() class LinearMLP(nn.Module): def __init__(self, mlp_spec, norm_cfg=dict(type='BN1d'), bias='auto', inactivated_output=True, pretrained=None, linear_init_cfg=None, norm_init_cfg=None): super(LinearMLP, self).__init__() self.mlp = nn.Sequential() for i in range(len(mlp_spec) - 1): if i == len(mlp_spec) - 2 and inactivated_output: act_cfg = None norm_cfg = None else: act_cfg = dict(type='ReLU') bias_i = norm_cfg is None if bias == 'auto' else bias # print(mlp_spec[i], mlp_spec[i + 1], bias_i) self.mlp.add_module(f'linear{i}', nn.Linear(mlp_spec[i], mlp_spec[i + 1], bias=bias_i)) if norm_cfg: self.mlp.add_module(f'norm{i}', build_norm_layer(norm_cfg, mlp_spec[i + 1])[1]) if act_cfg: self.mlp.add_module(f'act{i}', build_activation_layer(act_cfg)) self.init_weights(pretrained, linear_init_cfg, norm_init_cfg) def forward(self, input): input = input return self.mlp(input) def init_weights(self, pretrained=None, linear_init_cfg=None, norm_init_cfg=None): if isinstance(pretrained, str): logger = get_root_logger() load_checkpoint(self, pretrained, strict=False, logger=logger) elif pretrained is None: linear_init = build_init(linear_init_cfg) if linear_init_cfg else None norm_init = build_init(norm_init_cfg) if norm_init_cfg else None for m in self.modules(): if isinstance(m, nn.Linear) and linear_init: linear_init(m) elif isinstance(m, (_BatchNorm, nn.GroupNorm)) and norm_init: norm_init(m) else: raise TypeError('pretrained must be a str or None') @BACKBONES.register_module() class ConvMLP(nn.Module): def __init__(self, mlp_spec, norm_cfg=dict(type='BN1d'), bias='auto', inactivated_output=True, pretrained=None, conv_init_cfg=None, norm_init_cfg=None): super(ConvMLP, self).__init__() self.mlp = nn.Sequential() for i in range(len(mlp_spec) - 1): if i == len(mlp_spec) - 2 and inactivated_output: act_cfg = None else: act_cfg = dict(type='ReLU') self.mlp.add_module( f'layer{i}',
@BACKBONES.register_module() class LinearMLP(nn.Module): def __init__(self, mlp_spec, norm_cfg=dict(type='BN1d'), bias='auto', inactivated_output=True, pretrained=None, linear_init_cfg=None, norm_init_cfg=None): super(LinearMLP, self).__init__() self.mlp = nn.Sequential() for i in range(len(mlp_spec) - 1): if i == len(mlp_spec) - 2 and inactivated_output: act_cfg = None norm_cfg = None else: act_cfg = dict(type='ReLU') bias_i = norm_cfg is None if bias == 'auto' else bias # print(mlp_spec[i], mlp_spec[i + 1], bias_i) self.mlp.add_module(f'linear{i}', nn.Linear(mlp_spec[i], mlp_spec[i + 1], bias=bias_i)) if norm_cfg: self.mlp.add_module(f'norm{i}', build_norm_layer(norm_cfg, mlp_spec[i + 1])[1]) if act_cfg: self.mlp.add_module(f'act{i}', build_activation_layer(act_cfg)) self.init_weights(pretrained, linear_init_cfg, norm_init_cfg) def forward(self, input): input = input return self.mlp(input) def init_weights(self, pretrained=None, linear_init_cfg=None, norm_init_cfg=None): if isinstance(pretrained, str): logger = get_root_logger() load_checkpoint(self, pretrained, strict=False, logger=logger) elif pretrained is None: linear_init = build_init(linear_init_cfg) if linear_init_cfg else None norm_init = build_init(norm_init_cfg) if norm_init_cfg else None for m in self.modules(): if isinstance(m, nn.Linear) and linear_init: linear_init(m) elif isinstance(m, (_BatchNorm, nn.GroupNorm)) and norm_init: norm_init(m) else: raise TypeError('pretrained must be a str or None') @BACKBONES.register_module() class ConvMLP(nn.Module): def __init__(self, mlp_spec, norm_cfg=dict(type='BN1d'), bias='auto', inactivated_output=True, pretrained=None, conv_init_cfg=None, norm_init_cfg=None): super(ConvMLP, self).__init__() self.mlp = nn.Sequential() for i in range(len(mlp_spec) - 1): if i == len(mlp_spec) - 2 and inactivated_output: act_cfg = None else: act_cfg = dict(type='ReLU') self.mlp.add_module( f'layer{i}',
next_line: ConvModule(
gold_snippet_index: 1
created_at: 2023-12-16 16:49:38+00:00
level: 4k
repo_name: video-db/videodb-python
file_path: videodb/video.py
[ { "identifier": "play_stream", "path": "videodb/_utils/_video.py", "snippet": "def play_stream(url: str):\n \"\"\"Play a stream url in the browser/ notebook\n\n :param str url: The url of the stream\n :return: The player url if the stream is opened in the browser or the iframe if the stream is opened in the notebook\n \"\"\"\n player = f\"{PLAYER_URL}?url={url}\"\n opend = web.open(player)\n if not opend:\n try:\n from IPython.display import IFrame\n\n player_width = 800\n player_height = 400\n return IFrame(player, player_width, player_height)\n except ImportError:\n return player\n return player" }, { "identifier": "ApiPath", "path": "videodb/_constants.py", "snippet": "class ApiPath:\n collection = \"collection\"\n upload = \"upload\"\n video = \"video\"\n stream = \"stream\"\n thumbnail = \"thumbnail\"\n upload_url = \"upload_url\"\n transcription = \"transcription\"\n index = \"index\"\n search = \"search\"\n compile = \"compile\"\n workflow = \"workflow\"" }, { "identifier": "SearchType", "path": "videodb/_constants.py", "snippet": "class SearchType:\n semantic = \"semantic\"" }, { "identifier": "IndexType", "path": "videodb/_constants.py", "snippet": "class IndexType:\n semantic = \"semantic\"" }, { "identifier": "Workflows", "path": "videodb/_constants.py", "snippet": "class Workflows:\n add_subtitles = \"add_subtitles\"" }, { "identifier": "SearchFactory", "path": "videodb/search.py", "snippet": "class SearchFactory:\n def __init__(self, _connection):\n self._connection = _connection\n\n def get_search(self, type: str):\n if type not in search_type:\n raise SearchError(\n f\"Invalid search type: {type}. Valid search types are: {list(search_type.keys())}\"\n )\n return search_type[type](self._connection)" }, { "identifier": "SearchResult", "path": "videodb/search.py", "snippet": "class SearchResult:\n def __init__(self, _connection, **kwargs):\n self._connection = _connection\n self.shots = []\n self.stream_url = None\n self.player_url = None\n self.collection_id = \"default\"\n self._results = kwargs.get(\"results\", [])\n self._format_results()\n\n def _format_results(self):\n for result in self._results:\n self.collection_id = result.get(\"collection_id\")\n for doc in result.get(\"docs\"):\n self.shots.append(\n Shot(\n self._connection,\n result.get(\"video_id\"),\n result.get(\"length\"),\n result.get(\"title\"),\n doc.get(\"start\"),\n doc.get(\"end\"),\n doc.get(\"text\"),\n doc.get(\"score\"),\n )\n )\n\n def __repr__(self) -> str:\n return (\n f\"SearchResult(\"\n f\"collection_id={self.collection_id}, \"\n f\"stream_url={self.stream_url}, \"\n f\"player_url={self.player_url}, \"\n f\"shots={self.shots})\"\n )\n\n def get_shots(self) -> List[Shot]:\n return self.shots\n\n def compile(self) -> str:\n \"\"\"Compile the search result shots into a stream url\n\n :raises SearchError: If no shots are found in the search results\n :return: The stream url\n :rtype: str\n \"\"\"\n if self.stream_url:\n return self.stream_url\n elif self.shots:\n compile_data = self._connection.post(\n path=f\"{ApiPath.compile}\",\n data=[\n {\n \"video_id\": shot.video_id,\n \"collection_id\": self.collection_id,\n \"shots\": [(shot.start, shot.end)],\n }\n for shot in self.shots\n ],\n )\n self.stream_url = compile_data.get(\"stream_url\")\n self.player_url = compile_data.get(\"player_url\")\n return self.stream_url\n\n else:\n raise SearchError(\"No shots found in search results to compile\")\n\n def play(self) -> str:\n \"\"\"Generate a stream url for the shot and open it in the default 
browser\n\n :return: The stream url\n :rtype: str\n \"\"\"\n self.compile()\n return play_stream(self.stream_url)" }, { "identifier": "Shot", "path": "videodb/shot.py", "snippet": "class Shot:\n \"\"\"A shot is a part of a video that contains a specific scene\"\"\"\n\n def __init__(\n self,\n _connection,\n video_id: str,\n video_length: float,\n video_title: str,\n start: float,\n end: float,\n text: Optional[str] = None,\n search_score: Optional[int] = None,\n ) -> None:\n self._connection = _connection\n self.video_id = video_id\n self.video_length = video_length\n self.video_title = video_title\n self.start = start\n self.end = end\n self.text = text\n self.search_score = search_score\n self.stream_url = None\n self.player_url = None\n\n def __repr__(self) -> str:\n return (\n f\"Shot(\"\n f\"video_id={self.video_id}, \"\n f\"video_title={self.video_title}, \"\n f\"start={self.start}, \"\n f\"end={self.end}, \"\n f\"text={self.text}, \"\n f\"search_score={self.search_score}, \"\n f\"stream_url={self.stream_url}, \"\n f\"player_url={self.player_url})\"\n )\n\n def __getitem__(self, key):\n \"\"\"Get an item from the shot object\"\"\"\n return self.__dict__[key]\n\n def generate_stream(self) -> str:\n \"\"\"Generate a stream url for the shot\n\n :return: The stream url\n :rtype: str\n \"\"\"\n\n if self.stream_url:\n return self.stream_url\n else:\n stream_data = self._connection.post(\n path=f\"{ApiPath.video}/{self.video_id}/{ApiPath.stream}\",\n data={\n \"timeline\": [(self.start, self.end)],\n \"length\": self.video_length,\n },\n )\n self.stream_url = stream_data.get(\"stream_url\")\n self.player_url = stream_data.get(\"player_url\")\n return self.stream_url\n\n def play(self) -> str:\n \"\"\"Generate a stream url for the shot and open it in the default browser/ notebook\n\n :return: The stream url\n :rtype: str\n \"\"\"\n self.generate_stream()\n return play_stream(self.stream_url)" } ]
from typing import Optional from videodb._utils._video import play_stream from videodb._constants import ( ApiPath, SearchType, IndexType, Workflows, ) from videodb.search import SearchFactory, SearchResult from videodb.shot import Shot
token_num: 1,948
class Video: def __init__(self, _connection, id: str, collection_id: str, **kwargs) -> None: self._connection = _connection self.id = id self.collection_id = collection_id self.stream_url = kwargs.get("stream_url", None) self.player_url = kwargs.get("player_url", None) self.name = kwargs.get("name", None) self.description = kwargs.get("description", None) self.thumbnail_url = kwargs.get("thumbnail_url", None) self.length = float(kwargs.get("length", 0.0)) self.transcript = kwargs.get("transcript", None) self.transcript_text = kwargs.get("transcript_text", None) def __repr__(self) -> str: return ( f"Video(" f"id={self.id}, " f"collection_id={self.collection_id}, " f"stream_url={self.stream_url}, " f"player_url={self.player_url}, " f"name={self.name}, " f"description={self.description}, " f"thumbnail_url={self.thumbnail_url}, " f"length={self.length})" ) def __getitem__(self, key): return self.__dict__[key] def search( self, query: str, search_type: Optional[str] = SearchType.semantic, result_threshold: Optional[int] = None, score_threshold: Optional[int] = None, dynamic_score_percentage: Optional[int] = None, ) -> SearchResult:
class Video: def __init__(self, _connection, id: str, collection_id: str, **kwargs) -> None: self._connection = _connection self.id = id self.collection_id = collection_id self.stream_url = kwargs.get("stream_url", None) self.player_url = kwargs.get("player_url", None) self.name = kwargs.get("name", None) self.description = kwargs.get("description", None) self.thumbnail_url = kwargs.get("thumbnail_url", None) self.length = float(kwargs.get("length", 0.0)) self.transcript = kwargs.get("transcript", None) self.transcript_text = kwargs.get("transcript_text", None) def __repr__(self) -> str: return ( f"Video(" f"id={self.id}, " f"collection_id={self.collection_id}, " f"stream_url={self.stream_url}, " f"player_url={self.player_url}, " f"name={self.name}, " f"description={self.description}, " f"thumbnail_url={self.thumbnail_url}, " f"length={self.length})" ) def __getitem__(self, key): return self.__dict__[key] def search( self, query: str, search_type: Optional[str] = SearchType.semantic, result_threshold: Optional[int] = None, score_threshold: Optional[int] = None, dynamic_score_percentage: Optional[int] = None, ) -> SearchResult:
next_line: search = SearchFactory(self._connection).get_search(search_type)
gold_snippet_index: 5
created_at: 2023-12-18 15:20:04+00:00
level: 4k
repo_name: IDEA-CCNL/Real-Gemini
file_path: test/test_tool.py
[ { "identifier": "Text2MusicTool", "path": "real_gemini/tools/music_tool.py", "snippet": "class Text2MusicTool(object):\n _name_ = \"Text2Music\"\n _description_ = \"这个工具是从文本生成音乐的调用接口,它可以根据一段文字,生成符合这段文字内容的音乐风格。本工具的输入是一段文本指令。This tool is an API that generates music from text. It can create music that matches the style of the given text content. The input for this tool is a text command.\"\n _return_direct_ = True\n\n def __init__(self):\n self.translator = ChatOpenAI(\n model=\"gpt-3.5-turbo\",\n max_tokens=256)\n self.host = os.getenv(\"MUSIC_SERVER_HOST\")\n self.port = os.getenv(\"MUSIC_SERVER_PORT\")\n \n def inference(self, input_str: str):\n messages = []\n messages.append(\n SystemMessage(\n content=[\n {\"type\": \"text\", \"text\": \"你是一个翻译专家,请将我输入的中文翻译成英文。\"}\n ]\n )\n )\n messages.append(HumanMessage(content=input_str))\n\n response_msg = self.translator.invoke(messages)\n input_str_en = response_msg.content\n # print(input_str_en)\n\n url = f\"http://{self.host}:{self.port}/text_to_music\"\n data = {\"text\": input_str_en}\n music_response = requests.post(url, data=data)\n music_response = music_response.json()\n\n # write to file\n save_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))\n save_dir = os.path.join(save_dir, \"test\", \"outputs\")\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n md5 = hashlib.md5()\n md5.update(input_str_en.encode('utf-8'))\n filename = os.path.join(save_dir, md5.hexdigest() + \".wav\")\n \n raw_data = music_response[\"audio\"]\n sampling_rate = music_response[\"sampling_rate\"]\n scipy.io.wavfile.write(\n filename,\n rate=sampling_rate,\n data=np.frombuffer(base64.b64decode(raw_data), np.float32),\n )\n print(\"music filename:\", filename)\n\n result = {\"text\": \"好的,为你生成了一段音乐。\", \"audio\": filename}\n return json.dumps(result, ensure_ascii=False)" }, { "identifier": "TaiyiGeneralTool", "path": "real_gemini/tools/image_generation_tool.py", "snippet": "class TaiyiGeneralTool(object):\n _name_ = \"taiyi general image generation\"\n _description_ = \"Taiyi General的API,用于从文本生成图像。当你需要从文本描述生成图像时非常有用。输入应该是文本,即图像描述。A wrapper around Taiyi General API for text to image generation. Useful for when you need to generate images from a text description. 
Input should be text, i.e, an image description.\"\n _return_direct_ = True\n\n def __init__(self):\n self.prompter = ChatOpenAI(\n model=\"gpt-3.5-turbo\",\n max_tokens=256)\n self.host = os.getenv(\"IMAGE_GENERATION_SERVER_HOST\")\n self.port = os.getenv(\"IMAGE_GENERATION_SERVER_PORT\")\n \n def _upgrade_prompt(self, prompt):\n messages = []\n messages.append(\n SystemMessage(\n content=[\n {\"type\": \"text\", \"text\": \"我正在使用一个Stable Diffusion的AI图像生成工具,我想让你充当我的prompt优化生成器。在我想生成的主题后,请帮我添加各种关键词,使得我的主题的描述更加详细,添加的关键词包括:主体、背景效果、风格、拍摄方式。例如,如果我输入“跑车”,你将生成关键词,如:“跑车,高清,4k,真实细致的跑车摄影,速度动态模糊,赛车场,城市环境,风景道路,戏剧性的天空”\"}\n ]\n )\n )\n messages.append(HumanMessage(content=prompt))\n\n response_msg = self.prompter.invoke(messages)\n new_prompt = response_msg.content\n return new_prompt\n\n def inference(self, inputs):\n url = f\"http://{self.host}:{self.port}/taiyi_xl_general_base64/\"\n headers = {\"Content-Type\": \"application/json\"}\n new_prompt = self._upgrade_prompt(inputs)\n print(\"new prompt:\", new_prompt)\n data = {\"prompt\": new_prompt}\n response = requests.post(url, headers=headers, data=json.dumps(data))\n response = response.json()\n b64_image = response[\"image_base64\"]\n \n # write to file\n save_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))\n save_dir = os.path.join(save_dir, \"test\", \"outputs\")\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n md5 = hashlib.md5()\n md5.update(inputs.encode('utf-8'))\n filename = os.path.join(save_dir, md5.hexdigest() + \".png\")\n save_or_show_image(b64_image, filename)\n \n print(\"image filename:\", filename)\n\n result = {\"text\": \"好的,我用太乙为你生成了一张图片。\", \"image\": filename}\n return json.dumps(result, ensure_ascii=False)" }, { "identifier": "WeatherTool", "path": "real_gemini/tools/weather_tool.py", "snippet": "class WeatherTool(object):\n _name_ = \"WeatherAPI\"\n _description_ = \"这个工具是查询当前和未来天气的调用接口,它可以根据一段文字,这个文字包含一个城市,这个接口可以查询这个城市的天气,注意,本工具的输入是一个字符串。This tool is a weather query API that can retrieve the current and future weather based on a given text, which includes a city name. The API is capable of querying the weather for the specified city. 
Please note that the input for this tool is a string.\"\n _return_direct_ = False\n\n def __init__(self):\n self.gaode_api_key = os.getenv(\"GAODE_API_KEY\")\n \n def inference(self, input_str: str):\n city_name = input_str\n district_name = input_str\n params = self._get_params(city_name, district_name)\n return self._process_response(self._results(params))\n\n def _get_params(self, city_name: str, district_name: str) -> Dict[str, str]:\n \"\"\"Get parameters for GaoDeAPI.\"\"\"\n adcode = self._get_adcode(city_name, district_name)\n params = {\n \"api_key\": self.gaode_api_key,\n \"adcode\": adcode\n }\n print(params)\n return params\n\n def _results(self, params: dict) -> dict:\n \"\"\"Run query through GaoDeAPI and return the raw result.\"\"\"\n # # with HiddenPrints():\n response = requests.get(\"https://restapi.amap.com/v3/weather/weatherInfo?\", {\n \"key\": self.gaode_api_key,\n \"city\": params[\"adcode\"],\n \"extensions\": \"all\",\n \"output\": \"JSON\"\n })\n res = json.loads(response.content)\n return res\n\n def _process_response(self, res: dict) -> str:\n \"\"\"Process response from GaoDeAPI.\"\"\"\n if res[\"status\"] == '0':\n return \"输入的城市信息可能有误或未提供城市信息\"\n if res[\"forecasts\"] is None or len(res[\"forecasts\"]) == 0:\n return \"输入的城市信息可能有误或未提供城市信息\"\n res[\"currentTime\"] = datetime.datetime.now()\n return json.dumps(res[\"forecasts\"], ensure_ascii=False)\n\n def _get_adcode(self, city_name: str, district_name: str) -> str:\n \"\"\"Obtain the regional code of a city based on its name and district/county name.\"\"\"\n # 读取Excel文件\n work_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))\n df = pd.read_excel(\n os.path.join(work_dir, \"test/AMap_adcode_citycode.xlsx\"), sheet_name=\"sheet1\"\n )\n # print(df)\n # 将所有NaN值转换成0\n df = df.dropna()\n if district_name is not None and district_name != '':\n # 根据'city_name'列检索数据\n result = df[df['中文名'].str.contains(district_name)]\n json_data = result.to_json(orient='records', force_ascii=False)\n # 解析 JSON 数据\n json_array = json.loads(json_data)\n\n # 如果区域名称为空,用城市名称去查\n if (district_name is None or district_name == '') and city_name != '':\n # 根据'city_name'列检索数据\n result = df[df['中文名'].str.contains(city_name)]\n json_data = result.to_json(orient='records', force_ascii=False)\n # 解析 JSON 数据\n json_array = json.loads(json_data)\n\n # 如果没数据直接返回空\n if len(json_array) == 0:\n # 根据'citycode'列检索数据\n result = df[df['中文名'].str.contains(city_name)]\n json_data = result.to_json(orient='records', force_ascii=False)\n # 解析 JSON 数据\n json_array = json.loads(json_data)\n\n # 如果只有一条直接返回\n if len(json_array) == 1:\n return json_array[0]['adcode']\n\n # 如果有多条再根据district_name进行检索\n if len(json_array) > 1:\n for obj in json_array:\n if district_name is not None and district_name != '' and district_name in obj['中文名']:\n return obj['adcode']\n if city_name in obj['district_name']:\n return obj['adcode']\n return \"输入的城市信息可能有误或未提供城市信息\"" }, { "identifier": "TTSTool", "path": "real_gemini/tools/tts_tool.py", "snippet": "class TTSTool(object):\n _name_ = \"Text To Speech\"\n _description_ = \"这个工具是从文本转语音的调用接口,它可以根据一段文字,生成符合这段文本的wav语音。本工具的输入是一段文本指令。This tool is a text-to-speech API interface, which can generate a wav voice consistent with a piece of text based on it. 
The input of this tool is a piece of text command.\"\n _return_direct_ = True\n\n def __init__(self):\n self.host = os.getenv(\"TTS_SERVER_HOST\")\n self.port = os.getenv(\"TTS_SERVER_PORT\")\n \n def inference(self, input_str: str):\n\n url = f\"http://{self.host}:{self.port}/tts\"\n data = {\"prompt\": input_str}\n response = requests.post(url, data=data)\n response = response.json()\n\n audio_array = np.frombuffer(base64.b64decode(response[\"audio\"]), np.float32)\n rate = response[\"sample_rate\"]\n\n # write to file\n # save_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))\n # save_dir = os.path.join(save_dir, \"test\", \"outputs\")\n # md5 = hashlib.md5()\n # md5.update(input_str.encode('utf-8'))\n # filename = os.path.join(save_dir, md5.hexdigest() + \".wav\")\n # torchaudio.save(filename, audio_array, rate)\n\n return audio_array, rate, convert_to_wav_bytes(audio_array, rate)" } ]
import os import sys import json import argparse from real_gemini.tools.music_tool import Text2MusicTool from real_gemini.tools.image_generation_tool import TaiyiGeneralTool from real_gemini.tools.weather_tool import WeatherTool from real_gemini.tools.tts_tool import TTSTool from dotenv import load_dotenv
token_num: 2,979
#encoding=utf8 sys.path.append(os.path.dirname(os.path.dirname(__file__))) TOOL_DICT = { "music": Text2MusicTool, "image": TaiyiGeneralTool, "weather": WeatherTool,
#encoding=utf8 sys.path.append(os.path.dirname(os.path.dirname(__file__))) TOOL_DICT = { "music": Text2MusicTool, "image": TaiyiGeneralTool, "weather": WeatherTool,
"tts": TTSTool,
3
2023-12-15 04:09:37+00:00
4k
aiim-research/GRETEL
src/explainer/ensemble/aggregators/top_select.py
[ { "identifier": "Explainer", "path": "src/core/explainer_base.py", "snippet": "class Explainer(Configurable, metaclass=ABCMeta):\n \n def __init__(self, context: Context, local_config):\n self.dataset = retake_dataset(local_config)\n self.oracle = retake_oracle(local_config)\n super().__init__(context, local_config)\n \n\n @abstractmethod\n def explain(self, instance):\n pass\n\n def check_configuration(self):\n super().check_configuration()\n self.local_config['parameters']['fold_id'] = self.local_config['parameters'].get('fold_id', -1)\n self.fold_id = self.local_config['parameters']['fold_id'] " }, { "identifier": "GraphInstance", "path": "src/dataset/instances/graph.py", "snippet": "class GraphInstance(DataInstance):\n\n def __init__(self, id, label, data, node_features=None, edge_features=None, edge_weights=None, graph_features=None, dataset=None):\n super().__init__(id, label, data, dataset=dataset)\n self.node_features = self.__init_node_features(node_features).astype(np.float32)\n self.edge_features = self.__init_edge_features(edge_features).astype(np.float32)\n self.edge_weights = self.__init_edge_weights(edge_weights).astype(np.float32)\n self.graph_features = graph_features\n self._nx_repr = None\n\n def __deepcopy__(self, memo):\n # Fields that are being shallow copied\n _dataset = self._dataset\n\n # Fields that are deep copied\n _new_id = deepcopy(self.id, memo)\n _new_label = deepcopy(self.label, memo)\n _data = deepcopy(self.data, memo)\n _node_features = deepcopy(self.node_features, memo)\n _edge_features = deepcopy(self.edge_features, memo)\n _edge_weights = deepcopy(self.edge_weights, memo)\n _graph_features = deepcopy(self.graph_features, memo)\n return GraphInstance(_new_id, _new_label, _data, _node_features, _edge_features, _edge_weights, _graph_features)\n\n def get_nx(self):\n if not self._nx_repr:\n self._nx_repr = self._build_nx()\n return deepcopy(self._nx_repr)\n \n def __init_node_features(self, node_features):\n return np.zeros((self.data.shape[0], 1)) if isinstance(node_features, (str, type(None))) else node_features\n\n def __init_edge_features(self, edge_features):\n edges = np.nonzero(self.data)\n return np.ones((len(edges[0]), 1)) if isinstance(edge_features, (str, type(None))) else edge_features\n \n def __init_edge_weights(self, edge_weights):\n edges = np.nonzero(self.data)\n return np.ones(len(edges[0])) if edge_weights is None else edge_weights\n \n def _build_nx(self):\n nx_repr = nx.from_numpy_array(self.data)\n nx_repr.add_nodes_from([node, {'node_features': self.node_features[node]}] for node in nx_repr.nodes())\n edges = list(nx_repr.edges)\n nx_repr.add_edges_from([(edge[0], edge[1], {'edge_features': self.edge_features[i], 'weight': self.edge_weights[i]}) for i, edge in enumerate(edges)])\n return nx_repr\n \n @property\n def num_edges(self):\n nx_repr = self.get_nx()\n return nx_repr.number_of_edges()\n \n @property\n def num_nodes(self):\n return len(self.data)\n \n def nodes(self):\n return [ i for i in range(self.data.shape[0])]\n\n def neighbors(self, node):\n return [i for i in self.data[node,:] if i != 0]\n \n def degree(self,node):\n return len(self.neighbors(node))\n \n def degrees(self):\n return [ len(self.neighbors(y)) for y in self.nodes()]" }, { "identifier": "ExplanationAggregator", "path": "src/explainer/ensemble/aggregators/base.py", "snippet": "class ExplanationAggregator(Configurable):\n\n def init(self):\n self.dataset: Dataset = retake_dataset(self.local_config)\n self.oracle: Oracle = retake_oracle(self.local_config)\n 
\n inject_dataset(self.local_config['parameters']['node_feature_aggregator'], self.dataset)\n inject_oracle(self.local_config['parameters']['node_feature_aggregator'], self.oracle)\n \n \n self.node_feature_aggregator: NodeFeatureAggregator = get_instance_kvargs(self.local_config['parameters']['node_feature_aggregator']['class'],\n {'context':self.context,'local_config': self.local_config['parameters']['node_feature_aggregator']})\n super().init()\n\n def aggregate(self, instance: DataInstance, explanations: List[DataInstance]):\n aggregated_instance = self.real_aggregate(instance, explanations)\n # we need to combine:\n # 1) node features\n # 2) edge features\n # 3) graph features\n adj = aggregated_instance.data\n edges = np.nonzero(adj)\n # if there's at least one edge that the aggreagtor produced\n # then get the features of the incident nodes\n if edges[0].size:\n node_features = self.node_feature_aggregator.aggregate(\n np.array(list(range(adj.shape[0]))), \n explanations\n )\n\n cf_candidate = GraphInstance(id=instance.id,\n label=1-instance.label,\n data=adj,\n node_features=node_features,\n dataset=instance._dataset)\n\n for manipulator in cf_candidate._dataset.manipulators:\n manipulator._process_instance(cf_candidate)\n else:\n cf_candidate = deepcopy(instance)\n \n return cf_candidate\n \n \n def real_aggregate(self, instance: DataInstance, explanations: List[DataInstance]):\n pass\n \n \n def check_configuration(self):\n super().check_configuration()\n \n if 'node_feature_aggregator' not in self.local_config['parameters']:\n init_dflts_to_of(self.local_config,\n 'node_feature_aggregator',\n 'src.explainer.ensemble.aggregators.nodes.average.AverageAggregator')" }, { "identifier": "GraphEditDistanceMetric", "path": "src/evaluation/evaluation_metric_ged.py", "snippet": "class GraphEditDistanceMetric(EvaluationMetric):\n \"\"\"Provides a graph edit distance function for graphs where nodes are already matched, \n thus eliminating the need of performing an NP-Complete graph matching.\n \"\"\"\n\n def __init__(self, node_insertion_cost=1.0, node_deletion_cost=1.0, edge_insertion_cost=1.0,\n edge_deletion_cost=1.0, undirected=True, config_dict=None) -> None:\n super().__init__(config_dict)\n self._name = 'Graph_Edit_Distance'\n self._node_insertion_cost = node_insertion_cost\n self._node_deletion_cost = node_deletion_cost\n self._edge_insertion_cost = edge_insertion_cost\n self._edge_deletion_cost = edge_deletion_cost\n self.undirected = undirected\n \n\n def evaluate(self, instance_1 , instance_2 , oracle : Oracle=None, explainer : Explainer=None, dataset = None):\n # G1 = instance_1.graph\n # G2 = instance_2.graph\n\n # edit_distance = 0.0\n\n # for n in G1.nodes:\n # if not G2.has_node(n):\n # edit_distance += self._node_deletion_cost\n\n # for n in G2.nodes:\n # if not G1.has_node(n):\n # edit_distance += self._node_insertion_cost\n\n # for e in G1.edges:\n # if not G2.has_edge(*e):\n # edit_distance += self._edge_deletion_cost\n\n # for e in G2.edges:\n # if not G1.has_edge(*e):\n # edit_distance += self._edge_insertion_cost\n\n # return edit_distance\n \n # Implementation for numpy matrices\n A_g1 = instance_1.data\n A_g2 = instance_2.data\n\n # Bardh idea ----------------------------------------------------------\n\n # result = float(np.sum(np.absolute(A_g1 - A_g2)))\n # return result\n # ---------------------------------------------------------------------\n\n # Get the difference in the number of nodes\n nodes_diff_count = abs(A_g1.shape[0] - A_g2.shape[0])\n\n # Get the 
shape of the matrices\n shape_A_g1 = A_g1.shape\n shape_A_g2 = A_g2.shape\n\n # Find the minimum dimensions of the matrices\n min_shape = (min(shape_A_g1[0], shape_A_g2[0]), min(shape_A_g1[1], shape_A_g2[1]))\n\n # Initialize an empty list to store the differences\n edges_diff = []\n\n # Iterate over the common elements of the matrices\n for i in range(min_shape[0]):\n for j in range(min_shape[1]):\n if A_g1[i,j] != A_g2[i,j]:\n edges_diff.append((i,j))\n\n # If the matrices have different shapes, loop through the remaining cells in the larger matrix (the matrixes are square shaped)\n if shape_A_g1 != shape_A_g2:\n max_shape = np.maximum(shape_A_g1, shape_A_g2)\n\n for i in range(min_shape[0], max_shape[0]):\n for j in range(min_shape[1], max_shape[1]):\n if shape_A_g1 > shape_A_g2:\n edge_val = A_g1[i,j]\n else:\n edge_val = A_g2[i,j]\n\n # Only add non-zero cells to the list\n if edge_val != 0: \n edges_diff.append((i, j))\n\n edges_diff_count = len(edges_diff)\n if self.undirected:\n edges_diff_count /= 2\n\n return nodes_diff_count + edges_diff_count" }, { "identifier": "get_instance_kvargs", "path": "src/core/factory_base.py", "snippet": "def get_instance_kvargs(kls, param):\n GLogger.getLogger().info(\"Instantiating: \"+kls)\n return get_class(kls)(**param)" }, { "identifier": "get_dflts_to_of", "path": "src/utils/cfg_utils.py", "snippet": "def get_dflts_to_of(snippet, key, kls, *args, **kwargs):\n __add_dflts_to_of(snippet, key, kls, empty_cfg_for, *args, **kwargs)" }, { "identifier": "init_dflts_to_of", "path": "src/utils/cfg_utils.py", "snippet": "def init_dflts_to_of(snippet, key, kls, *args, **kwargs):\n __add_dflts_to_of(snippet, key, kls, generate_default_for,*args, **kwargs)" }, { "identifier": "inject_dataset", "path": "src/utils/cfg_utils.py", "snippet": "def inject_dataset(cfg, dataset):\n cfg['dataset']= dataset" }, { "identifier": "inject_oracle", "path": "src/utils/cfg_utils.py", "snippet": "def inject_oracle(cfg, oracle):\n cfg['oracle']= oracle" }, { "identifier": "retake_oracle", "path": "src/utils/cfg_utils.py", "snippet": "def retake_oracle(cfg):\n return cfg['oracle']" }, { "identifier": "retake_dataset", "path": "src/utils/cfg_utils.py", "snippet": "def retake_dataset(cfg):\n return cfg['dataset']" } ]
import copy import sys import numpy as np from abc import ABC from typing import List from src.core.explainer_base import Explainer from src.dataset.instances.graph import GraphInstance from src.explainer.ensemble.aggregators.base import ExplanationAggregator from src.evaluation.evaluation_metric_ged import GraphEditDistanceMetric from src.core.factory_base import get_instance_kvargs from src.utils.cfg_utils import get_dflts_to_of, init_dflts_to_of, inject_dataset, inject_oracle, retake_oracle, retake_dataset
2,781
class ExplanationTopSelect(ExplanationAggregator): def init(self): super().init() self.distance_metric = get_instance_kvargs(self.local_config['parameters']['distance_metric']['class'], self.local_config['parameters']['distance_metric']['parameters'])
class ExplanationTopSelect(ExplanationAggregator): def init(self): super().init() self.distance_metric = get_instance_kvargs(self.local_config['parameters']['distance_metric']['class'], self.local_config['parameters']['distance_metric']['parameters'])
def real_aggregate(self, org_instance: GraphInstance, explanations: List[GraphInstance]):
1
2023-12-15 16:34:16+00:00
4k
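The GraphEditDistanceMetric context in the record above assumes node identities are already aligned, so the distance reduces to the node-count gap plus the number of disagreeing adjacency cells. A minimal sketch of that idea, using zero-padding instead of the snippet's explicit loops (function and variable names are illustrative, not part of the repository):

import numpy as np

def node_matched_ged(a1: np.ndarray, a2: np.ndarray, undirected: bool = True) -> int:
    """Edit distance for two graphs where node i in a1 corresponds to node i in a2."""
    n = max(a1.shape[0], a2.shape[0])
    # Zero-pad both adjacency matrices to a common size so that the edges
    # incident to missing nodes count as insertions/deletions.
    p1 = np.zeros((n, n), dtype=int)
    p2 = np.zeros((n, n), dtype=int)
    p1[:a1.shape[0], :a1.shape[1]] = a1
    p2[:a2.shape[0], :a2.shape[1]] = a2

    node_diff = abs(a1.shape[0] - a2.shape[0])
    edge_diff = int(np.count_nonzero(p1 != p2))
    if undirected:
        edge_diff //= 2  # each undirected edge appears twice in the matrix
    return node_diff + edge_diff

g1 = np.array([[0, 1], [1, 0]])                   # 2 nodes, edge (0, 1)
g2 = np.array([[0, 1, 1], [1, 0, 0], [1, 0, 0]])  # extra node 2 plus edge (0, 2)
print(node_matched_ged(g1, g2))                   # 2 -> one node + one undirected edge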
modelscope/scepter
scepter/modules/annotator/midas_op.py
[ { "identifier": "BaseAnnotator", "path": "scepter/modules/annotator/base_annotator.py", "snippet": "class BaseAnnotator(BaseModel, metaclass=ABCMeta):\n para_dict = {}\n\n def __init__(self, cfg, logger=None):\n super().__init__(cfg, logger=logger)\n\n @torch.no_grad()\n @torch.inference_mode\n def forward(self, *args, **kwargs):\n raise NotImplementedError\n\n @staticmethod\n def get_config_template():\n return dict_to_yaml('ANNOTATORS',\n __class__.__name__,\n BaseAnnotator.para_dict,\n set_name=True)" }, { "identifier": "MiDaSInference", "path": "scepter/modules/annotator/midas/api.py", "snippet": "class MiDaSInference(nn.Module):\n MODEL_TYPES_TORCH_HUB = ['DPT_Large', 'DPT_Hybrid', 'MiDaS_small']\n MODEL_TYPES_ISL = [\n 'dpt_large',\n 'dpt_hybrid',\n 'midas_v21',\n 'midas_v21_small',\n ]\n\n def __init__(self, model_type, model_path):\n super().__init__()\n assert (model_type in self.MODEL_TYPES_ISL)\n model, _ = load_model(model_type, model_path)\n self.model = model\n self.model.train = disabled_train\n\n def forward(self, x):\n with torch.no_grad():\n prediction = self.model(x)\n return prediction" }, { "identifier": "ANNOTATORS", "path": "scepter/modules/annotator/registry.py", "snippet": "ANNOTATORS = Registry('ANNOTATORS', build_func=build_annotator)" }, { "identifier": "resize_image", "path": "scepter/modules/annotator/utils.py", "snippet": "def resize_image(input_image, resolution):\n H, W, C = input_image.shape\n H = float(H)\n W = float(W)\n k = float(resolution) / min(H, W)\n H *= k\n W *= k\n H = int(np.round(H / 64.0)) * 64\n W = int(np.round(W / 64.0)) * 64\n img = cv2.resize(\n input_image, (W, H),\n interpolation=cv2.INTER_LANCZOS4 if k > 1 else cv2.INTER_AREA)\n return img, k" }, { "identifier": "resize_image_ori", "path": "scepter/modules/annotator/utils.py", "snippet": "def resize_image_ori(h, w, image, k):\n img = cv2.resize(\n image, (w, h),\n interpolation=cv2.INTER_LANCZOS4 if k > 1 else cv2.INTER_AREA)\n return img" }, { "identifier": "dict_to_yaml", "path": "scepter/modules/utils/config.py", "snippet": "def dict_to_yaml(module_name, name, json_config, set_name=False):\n '''\n { \"ENV\" :\n { \"description\" : \"\",\n \"A\" : {\n \"value\": 1.0,\n \"description\": \"\"\n }\n }\n }\n convert std dict to yaml\n :param module_name:\n :param json_config:\n :return:\n '''\n def convert_yaml_style(level=1,\n name='ENV',\n description='ENV PARA',\n default='',\n type_name='',\n is_sys=False):\n new_line = ''\n new_line += '{}# {} DESCRIPTION: {} TYPE: {} default: {}\\n'.format(\n '\\t' * (level - 1), name.upper(), description, type_name,\n f'\\'{default}\\'' if isinstance(default, str) else default)\n if is_sys:\n if name == '-':\n new_line += '{}{}\\n'.format('\\t' * (level - 1), name.upper())\n else:\n new_line += '{}{}:\\n'.format('\\t' * (level - 1), name.upper())\n else:\n # if isinstance(default, str):\n # default = f'\\'{default}\\''\n if default is None:\n new_line += '{}# {}: {}\\n'.format('\\t' * (level - 1),\n name.upper(), default)\n else:\n new_line += '{}{}: {}\\n'.format('\\t' * (level - 1),\n name.upper(), default)\n return new_line\n\n def parse_dict(json_config,\n level_num,\n parent_key,\n set_name=False,\n name='',\n parent_type='dict'):\n yaml_str = ''\n # print(level_num, json_config)\n if isinstance(json_config, dict):\n if 'value' in json_config:\n value = json_config['value']\n if isinstance(value, dict):\n assert len(value) < 1\n value = None\n description = json_config.get('description', '')\n yaml_str += convert_yaml_style(level=level_num 
- 1,\n name=parent_key,\n description=description,\n default=value,\n type_name=type(value).__name__)\n return True, yaml_str\n else:\n if len(json_config) < 1:\n yaml_str += convert_yaml_style(level=level_num,\n name='NAME',\n description='',\n default='',\n type_name='')\n level_num += 1\n for k, v in json_config.items():\n if k == 'description':\n continue\n if isinstance(v, dict):\n is_final, new_yaml_str = parse_dict(v,\n level_num,\n k,\n parent_type='dict')\n if not is_final and parent_type == 'dict':\n description = v.get('description', '')\n yaml_str += convert_yaml_style(\n level=level_num - 1,\n name=k,\n description=description,\n default='',\n type_name='',\n is_sys=True)\n if not is_final and parent_type == 'list':\n yaml_str += convert_yaml_style(level=level_num,\n name='NAME',\n description='',\n default=k,\n type_name='')\n yaml_str += new_yaml_str\n elif isinstance(v, list):\n base_yaml_str = convert_yaml_style(level=level_num - 1,\n name=k,\n description='',\n default='',\n type_name='',\n is_sys=True)\n yaml_str += base_yaml_str\n for tup in v:\n is_final, new_yaml_str = parse_dict(\n tup, level_num, '-', parent_type='list')\n if not is_final:\n yaml_str += convert_yaml_style(level=level_num,\n name='-',\n description='',\n default='',\n type_name='',\n is_sys=True)\n yaml_str += new_yaml_str\n else:\n raise KeyError(\n f'json config {json_config} must be a dict of list'\n )\n\n elif isinstance(json_config, list):\n level_num += 1\n for tup in json_config:\n is_final, new_yaml_str = parse_dict(tup, level_num, '-')\n if not is_final:\n\n yaml_str += convert_yaml_style(level=level_num - 1,\n name='-',\n description='',\n default='',\n type_name='',\n is_sys=True)\n if set_name:\n yaml_str += convert_yaml_style(level=level_num,\n name='NAME',\n description='',\n default=name,\n type_name='')\n yaml_str += new_yaml_str\n else:\n raise KeyError(f'json config {json_config} must be a dict')\n return False, yaml_str\n\n if isinstance(json_config, dict):\n first_dict, sec_dict, third_dict = {}, {}, {}\n for key, value in json_config.items():\n if isinstance(value, dict) and len(value) > 0:\n first_dict[key] = value\n elif isinstance(value, dict) and len(value) == 0:\n sec_dict[key] = value\n elif isinstance(value, list):\n third_dict[key] = value\n else:\n raise f'Config {json_config} is illegal'\n json_config = {}\n json_config.update(first_dict)\n json_config.update(sec_dict)\n json_config.update(third_dict)\n\n yaml_str = f'[{module_name}] module yaml examples:\\n'\n level_num = 1\n base_yaml_str = convert_yaml_style(level=level_num,\n name=module_name,\n description='',\n default='',\n type_name='',\n is_sys=True)\n level_num += 1\n\n is_final, new_yaml_str = parse_dict(json_config,\n level_num,\n module_name,\n set_name=isinstance(json_config, list)\n and set_name,\n name=name)\n if not is_final:\n yaml_str += base_yaml_str\n if set_name and not isinstance(json_config, list):\n yaml_str += convert_yaml_style(level=level_num,\n name='NAME',\n description='',\n default=name,\n type_name='')\n yaml_str += new_yaml_str\n else:\n yaml_str += new_yaml_str[1:]\n\n return yaml_str" }, { "identifier": "we", "path": "scepter/modules/utils/distribute.py", "snippet": " def set_random_seed(seed):\ndef get_dist_info():\ndef gather_data(data):\ndef gather_list(data):\ndef gather_picklable(data):\ndef _gather_picklable_custom(data):\ndef gather_gpu_tensors(tensor, all_recv=False, is_cat=True):\ndef broadcast(tensor, src, group=None, **kwargs):\ndef barrier():\ndef 
get_global_gloo_group():\ndef reduce_scatter(output,\n input_list,\n op=dist.ReduceOp.SUM,\n group=None,\n **kwargs):\ndef all_reduce(tensor, op=dist.ReduceOp.SUM, group=None, **kwargs):\ndef reduce(tensor, dst, op=dist.ReduceOp.SUM, group=None, **kwargs):\ndef _serialize_to_tensor(data):\ndef _unserialize_from_tensor(recv_data):\ndef send(tensor, dst, group=None, **kwargs):\ndef recv(tensor, src=None, group=None, **kwargs):\ndef isend(tensor, dst, group=None, **kwargs):\ndef irecv(tensor, src=None, group=None, **kwargs):\ndef scatter(data, scatter_list=None, src=0, group=None, **kwargs):\ndef shared_random_seed():\ndef mp_worker(gpu, ngpus_per_node, cfg, fn, pmi_rank, world_size, work_env):\n def __init__(self):\n def init_env(self, config, fn, logger=None):\n def get_env(self):\n def set_env(self, we_env):\n def __str__(self):\nclass Workenv(object):" }, { "identifier": "FS", "path": "scepter/modules/utils/file_system.py", "snippet": "FS = FileSystem()" } ]
from abc import ABCMeta from einops import rearrange from PIL import Image from scepter.modules.annotator.base_annotator import BaseAnnotator from scepter.modules.annotator.midas.api import MiDaSInference from scepter.modules.annotator.registry import ANNOTATORS from scepter.modules.annotator.utils import resize_image, resize_image_ori from scepter.modules.utils.config import dict_to_yaml from scepter.modules.utils.distribute import we from scepter.modules.utils.file_system import FS import numpy as np import torch
2,873
# -*- coding: utf-8 -*- # Midas Depth Estimation # From https://github.com/isl-org/MiDaS # MIT LICENSE @ANNOTATORS.register_class() class MidasDetector(BaseAnnotator, metaclass=ABCMeta): def __init__(self, cfg, logger=None): super().__init__(cfg, logger=logger) pretrained_model = cfg.get('PRETRAINED_MODEL', None) if pretrained_model: with FS.get_from(pretrained_model, wait_finish=True) as local_path: self.model = MiDaSInference(model_type='dpt_hybrid', model_path=local_path) self.a = cfg.get('A', np.pi * 2.0) self.bg_th = cfg.get('BG_TH', 0.1) @torch.no_grad() @torch.inference_mode() @torch.autocast('cuda', enabled=False) def forward(self, image): if isinstance(image, Image.Image): image = np.array(image) elif isinstance(image, torch.Tensor): image = image.detach().cpu().numpy() elif isinstance(image, np.ndarray): image = image.copy() else: raise f'Unsurpport datatype{type(image)}, only surpport np.ndarray, torch.Tensor, Pillow Image.' image_depth = image h, w, c = image.shape
# -*- coding: utf-8 -*- # Midas Depth Estimation # From https://github.com/isl-org/MiDaS # MIT LICENSE @ANNOTATORS.register_class() class MidasDetector(BaseAnnotator, metaclass=ABCMeta): def __init__(self, cfg, logger=None): super().__init__(cfg, logger=logger) pretrained_model = cfg.get('PRETRAINED_MODEL', None) if pretrained_model: with FS.get_from(pretrained_model, wait_finish=True) as local_path: self.model = MiDaSInference(model_type='dpt_hybrid', model_path=local_path) self.a = cfg.get('A', np.pi * 2.0) self.bg_th = cfg.get('BG_TH', 0.1) @torch.no_grad() @torch.inference_mode() @torch.autocast('cuda', enabled=False) def forward(self, image): if isinstance(image, Image.Image): image = np.array(image) elif isinstance(image, torch.Tensor): image = image.detach().cpu().numpy() elif isinstance(image, np.ndarray): image = image.copy() else: raise f'Unsurpport datatype{type(image)}, only surpport np.ndarray, torch.Tensor, Pillow Image.' image_depth = image h, w, c = image.shape
image_depth, k = resize_image(image_depth,
3
2023-12-21 02:01:48+00:00
4k
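The resize_image / resize_image_ori helpers in the scepter record above snap both image sides to multiples of 64 before depth inference and map the prediction back to the original size afterwards. A rough sketch of that round trip under the same convention (the 512 target, the placeholder "model" call, and all names here are assumptions for illustration only):

import numpy as np
import cv2

def resize_to_multiple_of_64(img: np.ndarray, resolution: int = 512):
    """Scale the shorter side toward `resolution`, snapping H and W to multiples of 64.

    Returns the resized image and the scale factor k so the output can later be
    mapped back to the original resolution, mirroring resize_image above.
    """
    h, w = img.shape[:2]
    k = float(resolution) / min(h, w)
    new_h = int(np.round(h * k / 64.0)) * 64
    new_w = int(np.round(w * k / 64.0)) * 64
    interp = cv2.INTER_LANCZOS4 if k > 1 else cv2.INTER_AREA
    return cv2.resize(img, (new_w, new_h), interpolation=interp), k

image = np.zeros((480, 640, 3), dtype=np.uint8)        # stand-in input frame
resized, k = resize_to_multiple_of_64(image, 512)      # -> 512 x 704
depth = resized.mean(axis=-1)                          # placeholder for the depth model output
depth_full = cv2.resize(depth, (640, 480),
                        interpolation=cv2.INTER_LANCZOS4 if k > 1 else cv2.INTER_AREA)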
YyzHarry/shortcut-ood-fairness
learning/algorithms.py
[ { "identifier": "networks", "path": "models/networks.py", "snippet": "class Identity(nn.Module):\nclass MLP(nn.Module):\nclass PretrainedImageModel(torch.nn.Module):\nclass ResNet(PretrainedImageModel):\nclass TimmModel(PretrainedImageModel):\nclass HubModel(PretrainedImageModel):\nclass ImportedModel(PretrainedImageModel):\n def __init__(self):\n def forward(self, x):\n def __init__(self, n_inputs, n_outputs, hparams):\n def forward(self, x):\n def forward(self, x):\n def train(self, mode=True):\n def freeze_bn(self):\n def __init__(self, input_shape, hparams, pretrained=True, freeze_bn=False):\n def __init__(self, name, input_shape, hparams, pretrained=True, freeze_bn=False):\n def __init__(self, name1, name2, input_shape, hparams, pretrained=True, freeze_bn=False):\n def __init__(self, network, n_outputs, input_shape, hparams, pretrained=True, freeze_bn=False):\ndef replace_module_prefix(state_dict, prefix, replace_with=\"\"):\ndef get_torchvision_state_dict(url):\ndef imagenet_resnet50_ssl(URL):\ndef load_swag(URL):\ndef Featurizer(data_type, input_shape, hparams):\ndef Classifier(in_features, out_features, is_nonlinear=False):\nSIMCLR_RN50_URL = \"https://dl.fbaipublicfiles.com/vissl/model_zoo/\" \\\n \"simclr_rn50_800ep_simclr_8node_resnet_16_07_20.7e8feed1/model_final_checkpoint_phase799.torch\"\nBARLOWTWINS_RN50_URL = \"https://dl.fbaipublicfiles.com/vissl/model_zoo/\" \\\n \"barlow_twins/barlow_twins_32gpus_4node_imagenet1k_1000ep_resnet50.torch\"" }, { "identifier": "joint_dro", "path": "learning/joint_dro.py", "snippet": "GEOMETRIES = ('cvar')\nMIN_REL_DIFFERENCE = 1e-5\ndef cvar_value(p, v, reg):\ndef bisection(eta_min, eta_max, f, tol=1e-6, max_iter=500):\n def __init__(self, size, reg, geometry, tol=1e-4, max_iter=1000, debugging=False):\n def best_response(self, v):\n def p(eta):\n def bisection_target(eta):\n def p(eta):\n def bisection_target(eta):\n def p(eta):\n def bisection_target(eta):\n def p(eta):\n def bisection_target(eta):\n def forward(self, v):\nclass RobustLoss(torch.nn.Module):" }, { "identifier": "get_optimizers", "path": "learning/optimizers.py", "snippet": "def get_bert_optim(network, lr, weight_decay):\ndef get_sgd_optim(network, lr, weight_decay):\ndef get_adam_optim(network, lr, weight_decay):" }, { "identifier": "mixup_data", "path": "utils/misc.py", "snippet": "def mixup_data(x, y, alpha=1., device=\"cpu\"):\n lam = np.random.beta(alpha, alpha) if alpha > 0 else 1\n\n batch_size = x.size()[0]\n index = torch.randperm(batch_size).to(device)\n\n mixed_x = lam * x + (1 - lam) * x[index, :]\n y_a, y_b = y, y[index]\n\n return mixed_x, y_a, y_b, lam" } ]
import torch import torch.nn as nn import torch.nn.functional as F import torch.autograd as autograd import copy import numpy as np from transformers import get_scheduler from models import networks from learning import joint_dro from learning.optimizers import get_optimizers from utils.misc import mixup_data
1,675
'ERM', 'StratifiedERM', # subgroup methods 'GroupDRO', 'IRM', 'CVaRDRO', 'JTT', 'LISA', 'DFR', # data augmentation 'Mixup', # domain generalization methods 'MMD', 'CORAL', 'DANN', 'CDANN', # imbalanced learning methods 'ReSample', 'ReWeight', 'SqrtReWeight', 'CBLoss', 'Focal', 'LDAM', 'BSoftmax', 'CRT', 'ReWeightCRT', 'VanillaCRT', # flat minima optimizer 'MA', 'SAM', # attribute balancing 'GroupDROAttr', 'ReSampleAttr', 'ReWeightAttr', ] def get_algorithm_class(algorithm_name): """Return the algorithm class with the given name.""" if algorithm_name not in globals(): raise NotImplementedError("Algorithm not found: {}".format(algorithm_name)) return globals()[algorithm_name] class Algorithm(torch.nn.Module): """ A subclass of Algorithm implements a subgroup robustness algorithm. Subclasses should implement the following: - _init_model() - _compute_loss() - update() - return_feats() - predict() """ def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None): super(Algorithm, self).__init__() self.hparams = hparams self.data_type = data_type self.num_classes = num_classes self.num_attributes = num_attributes self.num_examples = num_examples def _init_model(self): raise NotImplementedError def _compute_loss(self, i, x, y, a, step): raise NotImplementedError def update(self, minibatch, step): """Perform one update step.""" raise NotImplementedError def return_feats(self, x): raise NotImplementedError def predict(self, x): raise NotImplementedError def return_groups(self, y, a): """Given a list of (y, a) tuples, return indexes of samples belonging to each subgroup""" idx_g, idx_samples = [], [] all_g = y * self.num_attributes + a for g in all_g.unique(): idx_g.append(g) idx_samples.append(all_g == g) return zip(idx_g, idx_samples) @staticmethod def return_attributes(all_a): """Given a list of attributes, return indexes of samples belonging to each attribute""" idx_a, idx_samples = [], [] for a in all_a.unique(): idx_a.append(a) idx_samples.append(all_a == a) return zip(idx_a, idx_samples) class ERM(Algorithm): """Empirical Risk Minimization (ERM)""" def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None): super(ERM, self).__init__( data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes, attr_sizes) self.featurizer = networks.Featurizer(data_type, input_shape, self.hparams) self.classifier = networks.Classifier( self.featurizer.n_outputs, num_classes, self.hparams['nonlinear_classifier'] ) self.network = nn.Sequential(self.featurizer, self.classifier) self._init_model() def _init_model(self): self.clip_grad = (self.data_type == "text" and self.hparams["optimizer"] == "adamw") if self.data_type in ["images", "tabular"]:
ALGORITHMS = [ 'ERM', 'StratifiedERM', # subgroup methods 'GroupDRO', 'IRM', 'CVaRDRO', 'JTT', 'LISA', 'DFR', # data augmentation 'Mixup', # domain generalization methods 'MMD', 'CORAL', 'DANN', 'CDANN', # imbalanced learning methods 'ReSample', 'ReWeight', 'SqrtReWeight', 'CBLoss', 'Focal', 'LDAM', 'BSoftmax', 'CRT', 'ReWeightCRT', 'VanillaCRT', # flat minima optimizer 'MA', 'SAM', # attribute balancing 'GroupDROAttr', 'ReSampleAttr', 'ReWeightAttr', ] def get_algorithm_class(algorithm_name): """Return the algorithm class with the given name.""" if algorithm_name not in globals(): raise NotImplementedError("Algorithm not found: {}".format(algorithm_name)) return globals()[algorithm_name] class Algorithm(torch.nn.Module): """ A subclass of Algorithm implements a subgroup robustness algorithm. Subclasses should implement the following: - _init_model() - _compute_loss() - update() - return_feats() - predict() """ def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None): super(Algorithm, self).__init__() self.hparams = hparams self.data_type = data_type self.num_classes = num_classes self.num_attributes = num_attributes self.num_examples = num_examples def _init_model(self): raise NotImplementedError def _compute_loss(self, i, x, y, a, step): raise NotImplementedError def update(self, minibatch, step): """Perform one update step.""" raise NotImplementedError def return_feats(self, x): raise NotImplementedError def predict(self, x): raise NotImplementedError def return_groups(self, y, a): """Given a list of (y, a) tuples, return indexes of samples belonging to each subgroup""" idx_g, idx_samples = [], [] all_g = y * self.num_attributes + a for g in all_g.unique(): idx_g.append(g) idx_samples.append(all_g == g) return zip(idx_g, idx_samples) @staticmethod def return_attributes(all_a): """Given a list of attributes, return indexes of samples belonging to each attribute""" idx_a, idx_samples = [], [] for a in all_a.unique(): idx_a.append(a) idx_samples.append(all_a == a) return zip(idx_a, idx_samples) class ERM(Algorithm): """Empirical Risk Minimization (ERM)""" def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None): super(ERM, self).__init__( data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes, attr_sizes) self.featurizer = networks.Featurizer(data_type, input_shape, self.hparams) self.classifier = networks.Classifier( self.featurizer.n_outputs, num_classes, self.hparams['nonlinear_classifier'] ) self.network = nn.Sequential(self.featurizer, self.classifier) self._init_model() def _init_model(self): self.clip_grad = (self.data_type == "text" and self.hparams["optimizer"] == "adamw") if self.data_type in ["images", "tabular"]:
self.optimizer = get_optimizers[self.hparams['optimizer']](
2
2023-12-15 04:10:31+00:00
4k
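The mixup_data helper in the fairness record above returns the mixed inputs together with both label vectors and the mixing coefficient; the usual way to consume them is to blend the two losses with the same lam. A small sketch of that pattern (the linear model and random batch are placeholders, not the repository's training loop):

import numpy as np
import torch
import torch.nn.functional as F

def mixup_data(x, y, alpha=1.0, device="cpu"):
    """Convex-combine random pairs of examples, same logic as the snippet above."""
    lam = np.random.beta(alpha, alpha) if alpha > 0 else 1.0
    index = torch.randperm(x.size(0)).to(device)
    mixed_x = lam * x + (1 - lam) * x[index, :]
    return mixed_x, y, y[index], lam

x = torch.randn(8, 16)
y = torch.randint(0, 2, (8,))
model = torch.nn.Linear(16, 2)

mixed_x, y_a, y_b, lam = mixup_data(x, y, alpha=1.0)
logits = model(mixed_x)
# The mixup objective: interpolate the losses against both label sets.
loss = lam * F.cross_entropy(logits, y_a) + (1 - lam) * F.cross_entropy(logits, y_b)
loss.backward()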
RomGai/BrainVis
dc_ldm/modules/diffusionmodules/openaimodel.py
[ { "identifier": "checkpoint", "path": "dc_ldm/modules/diffusionmodules/util.py", "snippet": "def checkpoint(func, inputs, params, flag):\n \"\"\"\n Evaluate a function without caching intermediate activations, allowing for\n reduced memory at the expense of extra compute in the backward pass.\n :param func: the function to evaluate.\n :param inputs: the argument sequence to pass to `func`.\n :param params: a sequence of parameters `func` depends on but does not\n explicitly take as arguments.\n :param flag: if False, disable gradient checkpointing.\n \"\"\"\n if flag:\n args = tuple(inputs) + tuple(params)\n return CheckpointFunction.apply(func, len(inputs), *args)\n else:\n return func(*inputs)" }, { "identifier": "conv_nd", "path": "dc_ldm/modules/diffusionmodules/util.py", "snippet": "def conv_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D convolution module.\n \"\"\"\n if dims == 1:\n return nn.Conv1d(*args, **kwargs)\n elif dims == 2:\n return nn.Conv2d(*args, **kwargs)\n elif dims == 3:\n return nn.Conv3d(*args, **kwargs)\n raise ValueError(f\"unsupported dimensions: {dims}\")" }, { "identifier": "linear", "path": "dc_ldm/modules/diffusionmodules/util.py", "snippet": "def linear(*args, **kwargs):\n \"\"\"\n Create a linear module.\n \"\"\"\n return nn.Linear(*args, **kwargs)" }, { "identifier": "avg_pool_nd", "path": "dc_ldm/modules/diffusionmodules/util.py", "snippet": "def avg_pool_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D average pooling module.\n \"\"\"\n if dims == 1:\n return nn.AvgPool1d(*args, **kwargs)\n elif dims == 2:\n return nn.AvgPool2d(*args, **kwargs)\n elif dims == 3:\n return nn.AvgPool3d(*args, **kwargs)\n raise ValueError(f\"unsupported dimensions: {dims}\")" }, { "identifier": "zero_module", "path": "dc_ldm/modules/diffusionmodules/util.py", "snippet": "def zero_module(module):\n \"\"\"\n Zero out the parameters of a module and return it.\n \"\"\"\n for p in module.parameters():\n p.detach().zero_()\n return module" }, { "identifier": "normalization", "path": "dc_ldm/modules/diffusionmodules/util.py", "snippet": "def normalization(channels):\n \"\"\"\n Make a standard normalization layer.\n :param channels: number of input channels.\n :return: an nn.Module for normalization.\n \"\"\"\n return GroupNorm32(32, channels)" }, { "identifier": "timestep_embedding", "path": "dc_ldm/modules/diffusionmodules/util.py", "snippet": "def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False):\n \"\"\"\n Create sinusoidal timestep embeddings.\n :param timesteps: a 1-D Tensor of N indices, one per batch element.\n These may be fractional.\n :param dim: the dimension of the output.\n :param max_period: controls the minimum frequency of the embeddings.\n :return: an [N x dim] Tensor of positional embeddings.\n \"\"\"\n if not repeat_only:\n half = dim // 2\n freqs = torch.exp(\n -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half\n ).to(device=timesteps.device)\n args = timesteps[:, None].float() * freqs[None]\n embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)\n if dim % 2:\n embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)\n else:\n embedding = repeat(timesteps, 'b -> b d', d=dim)\n return embedding" }, { "identifier": "SpatialTransformer", "path": "dc_ldm/modules/attention.py", "snippet": "class SpatialTransformer(nn.Module):\n \"\"\"\n Transformer block for image-like data.\n First, project the input (aka embedding)\n and reshape to b, t, d.\n Then 
apply standard transformer action.\n Finally, reshape to image\n \"\"\"\n def __init__(self, in_channels, n_heads, d_head,\n depth=1, dropout=0., context_dim=None, cond_scale=1.):\n super().__init__()\n self.in_channels = in_channels\n inner_dim = n_heads * d_head\n self.norm = Normalize(in_channels)\n\n self.proj_in = nn.Conv2d(in_channels,\n inner_dim,\n kernel_size=1,\n stride=1,\n padding=0)\n\n self.transformer_blocks = nn.ModuleList(\n [BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim,cond_scale=cond_scale)\n for d in range(depth)]\n )\n\n self.proj_out = zero_module(nn.Conv2d(inner_dim,\n in_channels,\n kernel_size=1,\n stride=1,\n padding=0))\n\n def forward(self, x, context=None):\n # note: if no context is given, cross-attention defaults to self-attention\n b, c, h, w = x.shape\n x_in = x\n x = self.norm(x)\n x = self.proj_in(x)\n x = rearrange(x, 'b c h w -> b (h w) c')\n for block in self.transformer_blocks:\n x = block(x, context=context)\n x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w)\n x = self.proj_out(x)\n return x + x_in" } ]
from abc import abstractmethod from functools import partial from typing import Iterable from dc_ldm.modules.diffusionmodules.util import ( checkpoint, conv_nd, linear, avg_pool_nd, zero_module, normalization, timestep_embedding, ) from dc_ldm.modules.attention import SpatialTransformer from omegaconf.listconfig import ListConfig import math import numpy as np import torch as th import torch.nn as nn import torch.nn.functional as F import torch
2,860
if isinstance(layer, TimestepBlock): x = layer(x, emb) elif isinstance(layer, SpatialTransformer): x = layer(x, context) else: x = layer(x) return x class Upsample(nn.Module): """ An upsampling layer with an optional convolution. :param channels: channels in the inputs and outputs. :param use_conv: a bool determining if a convolution is applied. :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then upsampling occurs in the inner-two dimensions. """ def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1): super().__init__() self.channels = channels self.out_channels = out_channels or channels self.use_conv = use_conv self.dims = dims if use_conv: self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=padding) def forward(self, x): assert x.shape[1] == self.channels if self.dims == 3: x = F.interpolate( x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest" ) else: x = F.interpolate(x, scale_factor=2, mode="nearest") if self.use_conv: x = self.conv(x) return x class TransposedUpsample(nn.Module): 'Learned 2x upsampling without padding' def __init__(self, channels, out_channels=None, ks=5): super().__init__() self.channels = channels self.out_channels = out_channels or channels self.up = nn.ConvTranspose2d(self.channels,self.out_channels,kernel_size=ks,stride=2) def forward(self,x): return self.up(x) class Downsample(nn.Module): """ A downsampling layer with an optional convolution. :param channels: channels in the inputs and outputs. :param use_conv: a bool determining if a convolution is applied. :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then downsampling occurs in the inner-two dimensions. """ def __init__(self, channels, use_conv, dims=2, out_channels=None,padding=1): super().__init__() self.channels = channels self.out_channels = out_channels or channels self.use_conv = use_conv self.dims = dims stride = 2 if dims != 3 else (1, 2, 2) if use_conv: self.op = conv_nd( dims, self.channels, self.out_channels, 3, stride=stride, padding=padding ) else: assert self.channels == self.out_channels self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride) def forward(self, x): assert x.shape[1] == self.channels return self.op(x) class ResBlock(TimestepBlock): """ A residual block that can optionally change the number of channels. :param channels: the number of input channels. :param emb_channels: the number of timestep embedding channels. :param dropout: the rate of dropout. :param out_channels: if specified, the number of out channels. :param use_conv: if True and out_channels is specified, use a spatial convolution instead of a smaller 1x1 convolution to change the channels in the skip connection. :param dims: determines if the signal is 1D, 2D, or 3D. :param use_checkpoint: if True, use gradient checkpointing on this module. :param up: if True, use this block for upsampling. :param down: if True, use this block for downsampling. """ def __init__( self, channels, emb_channels, dropout, out_channels=None, use_conv=False, use_scale_shift_norm=False, dims=2, use_checkpoint=False, up=False, down=False, ): super().__init__() self.channels = channels self.emb_channels = emb_channels self.dropout = dropout self.out_channels = out_channels or channels self.use_conv = use_conv self.use_checkpoint = use_checkpoint self.use_scale_shift_norm = use_scale_shift_norm self.in_layers = nn.Sequential(
# dummy replace def convert_module_to_f16(x): pass def convert_module_to_f32(x): pass ## go class AttentionPool2d(nn.Module): """ Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py """ def __init__( self, spacial_dim: int, embed_dim: int, num_heads_channels: int, output_dim: int = None, ): super().__init__() self.positional_embedding = nn.Parameter(th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5) self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1) self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1) self.num_heads = embed_dim // num_heads_channels self.attention = QKVAttention(self.num_heads) def forward(self, x): b, c, *_spatial = x.shape x = x.reshape(b, c, -1) # NC(HW) x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1) # NC(HW+1) x = x + self.positional_embedding[None, :, :].to(x.dtype) # NC(HW+1) x = self.qkv_proj(x) x = self.attention(x) x = self.c_proj(x) return x[:, :, 0].contiguous() class TimestepBlock(nn.Module): """ Any module where forward() takes timestep embeddings as a second argument. """ @abstractmethod def forward(self, x, emb): """ Apply the module to `x` given `emb` timestep embeddings. """ class TimestepEmbedSequential(nn.Sequential, TimestepBlock): """ A sequential module that passes timestep embeddings to the children that support it as an extra input. """ def forward(self, x, emb, context=None): for layer in self: if isinstance(layer, TimestepBlock): x = layer(x, emb) elif isinstance(layer, SpatialTransformer): x = layer(x, context) else: x = layer(x) return x class Upsample(nn.Module): """ An upsampling layer with an optional convolution. :param channels: channels in the inputs and outputs. :param use_conv: a bool determining if a convolution is applied. :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then upsampling occurs in the inner-two dimensions. """ def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1): super().__init__() self.channels = channels self.out_channels = out_channels or channels self.use_conv = use_conv self.dims = dims if use_conv: self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=padding) def forward(self, x): assert x.shape[1] == self.channels if self.dims == 3: x = F.interpolate( x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest" ) else: x = F.interpolate(x, scale_factor=2, mode="nearest") if self.use_conv: x = self.conv(x) return x class TransposedUpsample(nn.Module): 'Learned 2x upsampling without padding' def __init__(self, channels, out_channels=None, ks=5): super().__init__() self.channels = channels self.out_channels = out_channels or channels self.up = nn.ConvTranspose2d(self.channels,self.out_channels,kernel_size=ks,stride=2) def forward(self,x): return self.up(x) class Downsample(nn.Module): """ A downsampling layer with an optional convolution. :param channels: channels in the inputs and outputs. :param use_conv: a bool determining if a convolution is applied. :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then downsampling occurs in the inner-two dimensions. 
""" def __init__(self, channels, use_conv, dims=2, out_channels=None,padding=1): super().__init__() self.channels = channels self.out_channels = out_channels or channels self.use_conv = use_conv self.dims = dims stride = 2 if dims != 3 else (1, 2, 2) if use_conv: self.op = conv_nd( dims, self.channels, self.out_channels, 3, stride=stride, padding=padding ) else: assert self.channels == self.out_channels self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride) def forward(self, x): assert x.shape[1] == self.channels return self.op(x) class ResBlock(TimestepBlock): """ A residual block that can optionally change the number of channels. :param channels: the number of input channels. :param emb_channels: the number of timestep embedding channels. :param dropout: the rate of dropout. :param out_channels: if specified, the number of out channels. :param use_conv: if True and out_channels is specified, use a spatial convolution instead of a smaller 1x1 convolution to change the channels in the skip connection. :param dims: determines if the signal is 1D, 2D, or 3D. :param use_checkpoint: if True, use gradient checkpointing on this module. :param up: if True, use this block for upsampling. :param down: if True, use this block for downsampling. """ def __init__( self, channels, emb_channels, dropout, out_channels=None, use_conv=False, use_scale_shift_norm=False, dims=2, use_checkpoint=False, up=False, down=False, ): super().__init__() self.channels = channels self.emb_channels = emb_channels self.dropout = dropout self.out_channels = out_channels or channels self.use_conv = use_conv self.use_checkpoint = use_checkpoint self.use_scale_shift_norm = use_scale_shift_norm self.in_layers = nn.Sequential(
normalization(channels),
5
2023-12-16 12:52:14+00:00
4k
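The timestep_embedding utility in the BrainVis record above produces the sinusoidal time features that each ResBlock consumes through its embedding MLP. A compact sketch of the non-repeat branch with a shape check (the 320-channel width is an arbitrary example, not the model's configured value):

import math
import torch

def timestep_embedding(timesteps, dim, max_period=10000):
    """Sinusoidal timestep embeddings, following the snippet above (non-repeat branch)."""
    half = dim // 2
    freqs = torch.exp(
        -math.log(max_period) * torch.arange(half, dtype=torch.float32) / half
    ).to(timesteps.device)
    args = timesteps[:, None].float() * freqs[None]
    emb = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
    if dim % 2:  # pad odd dims with a zero column
        emb = torch.cat([emb, torch.zeros_like(emb[:, :1])], dim=-1)
    return emb

t = torch.tensor([0, 10, 250, 999])   # one timestep per batch element
emb = timestep_embedding(t, dim=320)
print(emb.shape)                      # torch.Size([4, 320])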
Rajeshwaran2001/DRM-Media-Tool
decrypter.py
[ { "identifier": "FileMergerDialog", "path": "file_merger_dialog.py", "snippet": "class FileMergerDialog(QDialog):\n def __init__(self, debug_logger, info_logger, folder_path, parent=None):\n super().__init__(parent)\n\n self.folder_path = folder_path\n\n self.setWindowTitle(\"Files Merger\")\n self.setGeometry(100, 100, 600, 300)\n\n self.layout = QVBoxLayout()\n\n self.file_table_label = QLabel(\"Files in Directory:\")\n self.file_table_widget = QTableWidget()\n self.file_table_widget.setColumnCount(\n 3) # Added a column for checkboxes\n self.file_table_widget.setHorizontalHeaderLabels(\n [\"File Name\", \"Select\", \"Type\"])\n\n self.merge_button = QPushButton(\"Merge\")\n self.merge_button.clicked.connect(self.merge_files)\n\n self.layout.addWidget(self.file_table_label)\n self.layout.addWidget(self.file_table_widget)\n self.layout.addWidget(self.merge_button)\n\n self.setLayout(self.layout)\n\n self.populate_file_table()\n self.file_table_widget.setColumnWidth(0, 400)\n self.debug_logger = debug_logger\n self.info_logger = info_logger\n\n def populate_file_table(self):\n # Clear existing items in the table widget\n self.file_table_widget.setRowCount(0)\n\n try:\n # List only video and audio files in the specified directory\n video_files = [file for file in os.listdir(\n self.folder_path) if file.lower().endswith(('.mp4', '.mkv', '.avi', '.webm'))]\n audio_files = [file for file in os.listdir(\n self.folder_path) if file.lower().endswith(('.mp3', '.wav', '.ogg', '.m4a', '.webm'))]\n\n # Add video files to the table widget\n for idx, file in enumerate(video_files):\n self.add_file_to_table(idx, file, \"Video\")\n\n # Add audio files to the table widget\n for idx, file in enumerate(audio_files, start=len(video_files)):\n self.add_file_to_table(idx, file, \"Audio\")\n\n except FileNotFoundError:\n # Handle the case where the specified directory does not exist\n self.file_table_widget.setRowCount(1)\n self.file_table_widget.setItem(\n 0, 2, QTableWidgetItem(\"Directory not found\"))\n\n def add_file_to_table(self, idx, file, file_type):\n self.file_table_widget.insertRow(idx)\n\n # Center-align the content in the first column\n item_file_name = QTableWidgetItem(file)\n item_file_name.setTextAlignment(0x0004 | 0x0080) # AlignCenter\n self.file_table_widget.setItem(idx, 0, item_file_name)\n\n # Create a widget for the checkbox and center-align it\n checkbox_widget = QWidget()\n checkbox_layout = QHBoxLayout(checkbox_widget)\n checkbox_layout.addStretch(3)\n checkbox = QCheckBox()\n checkbox.setChecked(False)\n checkbox_layout.addWidget(checkbox)\n checkbox_layout.addStretch(3)\n\n # Set the widget with the centered checkbox in the second column\n self.file_table_widget.setCellWidget(idx, 1, checkbox_widget)\n\n # Set the file type in the third column\n self.file_table_widget.setItem(idx, 2, QTableWidgetItem(file_type))\n\n def merge_files(self):\n selected_files = []\n metadata = {}\n for row in range(self.file_table_widget.rowCount()):\n checkbox = self.file_table_widget.cellWidget(\n row, 1).layout().itemAt(1).widget()\n if checkbox.isChecked():\n file_name = self.file_table_widget.item(row, 0).text()\n file_type = self.file_table_widget.item(row, 2).text()\n selected_files.append((file_name, file_type))\n\n # Check if there are at least one video and one audio file selected\n if any(file_type == 'Video' for (_, file_type) in selected_files) and \\\n any(file_type == 'Audio' for (_, file_type) in selected_files):\n\n # Get all files in the directory ending with .info.json\n info_files = 
[file for file in os.listdir(\n self.folder_path) if file.endswith('.info.json')]\n img_files = [file for file in os.listdir(\n self.folder_path) if file.lower().endswith(('.jpg', '.jpeg', '.png', '.webp'))]\n language_mapping = {\n 'en': 'eng',\n 'eng': 'eng',\n 'english': 'eng',\n 'ta': 'tam',\n 'tamil': 'tam',\n 'tam': 'tam'\n }\n\n # Define language codes\n language_codes = list(language_mapping.keys())\n suffixes = tuple(f'.{code}.vtt' for code in language_codes)\n subtitle_files = [file for file in os.listdir(\n self.folder_path) if file.endswith(suffixes)]\n thumbnail_file = None # Initialize with a default value\n # print(subtitle_files)\n\n if not info_files:\n show_error_message(self, \"Error: No Metadata files found.\")\n self.debug_logger.debug(\"Error: No Metadata files found.\")\n else:\n # Assume the first found .info.json file\n # print(selected_files)\n info_file_name = info_files[0]\n info_file_path = os.path.join(self.folder_path, info_file_name)\n with open(info_file_path, 'r', encoding='utf-8') as info_file:\n metadata = json.load(info_file)\n if not subtitle_files:\n show_error_message(self, \"Error: No Subtitle files found.\")\n self.debug_logger.debug(\"Error: No Subtitle files found.\")\n if img_files:\n thumbnail_file = os.path.join(self.folder_path, img_files[0])\n\n else:\n print(\"No matching files found.\")\n self.debug_logger.debug(\"No matching files found.\")\n\n # Build the ffmpeg command\n ffmpeg_command = ('ffmpeg ')\n\n # Lists to store input options for video, audio, and subtitle\n video_inputs = []\n audio_inputs = []\n subtitle_inputs = []\n # Initialize an array to store metadata strings\n metadata_strings_array = []\n\n for file_info in selected_files:\n input_file_path = os.path.join(self.folder_path, file_info[0])\n if file_info[1] == 'Video':\n video_inputs.append(f'-i \"{input_file_path}\" ')\n # Extract the extension from the video file\n extension = os.path.splitext(input_file_path)[1]\n elif file_info[1] == 'Audio':\n audio_inputs.append(f'-i \"{input_file_path}\" ')\n\n # Add subtitle inputs from the provided list //join([f'-map {i + 1}:a' for i in range(len(audio_inputs))]) +\n for i, subtitle_file in enumerate(subtitle_files):\n for code in language_codes:\n if f'.{code}.' 
in subtitle_file:\n language_code = language_mapping.get(code, 'Unknown')\n if language_code.lower() == 'eng':\n title = 'English'\n elif language_code.lower() == 'tam':\n title = 'Tamil'\n else:\n title = 'Unknown'\n metadata_strings = f'-metadata:s:s:{i} language=\"{language_code}\" -metadata:s:s:{i} title=\"{title}\" '\n metadata_strings_array.append(metadata_strings)\n subtitle_inputs.append(\n f'-i \"{os.path.join(self.folder_path, subtitle_file)}\" ')\n break\n\n # Combine the video, audio, and subtitle input options\n ffmpeg_command += ''.join(video_inputs) + \\\n ''.join(audio_inputs) + ''.join(subtitle_inputs)\n # print(subtitle_file)\n\n # Prepare the output file name\n episode_name = metadata.get(\n \"episode\", os.path.basename(self.folder_path))\n release_year = metadata.get(\"release_year\", \"\")\n release_year_suffix = f' ({release_year})' if release_year else ''\n output_file = f'{episode_name.replace(\":\", \" \")} {release_year_suffix}{extension}'\n # Handle the case where the file already exists\n co = 1\n while os.path.exists(os.path.join(self.folder_path, output_file)):\n # Replace spaces with underscores and colons with empty spaces\n episode_name = metadata.get(\n \"episode\", os.path.basename(self.folder_path))\n release_year = metadata.get(\"release_year\", \"\")\n release_year_suffix = f' ({release_year})' if release_year else ''\n\n output_file = f'{episode_name.replace(\":\", \" \")} {release_year_suffix} ({co}){extension}'\n co += 1\n\n # Determine subtitle codec based on video format\n if extension.lower() == '.mkv':\n subtitle_codec = 'srt'\n elif extension.lower() == '.mp4':\n subtitle_codec = 'mov_text'\n else:\n subtitle_codec = 'webvtt'\n\n # Convert the genres to a string with semicolons as separators\n # print(subtitle_codec)\n if \"genre\" in metadata:\n genre_string = ';'.join(metadata[\"genre\"])\n # Rest of your code using genre_string\n else:\n genre_string = \"\"\n\n if thumbnail_file is not None:\n ffmpeg_command += f'-i \"{thumbnail_file}\" '\n\n # Add metadata and subtitle merging options to the ffmpeg command\n ffmpeg_command += (\n f'-c:v copy '\n f'-c:a copy '\n f'-c:s {subtitle_codec} ' # Use the determined subtitle codec\n f'-c:v:1 png '\n )\n ffmpeg_command += ''.join(metadata_strings_array)\n\n if not info_files:\n ffmpeg_command += (\n f'-metadata genre=\"{genre_string}\" '\n f'-metadata handler_name=\"Amazon Prime Video\" '\n f'-metadata encoder=\"FFmpeg\" '\n )\n else:\n ffmpeg_command += (\n f'-metadata title=\"{metadata[\"episode\"]}\" '\n f'-metadata comment=\"{metadata[\"description\"]}\" '\n f'-metadata copyright=\"{metadata[\"extractor_key\"]}\" '\n f'-metadata Artist=\"{metadata[\"extractor_key\"]}\" '\n f'-metadata date=\"{metadata[\"release_year\"]}\" '\n f'-metadata genre=\"{genre_string}\" '\n f'-metadata handler_name=\"Amazon Prime Video\" '\n f'-metadata encoder=\"FFmpeg\" '\n )\n\n ffmpeg_command += (\n # Map all video, audio, and subtitle streams\n f'-map 0:v ' + ' '.join([f'-map {i + 1}:a' for i in range(len(audio_inputs))]) +\n ' ' + ' '.join([f'-map {i + 1 + len(audio_inputs)}:s' for i in range(len(subtitle_inputs))]) +\n ' ' + ' '.join([f'-map { 1 + (len(audio_inputs) + len(subtitle_inputs))}']\n ) if thumbnail_file is not None else ''\n )\n\n ffmpeg_command += (\n ' -disposition:v:1 attached_pic ' # Set subtitle stream as default\n f'\"{os.path.join(self.folder_path, output_file)}\"'\n )\n\n # Run the ffmpeg command\n try:\n # print(ffmpeg_command)\n subprocess.run(ffmpeg_command, shell=True, check=True)\n # 
Assuming you have access to the close method of your window\n self.close()\n success_message = \"Files Merged successfully.\"\n show_success_message(self, success_message)\n self.info_logger.info(success_message)\n except subprocess.CalledProcessError as e:\n # Handle the error if ffmpeg command fails\n show_error_message(self, f\"Error during merging: {e}\")\n self.debug_logger.debug(f\"Error during merging: {e}\")\n return False, f\"Error during merging: {e}\"\n else:\n show_error_message(\n self, \"Please select at least two files for merging.\")\n self.debug_logger.debug(\n \"Please select at least two files for merging.\")" }, { "identifier": "show_error_message", "path": "helper/message.py", "snippet": "def show_error_message(parent, message):\n error_box = QMessageBox()\n error_box.setIcon(QMessageBox.Critical)\n error_box.setWindowTitle(\"Error\")\n error_box.setText(message)\n error_box.setWindowIcon(parent.windowIcon())\n error_box.exec_()" }, { "identifier": "show_success_message", "path": "helper/message.py", "snippet": "def show_success_message(parent, message):\n success_box = QMessageBox()\n success_box.setIcon(QMessageBox.Information)\n success_box.setWindowTitle(\"Success\")\n success_box.setText(message)\n success_box.setWindowIcon(parent.windowIcon())\n success_box.exec_()" } ]
from PyQt5.QtWidgets import QWidget, QVBoxLayout, QHBoxLayout, QLabel, QLineEdit, QPushButton, QFileDialog, QListWidget from file_merger_dialog import FileMergerDialog from helper.message import show_error_message, show_success_message import os import sqlite3 import subprocess
3,468
class Decrypter(QWidget): def __init__(self, debug_logger, info_logger): super().__init__() self.init_ui() self.conn = None # Database connection self.cursor = None # Database cursor self.debug_logger = debug_logger self.info_logger = info_logger self.create_database() def init_ui(self): layout = QVBoxLayout() # Create a horizontal layout for the "Select Folder" and folder path select_folder_layout = QHBoxLayout() select_folder_label = QLabel("Select Folder:") select_button = QPushButton("Select Folder") select_button.clicked.connect(self.browse_folder) self.folder_path_lineedit = QLineEdit() select_folder_layout.addWidget(select_folder_label) select_folder_layout.addWidget(select_button) select_folder_layout.addWidget(self.folder_path_lineedit) layout.addLayout(select_folder_layout) # Create horizontal layout for buttons (Check Folder, GetKeys, Decrypt) buttons_layout = QHBoxLayout() check_folder_button = QPushButton("Check Folder") check_folder_button.clicked.connect(self.check_folder_existence) buttons_layout.addWidget(check_folder_button) get_keys_button = QPushButton("Get Keys from DB") get_keys_button.clicked.connect(self.get_keys_from_db) buttons_layout.addWidget(get_keys_button) decrypt_button = QPushButton("Decrypt") decrypt_button.clicked.connect(self.decrypt_files) buttons_layout.addWidget(decrypt_button) merge_button = QPushButton("Media Merger") merge_button.clicked.connect(self.merger) buttons_layout.addWidget(merge_button) layout.addLayout(buttons_layout) # Create a QListWidget for displaying search results layout.addWidget(QLabel("Search Results:")) self.search_result_list = QListWidget() layout.addWidget(self.search_result_list) self.setLayout(layout) # Add these methods to handle button clicks def browse_folder(self): folder_path = QFileDialog.getExistingDirectory(self, "Select Folder") if folder_path: self.folder_path_lineedit.setText(folder_path) # self.search_database(folder_path) def check_folder_existence(self): folder_path = self.folder_path_lineedit.text() if os.path.exists(folder_path): show_success_message(self, "Folder exists.") self.info_logger.info("Folder exists.") else:
class Decrypter(QWidget): def __init__(self, debug_logger, info_logger): super().__init__() self.init_ui() self.conn = None # Database connection self.cursor = None # Database cursor self.debug_logger = debug_logger self.info_logger = info_logger self.create_database() def init_ui(self): layout = QVBoxLayout() # Create a horizontal layout for the "Select Folder" and folder path select_folder_layout = QHBoxLayout() select_folder_label = QLabel("Select Folder:") select_button = QPushButton("Select Folder") select_button.clicked.connect(self.browse_folder) self.folder_path_lineedit = QLineEdit() select_folder_layout.addWidget(select_folder_label) select_folder_layout.addWidget(select_button) select_folder_layout.addWidget(self.folder_path_lineedit) layout.addLayout(select_folder_layout) # Create horizontal layout for buttons (Check Folder, GetKeys, Decrypt) buttons_layout = QHBoxLayout() check_folder_button = QPushButton("Check Folder") check_folder_button.clicked.connect(self.check_folder_existence) buttons_layout.addWidget(check_folder_button) get_keys_button = QPushButton("Get Keys from DB") get_keys_button.clicked.connect(self.get_keys_from_db) buttons_layout.addWidget(get_keys_button) decrypt_button = QPushButton("Decrypt") decrypt_button.clicked.connect(self.decrypt_files) buttons_layout.addWidget(decrypt_button) merge_button = QPushButton("Media Merger") merge_button.clicked.connect(self.merger) buttons_layout.addWidget(merge_button) layout.addLayout(buttons_layout) # Create a QListWidget for displaying search results layout.addWidget(QLabel("Search Results:")) self.search_result_list = QListWidget() layout.addWidget(self.search_result_list) self.setLayout(layout) # Add these methods to handle button clicks def browse_folder(self): folder_path = QFileDialog.getExistingDirectory(self, "Select Folder") if folder_path: self.folder_path_lineedit.setText(folder_path) # self.search_database(folder_path) def check_folder_existence(self): folder_path = self.folder_path_lineedit.text() if os.path.exists(folder_path): show_success_message(self, "Folder exists.") self.info_logger.info("Folder exists.") else:
show_error_message(self, "Folder does not exist.")
1
2023-12-18 11:50:40+00:00
4k
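The FileMergerDialog context above ultimately reduces to a single ffmpeg invocation that maps a video stream, an audio track, and a subtitle file into one container with stream-level metadata. A pared-down sketch of that command (the file names and title are invented; passing the arguments as a list also avoids the shell-quoting concerns of assembling one long string):

import subprocess

video, audio, subs = "episode.mp4", "episode.m4a", "episode.eng.vtt"
output = "Episode 1 (2023).mp4"

cmd = [
    "ffmpeg",
    "-i", video, "-i", audio, "-i", subs,
    "-map", "0:v", "-map", "1:a", "-map", "2:s",   # keep all three streams
    "-c:v", "copy", "-c:a", "copy",                # no re-encoding of video/audio
    "-c:s", "mov_text",                            # MP4-compatible subtitle codec, as in the snippet
    "-metadata:s:s:0", "language=eng",
    "-metadata", "title=Episode 1",
    output,
]
subprocess.run(cmd, check=True)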
gmum/ViewingDirectionGaussianSplatting
scene/gaussian_model.py
[ { "identifier": "inverse_sigmoid", "path": "utils/general_utils.py", "snippet": "def inverse_sigmoid(x):\n return torch.log(x/(1-x))" }, { "identifier": "get_expon_lr_func", "path": "utils/general_utils.py", "snippet": "def get_expon_lr_func(\n lr_init, lr_final, lr_delay_steps=0, lr_delay_mult=1.0, max_steps=1000000\n):\n \"\"\"\n Copied from Plenoxels\n\n Continuous learning rate decay function. Adapted from JaxNeRF\n The returned rate is lr_init when step=0 and lr_final when step=max_steps, and\n is log-linearly interpolated elsewhere (equivalent to exponential decay).\n If lr_delay_steps>0 then the learning rate will be scaled by some smooth\n function of lr_delay_mult, such that the initial learning rate is\n lr_init*lr_delay_mult at the beginning of optimization but will be eased back\n to the normal learning rate when steps>lr_delay_steps.\n :param conf: config subtree 'lr' or similar\n :param max_steps: int, the number of steps during optimization.\n :return HoF which takes step as input\n \"\"\"\n\n def helper(step):\n if step < 0 or (lr_init == 0.0 and lr_final == 0.0):\n # Disable this parameter\n return 0.0\n if lr_delay_steps > 0:\n # A kind of reverse cosine decay.\n delay_rate = lr_delay_mult + (1 - lr_delay_mult) * np.sin(\n 0.5 * np.pi * np.clip(step / lr_delay_steps, 0, 1)\n )\n else:\n delay_rate = 1.0\n t = np.clip(step / max_steps, 0, 1)\n log_lerp = np.exp(np.log(lr_init) * (1 - t) + np.log(lr_final) * t)\n return delay_rate * log_lerp\n\n return helper" }, { "identifier": "build_rotation", "path": "utils/general_utils.py", "snippet": "def build_rotation(r):\n norm = torch.sqrt(r[:,0]*r[:,0] + r[:,1]*r[:,1] + r[:,2]*r[:,2] + r[:,3]*r[:,3])\n\n q = r / norm[:, None]\n\n R = torch.zeros((q.size(0), 3, 3), device='cuda')\n\n r = q[:, 0]\n x = q[:, 1]\n y = q[:, 2]\n z = q[:, 3]\n\n R[:, 0, 0] = 1 - 2 * (y*y + z*z)\n R[:, 0, 1] = 2 * (x*y - r*z)\n R[:, 0, 2] = 2 * (x*z + r*y)\n R[:, 1, 0] = 2 * (x*y + r*z)\n R[:, 1, 1] = 1 - 2 * (x*x + z*z)\n R[:, 1, 2] = 2 * (y*z - r*x)\n R[:, 2, 0] = 2 * (x*z - r*y)\n R[:, 2, 1] = 2 * (y*z + r*x)\n R[:, 2, 2] = 1 - 2 * (x*x + y*y)\n return R" }, { "identifier": "mkdir_p", "path": "utils/system_utils.py", "snippet": "def mkdir_p(folder_path):\n # Creates a directory. 
equivalent to using mkdir -p on the command line\n try:\n makedirs(folder_path)\n except OSError as exc: # Python >2.5\n if exc.errno == EEXIST and path.isdir(folder_path):\n pass\n else:\n raise" }, { "identifier": "RGB2SH", "path": "utils/sh_utils.py", "snippet": "def RGB2SH(rgb):\n return (rgb - 0.5) / C0" }, { "identifier": "BasicPointCloud", "path": "utils/graphics_utils.py", "snippet": "class BasicPointCloud(NamedTuple):\n points : np.array\n colors : np.array\n normals : np.array" }, { "identifier": "strip_symmetric", "path": "utils/general_utils.py", "snippet": "def strip_symmetric(sym):\n return strip_lowerdiag(sym)" }, { "identifier": "build_scaling_rotation", "path": "utils/general_utils.py", "snippet": "def build_scaling_rotation(s, r):\n L = torch.zeros((s.shape[0], 3, 3), dtype=torch.float, device=\"cuda\")\n R = build_rotation(r)\n\n L[:,0,0] = s[:,0]\n L[:,1,1] = s[:,1]\n L[:,2,2] = s[:,2]\n\n L = R @ L\n return L" }, { "identifier": "MLP", "path": "scene/nerf_model.py", "snippet": "class MLP(nn.Module):\n def __init__(self, input_size, output_size, target = \"\"):\n super().__init__()\n self.input_size = input_size\n self.output_size = output_size\n self.target = target\n self.slope = 0.01\n W = 32\n self.main = nn.Sequential(\n nn.Linear(self.input_size, W*2),\n nn.LeakyReLU(self.slope),\n nn.Linear(W*2, W),\n nn.LeakyReLU(self.slope),\n nn.Linear(W, W),\n )\n self.rotation = nn.Sequential(nn.Linear(W, self.output_size), nn.Sigmoid())\n #self.out = nn.Sequential(nn.Linear(W, W), nn.Sigmoid())\n self.alpha = nn.Sequential(nn.Linear(W, 1), nn.Tanh())\n\n def forward(self, x, rotations, scales, y):\n x = x.view(x.size(0), -1) \n x = torch.nn.functional.normalize(x)\n x = torch.concat([x, y, rotations, scales], dim=1)\n x = self.main(x)\n #if self.target == \"rotation\":\n # return self.rotation(x)\n return self.alpha(x)" }, { "identifier": "Embedder", "path": "scene/nerf_model.py", "snippet": "class Embedder:\n def __init__(self):\n self.include_input = False\n self.input_dims = 3\n self.max_freq_log2 = 3\n self.num_freqs = 4\n self.log_sampling = True\n self.periodic_fns = [torch.sin, torch.cos]\n self.create_embedding_fn()\n \n def create_embedding_fn(self):\n embed_fns = []\n out_dim = 0\n if self.include_input:\n embed_fns.append(lambda x : x)\n out_dim += self.input_dims\n \n max_freq = self.max_freq_log2\n N_freqs = self.num_freqs = 4\n \n if self.log_sampling:\n freq_bands = 2.**torch.linspace(0., max_freq, steps=N_freqs)\n else:\n freq_bands = torch.linspace(2.**0., 2.**max_freq, steps=N_freqs)\n \n for freq in freq_bands:\n for p_fn in self.periodic_fns:\n embed_fns.append(lambda x, p_fn=p_fn, freq=freq : p_fn(x * freq))\n out_dim += self.input_dims\n \n self.embed_fns = embed_fns\n self.out_dim = out_dim\n \n def embed(self, inputs):\n return torch.cat([fn(inputs) for fn in self.embed_fns], -1)" } ]
import torch import numpy as np import os from utils.general_utils import inverse_sigmoid, get_expon_lr_func, build_rotation from torch import nn from utils.system_utils import mkdir_p from plyfile import PlyData, PlyElement from utils.sh_utils import RGB2SH from simple_knn._C import distCUDA2 from utils.graphics_utils import BasicPointCloud from utils.general_utils import strip_symmetric, build_scaling_rotation from scene.nerf_model import MLP, Embedder
2,609
# # Copyright (C) 2023, Inria # GRAPHDECO research group, https://team.inria.fr/graphdeco # All rights reserved. # # This software is free for non-commercial, research and evaluation use # under the terms of the LICENSE.md file. # # For inquiries contact [email protected] # class GaussianModel: def setup_functions(self): def build_covariance_from_scaling_rotation(scaling, scaling_modifier, rotation): L = build_scaling_rotation(scaling_modifier * scaling, rotation) actual_covariance = L @ L.transpose(1, 2) symm = strip_symmetric(actual_covariance) return symm self.scaling_activation = torch.exp self.scaling_inverse_activation = torch.log self.covariance_activation = build_covariance_from_scaling_rotation self.opacity_activation = torch.sigmoid self.inverse_opacity_activation = inverse_sigmoid self.rotation_activation = torch.nn.functional.normalize def __init__(self, sh_degree : int): self.active_sh_degree = 0 self.max_sh_degree = sh_degree self._xyz = torch.empty(0) self._features_dc = torch.empty(0) self._features_rest = torch.empty(0) self._scaling = torch.empty(0) self._rotation = torch.empty(0) self._opacity = torch.empty(0) self.max_radii2D = torch.empty(0) self.xyz_gradient_accum = torch.empty(0) self.denom = torch.empty(0) #self._mlp_r: MLP = None self._mlp: MLP = None self.optimizer = None self.nn_optimizer = None self.percent_dense = 0 self.spatial_lr_scale = 0 self.setup_functions() def capture(self): return ( self.active_sh_degree, self._xyz, self._features_dc, self._features_rest, self._scaling, self._rotation, self._opacity, self.max_radii2D, self.xyz_gradient_accum, self.denom, self.optimizer.state_dict(), self._mlp.state_dict(), self.mlp_optimizer.state_dict(), self.spatial_lr_scale, ) def restore(self, model_args, training_args): (self.active_sh_degree, self._xyz, self._features_dc, self._features_rest, self._scaling, self._rotation, self._opacity, self.max_radii2D, xyz_gradient_accum, denom, opt_dict, self.spatial_lr_scale) = model_args self.training_setup(training_args) self.xyz_gradient_accum = xyz_gradient_accum self.denom = denom self.optimizer.load_state_dict(opt_dict) self._mlp.load_state_dict(opt_dict) @property def get_scaling(self): return self.scaling_activation(self._scaling) @property def get_rotation(self): return self.rotation_activation(self._rotation) @property def get_xyz(self): return self._xyz @property def get_features(self): features_dc = self._features_dc features_rest = self._features_rest return torch.cat((features_dc, features_rest), dim=1) @property def get_opacity(self): return self.opacity_activation(self._opacity) def get_covariance(self, scaling_modifier = 1): return self.covariance_activation(self.get_scaling, scaling_modifier, self._rotation) def oneupSHdegree(self): if self.active_sh_degree < self.max_sh_degree: self.active_sh_degree += 1
# # Copyright (C) 2023, Inria # GRAPHDECO research group, https://team.inria.fr/graphdeco # All rights reserved. # # This software is free for non-commercial, research and evaluation use # under the terms of the LICENSE.md file. # # For inquiries contact [email protected] # class GaussianModel: def setup_functions(self): def build_covariance_from_scaling_rotation(scaling, scaling_modifier, rotation): L = build_scaling_rotation(scaling_modifier * scaling, rotation) actual_covariance = L @ L.transpose(1, 2) symm = strip_symmetric(actual_covariance) return symm self.scaling_activation = torch.exp self.scaling_inverse_activation = torch.log self.covariance_activation = build_covariance_from_scaling_rotation self.opacity_activation = torch.sigmoid self.inverse_opacity_activation = inverse_sigmoid self.rotation_activation = torch.nn.functional.normalize def __init__(self, sh_degree : int): self.active_sh_degree = 0 self.max_sh_degree = sh_degree self._xyz = torch.empty(0) self._features_dc = torch.empty(0) self._features_rest = torch.empty(0) self._scaling = torch.empty(0) self._rotation = torch.empty(0) self._opacity = torch.empty(0) self.max_radii2D = torch.empty(0) self.xyz_gradient_accum = torch.empty(0) self.denom = torch.empty(0) #self._mlp_r: MLP = None self._mlp: MLP = None self.optimizer = None self.nn_optimizer = None self.percent_dense = 0 self.spatial_lr_scale = 0 self.setup_functions() def capture(self): return ( self.active_sh_degree, self._xyz, self._features_dc, self._features_rest, self._scaling, self._rotation, self._opacity, self.max_radii2D, self.xyz_gradient_accum, self.denom, self.optimizer.state_dict(), self._mlp.state_dict(), self.mlp_optimizer.state_dict(), self.spatial_lr_scale, ) def restore(self, model_args, training_args): (self.active_sh_degree, self._xyz, self._features_dc, self._features_rest, self._scaling, self._rotation, self._opacity, self.max_radii2D, xyz_gradient_accum, denom, opt_dict, self.spatial_lr_scale) = model_args self.training_setup(training_args) self.xyz_gradient_accum = xyz_gradient_accum self.denom = denom self.optimizer.load_state_dict(opt_dict) self._mlp.load_state_dict(opt_dict) @property def get_scaling(self): return self.scaling_activation(self._scaling) @property def get_rotation(self): return self.rotation_activation(self._rotation) @property def get_xyz(self): return self._xyz @property def get_features(self): features_dc = self._features_dc features_rest = self._features_rest return torch.cat((features_dc, features_rest), dim=1) @property def get_opacity(self): return self.opacity_activation(self._opacity) def get_covariance(self, scaling_modifier = 1): return self.covariance_activation(self.get_scaling, scaling_modifier, self._rotation) def oneupSHdegree(self): if self.active_sh_degree < self.max_sh_degree: self.active_sh_degree += 1
def create_from_pcd(self, pcd : BasicPointCloud, spatial_lr_scale : float):
5
2023-12-21 10:09:17+00:00
4k
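The record above revolves around assembling per-Gaussian 3D covariances from scale vectors and rotation quaternions (build_scaling_rotation followed by strip_symmetric in setup_functions). A compact sketch of that construction is shown below; the quaternion convention (w, x, y, z) and the toy inputs are assumptions for illustration, not taken from the repository:

import torch

def quat_to_rotmat(q: torch.Tensor) -> torch.Tensor:
    # q: (N, 4) unit quaternions (w, x, y, z) -> (N, 3, 3) rotation matrices
    w, x, y, z = q.unbind(-1)
    return torch.stack([
        1 - 2 * (y * y + z * z), 2 * (x * y - w * z),     2 * (x * z + w * y),
        2 * (x * y + w * z),     1 - 2 * (x * x + z * z), 2 * (y * z - w * x),
        2 * (x * z - w * y),     2 * (y * z + w * x),     1 - 2 * (x * x + y * y),
    ], dim=-1).reshape(-1, 3, 3)

def covariance_from_scaling_rotation(scales: torch.Tensor, quats: torch.Tensor) -> torch.Tensor:
    # L = R @ diag(s); covariance = L @ L^T, one symmetric PSD matrix per Gaussian
    R = quat_to_rotmat(torch.nn.functional.normalize(quats, dim=-1))
    L = R * scales.unsqueeze(-2)      # scales each column of R, equivalent to R @ diag(s)
    return L @ L.transpose(-1, -2)    # (N, 3, 3)

cov = covariance_from_scaling_rotation(torch.rand(5, 3) + 0.1, torch.randn(5, 4))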
tonnetonne814/PL-Bert-VITS2
models.py
[ { "identifier": "get_padding", "path": "commons.py", "snippet": "def get_padding(kernel_size, dilation=1):\n return int((kernel_size * dilation - dilation) / 2)" }, { "identifier": "init_weights", "path": "commons.py", "snippet": "def init_weights(m, mean=0.0, std=0.01):\n classname = m.__class__.__name__\n if classname.find(\"Conv\") != -1:\n m.weight.data.normal_(mean, std)" } ]
import copy import math import torch import attentions import commons import modules import monotonic_align from torch import nn from torch.nn import AvgPool1d, Conv1d, Conv2d, ConvTranspose1d from torch.nn import functional as F from torch.nn.utils import remove_weight_norm, spectral_norm, weight_norm from commons import get_padding, init_weights
2,274
): super().__init__() self.channels = channels self.hidden_channels = hidden_channels self.kernel_size = kernel_size self.dilation_rate = dilation_rate self.n_layers = n_layers self.n_flows = n_flows self.gin_channels = gin_channels self.flows = nn.ModuleList() for i in range(n_flows): self.flows.append( modules.ResidualCouplingLayer( channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True, ) ) self.flows.append(modules.Flip()) def forward(self, x, x_mask, g=None, reverse=False): if not reverse: for flow in self.flows: x, _ = flow(x, x_mask, g=g, reverse=reverse) else: for flow in reversed(self.flows): x = flow(x, x_mask, g=g, reverse=reverse) return x class PosteriorEncoder(nn.Module): def __init__( self, in_channels, out_channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, ): super().__init__() self.in_channels = in_channels self.out_channels = out_channels self.hidden_channels = hidden_channels self.kernel_size = kernel_size self.dilation_rate = dilation_rate self.n_layers = n_layers self.gin_channels = gin_channels self.pre = nn.Conv1d(in_channels, hidden_channels, 1) self.enc = modules.WN( hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, ) self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) def forward(self, x, x_lengths, g=None): x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to( x.dtype ) x = self.pre(x) * x_mask x = self.enc(x, x_mask, g=g) stats = self.proj(x) * x_mask m, logs = torch.split(stats, self.out_channels, dim=1) z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask return z, m, logs, x_mask class Generator(torch.nn.Module): def __init__( self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=0, ): super(Generator, self).__init__() self.num_kernels = len(resblock_kernel_sizes) self.num_upsamples = len(upsample_rates) self.conv_pre = Conv1d( initial_channel, upsample_initial_channel, 7, 1, padding=3 ) resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 self.ups = nn.ModuleList() for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): self.ups.append( weight_norm( ConvTranspose1d( upsample_initial_channel // (2**i), upsample_initial_channel // (2 ** (i + 1)), k, u, padding=(k - u) // 2, ) ) ) self.resblocks = nn.ModuleList() for i in range(len(self.ups)): ch = upsample_initial_channel // (2 ** (i + 1)) for j, (k, d) in enumerate( zip(resblock_kernel_sizes, resblock_dilation_sizes) ): self.resblocks.append(resblock(ch, k, d)) self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
AVAILABLE_FLOW_TYPES = [ "pre_conv", "pre_conv2", "fft", "mono_layer_inter_residual", "mono_layer_post_residual", ] AVAILABLE_DURATION_DISCRIMINATOR_TYPES = [ "dur_disc_1", "dur_disc_2", ] class StochasticDurationPredictor(nn.Module): def __init__( self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0, ): super().__init__() filter_channels = in_channels # it needs to be removed from future version. self.in_channels = in_channels self.filter_channels = filter_channels self.kernel_size = kernel_size self.p_dropout = p_dropout self.n_flows = n_flows self.gin_channels = gin_channels self.log_flow = modules.Log() self.flows = nn.ModuleList() self.flows.append(modules.ElementwiseAffine(2)) for i in range(n_flows): self.flows.append( modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3) ) self.flows.append(modules.Flip()) self.post_pre = nn.Conv1d(1, filter_channels, 1) self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1) self.post_convs = modules.DDSConv( filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout ) self.post_flows = nn.ModuleList() self.post_flows.append(modules.ElementwiseAffine(2)) for i in range(4): self.post_flows.append( modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3) ) self.post_flows.append(modules.Flip()) self.pre = nn.Conv1d(in_channels, filter_channels, 1) self.proj = nn.Conv1d(filter_channels, filter_channels, 1) self.convs = modules.DDSConv( filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout ) if gin_channels != 0: self.cond = nn.Conv1d(gin_channels, filter_channels, 1) def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0): x = torch.detach(x) x = self.pre(x) if g is not None: g = torch.detach(g) x = x + self.cond(g) x = self.convs(x, x_mask) x = self.proj(x) * x_mask if not reverse: flows = self.flows assert w is not None logdet_tot_q = 0 h_w = self.post_pre(w) h_w = self.post_convs(h_w, x_mask) h_w = self.post_proj(h_w) * x_mask e_q = ( torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask ) z_q = e_q for flow in self.post_flows: z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w)) logdet_tot_q += logdet_q z_u, z1 = torch.split(z_q, [1, 1], 1) u = torch.sigmoid(z_u) * x_mask z0 = (w - u) * x_mask logdet_tot_q += torch.sum( (F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1, 2] ) logq = ( torch.sum(-0.5 * (math.log(2 * math.pi) + (e_q**2)) * x_mask, [1, 2]) - logdet_tot_q ) logdet_tot = 0 z0, logdet = self.log_flow(z0, x_mask) logdet_tot += logdet z = torch.cat([z0, z1], 1) for flow in flows: z, logdet = flow(z, x_mask, g=x, reverse=reverse) logdet_tot = logdet_tot + logdet nll = ( torch.sum(0.5 * (math.log(2 * math.pi) + (z**2)) * x_mask, [1, 2]) - logdet_tot ) return nll + logq # [b] else: flows = list(reversed(self.flows)) flows = flows[:-2] + [flows[-1]] # remove a useless vflow z = ( torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale ) for flow in flows: z = flow(z, x_mask, g=x, reverse=reverse) z0, z1 = torch.split(z, [1, 1], 1) logw = z0 return logw class DurationPredictor(nn.Module): def __init__( self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0 ): super().__init__() self.in_channels = in_channels self.filter_channels = filter_channels self.kernel_size = kernel_size self.p_dropout = p_dropout self.gin_channels = gin_channels self.drop = nn.Dropout(p_dropout) self.conv_1 = nn.Conv1d( in_channels, filter_channels, kernel_size, padding=kernel_size // 2 ) self.norm_1 = 
modules.LayerNorm(filter_channels) self.conv_2 = nn.Conv1d( filter_channels, filter_channels, kernel_size, padding=kernel_size // 2 ) self.norm_2 = modules.LayerNorm(filter_channels) self.proj = nn.Conv1d(filter_channels, 1, 1) if gin_channels != 0: self.cond = nn.Conv1d(gin_channels, in_channels, 1) def forward(self, x, x_mask, g=None): x = torch.detach(x) if g is not None: g = torch.detach(g) x = x + self.cond(g) x = self.conv_1(x * x_mask) x = torch.relu(x) x = self.norm_1(x) x = self.drop(x) x = self.conv_2(x * x_mask) x = torch.relu(x) x = self.norm_2(x) x = self.drop(x) x = self.proj(x * x_mask) return x * x_mask class DurationDiscriminatorV1(nn.Module): # vits2 # TODO : not using "spk conditioning" for now according to the paper. # Can be a better discriminator if we use it. def __init__( self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0 ): super().__init__() self.in_channels = in_channels self.filter_channels = filter_channels self.kernel_size = kernel_size self.p_dropout = p_dropout self.gin_channels = gin_channels self.drop = nn.Dropout(p_dropout) self.conv_1 = nn.Conv1d( in_channels, filter_channels, kernel_size, padding=kernel_size // 2 ) # self.norm_1 = modules.LayerNorm(filter_channels) self.conv_2 = nn.Conv1d( filter_channels, filter_channels, kernel_size, padding=kernel_size // 2 ) # self.norm_2 = modules.LayerNorm(filter_channels) self.dur_proj = nn.Conv1d(1, filter_channels, 1) self.pre_out_conv_1 = nn.Conv1d( 2 * filter_channels, filter_channels, kernel_size, padding=kernel_size // 2 ) self.pre_out_norm_1 = modules.LayerNorm(filter_channels) self.pre_out_conv_2 = nn.Conv1d( filter_channels, filter_channels, kernel_size, padding=kernel_size // 2 ) self.pre_out_norm_2 = modules.LayerNorm(filter_channels) # if gin_channels != 0: # self.cond = nn.Conv1d(gin_channels, in_channels, 1) self.output_layer = nn.Sequential(nn.Linear(filter_channels, 1), nn.Sigmoid()) def forward_probability(self, x, x_mask, dur, g=None): dur = self.dur_proj(dur) x = torch.cat([x, dur], dim=1) x = self.pre_out_conv_1(x * x_mask) # x = torch.relu(x) # x = self.pre_out_norm_1(x) # x = self.drop(x) x = self.pre_out_conv_2(x * x_mask) # x = torch.relu(x) # x = self.pre_out_norm_2(x) # x = self.drop(x) x = x * x_mask x = x.transpose(1, 2) output_prob = self.output_layer(x) return output_prob def forward(self, x, x_mask, dur_r, dur_hat, g=None): x = torch.detach(x) # if g is not None: # g = torch.detach(g) # x = x + self.cond(g) x = self.conv_1(x * x_mask) # x = torch.relu(x) # x = self.norm_1(x) # x = self.drop(x) x = self.conv_2(x * x_mask) # x = torch.relu(x) # x = self.norm_2(x) # x = self.drop(x) output_probs = [] for dur in [dur_r, dur_hat]: output_prob = self.forward_probability(x, x_mask, dur, g) output_probs.append(output_prob) return output_probs class DurationDiscriminatorV2(nn.Module): # vits2 # TODO : not using "spk conditioning" for now according to the paper. # Can be a better discriminator if we use it. 
def __init__( self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0 ): super().__init__() self.in_channels = in_channels self.filter_channels = filter_channels self.kernel_size = kernel_size self.p_dropout = p_dropout self.gin_channels = gin_channels self.conv_1 = nn.Conv1d( in_channels, filter_channels, kernel_size, padding=kernel_size // 2 ) self.norm_1 = modules.LayerNorm(filter_channels) self.conv_2 = nn.Conv1d( filter_channels, filter_channels, kernel_size, padding=kernel_size // 2 ) self.norm_2 = modules.LayerNorm(filter_channels) self.dur_proj = nn.Conv1d(1, filter_channels, 1) self.pre_out_conv_1 = nn.Conv1d( 2 * filter_channels, filter_channels, kernel_size, padding=kernel_size // 2 ) self.pre_out_norm_1 = modules.LayerNorm(filter_channels) self.pre_out_conv_2 = nn.Conv1d( filter_channels, filter_channels, kernel_size, padding=kernel_size // 2 ) self.pre_out_norm_2 = modules.LayerNorm(filter_channels) # if gin_channels != 0: # self.cond = nn.Conv1d(gin_channels, in_channels, 1) self.output_layer = nn.Sequential(nn.Linear(filter_channels, 1), nn.Sigmoid()) def forward_probability(self, x, x_mask, dur, g=None): dur = self.dur_proj(dur) x = torch.cat([x, dur], dim=1) x = self.pre_out_conv_1(x * x_mask) x = torch.relu(x) x = self.pre_out_norm_1(x) x = self.pre_out_conv_2(x * x_mask) x = torch.relu(x) x = self.pre_out_norm_2(x) x = x * x_mask x = x.transpose(1, 2) output_prob = self.output_layer(x) return output_prob def forward(self, x, x_mask, dur_r, dur_hat, g=None): x = torch.detach(x) # if g is not None: # g = torch.detach(g) # x = x + self.cond(g) x = self.conv_1(x * x_mask) x = torch.relu(x) x = self.norm_1(x) x = self.conv_2(x * x_mask) x = torch.relu(x) x = self.norm_2(x) output_probs = [] for dur in [dur_r, dur_hat]: output_prob = self.forward_probability(x, x_mask, dur, g) output_probs.append([output_prob]) return output_probs class TextEncoder(nn.Module): def __init__( self, n_vocab, out_channels, hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout, bert_emb_size = 768, gin_channels=0, ): super().__init__() self.n_vocab = n_vocab self.out_channels = out_channels self.hidden_channels = hidden_channels self.filter_channels = filter_channels self.n_heads = n_heads self.n_layers = n_layers self.kernel_size = kernel_size self.p_dropout = p_dropout self.gin_channels = gin_channels self.emb = nn.Embedding(n_vocab, hidden_channels) nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5) self.encoder = attentions.Encoder( hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout, gin_channels=self.gin_channels, ) self.bert_proj = nn.Conv1d(bert_emb_size, hidden_channels, 1) self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) def forward(self, x, x_lengths, bert, bert_lengths, g=None): x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h] x = torch.transpose(x, 1, -1) # [b, h, t] x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to( x.dtype ) bert = self.bert_proj(bert.permute(0,2,1)) x = x + bert x = self.encoder(x * x_mask, x_mask, g=g) stats = self.proj(x) * x_mask m, logs = torch.split(stats, self.out_channels, dim=1) return x, m, logs, x_mask class ResidualCouplingTransformersLayer2(nn.Module): # vits2 def __init__( self, channels, hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=0, gin_channels=0, mean_only=False, ): assert channels % 2 == 0, "channels should be divisible by 2" super().__init__() self.channels = channels self.hidden_channels = 
hidden_channels self.kernel_size = kernel_size self.dilation_rate = dilation_rate self.n_layers = n_layers self.half_channels = channels // 2 self.mean_only = mean_only self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) self.pre_transformer = attentions.Encoder( hidden_channels, hidden_channels, n_heads=2, n_layers=1, kernel_size=kernel_size, p_dropout=p_dropout, # window_size=None, ) self.enc = modules.WN( hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels, ) self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) self.post.weight.data.zero_() self.post.bias.data.zero_() def forward(self, x, x_mask, g=None, reverse=False): x0, x1 = torch.split(x, [self.half_channels] * 2, 1) h = self.pre(x0) * x_mask h = h + self.pre_transformer(h * x_mask, x_mask) # vits2 residual connection h = self.enc(h, x_mask, g=g) stats = self.post(h) * x_mask if not self.mean_only: m, logs = torch.split(stats, [self.half_channels] * 2, 1) else: m = stats logs = torch.zeros_like(m) if not reverse: x1 = m + x1 * torch.exp(logs) * x_mask x = torch.cat([x0, x1], 1) logdet = torch.sum(logs, [1, 2]) return x, logdet else: x1 = (x1 - m) * torch.exp(-logs) * x_mask x = torch.cat([x0, x1], 1) return x class ResidualCouplingTransformersLayer(nn.Module): # vits2 def __init__( self, channels, hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=0, gin_channels=0, mean_only=False, ): assert channels % 2 == 0, "channels should be divisible by 2" super().__init__() self.channels = channels self.hidden_channels = hidden_channels self.kernel_size = kernel_size self.dilation_rate = dilation_rate self.n_layers = n_layers self.half_channels = channels // 2 self.mean_only = mean_only # vits2 self.pre_transformer = attentions.Encoder( self.half_channels, self.half_channels, n_heads=2, n_layers=2, kernel_size=3, p_dropout=0.1, window_size=None, ) self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) self.enc = modules.WN( hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels, ) # vits2 self.post_transformer = attentions.Encoder( self.hidden_channels, self.hidden_channels, n_heads=2, n_layers=2, kernel_size=3, p_dropout=0.1, window_size=None, ) self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) self.post.weight.data.zero_() self.post.bias.data.zero_() def forward(self, x, x_mask, g=None, reverse=False): x0, x1 = torch.split(x, [self.half_channels] * 2, 1) x0_ = self.pre_transformer(x0 * x_mask, x_mask) # vits2 x0_ = x0_ + x0 # vits2 residual connection h = self.pre(x0_) * x_mask # changed from x0 to x0_ to retain x0 for the flow h = self.enc(h, x_mask, g=g) # vits2 - (experimental;uncomment the following 2 line to use) # h_ = self.post_transformer(h, x_mask) # h = h + h_ #vits2 residual connection stats = self.post(h) * x_mask if not self.mean_only: m, logs = torch.split(stats, [self.half_channels] * 2, 1) else: m = stats logs = torch.zeros_like(m) if not reverse: x1 = m + x1 * torch.exp(logs) * x_mask x = torch.cat([x0, x1], 1) logdet = torch.sum(logs, [1, 2]) return x, logdet else: x1 = (x1 - m) * torch.exp(-logs) * x_mask x = torch.cat([x0, x1], 1) return x class FFTransformerCouplingLayer(nn.Module): # vits2 def __init__( self, channels, hidden_channels, kernel_size, n_layers, n_heads, p_dropout=0, filter_channels=768, mean_only=False, gin_channels=0, ): assert channels % 2 == 0, "channels should be divisible by 2" super().__init__() self.channels = 
channels self.hidden_channels = hidden_channels self.kernel_size = kernel_size self.n_layers = n_layers self.half_channels = channels // 2 self.mean_only = mean_only self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) self.enc = attentions.FFT( hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout, isflow=True, gin_channels=gin_channels, ) self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) self.post.weight.data.zero_() self.post.bias.data.zero_() def forward(self, x, x_mask, g=None, reverse=False): x0, x1 = torch.split(x, [self.half_channels] * 2, 1) h = self.pre(x0) * x_mask h_ = self.enc(h, x_mask, g=g) h = h_ + h stats = self.post(h) * x_mask if not self.mean_only: m, logs = torch.split(stats, [self.half_channels] * 2, 1) else: m = stats logs = torch.zeros_like(m) if not reverse: x1 = m + x1 * torch.exp(logs) * x_mask x = torch.cat([x0, x1], 1) logdet = torch.sum(logs, [1, 2]) return x, logdet else: x1 = (x1 - m) * torch.exp(-logs) * x_mask x = torch.cat([x0, x1], 1) return x class MonoTransformerFlowLayer(nn.Module): # vits2 def __init__( self, channels, hidden_channels, mean_only=False, residual_connection=False, # according to VITS-2 paper fig 1B set residual_connection=True ): assert channels % 2 == 0, "channels should be divisible by 2" super().__init__() self.channels = channels self.hidden_channels = hidden_channels self.half_channels = channels // 2 self.mean_only = mean_only self.residual_connection = residual_connection # vits2 self.pre_transformer = attentions.Encoder( self.half_channels, self.half_channels, n_heads=2, n_layers=2, kernel_size=3, p_dropout=0.1, window_size=None, ) self.post = nn.Conv1d( self.half_channels, self.half_channels * (2 - mean_only), 1 ) self.post.weight.data.zero_() self.post.bias.data.zero_() def forward(self, x, x_mask, g=None, reverse=False): if self.residual_connection: if not reverse: x0, x1 = torch.split(x, [self.half_channels] * 2, 1) x0_ = self.pre_transformer(x0, x_mask) # vits2 stats = self.post(x0_) * x_mask if not self.mean_only: m, logs = torch.split(stats, [self.half_channels] * 2, 1) else: m = stats logs = torch.zeros_like(m) x1 = m + x1 * torch.exp(logs) * x_mask x_ = torch.cat([x0, x1], 1) x = x + x_ logdet = torch.sum(torch.log(torch.exp(logs) + 1), [1, 2]) logdet = logdet + torch.log(torch.tensor(2)) * ( x0.shape[1] * x0.shape[2] ) return x, logdet else: x0, x1 = torch.split(x, [self.half_channels] * 2, 1) x0 = x0 / 2 x0_ = x0 * x_mask x0_ = self.pre_transformer(x0, x_mask) # vits2 stats = self.post(x0_) * x_mask if not self.mean_only: m, logs = torch.split(stats, [self.half_channels] * 2, 1) else: m = stats logs = torch.zeros_like(m) x1_ = ((x1 - m) / (1 + torch.exp(-logs))) * x_mask x = torch.cat([x0, x1_], 1) return x else: x0, x1 = torch.split(x, [self.half_channels] * 2, 1) x0_ = self.pre_transformer(x0 * x_mask, x_mask) # vits2 h = x0_ + x0 # vits2 stats = self.post(h) * x_mask if not self.mean_only: m, logs = torch.split(stats, [self.half_channels] * 2, 1) else: m = stats logs = torch.zeros_like(m) if not reverse: x1 = m + x1 * torch.exp(logs) * x_mask x = torch.cat([x0, x1], 1) logdet = torch.sum(logs, [1, 2]) return x, logdet else: x1 = (x1 - m) * torch.exp(-logs) * x_mask x = torch.cat([x0, x1], 1) return x class ResidualCouplingTransformersBlock(nn.Module): # vits2 def __init__( self, channels, hidden_channels, kernel_size, dilation_rate, n_layers, n_flows=4, gin_channels=0, use_transformer_flows=False, transformer_flow_type="pre_conv", ): super().__init__() 
self.channels = channels self.hidden_channels = hidden_channels self.kernel_size = kernel_size self.dilation_rate = dilation_rate self.n_layers = n_layers self.n_flows = n_flows self.gin_channels = gin_channels self.flows = nn.ModuleList() if use_transformer_flows: if transformer_flow_type == "pre_conv": for i in range(n_flows): self.flows.append( ResidualCouplingTransformersLayer( channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True, ) ) self.flows.append(modules.Flip()) elif transformer_flow_type == "pre_conv2": for i in range(n_flows): self.flows.append( ResidualCouplingTransformersLayer2( channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True, ) ) self.flows.append(modules.Flip()) elif transformer_flow_type == "fft": for i in range(n_flows): self.flows.append( FFTransformerCouplingLayer( channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True, ) ) self.flows.append(modules.Flip()) elif transformer_flow_type == "mono_layer_inter_residual": for i in range(n_flows): self.flows.append( modules.ResidualCouplingLayer( channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True, ) ) self.flows.append(modules.Flip()) self.flows.append( MonoTransformerFlowLayer( channels, hidden_channels, mean_only=True ) ) elif transformer_flow_type == "mono_layer_post_residual": for i in range(n_flows): self.flows.append( modules.ResidualCouplingLayer( channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True, ) ) self.flows.append(modules.Flip()) self.flows.append( MonoTransformerFlowLayer( channels, hidden_channels, mean_only=True, residual_connection=True, ) ) else: for i in range(n_flows): self.flows.append( modules.ResidualCouplingLayer( channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True, ) ) self.flows.append(modules.Flip()) def forward(self, x, x_mask, g=None, reverse=False): if not reverse: for flow in self.flows: x, _ = flow(x, x_mask, g=g, reverse=reverse) else: for flow in reversed(self.flows): x = flow(x, x_mask, g=g, reverse=reverse) return x class ResidualCouplingBlock(nn.Module): def __init__( self, channels, hidden_channels, kernel_size, dilation_rate, n_layers, n_flows=4, gin_channels=0, ): super().__init__() self.channels = channels self.hidden_channels = hidden_channels self.kernel_size = kernel_size self.dilation_rate = dilation_rate self.n_layers = n_layers self.n_flows = n_flows self.gin_channels = gin_channels self.flows = nn.ModuleList() for i in range(n_flows): self.flows.append( modules.ResidualCouplingLayer( channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True, ) ) self.flows.append(modules.Flip()) def forward(self, x, x_mask, g=None, reverse=False): if not reverse: for flow in self.flows: x, _ = flow(x, x_mask, g=g, reverse=reverse) else: for flow in reversed(self.flows): x = flow(x, x_mask, g=g, reverse=reverse) return x class PosteriorEncoder(nn.Module): def __init__( self, in_channels, out_channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, ): super().__init__() self.in_channels = in_channels self.out_channels = out_channels self.hidden_channels = hidden_channels self.kernel_size = kernel_size self.dilation_rate = dilation_rate self.n_layers = n_layers self.gin_channels = gin_channels self.pre 
= nn.Conv1d(in_channels, hidden_channels, 1) self.enc = modules.WN( hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, ) self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) def forward(self, x, x_lengths, g=None): x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to( x.dtype ) x = self.pre(x) * x_mask x = self.enc(x, x_mask, g=g) stats = self.proj(x) * x_mask m, logs = torch.split(stats, self.out_channels, dim=1) z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask return z, m, logs, x_mask class Generator(torch.nn.Module): def __init__( self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=0, ): super(Generator, self).__init__() self.num_kernels = len(resblock_kernel_sizes) self.num_upsamples = len(upsample_rates) self.conv_pre = Conv1d( initial_channel, upsample_initial_channel, 7, 1, padding=3 ) resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 self.ups = nn.ModuleList() for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): self.ups.append( weight_norm( ConvTranspose1d( upsample_initial_channel // (2**i), upsample_initial_channel // (2 ** (i + 1)), k, u, padding=(k - u) // 2, ) ) ) self.resblocks = nn.ModuleList() for i in range(len(self.ups)): ch = upsample_initial_channel // (2 ** (i + 1)) for j, (k, d) in enumerate( zip(resblock_kernel_sizes, resblock_dilation_sizes) ): self.resblocks.append(resblock(ch, k, d)) self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
self.ups.apply(init_weights)
1
2023-12-16 05:34:02+00:00
4k
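All of the flow variants in this record (ResidualCouplingLayer, ResidualCouplingTransformersLayer, FFTransformerCouplingLayer, and MonoTransformerFlowLayer on its non-residual path) end in the same elementwise affine coupling step: one half of the channels predicts a shift m and log-scale logs for the other half, and the Jacobian log-determinant is simply the sum of logs. A minimal sketch of that step with hypothetical shapes (the repository additionally applies a sequence mask, omitted here):

import torch

def affine_coupling(x1: torch.Tensor, m: torch.Tensor, logs: torch.Tensor, reverse: bool = False):
    # x1, m, logs: (B, C/2, T); forward returns (y1, logdet), inverse returns x1
    if not reverse:
        y1 = m + x1 * torch.exp(logs)            # y = m + x * s
        logdet = torch.sum(logs, dim=[1, 2])     # log|det J| of the elementwise map
        return y1, logdet
    return (x1 - m) * torch.exp(-logs)           # x = (y - m) / s

x1 = torch.randn(2, 96, 40)
m, logs = torch.zeros_like(x1), 0.1 * torch.randn_like(x1)
y1, logdet = affine_coupling(x1, m, logs)
assert torch.allclose(x1, affine_coupling(y1, m, logs, reverse=True), atol=1e-6)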
Ruiyuan-Zhang/CCS
multi_part_assembly/models/modules/encoder/point_transformer/model.py
[ { "identifier": "PointNetFeaturePropagation", "path": "multi_part_assembly/models/modules/encoder/point_transformer/pointnet_util.py", "snippet": "class PointNetFeaturePropagation(nn.Module):\n def __init__(self, in_channel, mlp):\n super(PointNetFeaturePropagation, self).__init__()\n self.mlp_convs = nn.ModuleList()\n self.mlp_bns = nn.ModuleList()\n last_channel = in_channel\n for out_channel in mlp:\n self.mlp_convs.append(nn.Conv1d(last_channel, out_channel, 1))\n self.mlp_bns.append(nn.BatchNorm1d(out_channel))\n last_channel = out_channel\n\n def forward(self, xyz1, xyz2, points1, points2):\n \"\"\"\n Input:\n xyz1: input points position data, [B, C, N]\n xyz2: sampled input points position data, [B, C, S]\n points1: input points data, [B, D, N]\n points2: input points data, [B, D, S]\n Return:\n new_points: upsampled points data, [B, D', N]\n \"\"\"\n xyz1 = xyz1.permute(0, 2, 1)\n xyz2 = xyz2.permute(0, 2, 1)\n\n points2 = points2.permute(0, 2, 1)\n B, N, C = xyz1.shape\n _, S, _ = xyz2.shape\n\n if S == 1:\n interpolated_points = points2.repeat(1, N, 1)\n else:\n dists = square_distance(xyz1, xyz2)\n dists, idx = dists.sort(dim=-1)\n dists, idx = dists[:, :, :3], idx[:, :, :3] # [B, N, 3]\n\n dist_recip = 1.0 / (dists + 1e-8)\n norm = torch.sum(dist_recip, dim=2, keepdim=True)\n weight = dist_recip / norm\n interpolated_points = torch.sum(index_points(points2, idx) * weight.view(B, N, 3, 1), dim=2)\n\n if points1 is not None:\n points1 = points1.permute(0, 2, 1)\n new_points = torch.cat([points1, interpolated_points], dim=-1)\n else:\n new_points = interpolated_points\n\n new_points = new_points.permute(0, 2, 1)\n for i, conv in enumerate(self.mlp_convs):\n bn = self.mlp_bns[i]\n new_points = F.relu(bn(conv(new_points)))\n return new_points" }, { "identifier": "PointNetSetAbstraction", "path": "multi_part_assembly/models/modules/encoder/point_transformer/pointnet_util.py", "snippet": "class PointNetSetAbstraction(nn.Module):\n def __init__(self, npoint, radius, nsample, in_channel, mlp, group_all, knn=False):\n super(PointNetSetAbstraction, self).__init__()\n self.npoint = npoint\n self.radius = radius\n self.nsample = nsample\n self.knn = knn\n self.mlp_convs = nn.ModuleList()\n self.mlp_bns = nn.ModuleList()\n last_channel = in_channel\n for out_channel in mlp:\n self.mlp_convs.append(nn.Conv2d(last_channel, out_channel, 1))\n self.mlp_bns.append(nn.BatchNorm2d(out_channel))\n last_channel = out_channel\n self.group_all = group_all\n\n def forward(self, xyz, points):\n \"\"\"\n Input:\n xyz: input points position data, [B, N, C]\n points: input points data, [B, N, C]\n Return:\n new_xyz: sampled points position data, [B, S, C]\n new_points_concat: sample points feature data, [B, S, D']\n \"\"\"\n if self.group_all:\n new_xyz, new_points = sample_and_group_all(xyz, points)\n else:\n new_xyz, new_points = sample_and_group(self.npoint, self.radius, self.nsample, xyz, points, knn=self.knn)\n # new_xyz: sampled points position data, [B, npoint, C]\n # new_points: sampled points data, [B, npoint, nsample, C+D]\n new_points = new_points.permute(0, 3, 2, 1) # [B, C+D, nsample,npoint]\n for i, conv in enumerate(self.mlp_convs):\n bn = self.mlp_bns[i]\n new_points = F.relu(bn(conv(new_points)))\n\n new_points = torch.max(new_points, 2)[0].transpose(1, 2)\n return new_xyz, new_points" }, { "identifier": "TransformerBlock", "path": "multi_part_assembly/models/modules/encoder/point_transformer/transformer.py", "snippet": "class TransformerBlock(nn.Module):\n def __init__(self, d_points, 
d_model, k) -> None:\n super().__init__()\n self.fc1 = nn.Linear(d_points, d_model)\n self.fc2 = nn.Linear(d_model, d_points)\n self.fc_delta = nn.Sequential(\n nn.Linear(3, d_model),\n nn.ReLU(),\n nn.Linear(d_model, d_model)\n )\n self.fc_gamma = nn.Sequential(\n nn.Linear(d_model, d_model),\n nn.ReLU(),\n nn.Linear(d_model, d_model)\n )\n self.w_qs = nn.Linear(d_model, d_model, bias=False)\n self.w_ks = nn.Linear(d_model, d_model, bias=False)\n self.w_vs = nn.Linear(d_model, d_model, bias=False)\n self.k = k\n \n # xyz: b x n x 3, features: b x n x f\n def forward(self, xyz, features):\n dists = square_distance(xyz, xyz)\n knn_idx = dists.argsort()[:, :, :self.k] # b x n x k\n knn_xyz = index_points(xyz, knn_idx)\n \n pre = features\n x = self.fc1(features)\n q, k, v = self.w_qs(x), index_points(self.w_ks(x), knn_idx), index_points(self.w_vs(x), knn_idx)\n\n pos_enc = self.fc_delta(xyz[:, :, None] - knn_xyz) # b x n x k x f\n \n attn = self.fc_gamma(q[:, :, None] - k + pos_enc)\n attn = F.softmax(attn / np.sqrt(k.size(-1)), dim=-2) # b x n x k x f\n \n res = torch.einsum('bmnf,bmnf->bmf', attn, v + pos_enc)\n res = self.fc2(res) + pre\n return res, attn" } ]
import torch import torch.nn as nn from multi_part_assembly.models.modules.encoder.point_transformer.pointnet_util import PointNetFeaturePropagation, PointNetSetAbstraction from .transformer import TransformerBlock
2,117
class TransitionDown(nn.Module): def __init__(self, k, nneighbor, channels): super().__init__() # The objective of PointNetSetAbstraction is to downsample and aggregate the input point cloud dataset, generating more advanced feature representations. self.sa = PointNetSetAbstraction(k, 0, nneighbor, channels[0], channels[1:], group_all=False, knn=True) def forward(self, xyz, points): return self.sa(xyz, points) class TransitionUp(nn.Module): def __init__(self, dim1, dim2, dim_out): class SwapAxes(nn.Module): def __init__(self): super().__init__() def forward(self, x): return x.transpose(1, 2) super().__init__() self.fc1 = nn.Sequential( nn.Linear(dim1, dim_out), SwapAxes(), nn.BatchNorm1d(dim_out), # TODO SwapAxes(), nn.ReLU(), ) self.fc2 = nn.Sequential( nn.Linear(dim2, dim_out), SwapAxes(), nn.BatchNorm1d(dim_out), # TODO SwapAxes(), nn.ReLU(), ) self.fp = PointNetFeaturePropagation(-1, []) def forward(self, xyz1, points1, xyz2, points2): feats1 = self.fc1(points1) feats2 = self.fc2(points2) feats1 = self.fp(xyz2.transpose(1, 2), xyz1.transpose(1, 2), None, feats1.transpose(1, 2)).transpose(1, 2) return feats1 + feats2 class Backbone(nn.Module): def __init__(self, cfg): super().__init__() npoints, nblocks, nneighbor, n_c, d_points = cfg.num_point, cfg.model.nblocks, cfg.model.nneighbor, cfg.num_class, cfg.input_dim self.fc1 = nn.Sequential( nn.Linear(d_points, 32), nn.ReLU(), nn.Linear(32, 32) )
class TransitionDown(nn.Module): def __init__(self, k, nneighbor, channels): super().__init__() # The objective of PointNetSetAbstraction is to downsample and aggregate the input point cloud dataset, generating more advanced feature representations. self.sa = PointNetSetAbstraction(k, 0, nneighbor, channels[0], channels[1:], group_all=False, knn=True) def forward(self, xyz, points): return self.sa(xyz, points) class TransitionUp(nn.Module): def __init__(self, dim1, dim2, dim_out): class SwapAxes(nn.Module): def __init__(self): super().__init__() def forward(self, x): return x.transpose(1, 2) super().__init__() self.fc1 = nn.Sequential( nn.Linear(dim1, dim_out), SwapAxes(), nn.BatchNorm1d(dim_out), # TODO SwapAxes(), nn.ReLU(), ) self.fc2 = nn.Sequential( nn.Linear(dim2, dim_out), SwapAxes(), nn.BatchNorm1d(dim_out), # TODO SwapAxes(), nn.ReLU(), ) self.fp = PointNetFeaturePropagation(-1, []) def forward(self, xyz1, points1, xyz2, points2): feats1 = self.fc1(points1) feats2 = self.fc2(points2) feats1 = self.fp(xyz2.transpose(1, 2), xyz1.transpose(1, 2), None, feats1.transpose(1, 2)).transpose(1, 2) return feats1 + feats2 class Backbone(nn.Module): def __init__(self, cfg): super().__init__() npoints, nblocks, nneighbor, n_c, d_points = cfg.num_point, cfg.model.nblocks, cfg.model.nneighbor, cfg.num_class, cfg.input_dim self.fc1 = nn.Sequential( nn.Linear(d_points, 32), nn.ReLU(), nn.Linear(32, 32) )
self.transformer1 = TransformerBlock(32, cfg.model.transformer_dim, nneighbor)
2
2023-12-15 13:13:01+00:00
4k
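The TransformerBlock in this record's context builds its vector attention on two helpers that are referenced but not shown, square_distance and index_points from pointnet_util.py. A plausible reimplementation of the k-nearest-neighbour gather they perform (the exact signatures in the repository may differ) is sketched below:

import torch

def square_distance(src: torch.Tensor, dst: torch.Tensor) -> torch.Tensor:
    # src: (B, N, 3), dst: (B, M, 3) -> pairwise squared distances (B, N, M)
    return torch.cdist(src, dst) ** 2

def index_points(points: torch.Tensor, idx: torch.Tensor) -> torch.Tensor:
    # points: (B, M, C), idx: (B, N, k) -> gathered neighbour features (B, N, k, C)
    batch = torch.arange(points.shape[0], device=points.device).view(-1, 1, 1)
    return points[batch, idx]

# k nearest neighbours per query point, as consumed by TransformerBlock above
xyz = torch.rand(2, 128, 3)
knn_idx = square_distance(xyz, xyz).argsort(dim=-1)[:, :, :16]   # (B, N, k)
rel_pos = xyz[:, :, None, :] - index_points(xyz, knn_idx)        # offsets fed to fc_delta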
uc-vision/taichi-splatting
taichi_splatting/renderer.py
[ { "identifier": "check_packed3d", "path": "taichi_splatting/data_types.py", "snippet": "def check_packed3d(packed_gaussians: torch.Tensor):\n assert len(packed_gaussians.shape) == 2 and packed_gaussians.shape[1] == 11, f\"Expected shape (N, 11), got {packed_gaussians.shape}\" " }, { "identifier": "compute_depth_variance", "path": "taichi_splatting/misc/depth_variance.py", "snippet": "@beartype\ndef compute_depth_variance(features:torch.Tensor, alpha:torch.Tensor):\n \"\"\" \n Compute depth and depth variance from image features.\n \n Parameters:\n features: torch.Tensor (N, 3 + C) - image features\n alpha: torch.Tensor (N, 1) - alpha values\n \"\"\"\n\n _module_function = depth_variance_func(features.dtype)\n return _module_function.apply(features.contiguous(), alpha.contiguous())" }, { "identifier": "encode_depth", "path": "taichi_splatting/misc/encode_depth.py", "snippet": "def encode_depth(depths:torch.Tensor, \n depth_range:Tuple[float, float], \n use_depth16:bool=False ) -> torch.Tensor:\n \n if use_depth16:\n return encode_depth16(depths, depth_range)\n else:\n return depths[:, 0].contiguous().view(torch.int32)" }, { "identifier": "RasterConfig", "path": "taichi_splatting/rasterizer/forward.py", "snippet": "def forward_kernel(config: RasterConfig, feature_size: int):\n def _forward_kernel(\n points: ti.types.ndarray(Gaussian2D.vec, ndim=1), # (M, 6)\n point_features: ti.types.ndarray(feature_vec, ndim=1), # (M, F)\n \n # (TH, TW, 2) the start/end (0..K] index of ranges in the overlap_to_point array\n tile_overlap_ranges: ti.types.ndarray(ti.math.ivec2, ndim=1),\n # (K) ranges of points mapping to indexes into points list\n overlap_to_point: ti.types.ndarray(ti.i32, ndim=1),\n \n # outputs\n image_feature: ti.types.ndarray(feature_vec, ndim=2), # (H, W, F)\n # needed for backward\n image_alpha: ti.types.ndarray(ti.f32, ndim=2), # H, W\n image_last_valid: ti.types.ndarray(ti.i32, ndim=2), # H, W\n ):" }, { "identifier": "rasterize", "path": "taichi_splatting/rasterizer/function.py", "snippet": "def rasterize(gaussians2d:torch.Tensor, encoded_depths:torch.Tensor, \n features:torch.Tensor, image_size:Tuple[Integral, Integral],\n config:RasterConfig):\n \n \n \"\"\"\n Rasterize an image given 2d gaussians, features. 
\n\n Parameters:\n gaussians2d: (N, 6) packed gaussians, N is the number of gaussians\n encoded_depths: (N ) encoded depths, N is the number of gaussians\n features: (N, F) features, F is the number of features\n\n image_size: (2, ) tuple of ints, (width, height)\n config: Config - configuration parameters for rasterization\n\n Returns:\n image: (H, W, F) torch tensor, where H, W are the image height and width, F is the number of features\n alpha: (H, W) torch tensor, where H, W are the image height and width\n \"\"\"\n\n # render with padding to tile_size, later crop back to original size\n overlap_to_point, tile_overlap_ranges = map_to_tiles(gaussians2d, encoded_depths, \n image_size=image_size, config=config)\n \n image, alpha = rasterize_with_tiles(gaussians2d, features, \n tile_overlap_ranges=tile_overlap_ranges.view(-1, 2), overlap_to_point=overlap_to_point,\n image_size=image_size, config=config)\n\n return image, alpha " }, { "identifier": "evaluate_sh_at", "path": "taichi_splatting/spherical_harmonics.py", "snippet": "@beartype\ndef evaluate_sh_at(params:torch.Tensor, # N, K (degree + 1)^2, (usually K=3, for RGB)\n gaussians:torch.Tensor, # N, 11 or N, 3 (packed gaussian or xyz)\n camera_pos:torch.Tensor # 3\n ) -> torch.Tensor: # N, K\n degree = check_sh_degree(params)\n\n _module_function = sh_function(degree=degree, \n dimension=params.shape[1], \n input_size=gaussians.shape[1],\n dtype=params.dtype)\n return _module_function.apply(params.contiguous(), gaussians.contiguous(), camera_pos.contiguous())" }, { "identifier": "frustum_culling", "path": "taichi_splatting/perspective/culling.py", "snippet": "@beartype\ndef frustum_culling(gaussians: torch.Tensor, camera_params: CameraParams, margin_pixels: int = 48):\n mask = torch.empty(gaussians.shape[0], dtype=torch.bool, device=gaussians.device)\n\n frustum_culling_kernel(\n gaussians=gaussians.contiguous(),\n T_image_world=camera_params.T_image_world.unsqueeze(0),\n output_mask=mask,\n\n near_plane=camera_params.near_plane,\n far_plane=camera_params.far_plane,\n image_size=ti.math.ivec2(camera_params.image_size),\n \n margin_pixels=margin_pixels\n )\n\n return mask" }, { "identifier": "project_to_image", "path": "taichi_splatting/perspective/projection.py", "snippet": "@beartype\ndef project_to_image(gaussians:torch.Tensor, camera_params: CameraParams\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\" \n Project 3D gaussians to 2D gaussians in image space using perspective projection.\n Use EWA approximation for the projection of the gaussian covariance,\n as described in Zwicker, et al. 
\"EWA splatting.\" 2003.\n \n Parameters:\n gaussians: torch.Tensor (N, 11) - packed 3D gaussians\n camera_params: CameraParams\n\n Returns:\n points: torch.Tensor (N, 6) - packed 2D gaussians in image space\n depth_var: torch.Tensor (N, 3) - depth, depth variance and depth^2 of gaussians\n \"\"\"\n\n return apply(\n gaussians, \n camera_params.T_image_camera, \n camera_params.T_camera_world,\n )" }, { "identifier": "CameraParams", "path": "taichi_splatting/perspective/params.py", "snippet": "class CameraParams:\n T_image_camera: torch.Tensor # (3, 3) camera projection matrix\n T_camera_world : torch.Tensor # (4, 4) camera view matrix\n\n @property\n def device(self):\n return self.T_image_camera.device\n\n @property\n def T_image_world(self):\n T_image_camera = torch.eye(4, \n device=self.T_image_camera.device, dtype=self.T_image_camera.dtype)\n T_image_camera[0:3, 0:3] = self.T_image_camera\n\n return T_image_camera @ self.T_camera_world\n\n near_plane: float\n far_plane: float\n image_size: Tuple[Integral, Integral]\n\n def __repr__(self):\n w, h = self.image_size\n fx, fy = self.T_image_camera[0, 0], self.T_image_camera[1, 1]\n cx, cy = self.T_image_camera[0, 2], self.T_image_camera[1, 2]\n\n pos_str = \", \".join([f\"{x:.3f}\" for x in self.camera_position])\n return f\"CameraParams({w}x{h}, fx={fx:.4f}, fy={fy:.4f}, cx={cx:.4f}, cy={cy:.4f}, clipping={self.near_plane:.4f}-{self.far_plane:.4f}, position=({pos_str})\"\n \n\n @property\n def camera_position(self):\n T_world_camera = torch.inverse(self.T_camera_world)\n return T_world_camera[0:3, 3]\n\n def to(self, device=None, dtype=None):\n return CameraParams(\n T_image_camera=self.T_image_camera.to(device=device, dtype=dtype),\n T_camera_world=self.T_camera_world.to(device=device, dtype=dtype),\n near_plane=self.near_plane,\n far_plane=self.far_plane,\n image_size=self.image_size\n )\n\n def __post_init__(self):\n assert self.T_image_camera.shape == (3, 3), f\"Expected shape (3, 3), got {self.T_image_camera.shape}\"\n assert self.T_camera_world.shape == (4, 4), f\"Expected shape (4, 4), got {self.T_camera_world.shape}\"\n\n assert len(self.image_size) == 2\n assert self.near_plane > 0\n assert self.far_plane > self.near_plane" } ]
from dataclasses import dataclass from typing import Optional from taichi_splatting.data_types import check_packed3d from taichi_splatting.misc.depth_variance import compute_depth_variance from taichi_splatting.misc.encode_depth import encode_depth from taichi_splatting.rasterizer import rasterize, RasterConfig from taichi_splatting.spherical_harmonics import evaluate_sh_at from taichi_splatting.perspective import ( frustum_culling, project_to_image, CameraParams) import torch
2,295
@dataclass class Rendering: image: torch.Tensor # (H, W, C) depth: Optional[torch.Tensor] = None # (H, W) depth_var: Optional[torch.Tensor] = None # (H, W) def render_gaussians( packed_gaussians: torch.Tensor, features: torch.Tensor,
@dataclass class Rendering: image: torch.Tensor # (H, W, C) depth: Optional[torch.Tensor] = None # (H, W) depth_var: Optional[torch.Tensor] = None # (H, W) def render_gaussians( packed_gaussians: torch.Tensor, features: torch.Tensor,
camera_params: CameraParams,
8
2023-12-17 15:26:52+00:00
4k
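The CameraParams snippet in this record spells out how the 3x3 intrinsic matrix is lifted into a 4x4 transform and composed with the world-to-camera matrix (the T_image_world property). The sketch below reproduces that composition and applies it as a plain pinhole projection of points; it deliberately ignores the EWA covariance projection and frustum culling that project_to_image performs on full Gaussians, and the helper names are illustrative:

import torch

def compose_T_image_world(T_image_camera: torch.Tensor, T_camera_world: torch.Tensor) -> torch.Tensor:
    # lift the (3, 3) intrinsics into a (4, 4) transform, then compose with the extrinsics
    T = torch.eye(4, dtype=T_image_camera.dtype, device=T_image_camera.device)
    T[:3, :3] = T_image_camera
    return T @ T_camera_world

def project_points(points_world: torch.Tensor, T_image_world: torch.Tensor) -> torch.Tensor:
    # points_world: (N, 3) -> pixel coordinates (N, 2) by perspective division
    homo = torch.cat([points_world, torch.ones_like(points_world[:, :1])], dim=1)
    proj = homo @ T_image_world.T
    return proj[:, :2] / proj[:, 2:3]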
smoores-dev/storyteller
storyteller/synchronize/sync.py
[ { "identifier": "CACHE_DIR", "path": "storyteller/synchronize/files.py", "snippet": "CACHE_DIR = f\"{DATA_DIR}/cache\"" }, { "identifier": "TEXT_DIR", "path": "storyteller/synchronize/files.py", "snippet": "TEXT_DIR = f\"{DATA_DIR}/assets/text\"" }, { "identifier": "get_audio_chapter_filenames", "path": "storyteller/synchronize/audio.py", "snippet": "def get_audio_chapter_filenames(book_name: str):\n book_dir = get_audio_directory(book_name)\n dirname = get_chapters_path(book_dir)\n return sorted([str(Path(dirname, filename)) for filename in os.listdir(dirname)])" }, { "identifier": "get_transcriptions", "path": "storyteller/synchronize/audio.py", "snippet": "def get_transcriptions(book_name: str):\n audio_chapter_filenames = get_audio_chapter_filenames(book_name)\n transcription_filenames = [\n get_transcription_filename(chapter_filename)\n for chapter_filename in audio_chapter_filenames\n ]\n transcriptions: List[whisperx.types.AlignedTranscriptionResult] = []\n\n for transcription_filename in transcription_filenames:\n with open(transcription_filename, mode=\"r\") as transcription_file:\n transcription = json.load(transcription_file)\n transcriptions.append(transcription)\n\n return transcriptions" }, { "identifier": "SentenceRange", "path": "storyteller/synchronize/epub.py", "snippet": "class SentenceRange:\n id: int\n start: float\n end: float\n audiofile: str" }, { "identifier": "create_media_overlay", "path": "storyteller/synchronize/epub.py", "snippet": "def create_media_overlay(\n base_filename: str,\n chapter_filename: str,\n sentence_ranges: List[SentenceRange],\n):\n soup = BeautifulSoup(\n \"\"\"\n<smil xmlns=\"http://www.w3.org/ns/SMIL\" xmlns:epub=\"http://www.idpf.org/2007/ops\" version=\"3.0\">\n <body>\n </body>\n</smil>\n\"\"\",\n \"xml\",\n )\n\n seq = soup.new_tag(\"seq\", id=f\"{base_filename}_overlay\")\n seq[\"epub:textref\"] = f\"../{chapter_filename}\"\n seq[\"epub:type\"] = \"chapter\"\n soup.body.append(seq) # type: ignore\n for sentence_range in sentence_ranges:\n par = soup.new_tag(\"par\", id=f\"sentence{sentence_range.id}\")\n text = soup.new_tag(\n \"text\", src=f\"../{chapter_filename}#sentence{sentence_range.id}\"\n )\n audio = soup.new_tag(\n \"audio\",\n src=f\"../{get_epub_audio_filename(sentence_range.audiofile)}\",\n clipBegin=f\"{sentence_range.start}s\",\n clipEnd=f\"{sentence_range.end}s\",\n )\n par.append(text)\n par.append(\"\\n\")\n par.append(audio)\n par.append(\"\\n\")\n seq.append(par)\n seq.append(\"\\n\")\n return soup.encode(formatter=\"minimal\")" }, { "identifier": "get_chapter_sentences", "path": "storyteller/synchronize/epub.py", "snippet": "@cache\ndef get_chapter_sentences(chapter: epub.EpubHtml):\n soup = BeautifulSoup(chapter.get_body_content(), \"html.parser\")\n textblocks = soup.find_all(\n [\"p\", \"li\", \"blockquote\", \"h1\", \"h2\", \"h3\", \"h4\", \"h5\", \"h6\"]\n )\n\n return [\n re.sub(consecutivenewlines, \" \", sentence)\n for textblock in textblocks\n if isinstance(textblock, Tag)\n for sentence in sent_tokenize(textblock.get_text())\n ]" }, { "identifier": "get_chapter_text", "path": "storyteller/synchronize/epub.py", "snippet": "@cache\ndef get_chapter_text(chapter: epub.EpubHtml):\n soup = BeautifulSoup(chapter.get_body_content(), \"html.parser\")\n return re.sub(consecutivenewlines, \" \", soup.get_text())" }, { "identifier": "get_epub_audio_filename", "path": "storyteller/synchronize/epub.py", "snippet": "def get_epub_audio_filename(audio_filename: str) -> str:\n return 
f\"Audio/{os.path.basename(audio_filename)}\"" }, { "identifier": "get_sentences_with_offsets", "path": "storyteller/synchronize/epub.py", "snippet": "def get_sentences_with_offsets(text: str):\n sentences = sent_tokenize(text)\n sentences_with_offsets: list[str] = []\n last_sentence_end = 0\n for sentence in sentences:\n sentence_start = text.find(sentence, last_sentence_end)\n if sentence_start > last_sentence_end:\n sentences_with_offsets.append(text[last_sentence_end:sentence_start])\n\n sentences_with_offsets.append(sentence)\n last_sentence_end = sentence_start + len(sentence)\n\n if len(text) > last_sentence_end:\n sentences_with_offsets.append(text[last_sentence_end:])\n\n return sentences_with_offsets" }, { "identifier": "read_epub", "path": "storyteller/synchronize/epub.py", "snippet": "def read_epub(book_name: str):\n book = epub.read_epub(get_epub_filepath(book_name))\n for item in book.get_items_of_type(ITEM_DOCUMENT):\n if not item.is_chapter():\n continue\n soup = BeautifulSoup(item.content)\n\n head: Union[Tag, None] = soup.find(\"head\") # type: ignore\n if head is not None:\n links = head.find_all(\"link\")\n for link in links:\n item.add_link(\n href=link[\"href\"], rel=\" \".join(link[\"rel\"]), type=link[\"type\"]\n )\n return book" }, { "identifier": "get_chapters", "path": "storyteller/synchronize/epub.py", "snippet": "def get_chapters(book: epub.EpubBook) -> List[epub.EpubHtml]:\n spine_ids = [item[0] for item in book.spine]\n chapters = [cast(epub.EpubHtml, book.get_item_with_id(id)) for id in spine_ids]\n return chapters" }, { "identifier": "tag_sentences", "path": "storyteller/synchronize/epub.py", "snippet": "def tag_sentences(chapter: epub.EpubHtml):\n content = cast(str, chapter.get_content())\n soup = BeautifulSoup(content, \"html.parser\")\n body_soup = soup.find(\"body\")\n if body_soup is None:\n return\n if isinstance(body_soup, NavigableString):\n return\n textblocks = body_soup.find_all(\n [\"p\", \"li\", \"blockquote\", \"h1\", \"h2\", \"h3\", \"h4\", \"h5\", \"h6\"]\n )\n start_id = 0\n for textblock in textblocks:\n if not isinstance(textblock, Tag):\n continue\n\n spans = get_textblock_spans(start_id, textblock)\n new_content = serialize_spans(soup, spans)\n textblock.clear()\n textblock.extend(new_content)\n\n try:\n start_id = get_last_span_id(spans) + 1\n except StopIteration:\n pass\n\n chapter.set_content(soup.encode())" } ]
from dataclasses import dataclass from itertools import groupby from pathlib import Path from typing import Any, Callable, Dict, List, TypedDict, Union, cast from fuzzysearch import Match, find_near_matches from ebooklib import epub from mutagen.mp4 import MP4 from mutagen.mp3 import MP3 from .files import CACHE_DIR, TEXT_DIR from .audio import ( get_audio_chapter_filenames, get_transcriptions, ) from .epub import ( SentenceRange, create_media_overlay, get_chapter_sentences, get_chapter_text, get_epub_audio_filename, get_sentences_with_offsets, read_epub, get_chapters, tag_sentences, ) import json import math import os import sys import whisperx.types
2,619
OFFSET_SEARCH_WINDOW_SIZE = 5000 def find_best_offset( epub_sentences: list[str], transcription_text: str, last_match_offset: int ): i = 0 while i < len(transcription_text): start_sentence = 0 start_index = (last_match_offset + i) % len(transcription_text) end_index = (start_index + OFFSET_SEARCH_WINDOW_SIZE) % len(transcription_text) if end_index > start_index: transcription_text_slice = transcription_text[start_index:end_index] else: transcription_text_slice = ( transcription_text[start_index:] + transcription_text[:end_index] ) while start_sentence < len(epub_sentences): query_string = " ".join(epub_sentences[start_sentence : start_sentence + 6]) with NullIO(): matches = find_near_matches( query_string.lower(), transcription_text_slice.lower(), max_l_dist=math.floor(0.1 * len(query_string)), ) matches = cast(List[Match], matches) if len(matches) > 0: return (start_sentence, matches[0].start + start_index) start_sentence += 3 i += OFFSET_SEARCH_WINDOW_SIZE // 2 return (0, None) class StorytellerTranscriptionSegment(whisperx.types.SingleAlignedSegment): audiofile: str class StorytellerTranscription(TypedDict): segments: List[StorytellerTranscriptionSegment] word_segments: List[whisperx.types.SingleWordSegment] def concat_transcriptions( transcriptions: List[whisperx.types.AlignedTranscriptionResult], audiofiles: List[str], ): result = StorytellerTranscription(segments=[], word_segments=[]) for transcription, audiofile in zip(transcriptions, audiofiles): result["word_segments"].extend(transcription["word_segments"]) result["segments"].extend( [ StorytellerTranscriptionSegment(**segment, audiofile=audiofile) for segment in transcription["segments"] ] ) return result def get_transcription_text(transcription: StorytellerTranscription): return " ".join([segment["text"] for segment in transcription["segments"]]) def find_timestamps(match_start_index: int, transcription: StorytellerTranscription): s = 0 position = 0 while True: while position + len(transcription["segments"][s]["text"]) < match_start_index: # type: ignore position += len(transcription["segments"][s]["text"]) + 1 # type: ignore s += 1 w = 0 segment = transcription["segments"][s] while ( w < len(segment["words"]) and position + len(segment["words"][w]["word"]) <= match_start_index ): position += len(segment["words"][w]["word"]) + 1 w += 1 if w >= len(segment["words"]): s += 1 continue break start_word = segment["words"][w] # If a segment only has one word, the start and # end timestamps are only placed on the segment if "start" in start_word: return start_word["start"], segment["audiofile"] return segment["start"], segment["audiofile"] def get_window_index_from_offset(window: List[str], offset: int): index = 0 while offset >= len(window[index]): offset -= len(window[index]) index += 1 return index def get_sentence_ranges( start_sentence: int, transcription: StorytellerTranscription, sentences: List[str], chapter_offset: int, last_sentence_range: Union[SentenceRange, None], ): sentence_ranges: List[SentenceRange] = [] transcription_text = get_transcription_text(transcription).lower()[chapter_offset:]
class NullIO: def __enter__(self): self._original_stdout = sys.stdout self._original_stderr = sys.stderr sys.stdout = open(os.devnull, "w") sys.stderr = open(os.devnull, "w") def __exit__(self, exc_type, exc_val, exc_tb): sys.stdout.close() sys.stdout = self._original_stdout sys.stderr.close() sys.stderr = self._original_stderr OFFSET_SEARCH_WINDOW_SIZE = 5000 def find_best_offset( epub_sentences: list[str], transcription_text: str, last_match_offset: int ): i = 0 while i < len(transcription_text): start_sentence = 0 start_index = (last_match_offset + i) % len(transcription_text) end_index = (start_index + OFFSET_SEARCH_WINDOW_SIZE) % len(transcription_text) if end_index > start_index: transcription_text_slice = transcription_text[start_index:end_index] else: transcription_text_slice = ( transcription_text[start_index:] + transcription_text[:end_index] ) while start_sentence < len(epub_sentences): query_string = " ".join(epub_sentences[start_sentence : start_sentence + 6]) with NullIO(): matches = find_near_matches( query_string.lower(), transcription_text_slice.lower(), max_l_dist=math.floor(0.1 * len(query_string)), ) matches = cast(List[Match], matches) if len(matches) > 0: return (start_sentence, matches[0].start + start_index) start_sentence += 3 i += OFFSET_SEARCH_WINDOW_SIZE // 2 return (0, None) class StorytellerTranscriptionSegment(whisperx.types.SingleAlignedSegment): audiofile: str class StorytellerTranscription(TypedDict): segments: List[StorytellerTranscriptionSegment] word_segments: List[whisperx.types.SingleWordSegment] def concat_transcriptions( transcriptions: List[whisperx.types.AlignedTranscriptionResult], audiofiles: List[str], ): result = StorytellerTranscription(segments=[], word_segments=[]) for transcription, audiofile in zip(transcriptions, audiofiles): result["word_segments"].extend(transcription["word_segments"]) result["segments"].extend( [ StorytellerTranscriptionSegment(**segment, audiofile=audiofile) for segment in transcription["segments"] ] ) return result def get_transcription_text(transcription: StorytellerTranscription): return " ".join([segment["text"] for segment in transcription["segments"]]) def find_timestamps(match_start_index: int, transcription: StorytellerTranscription): s = 0 position = 0 while True: while position + len(transcription["segments"][s]["text"]) < match_start_index: # type: ignore position += len(transcription["segments"][s]["text"]) + 1 # type: ignore s += 1 w = 0 segment = transcription["segments"][s] while ( w < len(segment["words"]) and position + len(segment["words"][w]["word"]) <= match_start_index ): position += len(segment["words"][w]["word"]) + 1 w += 1 if w >= len(segment["words"]): s += 1 continue break start_word = segment["words"][w] # If a segment only has one word, the start and # end timestamps are only placed on the segment if "start" in start_word: return start_word["start"], segment["audiofile"] return segment["start"], segment["audiofile"] def get_window_index_from_offset(window: List[str], offset: int): index = 0 while offset >= len(window[index]): offset -= len(window[index]) index += 1 return index def get_sentence_ranges( start_sentence: int, transcription: StorytellerTranscription, sentences: List[str], chapter_offset: int, last_sentence_range: Union[SentenceRange, None], ): sentence_ranges: List[SentenceRange] = [] transcription_text = get_transcription_text(transcription).lower()[chapter_offset:]
transcription_sentences = get_sentences_with_offsets(transcription_text)
9
2023-12-15 16:07:12+00:00
4k
zyrant/SPGroup3D
tools/train.py
[ { "identifier": "__version__", "path": "mmdet3d/version.py", "snippet": "def parse_version_info(version_str):" }, { "identifier": "__version__", "path": "mmdet3d/version.py", "snippet": "def parse_version_info(version_str):" }, { "identifier": "init_random_seed", "path": "mmdet3d/apis/train.py", "snippet": "def init_random_seed(seed=None, device='cuda'):\n \"\"\"Initialize random seed.\n\n If the seed is not set, the seed will be automatically randomized,\n and then broadcast to all processes to prevent some potential bugs.\n Args:\n seed (int, optional): The seed. Default to None.\n device (str, optional): The device where the seed will be put on.\n Default to 'cuda'.\n Returns:\n int: Seed to be used.\n \"\"\"\n if seed is not None:\n return seed\n\n # Make sure all ranks share the same random seed to prevent\n # some potential bugs. Please refer to\n # https://github.com/open-mmlab/mmdetection/issues/6339\n rank, world_size = get_dist_info()\n seed = np.random.randint(2**31)\n if world_size == 1:\n return seed\n\n if rank == 0:\n random_num = torch.tensor(seed, dtype=torch.int32, device=device)\n else:\n random_num = torch.tensor(0, dtype=torch.int32, device=device)\n dist.broadcast(random_num, src=0)\n return random_num.item()" }, { "identifier": "train_model", "path": "mmdet3d/apis/train.py", "snippet": "def train_model(model,\n dataset,\n cfg,\n distributed=False,\n validate=False,\n timestamp=None,\n meta=None):\n \"\"\"A function wrapper for launching model training according to cfg.\n\n Because we need different eval_hook in runner. Should be deprecated in the\n future.\n \"\"\"\n if cfg.model.type in ['EncoderDecoder3D']:\n train_segmentor(\n model,\n dataset,\n cfg,\n distributed=distributed,\n validate=validate,\n timestamp=timestamp,\n meta=meta)\n else:\n train_detector(\n model,\n dataset,\n cfg,\n distributed=distributed,\n validate=validate,\n timestamp=timestamp,\n meta=meta)" }, { "identifier": "build_dataset", "path": "mmdet3d/datasets/builder.py", "snippet": "def build_dataset(cfg, default_args=None):\n from mmdet3d.datasets.dataset_wrappers import CBGSDataset\n from mmdet.datasets.dataset_wrappers import (ClassBalancedDataset,\n ConcatDataset, RepeatDataset)\n if isinstance(cfg, (list, tuple)):\n dataset = ConcatDataset([build_dataset(c, default_args) for c in cfg])\n elif cfg['type'] == 'ConcatDataset':\n dataset = ConcatDataset(\n [build_dataset(c, default_args) for c in cfg['datasets']],\n cfg.get('separate_eval', True))\n elif cfg['type'] == 'RepeatDataset':\n dataset = RepeatDataset(\n build_dataset(cfg['dataset'], default_args), cfg['times'])\n elif cfg['type'] == 'ClassBalancedDataset':\n dataset = ClassBalancedDataset(\n build_dataset(cfg['dataset'], default_args), cfg['oversample_thr'])\n elif cfg['type'] == 'CBGSDataset':\n dataset = CBGSDataset(build_dataset(cfg['dataset'], default_args))\n elif isinstance(cfg.get('ann_file'), (list, tuple)):\n dataset = _concat_dataset(cfg, default_args)\n elif cfg['type'] in DATASETS._module_dict.keys():\n dataset = build_from_cfg(cfg, DATASETS, default_args)\n else:\n dataset = build_from_cfg(cfg, MMDET_DATASETS, default_args)\n return dataset" }, { "identifier": "build_model", "path": "mmdet3d/models/builder.py", "snippet": "def build_model(cfg, train_cfg=None, test_cfg=None):\n \"\"\"A function warpper for building 3D detector or segmentor according to\n cfg.\n\n Should be deprecated in the future.\n \"\"\"\n if cfg.type in ['EncoderDecoder3D']:\n return build_segmentor(cfg, train_cfg=train_cfg, test_cfg=test_cfg)\n 
else:\n return build_detector(cfg, train_cfg=train_cfg, test_cfg=test_cfg)" }, { "identifier": "collect_env", "path": "mmdet3d/utils/collect_env.py", "snippet": "def collect_env():\n \"\"\"Collect the information of the running environments.\"\"\"\n env_info = collect_base_env()\n env_info['MMDetection'] = mmdet.__version__\n env_info['MMSegmentation'] = mmseg.__version__\n env_info['MMDetection3D'] = mmdet3d.__version__ + '+' + get_git_hash()[:7]\n env_info['spconv2.0'] = IS_SPCONV2_AVAILABLE\n return env_info" }, { "identifier": "get_root_logger", "path": "mmdet3d/utils/logger.py", "snippet": "def get_root_logger(log_file=None, log_level=logging.INFO, name='mmdet3d'):\n \"\"\"Get root logger and add a keyword filter to it.\n\n The logger will be initialized if it has not been initialized. By default a\n StreamHandler will be added. If `log_file` is specified, a FileHandler will\n also be added. The name of the root logger is the top-level package name,\n e.g., \"mmdet3d\".\n\n Args:\n log_file (str, optional): File path of log. Defaults to None.\n log_level (int, optional): The level of logger.\n Defaults to logging.INFO.\n name (str, optional): The name of the root logger, also used as a\n filter keyword. Defaults to 'mmdet3d'.\n\n Returns:\n :obj:`logging.Logger`: The obtained logger\n \"\"\"\n logger = get_logger(name=name, log_file=log_file, log_level=log_level)\n\n # add a logging filter\n logging_filter = logging.Filter(name)\n logging_filter.filter = lambda record: record.find(name) != -1\n\n return logger" } ]
import os import argparse import copy import time import warnings import mmcv import torch import torch.distributed as dist import importlib from os import path as osp from mmcv import Config, DictAction from mmcv.runner import get_dist_info, init_dist from mmdet import __version__ as mmdet_version from mmdet3d import __version__ as mmdet3d_version from mmdet3d.apis import init_random_seed, train_model from mmdet3d.datasets import build_dataset from mmdet3d.models import build_model from mmdet3d.utils import collect_env, get_root_logger from mmdet.apis import set_random_seed from mmseg import __version__ as mmseg_version from mmdet.utils import setup_multi_processes from mmdet3d.utils import setup_multi_processes
3,355
if 'LOCAL_RANK' not in os.environ: os.environ['LOCAL_RANK'] = str(args.local_rank) if args.options and args.cfg_options: raise ValueError( '--options and --cfg-options cannot be both specified, ' '--options is deprecated in favor of --cfg-options') if args.options: warnings.warn('--options is deprecated in favor of --cfg-options') args.cfg_options = args.options return args def main(): args = parse_args() cfg = Config.fromfile(args.config) if args.cfg_options is not None: cfg.merge_from_dict(args.cfg_options) # import modules from plguin/xx, registry will be updated if hasattr(cfg, "plugin"): if cfg.plugin: if hasattr(cfg, "plugin_dir"): plugin_dir = cfg.plugin_dir _module_dir = os.path.dirname(plugin_dir) _module_dir = _module_dir.split("/") _module_path = _module_dir[0] for m in _module_dir[1:]: _module_path = _module_path + "." + m print(_module_path) plg_lib = importlib.import_module(_module_path) else: # import dir is the dirpath for the config file _module_dir = os.path.dirname(args.config) _module_dir = _module_dir.split("/") _module_path = _module_dir[0] for m in _module_dir[1:]: _module_path = _module_path + "." + m print(_module_path) plg_lib = importlib.import_module(_module_path) # set multi-process settings setup_multi_processes(cfg) # set cudnn_benchmark if cfg.get('cudnn_benchmark', False): torch.backends.cudnn.benchmark = True # work_dir is determined in this priority: CLI > segment in file > filename if args.work_dir is not None: # update configs according to CLI args if args.work_dir is not None cfg.work_dir = args.work_dir elif cfg.get('work_dir', None) is None: # use config filename as default work_dir if cfg.work_dir is None cfg.work_dir = osp.join('./work_dirs', osp.splitext(osp.basename(args.config))[0]) if args.resume_from is not None: cfg.resume_from = args.resume_from if args.auto_resume: cfg.auto_resume = args.auto_resume warnings.warn('`--auto-resume` is only supported when mmdet' 'version >= 2.20.0 for 3D detection model or' 'mmsegmentation verision >= 0.21.0 for 3D' 'segmentation model') if args.gpus is not None: cfg.gpu_ids = range(1) warnings.warn('`--gpus` is deprecated because we only support ' 'single GPU mode in non-distributed training. ' 'Use `gpus=1` now.') if args.gpu_ids is not None: cfg.gpu_ids = args.gpu_ids[0:1] warnings.warn('`--gpu-ids` is deprecated, please use `--gpu-id`. ' 'Because we only support single GPU mode in ' 'non-distributed training. Use the first GPU ' 'in `gpu_ids` now.') if args.gpus is None and args.gpu_ids is None: cfg.gpu_ids = [args.gpu_id] if args.autoscale_lr: # apply the linear scaling rule (https://arxiv.org/abs/1706.02677) cfg.optimizer['lr'] = cfg.optimizer['lr'] * len(cfg.gpu_ids) / 8 # init distributed env first, since logger depends on the dist info. 
if args.launcher == 'none': distributed = False else: distributed = True init_dist(args.launcher, **cfg.dist_params) # re-set gpu_ids with distributed training mode _, world_size = get_dist_info() cfg.gpu_ids = range(world_size) # create work_dir mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir)) # dump config cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config))) # init the logger before other steps timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime()) log_file = osp.join(cfg.work_dir, f'{timestamp}.log') # specify logger name, if we still use 'mmdet', the output info will be # filtered and won't be saved in the log_file # TODO: ugly workaround to judge whether we are training det or seg model if cfg.model.type in ['EncoderDecoder3D']: logger_name = 'mmseg' else: logger_name = 'mmdet' logger = get_root_logger( log_file=log_file, log_level=cfg.log_level, name=logger_name) # init the meta dict to record some important information such as # environment info and seed, which will be logged meta = dict() # log env info
# Copyright (c) OpenMMLab. All rights reserved. from __future__ import division try: # If mmdet version > 2.20.0, setup_multi_processes would be imported and # used from mmdet instead of mmdet3d. except ImportError: def parse_args(): parser = argparse.ArgumentParser(description='Train a detector') parser.add_argument('config', help='train config file path') parser.add_argument('--work-dir', help='the dir to save logs and models') parser.add_argument( '--resume-from', help='the checkpoint file to resume from') parser.add_argument( '--auto-resume', action='store_true', help='resume from the latest checkpoint automatically') parser.add_argument( '--no-validate', action='store_true', help='whether not to evaluate the checkpoint during training') group_gpus = parser.add_mutually_exclusive_group() group_gpus.add_argument( '--gpus', type=int, help='(Deprecated, please use --gpu-id) number of gpus to use ' '(only applicable to non-distributed training)') group_gpus.add_argument( '--gpu-ids', type=int, nargs='+', help='(Deprecated, please use --gpu-id) ids of gpus to use ' '(only applicable to non-distributed training)') group_gpus.add_argument( '--gpu-id', type=int, default=0, help='number of gpus to use ' '(only applicable to non-distributed training)') parser.add_argument('--seed', type=int, default=0, help='random seed') parser.add_argument( '--diff-seed', action='store_true', help='Whether or not set different seeds for different ranks') parser.add_argument( '--deterministic', action='store_true', help='whether to set deterministic options for CUDNN backend.') parser.add_argument( '--options', nargs='+', action=DictAction, help='override some settings in the used config, the key-value pair ' 'in xxx=yyy format will be merged into config file (deprecate), ' 'change to --cfg-options instead.') parser.add_argument( '--cfg-options', nargs='+', action=DictAction, help='override some settings in the used config, the key-value pair ' 'in xxx=yyy format will be merged into config file. If the value to ' 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' 'Note that the quotation marks are necessary and that no white space ' 'is allowed.') parser.add_argument( '--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'], default='none', help='job launcher') parser.add_argument('--local_rank', type=int, default=0) parser.add_argument( '--autoscale-lr', action='store_true', help='automatically scale lr with the number of gpus') args = parser.parse_args() if 'LOCAL_RANK' not in os.environ: os.environ['LOCAL_RANK'] = str(args.local_rank) if args.options and args.cfg_options: raise ValueError( '--options and --cfg-options cannot be both specified, ' '--options is deprecated in favor of --cfg-options') if args.options: warnings.warn('--options is deprecated in favor of --cfg-options') args.cfg_options = args.options return args def main(): args = parse_args() cfg = Config.fromfile(args.config) if args.cfg_options is not None: cfg.merge_from_dict(args.cfg_options) # import modules from plguin/xx, registry will be updated if hasattr(cfg, "plugin"): if cfg.plugin: if hasattr(cfg, "plugin_dir"): plugin_dir = cfg.plugin_dir _module_dir = os.path.dirname(plugin_dir) _module_dir = _module_dir.split("/") _module_path = _module_dir[0] for m in _module_dir[1:]: _module_path = _module_path + "." 
+ m print(_module_path) plg_lib = importlib.import_module(_module_path) else: # import dir is the dirpath for the config file _module_dir = os.path.dirname(args.config) _module_dir = _module_dir.split("/") _module_path = _module_dir[0] for m in _module_dir[1:]: _module_path = _module_path + "." + m print(_module_path) plg_lib = importlib.import_module(_module_path) # set multi-process settings setup_multi_processes(cfg) # set cudnn_benchmark if cfg.get('cudnn_benchmark', False): torch.backends.cudnn.benchmark = True # work_dir is determined in this priority: CLI > segment in file > filename if args.work_dir is not None: # update configs according to CLI args if args.work_dir is not None cfg.work_dir = args.work_dir elif cfg.get('work_dir', None) is None: # use config filename as default work_dir if cfg.work_dir is None cfg.work_dir = osp.join('./work_dirs', osp.splitext(osp.basename(args.config))[0]) if args.resume_from is not None: cfg.resume_from = args.resume_from if args.auto_resume: cfg.auto_resume = args.auto_resume warnings.warn('`--auto-resume` is only supported when mmdet' 'version >= 2.20.0 for 3D detection model or' 'mmsegmentation verision >= 0.21.0 for 3D' 'segmentation model') if args.gpus is not None: cfg.gpu_ids = range(1) warnings.warn('`--gpus` is deprecated because we only support ' 'single GPU mode in non-distributed training. ' 'Use `gpus=1` now.') if args.gpu_ids is not None: cfg.gpu_ids = args.gpu_ids[0:1] warnings.warn('`--gpu-ids` is deprecated, please use `--gpu-id`. ' 'Because we only support single GPU mode in ' 'non-distributed training. Use the first GPU ' 'in `gpu_ids` now.') if args.gpus is None and args.gpu_ids is None: cfg.gpu_ids = [args.gpu_id] if args.autoscale_lr: # apply the linear scaling rule (https://arxiv.org/abs/1706.02677) cfg.optimizer['lr'] = cfg.optimizer['lr'] * len(cfg.gpu_ids) / 8 # init distributed env first, since logger depends on the dist info. if args.launcher == 'none': distributed = False else: distributed = True init_dist(args.launcher, **cfg.dist_params) # re-set gpu_ids with distributed training mode _, world_size = get_dist_info() cfg.gpu_ids = range(world_size) # create work_dir mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir)) # dump config cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config))) # init the logger before other steps timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime()) log_file = osp.join(cfg.work_dir, f'{timestamp}.log') # specify logger name, if we still use 'mmdet', the output info will be # filtered and won't be saved in the log_file # TODO: ugly workaround to judge whether we are training det or seg model if cfg.model.type in ['EncoderDecoder3D']: logger_name = 'mmseg' else: logger_name = 'mmdet' logger = get_root_logger( log_file=log_file, log_level=cfg.log_level, name=logger_name) # init the meta dict to record some important information such as # environment info and seed, which will be logged meta = dict() # log env info
env_info_dict = collect_env()
6
2023-12-21 12:50:35+00:00
4k
jdejaegh/irm-kmi-ha
tests/conftest.py
[ { "identifier": "IrmKmiApiError", "path": "custom_components/irm_kmi/api.py", "snippet": "class IrmKmiApiError(Exception):\n \"\"\"Exception to indicate a general API error.\"\"\"" }, { "identifier": "IrmKmiApiParametersError", "path": "custom_components/irm_kmi/api.py", "snippet": "class IrmKmiApiParametersError(IrmKmiApiError):\n \"\"\"Exception to indicate a parameter error.\"\"\"" }, { "identifier": "CONF_DARK_MODE", "path": "custom_components/irm_kmi/const.py", "snippet": "CONF_DARK_MODE: Final = \"dark_mode\"" }, { "identifier": "CONF_STYLE", "path": "custom_components/irm_kmi/const.py", "snippet": "CONF_STYLE: Final = \"style\"" }, { "identifier": "CONF_USE_DEPRECATED_FORECAST", "path": "custom_components/irm_kmi/const.py", "snippet": "CONF_USE_DEPRECATED_FORECAST: Final = 'use_deprecated_forecast_attribute'" }, { "identifier": "DOMAIN", "path": "custom_components/irm_kmi/const.py", "snippet": "DOMAIN: Final = 'irm_kmi'" }, { "identifier": "OPTION_DEPRECATED_FORECAST_NOT_USED", "path": "custom_components/irm_kmi/const.py", "snippet": "OPTION_DEPRECATED_FORECAST_NOT_USED: Final = 'do_not_use_deprecated_forecast'" }, { "identifier": "OPTION_STYLE_STD", "path": "custom_components/irm_kmi/const.py", "snippet": "OPTION_STYLE_STD: Final = 'standard_style'" } ]
import json import pytest from collections.abc import Generator from unittest.mock import MagicMock, patch from homeassistant.const import CONF_ZONE from pytest_homeassistant_custom_component.common import (MockConfigEntry, load_fixture) from custom_components.irm_kmi.api import (IrmKmiApiError, IrmKmiApiParametersError) from custom_components.irm_kmi.const import ( CONF_DARK_MODE, CONF_STYLE, CONF_USE_DEPRECATED_FORECAST, DOMAIN, OPTION_DEPRECATED_FORECAST_NOT_USED, OPTION_STYLE_STD)
1,613
@pytest.fixture def mock_config_entry() -> MockConfigEntry: """Return the default mocked config entry.""" return MockConfigEntry( title="Home", domain=DOMAIN, data={CONF_ZONE: "zone.home", CONF_STYLE: OPTION_STYLE_STD, CONF_DARK_MODE: True, CONF_USE_DEPRECATED_FORECAST: OPTION_DEPRECATED_FORECAST_NOT_USED}, unique_id="zone.home", ) @pytest.fixture def mock_setup_entry() -> Generator[None, None, None]: """Mock setting up a config entry.""" with patch( "custom_components.irm_kmi.async_setup_entry", return_value=True ): yield @pytest.fixture def mock_get_forecast_in_benelux(): """Mock a call to IrmKmiApiClient.get_forecasts_coord() so that it returns something valid and in the Benelux""" with patch("custom_components.irm_kmi.config_flow.IrmKmiApiClient.get_forecasts_coord", return_value={'cityName': 'Brussels'}): yield @pytest.fixture def mock_get_forecast_out_benelux(): """Mock a call to IrmKmiApiClient.get_forecasts_coord() so that it returns something outside Benelux""" with patch("custom_components.irm_kmi.config_flow.IrmKmiApiClient.get_forecasts_coord", return_value={'cityName': "Outside the Benelux (Brussels)"}): yield @pytest.fixture def mock_get_forecast_api_error(): """Mock a call to IrmKmiApiClient.get_forecasts_coord() so that it raises an error""" with patch("custom_components.irm_kmi.config_flow.IrmKmiApiClient.get_forecasts_coord", side_effect=IrmKmiApiError): return @pytest.fixture def mock_get_forecast_api_error_repair(): """Mock a call to IrmKmiApiClient.get_forecasts_coord() so that it raises an error""" with patch("custom_components.irm_kmi.repairs.IrmKmiApiClient.get_forecasts_coord", side_effect=IrmKmiApiError): return @pytest.fixture() def mock_irm_kmi_api(request: pytest.FixtureRequest) -> Generator[None, MagicMock, None]: """Return a mocked IrmKmi api client.""" fixture: str = "forecast.json" forecast = json.loads(load_fixture(fixture)) with patch( "custom_components.irm_kmi.coordinator.IrmKmiApiClient", autospec=True ) as irm_kmi_api_mock: irm_kmi = irm_kmi_api_mock.return_value irm_kmi.get_forecasts_coord.return_value = forecast yield irm_kmi @pytest.fixture() def mock_irm_kmi_api_coordinator_out_benelux(request: pytest.FixtureRequest) -> Generator[None, MagicMock, None]: """Return a mocked IrmKmi api client.""" fixture: str = "forecast_out_of_benelux.json" forecast = json.loads(load_fixture(fixture)) with patch( "custom_components.irm_kmi.coordinator.IrmKmiApiClient", autospec=True ) as irm_kmi_api_mock: irm_kmi = irm_kmi_api_mock.return_value irm_kmi.get_forecasts_coord.return_value = forecast yield irm_kmi @pytest.fixture() def mock_irm_kmi_api_repair_in_benelux(request: pytest.FixtureRequest) -> Generator[None, MagicMock, None]: """Return a mocked IrmKmi api client.""" fixture: str = "forecast.json" forecast = json.loads(load_fixture(fixture)) with patch( "custom_components.irm_kmi.repairs.IrmKmiApiClient", autospec=True ) as irm_kmi_api_mock: irm_kmi = irm_kmi_api_mock.return_value irm_kmi.get_forecasts_coord.return_value = forecast yield irm_kmi @pytest.fixture() def mock_irm_kmi_api_repair_out_of_benelux(request: pytest.FixtureRequest) -> Generator[None, MagicMock, None]: """Return a mocked IrmKmi api client.""" fixture: str = "forecast_out_of_benelux.json" forecast = json.loads(load_fixture(fixture)) with patch( "custom_components.irm_kmi.repairs.IrmKmiApiClient", autospec=True ) as irm_kmi_api_mock: irm_kmi = irm_kmi_api_mock.return_value irm_kmi.get_forecasts_coord.return_value = forecast yield irm_kmi @pytest.fixture() def
mock_exception_irm_kmi_api(request: pytest.FixtureRequest) -> Generator[None, MagicMock, None]: """Return a mocked IrmKmi api client.""" with patch( "custom_components.irm_kmi.coordinator.IrmKmiApiClient", autospec=True ) as irm_kmi_api_mock: irm_kmi = irm_kmi_api_mock.return_value
"""Fixtures for the IRM KMI integration tests.""" from __future__ import annotations def get_api_data(fixture: str) -> dict: return json.loads(load_fixture(fixture)) async def patched(url: str, params: dict | None = None) -> bytes: if "cdn.knmi.nl" in url: file_name = "tests/fixtures/clouds_nl.png" elif "app.meteo.be/services/appv4/?s=getIncaImage" in url: file_name = "tests/fixtures/clouds_be.png" elif "getLocalizationLayerBE" in url: file_name = "tests/fixtures/loc_layer_be_n.png" elif "getLocalizationLayerNL" in url: file_name = "tests/fixtures/loc_layer_nl.png" else: raise ValueError("Not a valid parameter for the mock") with open(file_name, "rb") as file: return file.read() @pytest.fixture(autouse=True) def auto_enable_custom_integrations(enable_custom_integrations): yield @pytest.fixture def mock_config_entry() -> MockConfigEntry: """Return the default mocked config entry.""" return MockConfigEntry( title="Home", domain=DOMAIN, data={CONF_ZONE: "zone.home", CONF_STYLE: OPTION_STYLE_STD, CONF_DARK_MODE: True, CONF_USE_DEPRECATED_FORECAST: OPTION_DEPRECATED_FORECAST_NOT_USED}, unique_id="zone.home", ) @pytest.fixture def mock_setup_entry() -> Generator[None, None, None]: """Mock setting up a config entry.""" with patch( "custom_components.irm_kmi.async_setup_entry", return_value=True ): yield @pytest.fixture def mock_get_forecast_in_benelux(): """Mock a call to IrmKmiApiClient.get_forecasts_coord() so that it returns something valid and in the Benelux""" with patch("custom_components.irm_kmi.config_flow.IrmKmiApiClient.get_forecasts_coord", return_value={'cityName': 'Brussels'}): yield @pytest.fixture def mock_get_forecast_out_benelux(): """Mock a call to IrmKmiApiClient.get_forecasts_coord() so that it returns something outside Benelux""" with patch("custom_components.irm_kmi.config_flow.IrmKmiApiClient.get_forecasts_coord", return_value={'cityName': "Outside the Benelux (Brussels)"}): yield @pytest.fixture def mock_get_forecast_api_error(): """Mock a call to IrmKmiApiClient.get_forecasts_coord() so that it raises an error""" with patch("custom_components.irm_kmi.config_flow.IrmKmiApiClient.get_forecasts_coord", side_effet=IrmKmiApiError): return @pytest.fixture def mock_get_forecast_api_error_repair(): """Mock a call to IrmKmiApiClient.get_forecasts_coord() so that it raises an error""" with patch("custom_components.irm_kmi.repairs.IrmKmiApiClient.get_forecasts_coord", side_effet=IrmKmiApiError): return @pytest.fixture() def mock_irm_kmi_api(request: pytest.FixtureRequest) -> Generator[None, MagicMock, None]: """Return a mocked IrmKmi api client.""" fixture: str = "forecast.json" forecast = json.loads(load_fixture(fixture)) with patch( "custom_components.irm_kmi.coordinator.IrmKmiApiClient", autospec=True ) as irm_kmi_api_mock: irm_kmi = irm_kmi_api_mock.return_value irm_kmi.get_forecasts_coord.return_value = forecast yield irm_kmi @pytest.fixture() def mock_irm_kmi_api_coordinator_out_benelux(request: pytest.FixtureRequest) -> Generator[None, MagicMock, None]: """Return a mocked IrmKmi api client.""" fixture: str = "forecast_out_of_benelux.json" forecast = json.loads(load_fixture(fixture)) with patch( "custom_components.irm_kmi.coordinator.IrmKmiApiClient", autospec=True ) as irm_kmi_api_mock: irm_kmi = irm_kmi_api_mock.return_value irm_kmi.get_forecasts_coord.return_value = forecast yield irm_kmi @pytest.fixture() def mock_irm_kmi_api_repair_in_benelux(request: pytest.FixtureRequest) -> Generator[None, MagicMock, None]: """Return a mocked IrmKmi api client.""" fixture: str = 
"forecast.json" forecast = json.loads(load_fixture(fixture)) with patch( "custom_components.irm_kmi.repairs.IrmKmiApiClient", autospec=True ) as irm_kmi_api_mock: irm_kmi = irm_kmi_api_mock.return_value irm_kmi.get_forecasts_coord.return_value = forecast yield irm_kmi @pytest.fixture() def mock_irm_kmi_api_repair_out_of_benelux(request: pytest.FixtureRequest) -> Generator[None, MagicMock, None]: """Return a mocked IrmKmi api client.""" fixture: str = "forecast_out_of_benelux.json" forecast = json.loads(load_fixture(fixture)) with patch( "custom_components.irm_kmi.repairs.IrmKmiApiClient", autospec=True ) as irm_kmi_api_mock: irm_kmi = irm_kmi_api_mock.return_value irm_kmi.get_forecasts_coord.return_value = forecast yield irm_kmi @pytest.fixture() def mock_exception_irm_kmi_api(request: pytest.FixtureRequest) -> Generator[None, MagicMock, None]: """Return a mocked IrmKmi api client.""" with patch( "custom_components.irm_kmi.coordinator.IrmKmiApiClient", autospec=True ) as irm_kmi_api_mock: irm_kmi = irm_kmi_api_mock.return_value
irm_kmi.get_forecasts_coord.side_effect = IrmKmiApiParametersError
1
2023-12-17 16:35:01+00:00
4k
v3ucn/Bert-vits2-V2.2
compress_model.py
[ { "identifier": "symbols", "path": "text/symbols.py", "snippet": "" }, { "identifier": "logger", "path": "tools/log.py", "snippet": "" }, { "identifier": "SynthesizerTrn", "path": "models.py", "snippet": "class SynthesizerTrn(nn.Module):\n \"\"\"\n Synthesizer for Training\n \"\"\"\n\n def __init__(\n self,\n n_vocab,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n n_speakers=256,\n gin_channels=256,\n use_sdp=True,\n n_flow_layer=4,\n n_layers_trans_flow=4,\n flow_share_parameter=False,\n use_transformer_flow=True,\n **kwargs\n ):\n super().__init__()\n self.n_vocab = n_vocab\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.n_speakers = n_speakers\n self.gin_channels = gin_channels\n self.n_layers_trans_flow = n_layers_trans_flow\n self.use_spk_conditioned_encoder = kwargs.get(\n \"use_spk_conditioned_encoder\", True\n )\n self.use_sdp = use_sdp\n self.use_noise_scaled_mas = kwargs.get(\"use_noise_scaled_mas\", False)\n self.mas_noise_scale_initial = kwargs.get(\"mas_noise_scale_initial\", 0.01)\n self.noise_scale_delta = kwargs.get(\"noise_scale_delta\", 2e-6)\n self.current_mas_noise_scale = self.mas_noise_scale_initial\n if self.use_spk_conditioned_encoder and gin_channels > 0:\n self.enc_gin_channels = gin_channels\n self.enc_p = TextEncoder(\n n_vocab,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n self.n_speakers,\n gin_channels=self.enc_gin_channels,\n )\n self.dec = Generator(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gin_channels=gin_channels,\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n if use_transformer_flow:\n self.flow = TransformerCouplingBlock(\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers_trans_flow,\n 5,\n p_dropout,\n n_flow_layer,\n gin_channels=gin_channels,\n share_parameter=flow_share_parameter,\n )\n else:\n self.flow = ResidualCouplingBlock(\n inter_channels,\n hidden_channels,\n 5,\n 1,\n n_flow_layer,\n gin_channels=gin_channels,\n )\n self.sdp = StochasticDurationPredictor(\n hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels\n )\n self.dp = DurationPredictor(\n hidden_channels, 256, 3, 0.5, gin_channels=gin_channels\n )\n\n if n_speakers >= 1:\n self.emb_g = nn.Embedding(n_speakers, gin_channels)\n else:\n self.ref_enc = ReferenceEncoder(spec_channels, gin_channels)\n\n def forward(\n self,\n x,\n x_lengths,\n y,\n y_lengths,\n sid,\n tone,\n language,\n bert,\n ja_bert,\n en_bert,\n emo=None,\n ):\n if self.n_speakers > 0:\n g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]\n else:\n g = 
self.ref_enc(y.transpose(1, 2)).unsqueeze(-1)\n x, m_p, logs_p, x_mask, loss_commit = self.enc_p(\n x, x_lengths, tone, language, bert, ja_bert, en_bert, emo, sid, g=g\n )\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n\n with torch.no_grad():\n # negative cross-entropy\n s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t]\n neg_cent1 = torch.sum(\n -0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True\n ) # [b, 1, t_s]\n neg_cent2 = torch.matmul(\n -0.5 * (z_p**2).transpose(1, 2), s_p_sq_r\n ) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]\n neg_cent3 = torch.matmul(\n z_p.transpose(1, 2), (m_p * s_p_sq_r)\n ) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]\n neg_cent4 = torch.sum(\n -0.5 * (m_p**2) * s_p_sq_r, [1], keepdim=True\n ) # [b, 1, t_s]\n neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4\n if self.use_noise_scaled_mas:\n epsilon = (\n torch.std(neg_cent)\n * torch.randn_like(neg_cent)\n * self.current_mas_noise_scale\n )\n neg_cent = neg_cent + epsilon\n\n attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)\n attn = (\n monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1))\n .unsqueeze(1)\n .detach()\n )\n\n w = attn.sum(2)\n\n l_length_sdp = self.sdp(x, x_mask, w, g=g)\n l_length_sdp = l_length_sdp / torch.sum(x_mask)\n\n logw_ = torch.log(w + 1e-6) * x_mask\n logw = self.dp(x, x_mask, g=g)\n l_length_dp = torch.sum((logw - logw_) ** 2, [1, 2]) / torch.sum(\n x_mask\n ) # for averaging\n\n l_length = l_length_dp + l_length_sdp\n\n # expand prior\n m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)\n logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)\n\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n o = self.dec(z_slice, g=g)\n return (\n o,\n l_length,\n attn,\n ids_slice,\n x_mask,\n y_mask,\n (z, z_p, m_p, logs_p, m_q, logs_q),\n (x, logw, logw_),\n g,\n loss_commit,\n )\n\n def infer(\n self,\n x,\n x_lengths,\n sid,\n tone,\n language,\n bert,\n ja_bert,\n en_bert,\n emo=None,\n noise_scale=0.667,\n length_scale=1,\n noise_scale_w=0.8,\n max_len=None,\n sdp_ratio=0,\n y=None,\n ):\n # x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, tone, language, bert)\n # g = self.gst(y)\n if self.n_speakers > 0:\n g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]\n else:\n g = self.ref_enc(y.transpose(1, 2)).unsqueeze(-1)\n x, m_p, logs_p, x_mask, _ = self.enc_p(\n x, x_lengths, tone, language, bert, ja_bert, en_bert, emo, sid, g=g\n )\n logw = self.sdp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w) * (\n sdp_ratio\n ) + self.dp(x, x_mask, g=g) * (1 - sdp_ratio)\n w = torch.exp(logw) * x_mask * length_scale\n w_ceil = torch.ceil(w)\n y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()\n y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(\n x_mask.dtype\n )\n attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)\n attn = commons.generate_path(w_ceil, attn_mask)\n\n m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(\n 1, 2\n ) # [b, t', t], [b, t, d] -> [b, d, t']\n logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(\n 1, 2\n ) # [b, t', t], [b, t, d] -> [b, d, t']\n\n z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale\n z = self.flow(z_p, y_mask, g=g, reverse=True)\n o = self.dec((z * y_mask)[:, :, :max_len], g=g)\n return o, attn, y_mask, (z, z_p, m_p, logs_p)" } ]
from collections import OrderedDict from text.symbols import symbols from tools.log import logger from models import SynthesizerTrn import torch import utils import os import argparse import os.path
3,177
def copyStateDict(state_dict): if list(state_dict.keys())[0].startswith("module"): start_idx = 1 else: start_idx = 0 new_state_dict = OrderedDict() for k, v in state_dict.items(): name = ",".join(k.split(".")[start_idx:]) new_state_dict[name] = v return new_state_dict def removeOptimizer(config: str, input_model: str, ishalf: bool, output_model: str): hps = utils.get_hparams_from_file(config) net_g = SynthesizerTrn( len(symbols), hps.data.filter_length // 2 + 1, hps.train.segment_size // hps.data.hop_length, n_speakers=hps.data.n_speakers, **hps.model, ) optim_g = torch.optim.AdamW( net_g.parameters(), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps, ) state_dict_g = torch.load(input_model, map_location="cpu") new_dict_g = copyStateDict(state_dict_g) keys = [] for k, v in new_dict_g["model"].items(): if "enc_q" in k: continue # noqa: E701 keys.append(k) new_dict_g = ( {k: new_dict_g["model"][k].half() for k in keys} if ishalf else {k: new_dict_g["model"][k] for k in keys} ) torch.save( { "model": new_dict_g, "iteration": 0, "optimizer": optim_g.state_dict(), "learning_rate": 0.0001, }, output_model, ) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("-c", "--config", type=str, default="configs/config.json") parser.add_argument("-i", "--input", type=str) parser.add_argument("-o", "--output", type=str, default=None) parser.add_argument( "-hf", "--half", action="store_true", default=False, help="Save as FP16" ) args = parser.parse_args() output = args.output if output is None: filename, ext = os.path.splitext(args.input) half = "_half" if args.half else "" output = filename + "_release" + half + ext removeOptimizer(args.config, args.input, args.half, output)
def copyStateDict(state_dict): if list(state_dict.keys())[0].startswith("module"): start_idx = 1 else: start_idx = 0 new_state_dict = OrderedDict() for k, v in state_dict.items(): name = ",".join(k.split(".")[start_idx:]) new_state_dict[name] = v return new_state_dict def removeOptimizer(config: str, input_model: str, ishalf: bool, output_model: str): hps = utils.get_hparams_from_file(config) net_g = SynthesizerTrn( len(symbols), hps.data.filter_length // 2 + 1, hps.train.segment_size // hps.data.hop_length, n_speakers=hps.data.n_speakers, **hps.model, ) optim_g = torch.optim.AdamW( net_g.parameters(), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps, ) state_dict_g = torch.load(input_model, map_location="cpu") new_dict_g = copyStateDict(state_dict_g) keys = [] for k, v in new_dict_g["model"].items(): if "enc_q" in k: continue # noqa: E701 keys.append(k) new_dict_g = ( {k: new_dict_g["model"][k].half() for k in keys} if ishalf else {k: new_dict_g["model"][k] for k in keys} ) torch.save( { "model": new_dict_g, "iteration": 0, "optimizer": optim_g.state_dict(), "learning_rate": 0.0001, }, output_model, ) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("-c", "--config", type=str, default="configs/config.json") parser.add_argument("-i", "--input", type=str) parser.add_argument("-o", "--output", type=str, default=None) parser.add_argument( "-hf", "--half", action="store_true", default=False, help="Save as FP16" ) args = parser.parse_args() output = args.output if output is None: filename, ext = os.path.splitext(args.input) half = "_half" if args.half else "" output = filename + "_release" + half + ext removeOptimizer(args.config, args.input, args.half, output)
logger.info(f"压缩模型成功, 输出模型: {os.path.abspath(output)}")
1
2023-12-18 04:54:46+00:00
4k
mjunaidca/travel-ai-service
backend/app/service/openai_travel_agent_call.py
[ { "identifier": "TravelAIChat", "path": "backend/app/utils/chat_assistant.py", "snippet": "class TravelAIChat():\n def __init__(self, client: OpenAI, assistant: Assistant, thread: Thread):\n if (client is None):\n raise Exception(\"OpenAI Client is not initialized\")\n self.client = client\n self.assistant: Assistant | None = assistant\n self.thread: Thread | None = thread\n\n def modifyAssistant(self, new_instructions: str, tools: list, file_obj: list[str], model: str = \"gpt-4-1106-preview\") -> Assistant:\n \"\"\"Update an existing assistant.\"\"\"\n print(\"Updating edisting assistant...\")\n if self.assistant is None:\n raise ValueError(\"Assistant is not set!\")\n self.assistant = self.client.beta.assistants.update(\n assistant_id=self.assistant.id,\n instructions=new_instructions,\n tools=tools,\n model=model,\n file_ids=file_obj\n )\n return self.assistant\n\n def add_message_to_thread(self, role: Literal['user'], content: str, file_obj_ids: list[str] = ['']) -> None:\n if self.thread is None:\n raise ValueError(\"Thread is not set!\")\n\n self.client.beta.threads.messages.create(\n thread_id=self.thread.id,\n role=role,\n content=content,\n file_ids=file_obj_ids\n )\n\n def run_assistant(self) -> Run:\n\n if self.assistant is None:\n raise ValueError(\n \"Assistant is not set. Cannot run assistant without an assistant.\")\n\n if self.thread is None:\n raise ValueError(\n \"Thread is not set!\")\n\n self.run: Run = self.client.beta.threads.runs.create(\n thread_id=self.thread.id,\n assistant_id=self.assistant.id,\n )\n return self.run\n\n # Polling\n def wait_for_completion(self, run: Run):\n\n if run is None:\n raise ValueError(\"Run is not set!\")\n\n if self.thread is None:\n raise ValueError(\n \"Thread is not set!\")\n\n # while run.status in [\"in_progress\", \"queued\"]:\n while run.status not in [\"completed\", \"failed\"]:\n run_status = self.client.beta.threads.runs.retrieve(\n thread_id=self.thread.id,\n run_id=run.id\n )\n time.sleep(3) # Wait for 3 seconds before checking again\n print(f\"Status: {run_status.status}\")\n\n if run_status.status == 'completed':\n print(\"Run completed.\")\n return self.client.beta.threads.messages.list(thread_id=self.thread.id)\n elif run_status.status == 'requires_action' and run_status.required_action is not None:\n print(f\"Function Calling ...\")\n toolCalls = run_status.required_action.submit_tool_outputs.model_dump()\n self.call_required_functions(\n toolCalls=toolCalls,\n thread_id=self.thread.id,\n run_id=run.id\n )\n elif run.status == \"failed\":\n print(\"Run failed.\")\n break\n else:\n print(f\"Waiting for the Assistant to process...: {run.status}\")\n\n # Function to call the required functions\n def call_required_functions(self, toolCalls, thread_id: str, run_id: str):\n\n tool_outputs: list[ToolOutput] = []\n\n # for toolcall in toolCalls:\n for toolcall in toolCalls[\"tool_calls\"]:\n function_name = toolcall['function']['name']\n function_args = json.loads(toolcall['function']['arguments'])\n\n if function_name in available_functions:\n\n # Displaying the message with values\n print(f\"calling function {function_name} with args:\")\n for key, value in function_args.items():\n print(f\"{key}: {value}\")\n\n if function_name in available_functions:\n function_to_call: Callable[...,\n dict] = available_functions[function_name]\n print(\"function_to_call >>>>>\", function_to_call)\n output = function_to_call(**function_args)\n\n print(\"Output Status\", output)\n\n tool_outputs.append({\n \"tool_call_id\": 
toolcall['id'],\n \"output\": output['status'] if 'status' in output else output,\n })\n\n else:\n raise ValueError(f\"Unknown function: {function_name}\")\n\n print('submit_tool_outputs >>>>>', tool_outputs,)\n # Submit tool outputs and update the run\n self.client.beta.threads.runs.submit_tool_outputs(\n thread_id=thread_id,\n run_id=run_id,\n tool_outputs=tool_outputs\n )" }, { "identifier": "GetAssistant", "path": "backend/app/utils/get_assistant.py", "snippet": "class GetAssistant():\n def __init__(self, client: OpenAI):\n if client is None:\n raise Exception(\"OpenAI Client is not initialized\")\n self.client = client\n print(\"OpenAI Client initialized successfully.\")\n\n def retrieve_assistant(self, assistant_id: str) -> Assistant:\n \"\"\"Retrieve an Assistant using the ID stored in the env variables.\n If the assistant is not found, create a new one.\"\"\"\n print(f\"Attempting to retrieve Assistant with ID: {assistant_id}\")\n\n if assistant_id is None or assistant_id == '':\n print(\"No valid Assistant ID provided. Creating a new Assistant.\")\n travel_agent = self.create_assistant()\n return travel_agent\n\n try:\n print(f\"Retrieving existing Assistant with ID: {assistant_id}\")\n ret_travel_agent: Assistant = self.client.beta.assistants.retrieve(\n assistant_id=assistant_id\n )\n print(\"Assistant retrieved successfully.\")\n return ret_travel_agent\n\n except Exception as e:\n print(f\"\"\"Error retrieving Assistant: {\n e}. Creating a new Assistant.\"\"\")\n travel_agent = self.create_assistant()\n return travel_agent\n\n def create_assistant(self) -> Assistant:\n \"\"\"Create an Assistant Once and Store its ID in the env variables.\n Next retrieve the assistant and use it. You can modify it.\"\"\"\n print(\"Creating a new Assistant...\")\n\n travel_agent: Assistant = self.client.beta.assistants.create(\n model=\"gpt-4-1106-preview\",\n name=\"AI Travel Agent\",\n instructions=SEED_INSTRUCTION,\n tools=travel_agent_tools\n )\n\n print(\"New Assistant created successfully.\")\n return travel_agent" }, { "identifier": "CreateThread", "path": "backend/app/utils/thread_manager.py", "snippet": "class CreateThread():\n def __init__(self, client: OpenAI):\n if client is None:\n raise Exception(\"OpenAI Client is not initialized\")\n self.client = client\n print(\"CreateThread initialized with OpenAI client.\")\n\n def create_thread(self, purpose: str = 'assistants') -> Thread:\n \"\"\"Create a Thread.\"\"\"\n print(f\"Creating a new Thread for purpose: '{purpose}'...\")\n thread: Thread = self.client.beta.threads.create()\n print(f\"New Thread created. Thread ID: {thread.id}\")\n return thread" } ]
from ..utils.chat_assistant import TravelAIChat from ..utils.get_assistant import GetAssistant from ..utils.thread_manager import CreateThread from openai.types.beta.threads import ThreadMessage, Run from openai.types.beta.thread import Thread from openai.types.beta.assistant import Assistant from openai import OpenAI from dotenv import load_dotenv, find_dotenv import os
1,803
_: bool = load_dotenv(find_dotenv()) # read local .env file client: OpenAI = OpenAI() # TODO: If Assistant is present in env no need to retrieve & verify it. TRAVEL_ASSISTANT_ID = os.environ.get("TRAVEL_ASSISTANT_ID") # Initialize Travel Assistant Class
_: bool = load_dotenv(find_dotenv()) # read local .env file client: OpenAI = OpenAI() # TODO: If Assistant is present in env no need to retrieve & verify it. TRAVEL_ASSISTANT_ID = os.environ.get("TRAVEL_ASSISTANT_ID") # Initialize Travel Assistant Class
travel_agent_call: GetAssistant = GetAssistant(
1
2023-12-17 05:57:21+00:00
4k