diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Cocteau Twins Singles Collection 10CD Box Set 1991 FLAC Fixed.md b/spaces/1gistliPinn/ChatGPT4/Examples/Cocteau Twins Singles Collection 10CD Box Set 1991 FLAC Fixed.md deleted file mode 100644 index d912acc0515313a8d394bd62a2587793fc6d6083..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/Cocteau Twins Singles Collection 10CD Box Set 1991 FLAC Fixed.md +++ /dev/null @@ -1,6 +0,0 @@ -

Cocteau Twins Singles Collection 10CD Box Set 1991 FLAC


Download File ⚙⚙⚙ https://imgfil.com/2uxXmw



- -Hotel Transylvania 3: license Download Full Movie Torrent Mavis is surprised ... 6 Pro License Code (License Key): it is responsible for merging the fragmented files on your system. what does iDRAC stand for. ... 50 and above 14G PowerEdge Servers: iDRAC 9 with Firmware version 3. ... Ableton Live Suite 10. 4d29de3e1b
-
-
-

diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Download AutoCAD Map 3D 2005 Crack TOP.md b/spaces/1gistliPinn/ChatGPT4/Examples/Download AutoCAD Map 3D 2005 Crack TOP.md deleted file mode 100644 index 338d0a94c03255fa8dca5b278c03821560a4f23d..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/Download AutoCAD Map 3D 2005 Crack TOP.md +++ /dev/null @@ -1,6 +0,0 @@ -

download AutoCAD Map 3D 2005 crack


Download ---> https://imgfil.com/2uxX5G



- -AutoCAD Map 3D forum. Welcome to the Autodesk Forum on AutoCAD Map 3D. Share your knowledge, ask questions, and explore popular AutoCAD Map 3D topics. The Frequently Asked Questions section answers common questions about AutoCAD Map 3D and other Autodesk products. Tutorials are provided in the Learn section 8a78ff9644
-
-
-

diff --git a/spaces/232labs/VToonify/vtoonify/model/stylegan/lpips/base_model.py b/spaces/232labs/VToonify/vtoonify/model/stylegan/lpips/base_model.py deleted file mode 100644 index 8de1d16f0c7fa52d8067139abc6e769e96d0a6a1..0000000000000000000000000000000000000000 --- a/spaces/232labs/VToonify/vtoonify/model/stylegan/lpips/base_model.py +++ /dev/null @@ -1,58 +0,0 @@ -import os -import numpy as np -import torch -from torch.autograd import Variable -from pdb import set_trace as st -from IPython import embed - -class BaseModel(): - def __init__(self): - pass; - - def name(self): - return 'BaseModel' - - def initialize(self, use_gpu=True, gpu_ids=[0]): - self.use_gpu = use_gpu - self.gpu_ids = gpu_ids - - def forward(self): - pass - - def get_image_paths(self): - pass - - def optimize_parameters(self): - pass - - def get_current_visuals(self): - return self.input - - def get_current_errors(self): - return {} - - def save(self, label): - pass - - # helper saving function that can be used by subclasses - def save_network(self, network, path, network_label, epoch_label): - save_filename = '%s_net_%s.pth' % (epoch_label, network_label) - save_path = os.path.join(path, save_filename) - torch.save(network.state_dict(), save_path) - - # helper loading function that can be used by subclasses - def load_network(self, network, network_label, epoch_label): - save_filename = '%s_net_%s.pth' % (epoch_label, network_label) - save_path = os.path.join(self.save_dir, save_filename) - print('Loading network from %s'%save_path) - network.load_state_dict(torch.load(save_path)) - - def update_learning_rate(): - pass - - def get_image_paths(self): - return self.image_paths - - def save_done(self, flag=False): - np.save(os.path.join(self.save_dir, 'done_flag'),flag) - np.savetxt(os.path.join(self.save_dir, 'done_flag'),[flag,],fmt='%i') diff --git a/spaces/7hao/bingo/src/components/chat-panel.tsx b/spaces/7hao/bingo/src/components/chat-panel.tsx deleted file mode 100644 index 1fbc3c2bf05b914e0c229661832fbb560745f488..0000000000000000000000000000000000000000 --- a/spaces/7hao/bingo/src/components/chat-panel.tsx +++ /dev/null @@ -1,153 +0,0 @@ -'use client' - -import * as React from 'react' -import Image from 'next/image' -import Textarea from 'react-textarea-autosize' -import { useAtomValue } from 'jotai' -import { useEnterSubmit } from '@/lib/hooks/use-enter-submit' -import { cn } from '@/lib/utils' - -import BrushIcon from '@/assets/images/brush.svg' -import ChatIcon from '@/assets/images/chat.svg' -import VisualSearchIcon from '@/assets/images/visual-search.svg' -import SendIcon from '@/assets/images/send.svg' -import PinIcon from '@/assets/images/pin.svg' -import PinFillIcon from '@/assets/images/pin-fill.svg' - -import { useBing } from '@/lib/hooks/use-bing' -import { voiceListenAtom } from '@/state' -import Voice from './voice' -import { ChatImage } from './chat-image' -import { ChatAttachments } from './chat-attachments' - -export interface ChatPanelProps - extends Pick< - ReturnType, - | 'generating' - | 'input' - | 'setInput' - | 'sendMessage' - | 'resetConversation' - | 'isSpeaking' - | 'attachmentList' - | 'uploadImage' - | 'setAttachmentList' - > { - id?: string - className?: string -} - -export function ChatPanel({ - isSpeaking, - generating, - input, - setInput, - className, - sendMessage, - resetConversation, - attachmentList, - uploadImage, - setAttachmentList -}: ChatPanelProps) { - const inputRef = React.useRef(null) - const {formRef, onKeyDown} = useEnterSubmit() - const [focused, setFocused] = 
React.useState(false)
-  const [active, setActive] = React.useState(false)
-  const [pin, setPin] = React.useState(false)
-  const [tid, setTid] = React.useState()
-  const voiceListening = useAtomValue(voiceListenAtom)
-
-  const setBlur = React.useCallback(() => {
-    clearTimeout(tid)
-    setActive(false)
-    const _tid = setTimeout(() => setFocused(false), 2000);
-    setTid(_tid)
-  }, [tid])
-
-  const setFocus = React.useCallback(() => {
-    setFocused(true)
-    setActive(true)
-    clearTimeout(tid)
-    inputRef.current?.focus()
-  }, [tid])
-
-  React.useEffect(() => {
-    if (input) {
-      setFocus()
-    }
-  }, [input])
-
-  return (
-
{ - e.preventDefault() - if (generating) { - return; - } - if (!input?.trim()) { - return - } - setInput('') - setPin(false) - await sendMessage(input) - }} - ref={formRef} - > -
-
-
-
-
-
-
- -
-
-
-
- chat - - - -
- -
- - - -
-
-

How it works

-

- Generate an image using a hosted model and add an AI disclosure to - the file. This information, referred to as Content Credentials, - serves as a nutrition label for the content. We employ the - tamper-evident open C2PA standard, which utilizes PKI and is - resistant to forgery. You can download and transfer the image to - supported editing tools like Photoshop, where your edit history can - also be securely added to the file. This historical information, - known as provenance, accompanies your media and can be extracted and - displayed using a tool or website. -

- -

- Want to know more? Read our - community blog post. -

-
-
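The "How it works" copy above describes extracting and displaying the Content Credentials (C2PA manifest) embedded in a generated file. A minimal sketch of that extraction step follows, assuming the open-source c2patool CLI from the Content Authenticity Initiative is installed on PATH (invoked with just a file path, it prints the file's manifest store as JSON); the file name is hypothetical.

import json
import subprocess

def read_content_credentials(image_path: str) -> dict:
    """Return the C2PA manifest store embedded in image_path as a dict.

    Assumes the c2patool CLI is available and that its stdout is the
    manifest store serialized as JSON.
    """
    result = subprocess.run(
        ["c2patool", image_path],
        capture_output=True,
        text=True,
        check=True,
    )
    return json.loads(result.stdout)

# Hypothetical file produced by the hosted model described above.
manifest = read_content_credentials("generated.png")
# The active manifest carries the provenance chain: the signer plus the
# recorded actions (AI generation, later edits in tools like Photoshop).
print(manifest.get("active_manifest"))

Because the manifest is signed with PKI, any tampering with the file after signing invalidates the signature, which is what makes the provenance tamper-evident.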
- - - diff --git a/spaces/VincentZB/Stable-Diffusion-ControlNet-WebUI/diffusion_webui/diffusion_models/controlnet/controlnet_canny.py b/spaces/VincentZB/Stable-Diffusion-ControlNet-WebUI/diffusion_webui/diffusion_models/controlnet/controlnet_canny.py deleted file mode 100644 index a313ffda0a74b6373e90681aba6cd0e9a8736c86..0000000000000000000000000000000000000000 --- a/spaces/VincentZB/Stable-Diffusion-ControlNet-WebUI/diffusion_webui/diffusion_models/controlnet/controlnet_canny.py +++ /dev/null @@ -1,183 +0,0 @@ -import cv2 -import gradio as gr -import numpy as np -import torch -from diffusers import ControlNetModel, StableDiffusionControlNetPipeline -from PIL import Image - -from diffusion_webui.utils.model_list import ( - controlnet_canny_model_list, - stable_model_list, -) -from diffusion_webui.utils.scheduler_list import ( - SCHEDULER_LIST, - get_scheduler_list, -) - - -class StableDiffusionControlNetCannyGenerator: - def __init__(self): - self.pipe = None - - def load_model(self, stable_model_path, controlnet_model_path, scheduler): - if self.pipe is None: - controlnet = ControlNetModel.from_pretrained( - controlnet_model_path, torch_dtype=torch.float16 - ) - self.pipe = StableDiffusionControlNetPipeline.from_pretrained( - pretrained_model_name_or_path=stable_model_path, - controlnet=controlnet, - safety_checker=None, - torch_dtype=torch.float16, - ) - - self.pipe = get_scheduler_list(pipe=self.pipe, scheduler=scheduler) - self.pipe.to("cuda") - self.pipe.enable_xformers_memory_efficient_attention() - - return self.pipe - - def controlnet_canny( - self, - image_path: str, - ): - image = Image.open(image_path) - image = np.array(image) - - image = cv2.Canny(image, 100, 200) - image = image[:, :, None] - image = np.concatenate([image, image, image], axis=2) - image = Image.fromarray(image) - - return image - - def generate_image( - self, - image_path: str, - stable_model_path: str, - controlnet_model_path: str, - prompt: str, - negative_prompt: str, - num_images_per_prompt: int, - guidance_scale: int, - num_inference_step: int, - scheduler: str, - seed_generator: int, - ): - pipe = self.load_model( - stable_model_path=stable_model_path, - controlnet_model_path=controlnet_model_path, - scheduler=scheduler, - ) - - image = self.controlnet_canny(image_path=image_path) - - if seed_generator == 0: - random_seed = torch.randint(0, 1000000, (1,)) - generator = torch.manual_seed(random_seed) - else: - generator = torch.manual_seed(seed_generator) - - output = pipe( - prompt=prompt, - image=image, - negative_prompt=negative_prompt, - num_images_per_prompt=num_images_per_prompt, - num_inference_steps=num_inference_step, - guidance_scale=guidance_scale, - generator=generator, - ).images - - return output - - def app(): - with gr.Blocks(): - with gr.Row(): - with gr.Column(): - controlnet_canny_image_file = gr.Image( - type="filepath", label="Image" - ) - - controlnet_canny_prompt = gr.Textbox( - lines=1, - placeholder="Prompt", - show_label=False, - ) - - controlnet_canny_negative_prompt = gr.Textbox( - lines=1, - placeholder="Negative Prompt", - show_label=False, - ) - with gr.Row(): - with gr.Column(): - controlnet_canny_stable_model_id = gr.Dropdown( - choices=stable_model_list, - value=stable_model_list[0], - label="Stable Model Id", - ) - - controlnet_canny_guidance_scale = gr.Slider( - minimum=0.1, - maximum=15, - step=0.1, - value=7.5, - label="Guidance Scale", - ) - controlnet_canny_num_inference_step = gr.Slider( - minimum=1, - maximum=100, - step=1, - value=50, - label="Num Inference 
Step", - ) - controlnet_canny_num_images_per_prompt = gr.Slider( - minimum=1, - maximum=10, - step=1, - value=1, - label="Number Of Images", - ) - with gr.Row(): - with gr.Column(): - controlnet_canny_model_id = gr.Dropdown( - choices=controlnet_canny_model_list, - value=controlnet_canny_model_list[0], - label="ControlNet Model Id", - ) - - controlnet_canny_scheduler = gr.Dropdown( - choices=SCHEDULER_LIST, - value=SCHEDULER_LIST[0], - label="Scheduler", - ) - - controlnet_canny_seed_generator = gr.Number( - value=0, - label="Seed Generator", - ) - controlnet_canny_predict = gr.Button(value="Generator") - - with gr.Column(): - output_image = gr.Gallery( - label="Generated images", - show_label=False, - elem_id="gallery", - ).style(grid=(1, 2)) - - controlnet_canny_predict.click( - fn=StableDiffusionControlNetCannyGenerator().generate_image, - inputs=[ - controlnet_canny_image_file, - controlnet_canny_stable_model_id, - controlnet_canny_model_id, - controlnet_canny_prompt, - controlnet_canny_negative_prompt, - controlnet_canny_num_images_per_prompt, - controlnet_canny_guidance_scale, - controlnet_canny_num_inference_step, - controlnet_canny_scheduler, - controlnet_canny_seed_generator, - ], - outputs=[output_image], - ) diff --git a/spaces/Volkopat/arXivGPT/pdf2vectorstore.py b/spaces/Volkopat/arXivGPT/pdf2vectorstore.py deleted file mode 100644 index 56d8cf47c58288662bb42519ef1391979665f1e9..0000000000000000000000000000000000000000 --- a/spaces/Volkopat/arXivGPT/pdf2vectorstore.py +++ /dev/null @@ -1,72 +0,0 @@ - -import os -import requests -from bs4 import BeautifulSoup -from pdf2image import convert_from_path -import pytesseract -import pickle - -from langchain.text_splitter import RecursiveCharacterTextSplitter -from langchain.document_loaders import UnstructuredFileLoader -from langchain.vectorstores.faiss import FAISS -from langchain.embeddings import OpenAIEmbeddings - -def download_pdf(url, filename): - print("Downloading pdf...") - response = requests.get(url, stream=True) - with open(filename, 'wb') as f: - for chunk in response.iter_content(chunk_size=8192): - f.write(chunk) - -def extract_pdf_text(filename): - print("Extracting text from pdf...") - pytesseract.pytesseract.tesseract_cmd = 'tesseract' - images = convert_from_path(filename) - text = "" - for image in images: - text += pytesseract.image_to_string(image) - - return text - -def get_arxiv_pdf_url(paper_link): - if paper_link.endswith('.pdf'): - return paper_link - else: - print("Getting pdf url...") - response = requests.get(paper_link) - soup = BeautifulSoup(response.text, 'html.parser') - pdf_url = soup.find('a', {'class': 'mobile-submission-download'})['href'] - pdf_url = 'https://arxiv.org' + pdf_url - return pdf_url - -def read_paper(paper_link): - print("Reading paper...") - pdf_filename = 'paper.pdf' - pdf_url = get_arxiv_pdf_url(paper_link) - download_pdf(pdf_url, pdf_filename) - text = extract_pdf_text(pdf_filename) - os.remove(pdf_filename) - - return text - -def convert_to_vectorstore(arxiv_url, api_key): - if not arxiv_url or not api_key: - return None - print("Converting to vectorstore...") - txtfile = "paper.txt" - with open(txtfile, 'w') as f: - f.write(read_paper(arxiv_url)) - - loader = UnstructuredFileLoader(txtfile) - raw_documents = loader.load() - os.remove(txtfile) - print("Loaded document") - - text_splitter = RecursiveCharacterTextSplitter(chunk_size=2000, chunk_overlap=200) - documents = text_splitter.split_documents(raw_documents) - os.environ["OPENAI_API_KEY"] = api_key - embeddings = 
OpenAIEmbeddings() - os.environ["OPENAI_API_KEY"] = "" - vectorstore = FAISS.from_documents(documents, embeddings) - - return vectorstore \ No newline at end of file diff --git a/spaces/Wootang01/image_classifier/app.py b/spaces/Wootang01/image_classifier/app.py deleted file mode 100644 index 4a34012cb5f7c1497bcd1273b65c1673f0a28693..0000000000000000000000000000000000000000 --- a/spaces/Wootang01/image_classifier/app.py +++ /dev/null @@ -1,13 +0,0 @@ -import gradio as gr -from transformers import pipeline - -title = "Image Classifier" -description = "This machine has vision. It can see objects and concepts in an image. To test the machine, upload or drop an image, submit and read the results. The results comprise a list of words that the machine sees in the image. Beside a word, the length of the bar indicates the confidence with which the machine sees the word. The longer the bar, the more confident the machine is." - -pipe = pipeline(task="image-classification", - model="microsoft/beit-base-patch16-224-pt22k-ft22k") -gr.Interface.from_pipeline(pipe, - title=title, - description=description, - enable_queue=True, - ).launch() \ No newline at end of file diff --git a/spaces/XS-1/BW_IMAGE_VIDEO_COLORIZER/fastai/gen_doc/nbtest.py b/spaces/XS-1/BW_IMAGE_VIDEO_COLORIZER/fastai/gen_doc/nbtest.py deleted file mode 100644 index f4ee0355811907359e21219f6994c3a222a41e9b..0000000000000000000000000000000000000000 --- a/spaces/XS-1/BW_IMAGE_VIDEO_COLORIZER/fastai/gen_doc/nbtest.py +++ /dev/null @@ -1,173 +0,0 @@ -"`gen_doc.nbtest` shows pytest documentation for module functions" - -import inspect, os, re -from os.path import abspath, dirname, join -from collections import namedtuple - -from fastai.gen_doc import nbdoc -from ..imports.core import * -from .core import ifnone -from .doctest import get_parent_func, relative_test_path, get_func_fq_name, DB_NAME - -from nbconvert import HTMLExporter -from IPython.core import page -from IPython.core.display import display, Markdown, HTML - -__all__ = ['show_test', 'doctest', 'find_related_tests', 'lookup_db', 'find_test_matches', 'find_test_files', 'fuzzy_test_match', 'get_pytest_html'] - -TestFunctionMatch = namedtuple('TestFunctionMatch', ['line_number', 'line']) - -def show_test(elt)->str: - "Show associated tests for a fastai function/class" - md = build_tests_markdown(elt) - display(Markdown(md)) - -def doctest(elt): - "Inline notebook popup for `show_test`" - md = build_tests_markdown(elt) - output = nbdoc.md2html(md) - try: page.page({'text/html': output}) - except: display(Markdown(md)) - -def build_tests_markdown(elt): - fn_name = nbdoc.fn_name(elt) - md = '' - db_matches = [get_links(t) for t in lookup_db(elt)] - md += tests2md(db_matches, '') - try: - related = [get_links(t) for t in find_related_tests(elt)] - other_tests = [k for k in OrderedDict.fromkeys(related) if k not in db_matches] - md += tests2md(other_tests, f'Some other tests where `{fn_name}` is used:') - except OSError as e: pass - - if len(md.strip())==0: - return (f'No tests found for `{fn_name}`.' 
- ' To contribute a test please refer to [this guide](/dev/test.html)' - ' and [this discussion](https://forums.fast.ai/t/improving-expanding-functional-tests/32929).') - return (f'Tests found for `{fn_name}`: {md}' - '\n\nTo run tests please refer to this [guide](/dev/test.html#quick-guide).') - -def tests2md(tests, type_label:str): - if not tests: return '' - md = [f'\n\n{type_label}'] + [f'* `{cmd}` {link}' for link,cmd in sorted(tests, key=lambda k: k[1])] - return '\n'.join(md) - -def get_pytest_html(elt, anchor_id:str)->Tuple[str,str]: - md = build_tests_markdown(elt) - html = nbdoc.md2html(md).replace('\n','') # nbconverter fails to parse markdown if it has both html and '\n' - anchor_id = anchor_id.replace('.', '-') + '-pytest' - link, body = get_pytest_card(html, anchor_id) - return link, body - -def get_pytest_card(html, anchor_id): - "creates a collapsible bootstrap card for `show_test`" - link = f'[test]' - body = (f'
' - f'' - f'{html}' - '
') - return link, body - -def lookup_db(elt)->List[Dict]: - "Finds `this_test` entries from test_registry.json" - db_file = Path(abspath(join(dirname( __file__ ), '..')))/DB_NAME - if not db_file.exists(): - raise Exception(f'Could not find {db_file}. Please make sure it exists at "{db_file}" or run `make test`') - with open(db_file, 'r') as f: - db = json.load(f) - key = get_func_fq_name(elt) - return db.get(key, []) - -def find_related_tests(elt)->Tuple[List[Dict],List[Dict]]: - "Searches `fastai/tests` folder for any test functions related to `elt`" - related_matches = [] - for test_file in find_test_files(elt): - fuzzy_matches = find_test_matches(elt, test_file) - related_matches.extend(fuzzy_matches) - return related_matches - -def get_tests_dir(elt)->Path: - "Absolute path of `fastai/tests` directory" - test_dir = Path(__file__).parent.parent.parent.resolve()/'tests' - if not test_dir.exists(): raise OSError('Could not find test directory at this location:', test_dir) - return test_dir - -def get_file(elt)->str: - if hasattr(elt, '__wrapped__'): elt = elt.__wrapped__ - if not nbdoc.is_fastai_class(elt): return None - return inspect.getfile(elt) - -def find_test_files(elt, exact_match:bool=False)->List[Path]: - "Searches in `fastai/tests` directory for module tests" - test_dir = get_tests_dir(elt) - matches = [test_dir/o.name for o in os.scandir(test_dir) if _is_file_match(elt, o.name)] - # if len(matches) != 1: raise Error('Could not find exact file match:', matches) - return matches - -def _is_file_match(elt, file_name:str, exact_match:bool=False)->bool: - fp = get_file(elt) - if fp is None: return False - subdir = ifnone(_submodule_name(elt), '') - exact_re = '' if exact_match else '\w*' - return re.match(f'test_{subdir}\w*{Path(fp).stem}{exact_re}\.py', file_name) - -def _submodule_name(elt)->str: - "Returns submodule - utils, text, vision, imports, etc." 
- if inspect.ismodule(elt): return None - modules = elt.__module__.split('.') - if len(modules) > 2: - return modules[1] - return None - -def find_test_matches(elt, test_file:Path)->Tuple[List[Dict],List[Dict]]: - "Find all functions in `test_file` related to `elt`" - lines = get_lines(test_file) - rel_path = relative_test_path(test_file) - fn_name = get_qualname(elt) if not inspect.ismodule(elt) else '' - return fuzzy_test_match(fn_name, lines, rel_path) - -def get_qualname(elt): - return elt.__qualname__ if hasattr(elt, '__qualname__') else fn_name(elt) - -def separate_comp(qualname:str): - if not isinstance(qualname, str): qualname = get_qualname(qualname) - parts = qualname.split('.') - parts[-1] = remove_underscore(parts[-1]) - if len(parts) == 1: return [], parts[0] - return parts[:-1], parts[-1] - -def remove_underscore(fn_name): - if fn_name and fn_name[0] == '_': return fn_name[1:] # remove private method underscore prefix - return fn_name - -def fuzzy_test_match(fn_name:str, lines:List[Dict], rel_path:str)->List[TestFunctionMatch]: - "Find any lines where `fn_name` is invoked and return the parent test function" - fuzzy_line_matches = _fuzzy_line_match(fn_name, lines) - fuzzy_matches = [get_parent_func(lno, lines, ignore_missing=True) for lno,_ in fuzzy_line_matches] - fuzzy_matches = list(filter(None.__ne__, fuzzy_matches)) - return [map_test(rel_path, lno, l) for lno,l in fuzzy_matches] - -def _fuzzy_line_match(fn_name:str, lines)->List[TestFunctionMatch]: - "Find any lines where `fn_name` is called" - result = [] - _,fn_name = separate_comp(fn_name) - for idx,line in enumerate(lines): - if re.match(f'.*[\s\.\(]{fn_name}[\.\(]', line): - result.append((idx,line)) - return result - -def get_lines(file:Path)->List[str]: - with open(file, 'r') as f: return f.readlines() - -def map_test(test_file, line, line_text): - "Creates dictionary test format to match doctest api" - test_name = re.match(f'\s*def (test_\w*)', line_text).groups(0)[0] - return { 'file': test_file, 'line': line, 'test': test_name } - -def get_links(metadata)->Tuple[str,str]: - "Returns source code link and pytest command" - return nbdoc.get_source_link(**metadata), pytest_command(**metadata) - -def pytest_command(file:str, test:str, **kwargs)->str: - "Returns CLI command to run specific test function" - return f'pytest -sv {file}::{test}' diff --git a/spaces/Xenova/sponsorblock-ml/src/evaluate.py b/spaces/Xenova/sponsorblock-ml/src/evaluate.py deleted file mode 100644 index b0f9ae2bacff6223c3f136d925907f260ccef7dd..0000000000000000000000000000000000000000 --- a/spaces/Xenova/sponsorblock-ml/src/evaluate.py +++ /dev/null @@ -1,408 +0,0 @@ - -from model import get_model_tokenizer_classifier, InferenceArguments -from utils import jaccard, safe_print -from transformers import HfArgumentParser -from preprocess import get_words, clean_text -from shared import GeneralArguments, DatasetArguments -from predict import predict -from segment import extract_segment, word_start, word_end, SegmentationArguments, add_labels_to_words -import pandas as pd -from dataclasses import dataclass, field -from typing import Optional -from tqdm import tqdm -import json -import os -import random -from shared import seconds_to_time -from urllib.parse import quote -import logging - -logging.basicConfig() -logger = logging.getLogger(__name__) - - -@dataclass -class EvaluationArguments(InferenceArguments): - """Arguments pertaining to how evaluation will occur.""" - output_file: Optional[str] = field( - default='metrics.csv', - metadata={ - 
'help': 'Save metrics to output file' - } - ) - - skip_missing: bool = field( - default=False, - metadata={ - 'help': 'Whether to skip checking for missing segments. If False, predictions will be made.' - } - ) - skip_incorrect: bool = field( - default=False, - metadata={ - 'help': 'Whether to skip checking for incorrect segments. If False, classifications will be made on existing segments.' - } - ) - - -def attach_predictions_to_sponsor_segments(predictions, sponsor_segments): - """Attach sponsor segments to closest prediction""" - for prediction in predictions: - prediction['best_overlap'] = 0 - prediction['best_sponsorship'] = None - - # Assign predictions to actual (labelled) sponsored segments - for sponsor_segment in sponsor_segments: - j = jaccard(prediction['start'], prediction['end'], - sponsor_segment['start'], sponsor_segment['end']) - if prediction['best_overlap'] < j: - prediction['best_overlap'] = j - prediction['best_sponsorship'] = sponsor_segment - - return sponsor_segments - - -def calculate_metrics(labelled_words, predictions): - - metrics = { - 'true_positive': 0, # Is sponsor, predicted sponsor - # Is sponsor, predicted not sponsor (i.e., missed it - bad) - 'false_negative': 0, - # Is not sponsor, predicted sponsor (classified incorectly, not that bad since we do manual checking afterwards) - 'false_positive': 0, - 'true_negative': 0, # Is not sponsor, predicted not sponsor - } - - metrics['video_duration'] = word_end( - labelled_words[-1])-word_start(labelled_words[0]) - - for index, word in enumerate(labelled_words): - if index >= len(labelled_words) - 1: - continue - - duration = word_end(word) - word_start(word) - - predicted_sponsor = False - for p in predictions: - # Is in some prediction - if p['start'] <= word['start'] <= p['end']: - predicted_sponsor = True - break - - if predicted_sponsor: - # total_positive_time += duration - if word.get('category') is not None: # Is actual sponsor - metrics['true_positive'] += duration - else: - metrics['false_positive'] += duration - else: - # total_negative_time += duration - if word.get('category') is not None: # Is actual sponsor - metrics['false_negative'] += duration - else: - metrics['true_negative'] += duration - - # NOTE In cases where we encounter division by 0, we say that the value is 1 - # https://stats.stackexchange.com/a/1775 - # (Precision) TP+FP=0: means that all instances were predicted as negative - # (Recall) TP+FN=0: means that there were no positive cases in the input data - - # The fraction of predictions our model got right - # Can simplify, but use full formula - z = metrics['true_positive'] + metrics['true_negative'] + \ - metrics['false_positive'] + metrics['false_negative'] - metrics['accuracy'] = ( - (metrics['true_positive'] + metrics['true_negative']) / z) if z > 0 else 1 - - # What proportion of positive identifications was actually correct? - z = metrics['true_positive'] + metrics['false_positive'] - metrics['precision'] = (metrics['true_positive'] / z) if z > 0 else 1 - - # What proportion of actual positives was identified correctly? 
- z = metrics['true_positive'] + metrics['false_negative'] - metrics['recall'] = (metrics['true_positive'] / z) if z > 0 else 1 - - # https://deepai.org/machine-learning-glossary-and-terms/f-score - - s = metrics['precision'] + metrics['recall'] - metrics['f-score'] = (2 * (metrics['precision'] * - metrics['recall']) / s) if s > 0 else 0 - - return metrics - - -def main(): - logger.setLevel(logging.DEBUG) - - hf_parser = HfArgumentParser(( - EvaluationArguments, - DatasetArguments, - SegmentationArguments, - GeneralArguments - )) - - evaluation_args, dataset_args, segmentation_args, general_args = hf_parser.parse_args_into_dataclasses() - - if evaluation_args.skip_missing and evaluation_args.skip_incorrect: - logger.error('ERROR: Nothing to do') - return - - # Load labelled data: - final_path = os.path.join( - dataset_args.data_dir, dataset_args.processed_file) - - if not os.path.exists(final_path): - logger.error('ERROR: Processed database not found.\n' - f'Run `python src/preprocess.py --update_database --do_create` to generate "{final_path}".') - return - - model, tokenizer, classifier = get_model_tokenizer_classifier( - evaluation_args, general_args) - - with open(final_path) as fp: - final_data = json.load(fp) - - if evaluation_args.video_ids: # Use specified - video_ids = evaluation_args.video_ids - - else: # Use items found in preprocessed database - video_ids = list(final_data.keys()) - random.shuffle(video_ids) - - if evaluation_args.start_index is not None: - video_ids = video_ids[evaluation_args.start_index:] - - if evaluation_args.max_videos is not None: - video_ids = video_ids[:evaluation_args.max_videos] - - out_metrics = [] - - all_metrics = {} - if not evaluation_args.skip_missing: - all_metrics['total_prediction_accuracy'] = 0 - all_metrics['total_prediction_precision'] = 0 - all_metrics['total_prediction_recall'] = 0 - all_metrics['total_prediction_fscore'] = 0 - - if not evaluation_args.skip_incorrect: - all_metrics['classifier_segment_correct'] = 0 - all_metrics['classifier_segment_count'] = 0 - - metric_count = 0 - - postfix_info = {} - - try: - with tqdm(video_ids) as progress: - for video_index, video_id in enumerate(progress): - progress.set_description(f'Processing {video_id}') - - words = get_words(video_id) - if not words: - continue - - # Get labels - sponsor_segments = final_data.get(video_id) - - # Reset previous - missed_segments = [] - incorrect_segments = [] - - current_metrics = { - 'video_id': video_id - } - metric_count += 1 - - if not evaluation_args.skip_missing: # Make predictions - predictions = predict(video_id, model, tokenizer, segmentation_args, - classifier=classifier, - min_probability=evaluation_args.min_probability) - - if sponsor_segments: - labelled_words = add_labels_to_words( - words, sponsor_segments) - - current_metrics.update( - calculate_metrics(labelled_words, predictions)) - - all_metrics['total_prediction_accuracy'] += current_metrics['accuracy'] - all_metrics['total_prediction_precision'] += current_metrics['precision'] - all_metrics['total_prediction_recall'] += current_metrics['recall'] - all_metrics['total_prediction_fscore'] += current_metrics['f-score'] - - # Just for display purposes - postfix_info.update({ - 'accuracy': all_metrics['total_prediction_accuracy']/metric_count, - 'precision': all_metrics['total_prediction_precision']/metric_count, - 'recall': all_metrics['total_prediction_recall']/metric_count, - 'f-score': all_metrics['total_prediction_fscore']/metric_count, - }) - - sponsor_segments = 
attach_predictions_to_sponsor_segments( - predictions, sponsor_segments) - - # Identify possible issues: - for prediction in predictions: - if prediction['best_sponsorship'] is not None: - continue - - prediction_words = prediction.pop('words', []) - - # Attach original text to missed segments - prediction['text'] = ' '.join( - x['text'] for x in prediction_words) - missed_segments.append(prediction) - - else: - # Not in database (all segments missed) - missed_segments = predictions - - if not evaluation_args.skip_incorrect and sponsor_segments: - # Check for incorrect segments using the classifier - - segments_to_check = [] - cleaned_texts = [] # Texts to send through tokenizer - for sponsor_segment in sponsor_segments: - segment_words = extract_segment( - words, sponsor_segment['start'], sponsor_segment['end']) - sponsor_segment['text'] = ' '.join( - x['text'] for x in segment_words) - - duration = sponsor_segment['end'] - \ - sponsor_segment['start'] - wps = (len(segment_words) / - duration) if duration > 0 else 0 - if wps < 1.5: - continue - - # Do not worry about those that are locked or have enough votes - # or segment['votes'] > 5: - if sponsor_segment['locked']: - continue - - cleaned_texts.append( - clean_text(sponsor_segment['text'])) - segments_to_check.append(sponsor_segment) - - if segments_to_check: # Some segments to check - - segments_scores = classifier(cleaned_texts) - - num_correct = 0 - for segment, scores in zip(segments_to_check, segments_scores): - - fixed_scores = { - score['label']: score['score'] - for score in scores - } - - all_metrics['classifier_segment_count'] += 1 - - prediction = max(scores, key=lambda x: x['score']) - predicted_category = prediction['label'].lower() - - if predicted_category == segment['category']: - num_correct += 1 - continue # Ignore correct segments - - segment.update({ - 'predicted': predicted_category, - 'scores': fixed_scores - }) - - incorrect_segments.append(segment) - - current_metrics['num_segments'] = len( - segments_to_check) - current_metrics['classified_correct'] = num_correct - - all_metrics['classifier_segment_correct'] += num_correct - - if all_metrics['classifier_segment_count'] > 0: - postfix_info['classifier_accuracy'] = all_metrics['classifier_segment_correct'] / \ - all_metrics['classifier_segment_count'] - - out_metrics.append(current_metrics) - progress.set_postfix(postfix_info) - - if missed_segments or incorrect_segments: - - if evaluation_args.output_as_json: - to_print = {'video_id': video_id} - - if missed_segments: - to_print['missed'] = missed_segments - - if incorrect_segments: - to_print['incorrect'] = incorrect_segments - - safe_print(json.dumps(to_print)) - - else: - safe_print( - f'Issues identified for {video_id} (#{video_index})') - # Potentially missed segments (model predicted, but not in database) - if missed_segments: - safe_print(' - Missed segments:') - segments_to_submit = [] - for i, missed_segment in enumerate(missed_segments, start=1): - safe_print(f'\t#{i}:', seconds_to_time( - missed_segment['start']), '-->', seconds_to_time(missed_segment['end'])) - safe_print('\t\tText: "', - missed_segment['text'], '"', sep='') - safe_print('\t\tCategory:', - missed_segment.get('category')) - if 'probability' in missed_segment: - safe_print('\t\tProbability:', - missed_segment['probability']) - - segments_to_submit.append({ - 'segment': [missed_segment['start'], missed_segment['end']], - 'category': missed_segment['category'].lower(), - 'actionType': 'skip' - }) - - json_data = 
quote(json.dumps(segments_to_submit)) - safe_print( - f'\tSubmit: https://www.youtube.com/watch?v={video_id}#segments={json_data}') - - # Incorrect segments (in database, but incorrectly classified) - if incorrect_segments: - safe_print(' - Incorrect segments:') - for i, incorrect_segment in enumerate(incorrect_segments, start=1): - safe_print(f'\t#{i}:', seconds_to_time( - incorrect_segment['start']), '-->', seconds_to_time(incorrect_segment['end'])) - - safe_print( - '\t\tText: "', incorrect_segment['text'], '"', sep='') - safe_print( - '\t\tUUID:', incorrect_segment['uuid']) - safe_print( - '\t\tVotes:', incorrect_segment['votes']) - safe_print( - '\t\tViews:', incorrect_segment['views']) - safe_print('\t\tLocked:', - incorrect_segment['locked']) - - safe_print('\t\tCurrent Category:', - incorrect_segment['category']) - safe_print('\t\tPredicted Category:', - incorrect_segment['predicted']) - safe_print('\t\tProbabilities:') - for label, score in incorrect_segment['scores'].items(): - safe_print( - f"\t\t\t{label}: {score}") - - safe_print() - - except KeyboardInterrupt: - pass - - df = pd.DataFrame(out_metrics) - - df.to_csv(evaluation_args.output_file) - logger.info(df.mean()) - - -if __name__ == '__main__': - main() diff --git a/spaces/Xsciss/hakurei-waifu-diffusion/app.py b/spaces/Xsciss/hakurei-waifu-diffusion/app.py deleted file mode 100644 index ccef706bf3035fe470bf6a4f5bd701b18bf59133..0000000000000000000000000000000000000000 --- a/spaces/Xsciss/hakurei-waifu-diffusion/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/hakurei/waifu-diffusion").launch() \ No newline at end of file diff --git a/spaces/XzJosh/Ava-Bert-VITS2/monotonic_align/core.py b/spaces/XzJosh/Ava-Bert-VITS2/monotonic_align/core.py deleted file mode 100644 index dddc688d76172b880054e544b7a217acd013f14f..0000000000000000000000000000000000000000 --- a/spaces/XzJosh/Ava-Bert-VITS2/monotonic_align/core.py +++ /dev/null @@ -1,35 +0,0 @@ -import numba - - -@numba.jit(numba.void(numba.int32[:,:,::1], numba.float32[:,:,::1], numba.int32[::1], numba.int32[::1]), nopython=True, nogil=True) -def maximum_path_jit(paths, values, t_ys, t_xs): - b = paths.shape[0] - max_neg_val=-1e9 - for i in range(int(b)): - path = paths[i] - value = values[i] - t_y = t_ys[i] - t_x = t_xs[i] - - v_prev = v_cur = 0.0 - index = t_x - 1 - - for y in range(t_y): - for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): - if x == y: - v_cur = max_neg_val - else: - v_cur = value[y-1, x] - if x == 0: - if y == 0: - v_prev = 0. 
- else: - v_prev = max_neg_val - else: - v_prev = value[y-1, x-1] - value[y, x] += max(v_prev, v_cur) - - for y in range(t_y - 1, -1, -1): - path[y, index] = 1 - if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]): - index = index - 1 diff --git a/spaces/XzJosh/nanami-Bert-VITS2/modules.py b/spaces/XzJosh/nanami-Bert-VITS2/modules.py deleted file mode 100644 index 92e0f32a51c472bfd1659a50a95a95d195281d2b..0000000000000000000000000000000000000000 --- a/spaces/XzJosh/nanami-Bert-VITS2/modules.py +++ /dev/null @@ -1,452 +0,0 @@ -import copy -import math -import numpy as np -import scipy -import torch -from torch import nn -from torch.nn import functional as F - -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm - -import commons -from commons import init_weights, get_padding -from transforms import piecewise_rational_quadratic_transform -from attentions import Encoder - -LRELU_SLOPE = 0.1 - -class LayerNorm(nn.Module): - def __init__(self, channels, eps=1e-5): - super().__init__() - self.channels = channels - self.eps = eps - - self.gamma = nn.Parameter(torch.ones(channels)) - self.beta = nn.Parameter(torch.zeros(channels)) - - def forward(self, x): - x = x.transpose(1, -1) - x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps) - return x.transpose(1, -1) - -class ConvReluNorm(nn.Module): - def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout): - super().__init__() - self.in_channels = in_channels - self.hidden_channels = hidden_channels - self.out_channels = out_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - assert n_layers > 1, "Number of layers should be larger than 0." 
- - self.conv_layers = nn.ModuleList() - self.norm_layers = nn.ModuleList() - self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size//2)) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.relu_drop = nn.Sequential( - nn.ReLU(), - nn.Dropout(p_dropout)) - for _ in range(n_layers-1): - self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size//2)) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask): - x_org = x - for i in range(self.n_layers): - x = self.conv_layers[i](x * x_mask) - x = self.norm_layers[i](x) - x = self.relu_drop(x) - x = x_org + self.proj(x) - return x * x_mask - - -class DDSConv(nn.Module): - """ - Dialted and Depth-Separable Convolution - """ - def __init__(self, channels, kernel_size, n_layers, p_dropout=0.): - super().__init__() - self.channels = channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - - self.drop = nn.Dropout(p_dropout) - self.convs_sep = nn.ModuleList() - self.convs_1x1 = nn.ModuleList() - self.norms_1 = nn.ModuleList() - self.norms_2 = nn.ModuleList() - for i in range(n_layers): - dilation = kernel_size ** i - padding = (kernel_size * dilation - dilation) // 2 - self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size, - groups=channels, dilation=dilation, padding=padding - )) - self.convs_1x1.append(nn.Conv1d(channels, channels, 1)) - self.norms_1.append(LayerNorm(channels)) - self.norms_2.append(LayerNorm(channels)) - - def forward(self, x, x_mask, g=None): - if g is not None: - x = x + g - for i in range(self.n_layers): - y = self.convs_sep[i](x * x_mask) - y = self.norms_1[i](y) - y = F.gelu(y) - y = self.convs_1x1[i](y) - y = self.norms_2[i](y) - y = F.gelu(y) - y = self.drop(y) - x = x + y - return x * x_mask - - -class WN(torch.nn.Module): - def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0): - super(WN, self).__init__() - assert(kernel_size % 2 == 1) - self.hidden_channels =hidden_channels - self.kernel_size = kernel_size, - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - self.p_dropout = p_dropout - - self.in_layers = torch.nn.ModuleList() - self.res_skip_layers = torch.nn.ModuleList() - self.drop = nn.Dropout(p_dropout) - - if gin_channels != 0: - cond_layer = torch.nn.Conv1d(gin_channels, 2*hidden_channels*n_layers, 1) - self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight') - - for i in range(n_layers): - dilation = dilation_rate ** i - padding = int((kernel_size * dilation - dilation) / 2) - in_layer = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, kernel_size, - dilation=dilation, padding=padding) - in_layer = torch.nn.utils.weight_norm(in_layer, name='weight') - self.in_layers.append(in_layer) - - # last one is not necessary - if i < n_layers - 1: - res_skip_channels = 2 * hidden_channels - else: - res_skip_channels = hidden_channels - - res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1) - res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight') - self.res_skip_layers.append(res_skip_layer) - - def forward(self, x, x_mask, g=None, **kwargs): - output = torch.zeros_like(x) - n_channels_tensor = torch.IntTensor([self.hidden_channels]) - - if g is not None: - g = self.cond_layer(g) 
- - for i in range(self.n_layers): - x_in = self.in_layers[i](x) - if g is not None: - cond_offset = i * 2 * self.hidden_channels - g_l = g[:,cond_offset:cond_offset+2*self.hidden_channels,:] - else: - g_l = torch.zeros_like(x_in) - - acts = commons.fused_add_tanh_sigmoid_multiply( - x_in, - g_l, - n_channels_tensor) - acts = self.drop(acts) - - res_skip_acts = self.res_skip_layers[i](acts) - if i < self.n_layers - 1: - res_acts = res_skip_acts[:,:self.hidden_channels,:] - x = (x + res_acts) * x_mask - output = output + res_skip_acts[:,self.hidden_channels:,:] - else: - output = output + res_skip_acts - return output * x_mask - - def remove_weight_norm(self): - if self.gin_channels != 0: - torch.nn.utils.remove_weight_norm(self.cond_layer) - for l in self.in_layers: - torch.nn.utils.remove_weight_norm(l) - for l in self.res_skip_layers: - torch.nn.utils.remove_weight_norm(l) - - -class ResBlock1(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): - super(ResBlock1, self).__init__() - self.convs1 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2], - padding=get_padding(kernel_size, dilation[2]))) - ]) - self.convs1.apply(init_weights) - - self.convs2 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))) - ]) - self.convs2.apply(init_weights) - - def forward(self, x, x_mask=None): - for c1, c2 in zip(self.convs1, self.convs2): - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c1(xt) - xt = F.leaky_relu(xt, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c2(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs1: - remove_weight_norm(l) - for l in self.convs2: - remove_weight_norm(l) - - -class ResBlock2(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3)): - super(ResBlock2, self).__init__() - self.convs = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))) - ]) - self.convs.apply(init_weights) - - def forward(self, x, x_mask=None): - for c in self.convs: - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs: - remove_weight_norm(l) - - -class Log(nn.Module): - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask - logdet = torch.sum(-y, [1, 2]) - return y, logdet - else: - x = torch.exp(x) * x_mask - return x - - -class Flip(nn.Module): - def forward(self, x, *args, reverse=False, **kwargs): - x = torch.flip(x, [1]) - if not reverse: - logdet = 
torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device) - return x, logdet - else: - return x - - -class ElementwiseAffine(nn.Module): - def __init__(self, channels): - super().__init__() - self.channels = channels - self.m = nn.Parameter(torch.zeros(channels,1)) - self.logs = nn.Parameter(torch.zeros(channels,1)) - - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = self.m + torch.exp(self.logs) * x - y = y * x_mask - logdet = torch.sum(self.logs * x_mask, [1,2]) - return y, logdet - else: - x = (x - self.m) * torch.exp(-self.logs) * x_mask - return x - - -class ResidualCouplingLayer(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=0, - gin_channels=0, - mean_only=False): - assert channels % 2 == 0, "channels should be divisible by 2" - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.half_channels = channels // 2 - self.mean_only = mean_only - - self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) - self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels) - self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) - self.post.weight.data.zero_() - self.post.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels]*2, 1) - h = self.pre(x0) * x_mask - h = self.enc(h, x_mask, g=g) - stats = self.post(h) * x_mask - if not self.mean_only: - m, logs = torch.split(stats, [self.half_channels]*2, 1) - else: - m = stats - logs = torch.zeros_like(m) - - if not reverse: - x1 = m + x1 * torch.exp(logs) * x_mask - x = torch.cat([x0, x1], 1) - logdet = torch.sum(logs, [1,2]) - return x, logdet - else: - x1 = (x1 - m) * torch.exp(-logs) * x_mask - x = torch.cat([x0, x1], 1) - return x - - -class ConvFlow(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0): - super().__init__() - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.num_bins = num_bins - self.tail_bound = tail_bound - self.half_channels = in_channels // 2 - - self.pre = nn.Conv1d(self.half_channels, filter_channels, 1) - self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.) - self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels]*2, 1) - h = self.pre(x0) - h = self.convs(h, x_mask, g=g) - h = self.proj(h) * x_mask - - b, c, t = x0.shape - h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?] 
- - unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_heights = h[..., self.num_bins:2*self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_derivatives = h[..., 2 * self.num_bins:] - - x1, logabsdet = piecewise_rational_quadratic_transform(x1, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=reverse, - tails='linear', - tail_bound=self.tail_bound - ) - - x = torch.cat([x0, x1], 1) * x_mask - logdet = torch.sum(logabsdet * x_mask, [1,2]) - if not reverse: - return x, logdet - else: - return x -class TransformerCouplingLayer(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - n_layers, - n_heads, - p_dropout=0, - filter_channels=0, - mean_only=False, - wn_sharing_parameter=None, - gin_channels = 0 - ): - assert channels % 2 == 0, "channels should be divisible by 2" - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.half_channels = channels // 2 - self.mean_only = mean_only - - self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) - self.enc = Encoder(hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout, isflow = True, gin_channels = gin_channels) if wn_sharing_parameter is None else wn_sharing_parameter - self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) - self.post.weight.data.zero_() - self.post.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels]*2, 1) - h = self.pre(x0) * x_mask - h = self.enc(h, x_mask, g=g) - stats = self.post(h) * x_mask - if not self.mean_only: - m, logs = torch.split(stats, [self.half_channels]*2, 1) - else: - m = stats - logs = torch.zeros_like(m) - - if not reverse: - x1 = m + x1 * torch.exp(logs) * x_mask - x = torch.cat([x0, x1], 1) - logdet = torch.sum(logs, [1,2]) - return x, logdet - else: - x1 = (x1 - m) * torch.exp(-logs) * x_mask - x = torch.cat([x0, x1], 1) - return x - - x1, logabsdet = piecewise_rational_quadratic_transform(x1, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=reverse, - tails='linear', - tail_bound=self.tail_bound - ) - - x = torch.cat([x0, x1], 1) * x_mask - logdet = torch.sum(logabsdet * x_mask, [1,2]) - if not reverse: - return x, logdet - else: - return x diff --git a/spaces/YONG627/456123/yolov5-code-main/utils/loggers/comet/hpo.py b/spaces/YONG627/456123/yolov5-code-main/utils/loggers/comet/hpo.py deleted file mode 100644 index fc49115c13581554bebe1ddddaf3d5e10caaae07..0000000000000000000000000000000000000000 --- a/spaces/YONG627/456123/yolov5-code-main/utils/loggers/comet/hpo.py +++ /dev/null @@ -1,118 +0,0 @@ -import argparse -import json -import logging -import os -import sys -from pathlib import Path - -import comet_ml - -logger = logging.getLogger(__name__) - -FILE = Path(__file__).resolve() -ROOT = FILE.parents[3] # YOLOv5 root directory -if str(ROOT) not in sys.path: - sys.path.append(str(ROOT)) # add ROOT to PATH - -from train import train -from utils.callbacks import Callbacks -from utils.general import increment_path -from utils.torch_utils import select_device - -# Project Configuration -config = comet_ml.config.get_config() -COMET_PROJECT_NAME = config.get_string(os.getenv('COMET_PROJECT_NAME'), 'comet.project_name', default='yolov5') - - -def get_args(known=False): - parser = argparse.ArgumentParser() - parser.add_argument('--weights', 
type=str, default=ROOT / 'yolov5s.pt', help='initial weights path') - parser.add_argument('--cfg', type=str, default='', help='model.yaml path') - parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path') - parser.add_argument('--hyp', type=str, default=ROOT / 'data/hyps/hyp.scratch-low.yaml', help='hyperparameters path') - parser.add_argument('--epochs', type=int, default=300, help='total training epochs') - parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs, -1 for autobatch') - parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)') - parser.add_argument('--rect', action='store_true', help='rectangular training') - parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training') - parser.add_argument('--nosave', action='store_true', help='only save final checkpoint') - parser.add_argument('--noval', action='store_true', help='only validate final epoch') - parser.add_argument('--noautoanchor', action='store_true', help='disable AutoAnchor') - parser.add_argument('--noplots', action='store_true', help='save no plot files') - parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations') - parser.add_argument('--bucket', type=str, default='', help='gsutil bucket') - parser.add_argument('--cache', type=str, nargs='?', const='ram', help='--cache images in "ram" (default) or "disk"') - parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training') - parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') - parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%') - parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class') - parser.add_argument('--optimizer', type=str, choices=['SGD', 'Adam', 'AdamW'], default='SGD', help='optimizer') - parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode') - parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') - parser.add_argument('--project', default=ROOT / 'runs/train', help='save to project/name') - parser.add_argument('--name', default='exp', help='save to project/name') - parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') - parser.add_argument('--quad', action='store_true', help='quad dataloader') - parser.add_argument('--cos-lr', action='store_true', help='cosine LR scheduler') - parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon') - parser.add_argument('--patience', type=int, default=100, help='EarlyStopping patience (epochs without improvement)') - parser.add_argument('--freeze', nargs='+', type=int, default=[0], help='Freeze layers: backbone=10, first3=0 1 2') - parser.add_argument('--save-period', type=int, default=-1, help='Save checkpoint every x epochs (disabled if < 1)') - parser.add_argument('--seed', type=int, default=0, help='Global training seed') - parser.add_argument('--local_rank', type=int, default=-1, help='Automatic DDP Multi-GPU argument, do not modify') - - # Weights & Biases arguments - parser.add_argument('--entity', default=None, help='W&B: Entity') - parser.add_argument('--upload_dataset', nargs='?', const=True, default=False, 
help='W&B: Upload data, "val" option') - parser.add_argument('--bbox_interval', type=int, default=-1, help='W&B: Set bounding-box image logging interval') - parser.add_argument('--artifact_alias', type=str, default='latest', help='W&B: Version of dataset artifact to use') - - # Comet Arguments - parser.add_argument('--comet_optimizer_config', type=str, help='Comet: Path to a Comet Optimizer Config File.') - parser.add_argument('--comet_optimizer_id', type=str, help='Comet: ID of the Comet Optimizer sweep.') - parser.add_argument('--comet_optimizer_objective', type=str, help="Comet: Set to 'minimize' or 'maximize'.") - parser.add_argument('--comet_optimizer_metric', type=str, help='Comet: Metric to Optimize.') - parser.add_argument('--comet_optimizer_workers', - type=int, - default=1, - help='Comet: Number of Parallel Workers to use with the Comet Optimizer.') - - return parser.parse_known_args()[0] if known else parser.parse_args() - - -def run(parameters, opt): - hyp_dict = {k: v for k, v in parameters.items() if k not in ['epochs', 'batch_size']} - - opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok or opt.evolve)) - opt.batch_size = parameters.get('batch_size') - opt.epochs = parameters.get('epochs') - - device = select_device(opt.device, batch_size=opt.batch_size) - train(hyp_dict, opt, device, callbacks=Callbacks()) - - -if __name__ == '__main__': - opt = get_args(known=True) - - opt.weights = str(opt.weights) - opt.cfg = str(opt.cfg) - opt.data = str(opt.data) - opt.project = str(opt.project) - - optimizer_id = os.getenv('COMET_OPTIMIZER_ID') - if optimizer_id is None: - with open(opt.comet_optimizer_config) as f: - optimizer_config = json.load(f) - optimizer = comet_ml.Optimizer(optimizer_config) - else: - optimizer = comet_ml.Optimizer(optimizer_id) - - opt.comet_optimizer_id = optimizer.id - status = optimizer.status() - - opt.comet_optimizer_objective = status['spec']['objective'] - opt.comet_optimizer_metric = status['spec']['metric'] - - logger.info('COMET INFO: Starting Hyperparameter Sweep') - for parameter in optimizer.get_parameters(): - run(parameter['parameters'], opt) diff --git a/spaces/YuhangDeng123/Whisper-offline/app.py b/spaces/YuhangDeng123/Whisper-offline/app.py deleted file mode 100644 index 1dae89251b051a15c7bfea477bccd8b2ce53d027..0000000000000000000000000000000000000000 --- a/spaces/YuhangDeng123/Whisper-offline/app.py +++ /dev/null @@ -1,86 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Created on Thu Dec 8 17:37:44 2022 - -@author: Kenneth -""" - -from transformers import pipeline -import gradio as gr -from pytube import YouTube -from datasets import Dataset, Audio -import os -from moviepy.editor import AudioFileClip -import time -import requests - -pipe = pipeline(model="YuhangDeng123/whisper-small-hi") # change to "your-username/the-name-you-picked" - -def transcribe(audio): - text = pipe(audio)["text"] - return text - -def DownloadFile(mp3_url): - file_name = 'test.mp3' - res = requests.get(mp3_url + "?raw=true") - music = res.content - # 获取文件地址 - file_path = os.path.join(os.getcwd(), file_name) - with open(file_path, 'ab') as file: #保存到本地的文件名 - file.write(res.content) - file.flush() - return file_path - -def convert_to_wav(path): - - audio = AudioFileClip(path) - audio_frame = audio.subclip(0, -2) - audio_frame.write_audiofile(f"audio.wav") - return f"audio.wav" - -def url_transcribe(url): - path = DownloadFile(url) - path_wav = convert_to_wav(path) - audio_dataset = Dataset.from_dict({"audio": 
[path_wav]}).cast_column("audio", Audio(sampling_rate=16000))
-    text = pipe(audio_dataset["audio"])
-    os.remove(path)
-    return text[0]["text"]
-
-
-
-with gr.Blocks() as demo:
-    gr.Markdown("Whisper-Small Cantonese Recognition")
-    with gr.Row():
-        with gr.TabItem("Upload An Audio File"):
-            upload_file = gr.Audio(source="upload", type="filepath", label="Upload An Audio File")
-            upload_button = gr.Button("Submit")
-            upload_outputs = [gr.Textbox(label="Recognized result from uploaded audio file"),]
-    with gr.Row():
-        with gr.TabItem("Record from Microphone"):
-            record_file = gr.Audio(source="microphone", type="filepath", label="Record from microphone")
-            record_button = gr.Button("Submit")
-            record_outputs = [gr.Textbox(label="Recognized result from Microphone"),]
-    with gr.Row():
-        with gr.TabItem("Transcribe from GitHub URL"):
-            url = gr.Text(max_lines=1, label="Transcribe from GitHub URL")
-            Github_button = gr.Button("Submit")
-            Github_outputs = [
-                gr.Textbox(label="Recognized speech from GitHub URL")
-            ]
-    upload_button.click(
-        fn=transcribe,
-        inputs=upload_file,
-        outputs=upload_outputs,
-    )
-
-    record_button.click(
-        fn=transcribe,
-        inputs=record_file,
-        outputs=record_outputs,
-    )
-    Github_button.click(
-        fn=url_transcribe,
-        inputs=url,
-        outputs=Github_outputs,
-    )
-demo.launch()
\ No newline at end of file
diff --git a/spaces/aaronW/PaddlePaddle-plato-mini/README.md b/spaces/aaronW/PaddlePaddle-plato-mini/README.md
deleted file mode 100644
index f9be899efcfb81aa1456f1b4adbd8b8a242a93e5..0000000000000000000000000000000000000000
--- a/spaces/aaronW/PaddlePaddle-plato-mini/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: PaddlePaddle Plato Mini
-emoji: 💻
-colorFrom: green
-colorTo: red
-sdk: streamlit
-sdk_version: 1.15.2
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/abdvl/datahub_qa_bot/docs/what/search-index.md b/spaces/abdvl/datahub_qa_bot/docs/what/search-index.md
deleted file mode 100644
index 8a41a2fb15c967b344c3d18dfb0a7767ebd5085a..0000000000000000000000000000000000000000
--- a/spaces/abdvl/datahub_qa_bot/docs/what/search-index.md
+++ /dev/null
@@ -1,17 +0,0 @@
-# What is GMA search index?
-
-Each [search document](search-document.md) type (or [entity](entity.md) type) will be mapped to an independent search index in Elasticsearch.
-Beyond the standard search engine features (analyzer, tokenizer, filter queries, faceting, sharding, etc),
-GMA also supports the following specific features:
-* Partial update of indexed documents
-* Membership testing on multi-value fields
-* Zero downtime switch between indices
-
-Check out [Search DAO](../architecture/metadata-serving.md#search-dao) for search query abstraction in GMA.
-
-## Search Automation (TBD)
-
-We aim to automate the index creation, schema evolution, and reindexing such that the team will only need to focus on the search document model and their custom [Index Builder](../architecture/metadata-ingestion.md#search-index-builders) logic.
-As the logic changes, a new version of the index will be created and populated from historic MAEs.
-Once it’s fully populated, the team can switch to the new version through a simple config change from their [GMS](gms.md).
-They can also roll back to an older version of the index whenever needed.
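The zero-downtime switch described above is usually realized with index aliases rather than by renaming indices. A minimal sketch with the `elasticsearch` Python client (the cluster URL, index, and alias names are hypothetical, not part of GMA):

```python
# Hypothetical sketch: atomically repoint an alias from the old index version
# to the new one, so readers never observe a missing or half-built index.
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # assumed local cluster

def switch_index(alias, old_index, new_index):
    es.indices.update_aliases(body={
        "actions": [
            {"remove": {"index": old_index, "alias": alias}},
            {"add": {"index": new_index, "alias": alias}},
        ]
    })

switch_index("documents", "documents-v1", "documents-v2")
```

Rolling back is the same call with the index names swapped.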
\ No newline at end of file
diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/core/bbox/samplers/__init__.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/core/bbox/samplers/__init__.py
deleted file mode 100644
index 0b06303fe1000e11c5486c40c70606a34a5208e3..0000000000000000000000000000000000000000
--- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/core/bbox/samplers/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-from .base_sampler import BaseSampler
-from .combined_sampler import CombinedSampler
-from .instance_balanced_pos_sampler import InstanceBalancedPosSampler
-from .iou_balanced_neg_sampler import IoUBalancedNegSampler
-from .ohem_sampler import OHEMSampler
-from .pseudo_sampler import PseudoSampler
-from .random_sampler import RandomSampler
-from .sampling_result import SamplingResult
-from .score_hlr_sampler import ScoreHLRSampler
-
-__all__ = [
-    'BaseSampler', 'PseudoSampler', 'RandomSampler',
-    'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler',
-    'OHEMSampler', 'SamplingResult', 'ScoreHLRSampler'
-]
diff --git a/spaces/abrar-lohia/text-2-character-anim/pyrender/build/lib/pyrender/renderer.py b/spaces/abrar-lohia/text-2-character-anim/pyrender/build/lib/pyrender/renderer.py
deleted file mode 100644
index 5ae14c5cdb1785226a52ae6b71b08f01de069962..0000000000000000000000000000000000000000
--- a/spaces/abrar-lohia/text-2-character-anim/pyrender/build/lib/pyrender/renderer.py
+++ /dev/null
@@ -1,1339 +0,0 @@
-"""PBR renderer for Python.
-
-Author: Matthew Matl
-"""
-import sys
-
-import numpy as np
-import PIL
-
-from .constants import (RenderFlags, TextAlign, GLTF, BufFlags, TexFlags,
-                        ProgramFlags, DEFAULT_Z_FAR, DEFAULT_Z_NEAR,
-                        SHADOW_TEX_SZ, MAX_N_LIGHTS)
-from .shader_program import ShaderProgramCache
-from .material import MetallicRoughnessMaterial, SpecularGlossinessMaterial
-from .light import PointLight, SpotLight, DirectionalLight
-from .font import FontCache
-from .utils import format_color_vector
-
-from OpenGL.GL import *
-
-
-class Renderer(object):
-    """Class for handling all rendering operations on a scene.
-
-    Note
-    ----
-    This renderer relies on the existence of an OpenGL context and
-    does not create one on its own.
-
-    Parameters
-    ----------
-    viewport_width : int
-        Width of the viewport, in pixels.
-    viewport_height : int
-        Height of the viewport, in pixels.
-    point_size : float, optional
-        Size of points in pixels. Defaults to 1.0.
-    """
-
-    def __init__(self, viewport_width, viewport_height, point_size=1.0):
-        self.dpscale = 1
-        # Scaling needed on retina displays
-        if sys.platform == 'darwin':
-            self.dpscale = 2
-
-        self.viewport_width = viewport_width
-        self.viewport_height = viewport_height
-        self.point_size = point_size
-
-        # Optional framebuffer for offscreen renders
-        self._main_fb = None
-        self._main_cb = None
-        self._main_db = None
-        self._main_fb_ms = None
-        self._main_cb_ms = None
-        self._main_db_ms = None
-        self._main_fb_dims = (None, None)
-        self._shadow_fb = None
-        self._latest_znear = DEFAULT_Z_NEAR
-        self._latest_zfar = DEFAULT_Z_FAR
-
-        # Shader Program Cache
-        self._program_cache = ShaderProgramCache()
-        self._font_cache = FontCache()
-        self._meshes = set()
-        self._mesh_textures = set()
-        self._shadow_textures = set()
-        self._texture_alloc_idx = 0
-
-    @property
-    def viewport_width(self):
-        """int : The width of the main viewport, in pixels.
- """ - return self._viewport_width - - @viewport_width.setter - def viewport_width(self, value): - self._viewport_width = self.dpscale * value - - @property - def viewport_height(self): - """int : The height of the main viewport, in pixels. - """ - return self._viewport_height - - @viewport_height.setter - def viewport_height(self, value): - self._viewport_height = self.dpscale * value - - @property - def point_size(self): - """float : The size of screen-space points, in pixels. - """ - return self._point_size - - @point_size.setter - def point_size(self, value): - self._point_size = float(value) - - def render(self, scene, flags, seg_node_map=None): - """Render a scene with the given set of flags. - - Parameters - ---------- - scene : :class:`Scene` - A scene to render. - flags : int - A specification from :class:`.RenderFlags`. - seg_node_map : dict - A map from :class:`.Node` objects to (3,) colors for each. - If specified along with flags set to :attr:`.RenderFlags.SEG`, - the color image will be a segmentation image. - - Returns - ------- - color_im : (h, w, 3) uint8 or (h, w, 4) uint8 - If :attr:`RenderFlags.OFFSCREEN` is set, the color buffer. This is - normally an RGB buffer, but if :attr:`.RenderFlags.RGBA` is set, - the buffer will be a full RGBA buffer. - depth_im : (h, w) float32 - If :attr:`RenderFlags.OFFSCREEN` is set, the depth buffer - in linear units. - """ - # Update context with meshes and textures - self._update_context(scene, flags) - - # Render necessary shadow maps - if not bool(flags & RenderFlags.DEPTH_ONLY or flags & RenderFlags.SEG): - for ln in scene.light_nodes: - take_pass = False - if (isinstance(ln.light, DirectionalLight) and - bool(flags & RenderFlags.SHADOWS_DIRECTIONAL)): - take_pass = True - elif (isinstance(ln.light, SpotLight) and - bool(flags & RenderFlags.SHADOWS_SPOT)): - take_pass = True - elif (isinstance(ln.light, PointLight) and - bool(flags & RenderFlags.SHADOWS_POINT)): - take_pass = True - if take_pass: - self._shadow_mapping_pass(scene, ln, flags) - - # Make forward pass - retval = self._forward_pass(scene, flags, seg_node_map=seg_node_map) - - # If necessary, make normals pass - if flags & (RenderFlags.VERTEX_NORMALS | RenderFlags.FACE_NORMALS): - self._normals_pass(scene, flags) - - # Update camera settings for retrieving depth buffers - self._latest_znear = scene.main_camera_node.camera.znear - self._latest_zfar = scene.main_camera_node.camera.zfar - - return retval - - def render_text(self, text, x, y, font_name='OpenSans-Regular', - font_pt=40, color=None, scale=1.0, - align=TextAlign.BOTTOM_LEFT): - """Render text into the current viewport. - - Note - ---- - This cannot be done into an offscreen buffer. - - Parameters - ---------- - text : str - The text to render. - x : int - Horizontal pixel location of text. - y : int - Vertical pixel location of text. - font_name : str - Name of font, from the ``pyrender/fonts`` folder, or - a path to a ``.ttf`` file. - font_pt : int - Height of the text, in font points. - color : (4,) float - The color of the text. Default is black. - scale : int - Scaling factor for text. - align : int - One of the :class:`TextAlign` options which specifies where the - ``x`` and ``y`` parameters lie on the text. For example, - :attr:`TextAlign.BOTTOM_LEFT` means that ``x`` and ``y`` indicate - the position of the bottom-left corner of the textbox. 
- """ - x *= self.dpscale - y *= self.dpscale - font_pt *= self.dpscale - - if color is None: - color = np.array([0.0, 0.0, 0.0, 1.0]) - else: - color = format_color_vector(color, 4) - - # Set up viewport for render - self._configure_forward_pass_viewport(0) - - # Load font - font = self._font_cache.get_font(font_name, font_pt) - if not font._in_context(): - font._add_to_context() - - # Load program - program = self._get_text_program() - program._bind() - - # Set uniforms - p = np.eye(4) - p[0,0] = 2.0 / self.viewport_width - p[0,3] = -1.0 - p[1,1] = 2.0 / self.viewport_height - p[1,3] = -1.0 - program.set_uniform('projection', p) - program.set_uniform('text_color', color) - - # Draw text - font.render_string(text, x, y, scale, align) - - def read_color_buf(self): - """Read and return the current viewport's color buffer. - - Alpha cannot be computed for an on-screen buffer. - - Returns - ------- - color_im : (h, w, 3) uint8 - The color buffer in RGB byte format. - """ - # Extract color image from frame buffer - width, height = self.viewport_width, self.viewport_height - glBindFramebuffer(GL_READ_FRAMEBUFFER, 0) - glReadBuffer(GL_FRONT) - color_buf = glReadPixels(0, 0, width, height, GL_RGB, GL_UNSIGNED_BYTE) - - # Re-format them into numpy arrays - color_im = np.frombuffer(color_buf, dtype=np.uint8) - color_im = color_im.reshape((height, width, 3)) - color_im = np.flip(color_im, axis=0) - - # Resize for macos if needed - if sys.platform == 'darwin': - color_im = self._resize_image(color_im, True) - - return color_im - - def read_depth_buf(self): - """Read and return the current viewport's color buffer. - - Returns - ------- - depth_im : (h, w) float32 - The depth buffer in linear units. - """ - width, height = self.viewport_width, self.viewport_height - glBindFramebuffer(GL_READ_FRAMEBUFFER, 0) - glReadBuffer(GL_FRONT) - depth_buf = glReadPixels( - 0, 0, width, height, GL_DEPTH_COMPONENT, GL_FLOAT - ) - - depth_im = np.frombuffer(depth_buf, dtype=np.float32) - depth_im = depth_im.reshape((height, width)) - depth_im = np.flip(depth_im, axis=0) - - inf_inds = (depth_im == 1.0) - depth_im = 2.0 * depth_im - 1.0 - z_near, z_far = self._latest_znear, self._latest_zfar - noninf = np.logical_not(inf_inds) - if z_far is None: - depth_im[noninf] = 2 * z_near / (1.0 - depth_im[noninf]) - else: - depth_im[noninf] = ((2.0 * z_near * z_far) / - (z_far + z_near - depth_im[noninf] * - (z_far - z_near))) - depth_im[inf_inds] = 0.0 - - # Resize for macos if needed - if sys.platform == 'darwin': - depth_im = self._resize_image(depth_im) - - return depth_im - - def delete(self): - """Free all allocated OpenGL resources. 
- """ - # Free shaders - self._program_cache.clear() - - # Free fonts - self._font_cache.clear() - - # Free meshes - for mesh in self._meshes: - for p in mesh.primitives: - p.delete() - - # Free textures - for mesh_texture in self._mesh_textures: - mesh_texture.delete() - - for shadow_texture in self._shadow_textures: - shadow_texture.delete() - - self._meshes = set() - self._mesh_textures = set() - self._shadow_textures = set() - self._texture_alloc_idx = 0 - - self._delete_main_framebuffer() - self._delete_shadow_framebuffer() - - def __del__(self): - try: - self.delete() - except Exception: - pass - - ########################################################################### - # Rendering passes - ########################################################################### - - def _forward_pass(self, scene, flags, seg_node_map=None): - # Set up viewport for render - self._configure_forward_pass_viewport(flags) - - # Clear it - if bool(flags & RenderFlags.SEG): - glClearColor(0.0, 0.0, 0.0, 1.0) - if seg_node_map is None: - seg_node_map = {} - else: - glClearColor(*scene.bg_color) - - glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT) - - if not bool(flags & RenderFlags.SEG): - glEnable(GL_MULTISAMPLE) - else: - glDisable(GL_MULTISAMPLE) - - # Set up camera matrices - V, P = self._get_camera_matrices(scene) - - program = None - # Now, render each object in sorted order - for node in self._sorted_mesh_nodes(scene): - mesh = node.mesh - - # Skip the mesh if it's not visible - if not mesh.is_visible: - continue - - # If SEG, set color - if bool(flags & RenderFlags.SEG): - if node not in seg_node_map: - continue - color = seg_node_map[node] - if not isinstance(color, (list, tuple, np.ndarray)): - color = np.repeat(color, 3) - else: - color = np.asanyarray(color) - color = color / 255.0 - - for primitive in mesh.primitives: - - # First, get and bind the appropriate program - program = self._get_primitive_program( - primitive, flags, ProgramFlags.USE_MATERIAL - ) - program._bind() - - # Set the camera uniforms - program.set_uniform('V', V) - program.set_uniform('P', P) - program.set_uniform( - 'cam_pos', scene.get_pose(scene.main_camera_node)[:3,3] - ) - if bool(flags & RenderFlags.SEG): - program.set_uniform('color', color) - - # Next, bind the lighting - if not (flags & RenderFlags.DEPTH_ONLY or flags & RenderFlags.FLAT or - flags & RenderFlags.SEG): - self._bind_lighting(scene, program, node, flags) - - # Finally, bind and draw the primitive - self._bind_and_draw_primitive( - primitive=primitive, - pose=scene.get_pose(node), - program=program, - flags=flags - ) - self._reset_active_textures() - - # Unbind the shader and flush the output - if program is not None: - program._unbind() - glFlush() - - # If doing offscreen render, copy result from framebuffer and return - if flags & RenderFlags.OFFSCREEN: - return self._read_main_framebuffer(scene, flags) - else: - return - - def _shadow_mapping_pass(self, scene, light_node, flags): - light = light_node.light - - # Set up viewport for render - self._configure_shadow_mapping_viewport(light, flags) - - # Set up camera matrices - V, P = self._get_light_cam_matrices(scene, light_node, flags) - - # Now, render each object in sorted order - for node in self._sorted_mesh_nodes(scene): - mesh = node.mesh - - # Skip the mesh if it's not visible - if not mesh.is_visible: - continue - - for primitive in mesh.primitives: - - # First, get and bind the appropriate program - program = self._get_primitive_program( - primitive, flags, ProgramFlags.NONE - ) - 
program._bind() - - # Set the camera uniforms - program.set_uniform('V', V) - program.set_uniform('P', P) - program.set_uniform( - 'cam_pos', scene.get_pose(scene.main_camera_node)[:3,3] - ) - - # Finally, bind and draw the primitive - self._bind_and_draw_primitive( - primitive=primitive, - pose=scene.get_pose(node), - program=program, - flags=RenderFlags.DEPTH_ONLY - ) - self._reset_active_textures() - - # Unbind the shader and flush the output - if program is not None: - program._unbind() - glFlush() - - def _normals_pass(self, scene, flags): - # Set up viewport for render - self._configure_forward_pass_viewport(flags) - program = None - - # Set up camera matrices - V, P = self._get_camera_matrices(scene) - - # Now, render each object in sorted order - for node in self._sorted_mesh_nodes(scene): - mesh = node.mesh - - # Skip the mesh if it's not visible - if not mesh.is_visible: - continue - - for primitive in mesh.primitives: - - # Skip objects that don't have normals - if not primitive.buf_flags & BufFlags.NORMAL: - continue - - # First, get and bind the appropriate program - pf = ProgramFlags.NONE - if flags & RenderFlags.VERTEX_NORMALS: - pf = pf | ProgramFlags.VERTEX_NORMALS - if flags & RenderFlags.FACE_NORMALS: - pf = pf | ProgramFlags.FACE_NORMALS - program = self._get_primitive_program(primitive, flags, pf) - program._bind() - - # Set the camera uniforms - program.set_uniform('V', V) - program.set_uniform('P', P) - program.set_uniform('normal_magnitude', 0.05 * primitive.scale) - program.set_uniform( - 'normal_color', np.array([0.1, 0.1, 1.0, 1.0]) - ) - - # Finally, bind and draw the primitive - self._bind_and_draw_primitive( - primitive=primitive, - pose=scene.get_pose(node), - program=program, - flags=RenderFlags.DEPTH_ONLY - ) - self._reset_active_textures() - - # Unbind the shader and flush the output - if program is not None: - program._unbind() - glFlush() - - ########################################################################### - # Handlers for binding uniforms and drawing primitives - ########################################################################### - - def _bind_and_draw_primitive(self, primitive, pose, program, flags): - # Set model pose matrix - program.set_uniform('M', pose) - - # Bind mesh buffers - primitive._bind() - - # Bind mesh material - if not (flags & RenderFlags.DEPTH_ONLY or flags & RenderFlags.SEG): - material = primitive.material - - # Bind textures - tf = material.tex_flags - if tf & TexFlags.NORMAL: - self._bind_texture(material.normalTexture, - 'material.normal_texture', program) - if tf & TexFlags.OCCLUSION: - self._bind_texture(material.occlusionTexture, - 'material.occlusion_texture', program) - if tf & TexFlags.EMISSIVE: - self._bind_texture(material.emissiveTexture, - 'material.emissive_texture', program) - if tf & TexFlags.BASE_COLOR: - self._bind_texture(material.baseColorTexture, - 'material.base_color_texture', program) - if tf & TexFlags.METALLIC_ROUGHNESS: - self._bind_texture(material.metallicRoughnessTexture, - 'material.metallic_roughness_texture', - program) - if tf & TexFlags.DIFFUSE: - self._bind_texture(material.diffuseTexture, - 'material.diffuse_texture', program) - if tf & TexFlags.SPECULAR_GLOSSINESS: - self._bind_texture(material.specularGlossinessTexture, - 'material.specular_glossiness_texture', - program) - - # Bind other uniforms - b = 'material.{}' - program.set_uniform(b.format('emissive_factor'), - material.emissiveFactor) - if isinstance(material, MetallicRoughnessMaterial): - 
program.set_uniform(b.format('base_color_factor'), - material.baseColorFactor) - program.set_uniform(b.format('metallic_factor'), - material.metallicFactor) - program.set_uniform(b.format('roughness_factor'), - material.roughnessFactor) - elif isinstance(material, SpecularGlossinessMaterial): - program.set_uniform(b.format('diffuse_factor'), - material.diffuseFactor) - program.set_uniform(b.format('specular_factor'), - material.specularFactor) - program.set_uniform(b.format('glossiness_factor'), - material.glossinessFactor) - - # Set blending options - if material.alphaMode == 'BLEND': - glEnable(GL_BLEND) - glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA) - else: - glEnable(GL_BLEND) - glBlendFunc(GL_ONE, GL_ZERO) - - # Set wireframe mode - wf = material.wireframe - if flags & RenderFlags.FLIP_WIREFRAME: - wf = not wf - if (flags & RenderFlags.ALL_WIREFRAME) or wf: - glPolygonMode(GL_FRONT_AND_BACK, GL_LINE) - else: - glPolygonMode(GL_FRONT_AND_BACK, GL_FILL) - - # Set culling mode - if material.doubleSided or flags & RenderFlags.SKIP_CULL_FACES: - glDisable(GL_CULL_FACE) - else: - glEnable(GL_CULL_FACE) - glCullFace(GL_BACK) - else: - glEnable(GL_CULL_FACE) - glEnable(GL_BLEND) - glCullFace(GL_BACK) - glBlendFunc(GL_ONE, GL_ZERO) - glPolygonMode(GL_FRONT_AND_BACK, GL_FILL) - - # Set point size if needed - glDisable(GL_PROGRAM_POINT_SIZE) - if primitive.mode == GLTF.POINTS: - glEnable(GL_PROGRAM_POINT_SIZE) - glPointSize(self.point_size) - - # Render mesh - n_instances = 1 - if primitive.poses is not None: - n_instances = len(primitive.poses) - - if primitive.indices is not None: - glDrawElementsInstanced( - primitive.mode, primitive.indices.size, GL_UNSIGNED_INT, - ctypes.c_void_p(0), n_instances - ) - else: - glDrawArraysInstanced( - primitive.mode, 0, len(primitive.positions), n_instances - ) - - # Unbind mesh buffers - primitive._unbind() - - def _bind_lighting(self, scene, program, node, flags): - """Bind all lighting uniform values for a scene. 
- """ - max_n_lights = self._compute_max_n_lights(flags) - - n_d = min(len(scene.directional_light_nodes), max_n_lights[0]) - n_s = min(len(scene.spot_light_nodes), max_n_lights[1]) - n_p = min(len(scene.point_light_nodes), max_n_lights[2]) - program.set_uniform('ambient_light', scene.ambient_light) - program.set_uniform('n_directional_lights', n_d) - program.set_uniform('n_spot_lights', n_s) - program.set_uniform('n_point_lights', n_p) - plc = 0 - slc = 0 - dlc = 0 - - light_nodes = scene.light_nodes - if (len(scene.directional_light_nodes) > max_n_lights[0] or - len(scene.spot_light_nodes) > max_n_lights[1] or - len(scene.point_light_nodes) > max_n_lights[2]): - light_nodes = self._sorted_nodes_by_distance( - scene, scene.light_nodes, node - ) - - for n in light_nodes: - light = n.light - pose = scene.get_pose(n) - position = pose[:3,3] - direction = -pose[:3,2] - - if isinstance(light, PointLight): - if plc == max_n_lights[2]: - continue - b = 'point_lights[{}].'.format(plc) - plc += 1 - shadow = bool(flags & RenderFlags.SHADOWS_POINT) - program.set_uniform(b + 'position', position) - elif isinstance(light, SpotLight): - if slc == max_n_lights[1]: - continue - b = 'spot_lights[{}].'.format(slc) - slc += 1 - shadow = bool(flags & RenderFlags.SHADOWS_SPOT) - las = 1.0 / max(0.001, np.cos(light.innerConeAngle) - - np.cos(light.outerConeAngle)) - lao = -np.cos(light.outerConeAngle) * las - program.set_uniform(b + 'direction', direction) - program.set_uniform(b + 'position', position) - program.set_uniform(b + 'light_angle_scale', las) - program.set_uniform(b + 'light_angle_offset', lao) - else: - if dlc == max_n_lights[0]: - continue - b = 'directional_lights[{}].'.format(dlc) - dlc += 1 - shadow = bool(flags & RenderFlags.SHADOWS_DIRECTIONAL) - program.set_uniform(b + 'direction', direction) - - program.set_uniform(b + 'color', light.color) - program.set_uniform(b + 'intensity', light.intensity) - # if light.range is not None: - # program.set_uniform(b + 'range', light.range) - # else: - # program.set_uniform(b + 'range', 0) - - if shadow: - self._bind_texture(light.shadow_texture, - b + 'shadow_map', program) - if not isinstance(light, PointLight): - V, P = self._get_light_cam_matrices(scene, n, flags) - program.set_uniform(b + 'light_matrix', P.dot(V)) - else: - raise NotImplementedError( - 'Point light shadows not implemented' - ) - - def _sorted_mesh_nodes(self, scene): - cam_loc = scene.get_pose(scene.main_camera_node)[:3,3] - solid_nodes = [] - trans_nodes = [] - for node in scene.mesh_nodes: - mesh = node.mesh - if mesh.is_transparent: - trans_nodes.append(node) - else: - solid_nodes.append(node) - - # TODO BETTER SORTING METHOD - trans_nodes.sort( - key=lambda n: -np.linalg.norm(scene.get_pose(n)[:3,3] - cam_loc) - ) - solid_nodes.sort( - key=lambda n: -np.linalg.norm(scene.get_pose(n)[:3,3] - cam_loc) - ) - - return solid_nodes + trans_nodes - - def _sorted_nodes_by_distance(self, scene, nodes, compare_node): - nodes = list(nodes) - compare_posn = scene.get_pose(compare_node)[:3,3] - nodes.sort(key=lambda n: np.linalg.norm( - scene.get_pose(n)[:3,3] - compare_posn) - ) - return nodes - - ########################################################################### - # Context Management - ########################################################################### - - def _update_context(self, scene, flags): - - # Update meshes - scene_meshes = scene.meshes - - # Add new meshes to context - for mesh in scene_meshes - self._meshes: - for p in mesh.primitives: - p._add_to_context() - - 
# Remove old meshes from context - for mesh in self._meshes - scene_meshes: - for p in mesh.primitives: - p.delete() - - self._meshes = scene_meshes.copy() - - # Update mesh textures - mesh_textures = set() - for m in scene_meshes: - for p in m.primitives: - mesh_textures |= p.material.textures - - # Add new textures to context - for texture in mesh_textures - self._mesh_textures: - texture._add_to_context() - - # Remove old textures from context - for texture in self._mesh_textures - mesh_textures: - texture.delete() - - self._mesh_textures = mesh_textures.copy() - - shadow_textures = set() - for l in scene.lights: - # Create if needed - active = False - if (isinstance(l, DirectionalLight) and - flags & RenderFlags.SHADOWS_DIRECTIONAL): - active = True - elif (isinstance(l, PointLight) and - flags & RenderFlags.SHADOWS_POINT): - active = True - elif isinstance(l, SpotLight) and flags & RenderFlags.SHADOWS_SPOT: - active = True - - if active and l.shadow_texture is None: - l._generate_shadow_texture() - if l.shadow_texture is not None: - shadow_textures.add(l.shadow_texture) - - # Add new textures to context - for texture in shadow_textures - self._shadow_textures: - texture._add_to_context() - - # Remove old textures from context - for texture in self._shadow_textures - shadow_textures: - texture.delete() - - self._shadow_textures = shadow_textures.copy() - - ########################################################################### - # Texture Management - ########################################################################### - - def _bind_texture(self, texture, uniform_name, program): - """Bind a texture to an active texture unit and return - the texture unit index that was used. - """ - tex_id = self._get_next_active_texture() - glActiveTexture(GL_TEXTURE0 + tex_id) - texture._bind() - program.set_uniform(uniform_name, tex_id) - - def _get_next_active_texture(self): - val = self._texture_alloc_idx - self._texture_alloc_idx += 1 - return val - - def _reset_active_textures(self): - self._texture_alloc_idx = 0 - - ########################################################################### - # Camera Matrix Management - ########################################################################### - - def _get_camera_matrices(self, scene): - main_camera_node = scene.main_camera_node - if main_camera_node is None: - raise ValueError('Cannot render scene without a camera') - P = main_camera_node.camera.get_projection_matrix( - width=self.viewport_width, height=self.viewport_height - ) - pose = scene.get_pose(main_camera_node) - V = np.linalg.inv(pose) # V maps from world to camera - return V, P - - def _get_light_cam_matrices(self, scene, light_node, flags): - light = light_node.light - pose = scene.get_pose(light_node).copy() - s = scene.scale - camera = light._get_shadow_camera(s) - P = camera.get_projection_matrix() - if isinstance(light, DirectionalLight): - direction = -pose[:3,2] - c = scene.centroid - loc = c - direction * s - pose[:3,3] = loc - V = np.linalg.inv(pose) # V maps from world to camera - return V, P - - ########################################################################### - # Shader Program Management - ########################################################################### - - def _get_text_program(self): - program = self._program_cache.get_program( - vertex_shader='text.vert', - fragment_shader='text.frag' - ) - - if not program._in_context(): - program._add_to_context() - - return program - - def _compute_max_n_lights(self, flags): - max_n_lights = 
[MAX_N_LIGHTS, MAX_N_LIGHTS, MAX_N_LIGHTS] - n_tex_units = glGetIntegerv(GL_MAX_TEXTURE_IMAGE_UNITS) - - # Reserved texture units: 6 - # Normal Map - # Occlusion Map - # Emissive Map - # Base Color or Diffuse Map - # MR or SG Map - # Environment cubemap - - n_reserved_textures = 6 - n_available_textures = n_tex_units - n_reserved_textures - - # Distribute textures evenly among lights with shadows, with - # a preference for directional lights - n_shadow_types = 0 - if flags & RenderFlags.SHADOWS_DIRECTIONAL: - n_shadow_types += 1 - if flags & RenderFlags.SHADOWS_SPOT: - n_shadow_types += 1 - if flags & RenderFlags.SHADOWS_POINT: - n_shadow_types += 1 - - if n_shadow_types > 0: - tex_per_light = n_available_textures // n_shadow_types - - if flags & RenderFlags.SHADOWS_DIRECTIONAL: - max_n_lights[0] = ( - tex_per_light + - (n_available_textures - tex_per_light * n_shadow_types) - ) - if flags & RenderFlags.SHADOWS_SPOT: - max_n_lights[1] = tex_per_light - if flags & RenderFlags.SHADOWS_POINT: - max_n_lights[2] = tex_per_light - - return max_n_lights - - def _get_primitive_program(self, primitive, flags, program_flags): - vertex_shader = None - fragment_shader = None - geometry_shader = None - defines = {} - - if (bool(program_flags & ProgramFlags.USE_MATERIAL) and - not flags & RenderFlags.DEPTH_ONLY and - not flags & RenderFlags.FLAT and - not flags & RenderFlags.SEG): - vertex_shader = 'mesh.vert' - fragment_shader = 'mesh.frag' - elif bool(program_flags & (ProgramFlags.VERTEX_NORMALS | - ProgramFlags.FACE_NORMALS)): - vertex_shader = 'vertex_normals.vert' - if primitive.mode == GLTF.POINTS: - geometry_shader = 'vertex_normals_pc.geom' - else: - geometry_shader = 'vertex_normals.geom' - fragment_shader = 'vertex_normals.frag' - elif flags & RenderFlags.FLAT: - vertex_shader = 'flat.vert' - fragment_shader = 'flat.frag' - elif flags & RenderFlags.SEG: - vertex_shader = 'segmentation.vert' - fragment_shader = 'segmentation.frag' - else: - vertex_shader = 'mesh_depth.vert' - fragment_shader = 'mesh_depth.frag' - - # Set up vertex buffer DEFINES - bf = primitive.buf_flags - buf_idx = 1 - if bf & BufFlags.NORMAL: - defines['NORMAL_LOC'] = buf_idx - buf_idx += 1 - if bf & BufFlags.TANGENT: - defines['TANGENT_LOC'] = buf_idx - buf_idx += 1 - if bf & BufFlags.TEXCOORD_0: - defines['TEXCOORD_0_LOC'] = buf_idx - buf_idx += 1 - if bf & BufFlags.TEXCOORD_1: - defines['TEXCOORD_1_LOC'] = buf_idx - buf_idx += 1 - if bf & BufFlags.COLOR_0: - defines['COLOR_0_LOC'] = buf_idx - buf_idx += 1 - if bf & BufFlags.JOINTS_0: - defines['JOINTS_0_LOC'] = buf_idx - buf_idx += 1 - if bf & BufFlags.WEIGHTS_0: - defines['WEIGHTS_0_LOC'] = buf_idx - buf_idx += 1 - defines['INST_M_LOC'] = buf_idx - - # Set up shadow mapping defines - if flags & RenderFlags.SHADOWS_DIRECTIONAL: - defines['DIRECTIONAL_LIGHT_SHADOWS'] = 1 - if flags & RenderFlags.SHADOWS_SPOT: - defines['SPOT_LIGHT_SHADOWS'] = 1 - if flags & RenderFlags.SHADOWS_POINT: - defines['POINT_LIGHT_SHADOWS'] = 1 - max_n_lights = self._compute_max_n_lights(flags) - defines['MAX_DIRECTIONAL_LIGHTS'] = max_n_lights[0] - defines['MAX_SPOT_LIGHTS'] = max_n_lights[1] - defines['MAX_POINT_LIGHTS'] = max_n_lights[2] - - # Set up vertex normal defines - if program_flags & ProgramFlags.VERTEX_NORMALS: - defines['VERTEX_NORMALS'] = 1 - if program_flags & ProgramFlags.FACE_NORMALS: - defines['FACE_NORMALS'] = 1 - - # Set up material texture defines - if bool(program_flags & ProgramFlags.USE_MATERIAL): - tf = primitive.material.tex_flags - if tf & TexFlags.NORMAL: - 
defines['HAS_NORMAL_TEX'] = 1 - if tf & TexFlags.OCCLUSION: - defines['HAS_OCCLUSION_TEX'] = 1 - if tf & TexFlags.EMISSIVE: - defines['HAS_EMISSIVE_TEX'] = 1 - if tf & TexFlags.BASE_COLOR: - defines['HAS_BASE_COLOR_TEX'] = 1 - if tf & TexFlags.METALLIC_ROUGHNESS: - defines['HAS_METALLIC_ROUGHNESS_TEX'] = 1 - if tf & TexFlags.DIFFUSE: - defines['HAS_DIFFUSE_TEX'] = 1 - if tf & TexFlags.SPECULAR_GLOSSINESS: - defines['HAS_SPECULAR_GLOSSINESS_TEX'] = 1 - if isinstance(primitive.material, MetallicRoughnessMaterial): - defines['USE_METALLIC_MATERIAL'] = 1 - elif isinstance(primitive.material, SpecularGlossinessMaterial): - defines['USE_GLOSSY_MATERIAL'] = 1 - - program = self._program_cache.get_program( - vertex_shader=vertex_shader, - fragment_shader=fragment_shader, - geometry_shader=geometry_shader, - defines=defines - ) - - if not program._in_context(): - program._add_to_context() - - return program - - ########################################################################### - # Viewport Management - ########################################################################### - - def _configure_forward_pass_viewport(self, flags): - - # If using offscreen render, bind main framebuffer - if flags & RenderFlags.OFFSCREEN: - self._configure_main_framebuffer() - glBindFramebuffer(GL_DRAW_FRAMEBUFFER, self._main_fb_ms) - else: - glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0) - - glViewport(0, 0, self.viewport_width, self.viewport_height) - glEnable(GL_DEPTH_TEST) - glDepthMask(GL_TRUE) - glDepthFunc(GL_LESS) - glDepthRange(0.0, 1.0) - - def _configure_shadow_mapping_viewport(self, light, flags): - self._configure_shadow_framebuffer() - glBindFramebuffer(GL_FRAMEBUFFER, self._shadow_fb) - light.shadow_texture._bind() - light.shadow_texture._bind_as_depth_attachment() - glActiveTexture(GL_TEXTURE0) - light.shadow_texture._bind() - glDrawBuffer(GL_NONE) - glReadBuffer(GL_NONE) - - glClear(GL_DEPTH_BUFFER_BIT) - glViewport(0, 0, SHADOW_TEX_SZ, SHADOW_TEX_SZ) - glEnable(GL_DEPTH_TEST) - glDepthMask(GL_TRUE) - glDepthFunc(GL_LESS) - glDepthRange(0.0, 1.0) - glDisable(GL_CULL_FACE) - glDisable(GL_BLEND) - - ########################################################################### - # Framebuffer Management - ########################################################################### - - def _configure_shadow_framebuffer(self): - if self._shadow_fb is None: - self._shadow_fb = glGenFramebuffers(1) - - def _delete_shadow_framebuffer(self): - if self._shadow_fb is not None: - glDeleteFramebuffers(1, [self._shadow_fb]) - - def _configure_main_framebuffer(self): - # If mismatch with prior framebuffer, delete it - if (self._main_fb is not None and - self.viewport_width != self._main_fb_dims[0] or - self.viewport_height != self._main_fb_dims[1]): - self._delete_main_framebuffer() - - # If framebuffer doesn't exist, create it - if self._main_fb is None: - # Generate standard buffer - self._main_cb, self._main_db = glGenRenderbuffers(2) - - glBindRenderbuffer(GL_RENDERBUFFER, self._main_cb) - glRenderbufferStorage( - GL_RENDERBUFFER, GL_RGBA, - self.viewport_width, self.viewport_height - ) - - glBindRenderbuffer(GL_RENDERBUFFER, self._main_db) - glRenderbufferStorage( - GL_RENDERBUFFER, GL_DEPTH_COMPONENT24, - self.viewport_width, self.viewport_height - ) - - self._main_fb = glGenFramebuffers(1) - glBindFramebuffer(GL_DRAW_FRAMEBUFFER, self._main_fb) - glFramebufferRenderbuffer( - GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, - GL_RENDERBUFFER, self._main_cb - ) - glFramebufferRenderbuffer( - GL_DRAW_FRAMEBUFFER, 
GL_DEPTH_ATTACHMENT,
-                GL_RENDERBUFFER, self._main_db
-            )
-
-            # Generate multisample buffer
-            self._main_cb_ms, self._main_db_ms = glGenRenderbuffers(2)
-            glBindRenderbuffer(GL_RENDERBUFFER, self._main_cb_ms)
-            # Clamp the sample count to the driver's limit instead of hardcoding 4
-            num_samples = min(glGetIntegerv(GL_MAX_SAMPLES), 4)  # no more than GL_MAX_SAMPLES
-            glRenderbufferStorageMultisample(GL_RENDERBUFFER, num_samples, GL_RGBA, self.viewport_width, self.viewport_height)
-
-            glBindRenderbuffer(GL_RENDERBUFFER, self._main_db_ms)
-            glRenderbufferStorageMultisample(GL_RENDERBUFFER, num_samples, GL_DEPTH_COMPONENT24, self.viewport_width, self.viewport_height)
-
-            self._main_fb_ms = glGenFramebuffers(1)
-            glBindFramebuffer(GL_DRAW_FRAMEBUFFER, self._main_fb_ms)
-            glFramebufferRenderbuffer(
-                GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
-                GL_RENDERBUFFER, self._main_cb_ms
-            )
-            glFramebufferRenderbuffer(
-                GL_DRAW_FRAMEBUFFER, GL_DEPTH_ATTACHMENT,
-                GL_RENDERBUFFER, self._main_db_ms
-            )
-
-            self._main_fb_dims = (self.viewport_width, self.viewport_height)
-
-    def _delete_main_framebuffer(self):
-        if self._main_fb is not None:
-            glDeleteFramebuffers(2, [self._main_fb, self._main_fb_ms])
-        if self._main_cb is not None:
-            glDeleteRenderbuffers(2, [self._main_cb, self._main_cb_ms])
-        if self._main_db is not None:
-            glDeleteRenderbuffers(2, [self._main_db, self._main_db_ms])
-
-        self._main_fb = None
-        self._main_cb = None
-        self._main_db = None
-        self._main_fb_ms = None
-        self._main_cb_ms = None
-        self._main_db_ms = None
-        self._main_fb_dims = (None, None)
-
-    def _read_main_framebuffer(self, scene, flags):
-        width, height = self._main_fb_dims[0], self._main_fb_dims[1]
-
-        # Bind framebuffer and blit buffers
-        glBindFramebuffer(GL_READ_FRAMEBUFFER, self._main_fb_ms)
-        glBindFramebuffer(GL_DRAW_FRAMEBUFFER, self._main_fb)
-        glBlitFramebuffer(
-            0, 0, width, height, 0, 0, width, height,
-            GL_COLOR_BUFFER_BIT, GL_LINEAR
-        )
-        glBlitFramebuffer(
-            0, 0, width, height, 0, 0, width, height,
-            GL_DEPTH_BUFFER_BIT, GL_NEAREST
-        )
-        glBindFramebuffer(GL_READ_FRAMEBUFFER, self._main_fb)
-
-        # Read depth
-        depth_buf = glReadPixels(
-            0, 0, width, height, GL_DEPTH_COMPONENT, GL_FLOAT
-        )
-        depth_im = np.frombuffer(depth_buf, dtype=np.float32)
-        depth_im = depth_im.reshape((height, width))
-        depth_im = np.flip(depth_im, axis=0)
-        inf_inds = (depth_im == 1.0)
-        depth_im = 2.0 * depth_im - 1.0
-        z_near = scene.main_camera_node.camera.znear
-        z_far = scene.main_camera_node.camera.zfar
-        noninf = np.logical_not(inf_inds)
-        if z_far is None:
-            depth_im[noninf] = 2 * z_near / (1.0 - depth_im[noninf])
-        else:
-            depth_im[noninf] = ((2.0 * z_near * z_far) /
-                                (z_far + z_near - depth_im[noninf] *
-                                 (z_far - z_near)))
-        depth_im[inf_inds] = 0.0
-
-        # Resize for macos if needed
-        if sys.platform == 'darwin':
-            depth_im = self._resize_image(depth_im)
-
-        if flags & RenderFlags.DEPTH_ONLY:
-            return depth_im
-
-        # Read color
-        if flags & RenderFlags.RGBA:
-            color_buf = glReadPixels(
-                0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE
-            )
-            color_im = np.frombuffer(color_buf, dtype=np.uint8)
-            color_im = color_im.reshape((height, width, 4))
-        else:
-            color_buf = glReadPixels(
-                0, 0, width, height, GL_RGB,
GL_UNSIGNED_BYTE - ) - color_im = np.frombuffer(color_buf, dtype=np.uint8) - color_im = color_im.reshape((height, width, 3)) - color_im = np.flip(color_im, axis=0) - - # Resize for macos if needed - if sys.platform == 'darwin': - color_im = self._resize_image(color_im, True) - - return color_im, depth_im - - def _resize_image(self, value, antialias=False): - """If needed, rescale the render for MacOS.""" - img = PIL.Image.fromarray(value) - resample = PIL.Image.NEAREST - if antialias: - resample = PIL.Image.BILINEAR - size = (self.viewport_width // self.dpscale, - self.viewport_height // self.dpscale) - img = img.resize(size, resample=resample) - return np.array(img) - - ########################################################################### - # Shadowmap Debugging - ########################################################################### - - def _forward_pass_no_reset(self, scene, flags): - # Set up camera matrices - V, P = self._get_camera_matrices(scene) - - # Now, render each object in sorted order - for node in self._sorted_mesh_nodes(scene): - mesh = node.mesh - - # Skip the mesh if it's not visible - if not mesh.is_visible: - continue - - for primitive in mesh.primitives: - - # First, get and bind the appropriate program - program = self._get_primitive_program( - primitive, flags, ProgramFlags.USE_MATERIAL - ) - program._bind() - - # Set the camera uniforms - program.set_uniform('V', V) - program.set_uniform('P', P) - program.set_uniform( - 'cam_pos', scene.get_pose(scene.main_camera_node)[:3,3] - ) - - # Next, bind the lighting - if not flags & RenderFlags.DEPTH_ONLY and not flags & RenderFlags.FLAT: - self._bind_lighting(scene, program, node, flags) - - # Finally, bind and draw the primitive - self._bind_and_draw_primitive( - primitive=primitive, - pose=scene.get_pose(node), - program=program, - flags=flags - ) - self._reset_active_textures() - - # Unbind the shader and flush the output - if program is not None: - program._unbind() - glFlush() - - def _render_light_shadowmaps(self, scene, light_nodes, flags, tile=False): - glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0) - glClearColor(*scene.bg_color) - glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT) - glEnable(GL_DEPTH_TEST) - glDepthMask(GL_TRUE) - glDepthFunc(GL_LESS) - glDepthRange(0.0, 1.0) - - w = self.viewport_width - h = self.viewport_height - - num_nodes = len(light_nodes) - viewport_dims = { - (0, 2): [0, h // 2, w // 2, h], - (1, 2): [w // 2, h // 2, w, h], - (0, 3): [0, h // 2, w // 2, h], - (1, 3): [w // 2, h // 2, w, h], - (2, 3): [0, 0, w // 2, h // 2], - (0, 4): [0, h // 2, w // 2, h], - (1, 4): [w // 2, h // 2, w, h], - (2, 4): [0, 0, w // 2, h // 2], - (3, 4): [w // 2, 0, w, h // 2] - } - - if tile: - for i, ln in enumerate(light_nodes): - light = ln.light - - if light.shadow_texture is None: - raise ValueError('Light does not have a shadow texture') - - glViewport(*viewport_dims[(i, num_nodes + 1)]) - - program = self._get_debug_quad_program() - program._bind() - self._bind_texture(light.shadow_texture, 'depthMap', program) - self._render_debug_quad() - self._reset_active_textures() - glFlush() - i += 1 - glViewport(*viewport_dims[(i, num_nodes + 1)]) - self._forward_pass_no_reset(scene, flags) - else: - for i, ln in enumerate(light_nodes): - light = ln.light - - if light.shadow_texture is None: - raise ValueError('Light does not have a shadow texture') - - glViewport(0, 0, self.viewport_width, self.viewport_height) - - program = self._get_debug_quad_program() - program._bind() - 
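The `_bind_texture` call that follows leans on the renderer's round-robin texture-unit counter; a standalone sketch of that allocation scheme:

```python
# Mirrors _get_next_active_texture/_reset_active_textures: each bind claims
# the next unit index (an offset from GL_TEXTURE0), and the counter is reset
# after every draw so the units are reused across draw calls.
class UnitAllocator:
    def __init__(self):
        self.idx = 0

    def acquire(self):
        unit = self.idx
        self.idx += 1
        return unit

    def reset(self):
        self.idx = 0

alloc = UnitAllocator()
assert [alloc.acquire() for _ in range(3)] == [0, 1, 2]
alloc.reset()
```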
self._bind_texture(light.shadow_texture, 'depthMap', program)
-                self._render_debug_quad()
-                self._reset_active_textures()
-                glFlush()
-        return
-
-    def _get_debug_quad_program(self):
-        program = self._program_cache.get_program(
-            vertex_shader='debug_quad.vert',
-            fragment_shader='debug_quad.frag'
-        )
-        if not program._in_context():
-            program._add_to_context()
-        return program
-
-    def _render_debug_quad(self):
-        x = glGenVertexArrays(1)
-        glBindVertexArray(x)
-        glDrawArrays(GL_TRIANGLES, 0, 6)
-        glBindVertexArray(0)
-        glDeleteVertexArrays(1, [x])
diff --git a/spaces/akhaliq/SummerTime/model/third_party/HMNet/Models/Trainers/HMNetTrainer.py b/spaces/akhaliq/SummerTime/model/third_party/HMNet/Models/Trainers/HMNetTrainer.py
deleted file mode 100644
index 771e4883f7325e18d99c7ef1686fb1393a36ebe4..0000000000000000000000000000000000000000
--- a/spaces/akhaliq/SummerTime/model/third_party/HMNet/Models/Trainers/HMNetTrainer.py
+++ /dev/null
@@ -1,689 +0,0 @@
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT license.
-
-from collections import defaultdict
-from datetime import datetime
-import os
-import sys
-import importlib
-import json
-import random
-import numpy as np
-import inspect
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-import torch.optim as optim
-import torch.optim.lr_scheduler as lr_scheduler
-
-from model.third_party.HMNet.Models.Trainers.DistributedTrainer import (
-    DistributedTrainer,
-)
-from model.third_party.HMNet.Models.Trainers.Tasks import Task
-from model.third_party.HMNet.Utils.GeneralUtils import (
-    AverageMeter,
-    BaseBatchGen,
-    bcolors,
-)
-
-from model.third_party.HMNet.DataLoader import iterators
-
-
-class ObjectView(object):
-    def __init__(self, d):
-        self.__dict__ = d
-
-
-class WrappedModel(nn.Module):
-    def __init__(self, model, criterion):
-        super(WrappedModel, self).__init__()
-        self.add_module("model", model)
-        self.add_module("criterion", criterion)
-
-    def forward(self, batch):
-        output = self.model(batch)
-        loss = self.criterion(output, batch)
-        return loss
-
-
-class HMNetTrainer(DistributedTrainer):
-    """
-    The trainer class for HMNet model training (pre-training and fine-tuning).
-    Its train() and eval() methods are intended to be called directly to
-    start training and evaluation, respectively.
-
-    Before running, the trainer must contain proper Task, Criterion, and Optimizer
-    instances.
-
-    """
-
-    def __init__(self, opt):
-        super().__init__(opt)
-        self.task = Task.setup_task(self.opt["TASK"], self.opt, self.saveFolder)
-
-    def is_gradient_accumulation_boundary(self):
-        return (self.updates + 1) % self.grad_acc_steps == 0
-
-    def get_batch_generator(self, dataset_label):
-        batch_generator = self.task.batch_gen(
-            self.opt,
-            dataset_label=dataset_label,
-            model_config=self.module.config,
-            tokenizer=self.module.tokenizer,
-            world_size=self.opt["world_size"],
-            rank=self.opt["rank"],
-            seed=self.seed,
-        )
-        if isinstance(batch_generator, BaseBatchGen):
-            # If it is a wrapper class of an infinibatch iterator,
-            # get the internal infinibatch iterator.
-            batch_generator = batch_generator.iterator
-        self.log(f"Loaded data on rank {self.opt['rank']}.")
-        return batch_generator
-
-    def set_up_model(self):
-        # instantiate module (tokenizer should be contained in module as self.module.tokenizer)
-        try:
-            model_module = importlib.import_module(
-                "model.third_party.HMNet.Models.Networks."
+ self.opt["MODEL"] - ) - model_class = getattr(model_module, self.opt["MODEL"]) - self.module = model_class(self.opt) - except Exception as e: - self.log(e) - self.log("ERROR: Model {} is unknown".format(self.opt["MODEL"])) - assert False - - # calculate total trainable parameters - pytorch_total_params = sum( - p.numel() for p in self.module.parameters() if p.requires_grad - ) - self.log("Total trainable parameters: {}".format(pytorch_total_params)) - - # instantiate criterion - try: - criterion_module = importlib.import_module( - "model.third_party.HMNet.Models.Criteria." + self.opt["CRITERION"] - ) - criterion_class = getattr(criterion_module, self.opt["CRITERION"]) - self.criterion = criterion_class(self.opt, self.module) - except Exception as e: - self.log(e) - self.log("ERROR: Criterion {} is unknown".format(self.opt["CRITERION"])) - assert False - - self.module.to(self.opt["device"]) - - def get_optimizer_params_config(self, optimizer_class): - optimizer_parameters = {} - sig = inspect.signature(optimizer_class) - for param_name in sig.parameters.keys(): - if param_name == "lr": - optimizer_parameters[param_name] = self.opt["START_LEARNING_RATE"] - if param_name not in ["params", "lr"] and param_name.upper() in self.opt: - optimizer_parameters[param_name] = self.opt[param_name.upper()] - return optimizer_parameters - - def get_lr_scheduler_params_config(self, lr_scheduler_class): - lr_scheduler_parameters = {} - sig = inspect.signature(lr_scheduler_class) - for param_name in sig.parameters.keys(): - if param_name not in ["optimizer"] and param_name.upper() in self.opt: - lr_scheduler_parameters[param_name] = self.opt[param_name.upper()] - return lr_scheduler_parameters - - def set_up_optimizer_and_lr_scheduler(self): - - parameters = self.module.get_training_parameters() - - # instantiate optimizer - try: # first try pytorch native optimizer - optimizer_class = getattr(optim, self.opt["OPTIMIZER"]) - self.log( - "Using pytorch native optimizier: {}".format(self.opt["OPTIMIZER"]) - ) - except: - try: # then try custom optimizer inside Models.Optimizers - optimizer_module = importlib.import_module( - "model.third_party.HMNet.Models.Optimizers." + self.opt["OPTIMIZER"] - ) - optimizer_class = getattr(optimizer_module, self.opt["OPTIMIZER"]) - self.log("Using custom optimizer: {}".format(self.opt["OPTIMIZER"])) - except Exception as e: - self.log(e) - self.log("ERROR: Optimizer {} is unknown".format(self.opt["OPTIMIZER"])) - assert False - - optimizer_parameters = self.get_optimizer_params_config(optimizer_class) - self.log(f"Optimizer parameters: {optimizer_parameters}") - self.optimizer = optimizer_class(parameters, **optimizer_parameters) - self.optimizer.zero_grad() - - # instantiate lr scheduler - try: # first look for pytorch native lr scheduler - lr_scheduler_class = getattr(lr_scheduler, self.opt["LR_SCHEDULER"]) - self.log( - "Using pytorch native lr scheduler: {}".format(self.opt["LR_SCHEDULER"]) - ) - except: - try: # then look for custom lr scheduler inside Models.Optimizers - lr_scheduler_module = importlib.import_module( - "model.third_party.HMNet.Models.Optimizers." 
- + self.opt["LR_SCHEDULER"] - ) - lr_scheduler_class = getattr( - lr_scheduler_module, self.opt["LR_SCHEDULER"] - ) - self.log( - "Using custom lr scheduler: {}".format(self.opt["LR_SCHEDULER"]) - ) - except Exception as e: - self.log(e) - self.log( - "ERROR: LR Scheduler {} is unknown".format(self.opt["LR_SCHEDULER"]) - ) - assert False - - lr_scheduler_parameters = self.get_lr_scheduler_params_config( - lr_scheduler_class - ) - self.log(f"Lr scheduler parameters: {lr_scheduler_parameters}") - self.lr_scheduler = lr_scheduler_class( - self.optimizer, **lr_scheduler_parameters - ) - - def initialize_fp16_DDP(self): - """ - Wrap the module and criterion to a single network, then depending on the settings, - wrap the network with apex amp module for fp16 training, and wrap the network with - pytorch DDP module for distributed data parallel training - """ - self.network = WrappedModel(self.module, self.criterion) - self.network.to(self.opt["device"]) - - if self.opt["fp16"]: - from apex import amp - - self.network, self.optimizer = amp.initialize( - self.network, self.optimizer, opt_level=self.opt["fp16_opt_level"] - ) - - if self.opt["world_size"] > 1: - self.network = torch.nn.parallel.DistributedDataParallel( - self.network, - device_ids=[self.opt["local_rank"]], - output_device=self.opt["local_rank"], - find_unused_parameters=True, - ) - self.log(f"Wrapped model with DDP on rank {self.opt['rank']}.") - assert self.module is self.network.module.model - else: - assert self.module is self.network.model - - def eval(self): - if self.opt["rank"] == 0: - self.log("-----------------------------------------------") - self.log("Evaluating model ... ") - self.set_up_model() - - for eval_dataset in ["dev", "test"]: - batch_generator_eval = self.get_batch_generator(eval_dataset) - - self.task.evaluator.reset_best_score(set_high=True) - result, score, got_better_score = self.task.evaluator.eval_batches( - self.module, batch_generator_eval, self.saveFolder, eval_dataset - ) - if self.opt["rank"] == 0: - self.log("{0} results breakdown\n{1}".format(eval_dataset, result)) - - def eval_return_results(self): - if self.opt["rank"] == 0: - self.log("-----------------------------------------------") - self.log("Evaluating model ... 
") - self.set_up_model() - - for eval_dataset in ["test"]: - batch_generator_eval = self.get_batch_generator(eval_dataset) - - self.task.evaluator.reset_best_score(set_high=True) - result, score, got_better_score = self.task.evaluator.eval_batches( - self.module, batch_generator_eval, self.saveFolder, eval_dataset - ) - if self.opt["rank"] == 0: - self.log("{0} results breakdown\n{1}".format(eval_dataset, result)) - return result - - def train(self): - self.log(f"train on rank {self.opt['rank']}") - if self.opt["rank"] == 0: - self.log("-----------------------------------------------") - self.log("Initializing model...") - - self.set_up_model() # setup self.module as original model - self.network = None - self.train_batch_generator = self.get_batch_generator("train") - if isinstance(self.train_batch_generator, iterators.CheckpointableIterator): - # training batch generator is infinite - self.updates_per_epoch = self.opt["UPDATES_PER_EPOCH"] - else: - self.updates_per_epoch = len(self.train_batch_generator) - self.updates = 0 - self.optim_steps = 0 - self.start_epoch_idx = 0 - self.start_batch_idx = 0 - - self.set_up_optimizer_and_lr_scheduler() - self.initialize_fp16_DDP() - if "RESUME" in self.opt: - # Resume complete training states, including optimizer, lr_scheduler, train batch generator, and updates count - # from the checkpoint location indicated in a .json file - self.load_checkpoint() - - ###################### - # Start the main loop - ###################### - - numEpochs = self.opt["MAX_NUM_EPOCHS"] - self.train_loss = AverageMeter() # track the average training loss - self.acc_loss = 0.0 - # after every 'SAVE_PER_UPDATE_NUM' updates, it will save a checkpoint by setting save_a_checkpoint to True temporarily - save_a_checkpoint = False - for epoch in range(self.start_epoch_idx, numEpochs): - self.current_epoch_idx = epoch - self.log("Epoch {}".format(epoch)) - - startTime = datetime.now() - - for batch_idx, batch in enumerate(self.train_batch_generator): - if self.current_epoch_idx == self.start_epoch_idx: - if isinstance( - self.train_batch_generator, iterators.CheckpointableIterator - ): - batch_idx += self.start_batch_idx - elif batch_idx < self.start_batch_idx: - continue - self.current_batch_idx = batch_idx - - # after every 'SAVE_PER_UPDATE_NUM' updates, save a checkpoint - if ("SAVE_PER_UPDATE_NUM" in self.opt) and ( - self.updates + 1 - ) % self.opt["SAVE_PER_UPDATE_NUM"] == 0: - # Make sure the next update is going to update the weights and zero the gradients, then we can checkpoint - assert self.is_gradient_accumulation_boundary() - save_a_checkpoint = True - - # update - self.update(batch) - - if save_a_checkpoint: - # evaluate at the checkpointed moment, and log the results - if self.task.evaluator is not None: - evaluate_label = "update_" + str(self.updates) - eval_dataset = "dev" - batches = self.get_batch_generator(eval_dataset) - ( - result, - score, - got_better_score, - ) = self.task.evaluator.eval_batches( - self.module, batches, self.saveFolder, evaluate_label - ) - self.tb_log_scalar("Eval/score", score, self.updates) - if got_better_score: - self.log( - "Got new better score on rank-{0} evaluator, at updates {1}".format( - self.opt["rank"], self.updates - ) - ) - self.log( - "Updates {0} - {1}: Current Score: {2:.3f} (best Score: {3:.3f})".format( - self.updates, - eval_dataset, - score, - self.task.evaluator.best_score, - ) - ) - self.log("Current results breakdown\n{0}".format(result)) - self.log( - "Best results breakdown\n{0}".format( - 
self.task.evaluator.best_res
-                            )
-                        )
-                    # save complete training states, including model weights, optimizer, lr_scheduler, batch generator, and updates count
-                    self.save_checkpoint(self.updates)
-                    save_a_checkpoint = False
-
-                # logging
-                if (
-                    (batch_idx % 10 == 0)
-                    or (epoch == 0 and batch_idx <= 50)
-                    or "DEBUG" in self.opt
-                ):
-                    if self.opt["rank"] == 0:
-                        batch_size = batch["encoder_input_ids"].shape[0]
-                        self.log(
-                            "epochs[{0:6}] updates[{1:6}] bsz[{2:d}] train loss[{3:.5f}] avg train loss[{4:.5f}] learning rate[{5:.5e}] remaining[{6}]".format(
-                                epoch,
-                                self.updates,
-                                batch_size,
-                                self.train_loss.val,
-                                self.train_loss.avg,
-                                self.lr_scheduler.get_lr()[0],
-                                str(
-                                    (datetime.now() - startTime)
-                                    / (batch_idx + 1)
-                                    * (self.updates_per_epoch - batch_idx - 1)
-                                ).split(".")[0],
-                            )
-                        )
-
-                        self.tb_log_scalar(
-                            "Loss/train_val", self.train_loss.val, self.updates
-                        )
-                        self.tb_log_scalar(
-                            "Loss/train_avg", self.train_loss.avg, self.updates
-                        )
-                        self.tb_log_scalar(
-                            "Learning Rate/lr",
-                            self.lr_scheduler.get_lr()[0],
-                            self.updates,
-                        )
-
-                # if "DEBUG" in self.opt and batch_idx > 200:  # exit early for DEBUG mode
-                #     break
-
-                if (
-                    isinstance(
-                        self.train_batch_generator, iterators.CheckpointableIterator
-                    )
-                    and batch_idx + 1 == self.updates_per_epoch
-                ):
-                    break
-
-            self.log("This epoch took " + str(datetime.now() - startTime))
-            self.log("PROGRESS: {0:.2f}%".format(100.0 * (epoch + 1) / numEpochs))
-            self.log("Config file is at " + self.opt["confFile"])
-
-            if "DEBUG" in self.opt:  # exit early for DEBUG mode
-                break
-
-    def update(self, batch):
-        # forward loss, backward propagation, model update, and one step of optimization and lr scheduler
-        self.network.train()
-        # move the batch to the device
-        # @TODO make this more general, maybe have a self.task.move_batch(batch, device)
-        # so the trainer decides when and where to move batches, and task tells how
-        if isinstance(batch, tuple):
-            batch = tuple(t.to(self.opt["device"]) for t in batch)
-        elif isinstance(batch, list):
-            batch = [t.to(self.opt["device"]) for t in batch]
-        elif isinstance(batch, dict):
-            for k in batch:
-                if torch.is_tensor(batch[k]):
-                    batch[k] = batch[k].to(self.opt["device"])
-        else:
-            assert torch.is_tensor(batch)
-            batch = batch.to(self.opt["device"])
-
-        # determine whether gradient sync can be skipped for this update
-        skip_gradient_sync = False
-        if self.opt["world_size"] > 1 and not self.is_gradient_accumulation_boundary():
-            if not self.opt["fp16"]:
-                # https://krishansubudhi.github.io/deeplearning/2020/02/06/apex-gradient-accumulation.html
-                # When using fp16, if we skip grad sync during grad accumulation, the grad sync at the
-                # grad accumulation boundary cannot properly sync the whole accumulated grad.
-                # So with fp16 on, we have to sync even if it's not grad accumulation boundary.
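The no_sync accumulation pattern discussed in the comments above, as a standalone sketch; `model`, `batches`, and `acc_steps` are placeholders, and `model` is assumed to be wrapped in DistributedDataParallel:

```python
# Hedged sketch of gradient accumulation with DDP's no_sync(): skip the
# gradient all-reduce on non-boundary micro-batches and sync only when the
# optimizer is about to step.
from contextlib import nullcontext

def accumulate_steps(model, batches, optimizer, acc_steps):
    for i, batch in enumerate(batches):
        boundary = (i + 1) % acc_steps == 0
        sync_ctx = nullcontext() if boundary else model.no_sync()
        with sync_ctx:
            loss = model(batch) / acc_steps  # average over micro-batches
            loss.backward()                  # grads accumulate locally
        if boundary:
            optimizer.step()
            optimizer.zero_grad()
```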
-                if self.high_pytorch_version:
-                    skip_gradient_sync = True
-
-        # forward
-        if skip_gradient_sync:
-            with self.network.no_sync():
-                loss = self.network(batch)
-        else:
-            loss = self.network(batch)
-        if self.grad_acc_steps > 1:
-            loss = loss / self.grad_acc_steps
-        self.acc_loss += loss
-        # self.log(f"forward() done on rank {self.opt['rank']}")
-        # print(loss.item())
-
-        # backward
-        def backward(loss_tensor):
-            if self.opt["fp16"]:
-                from apex import amp
-
-                with amp.scale_loss(loss_tensor, self.optimizer) as scaled_loss:
-                    scaled_loss.backward()
-            else:
-                loss_tensor.backward()
-
-        if skip_gradient_sync:
-            with self.network.no_sync():
-                backward(loss)
-        else:
-            if "DEBUG" in self.opt and self.opt["rank"] == 0:
-                self.log(
-                    "Performing synchronized backward at step {0}".format(
-                        self.optim_steps
-                    )
-                )
-            backward(loss)
-        # self.log(f"backward() done on rank {self.opt['rank']}")
-
-        # step
-        if self.is_gradient_accumulation_boundary():
-            if self.opt["world_size"] > 1:
-                # ddp: use all_reduce to sum up the values of self.acc_loss over all processes;
-                # the operation happens in place (i.e., the value of self.acc_loss is replaced) and all processes receive the updated value
-                torch.distributed.all_reduce(
-                    self.acc_loss, torch.distributed.ReduceOp.SUM
-                )
-                self.acc_loss /= self.opt["world_size"]
-            self.train_loss.update(self.acc_loss.data, 1)
-            self.acc_loss = 0.0
-            if "GRAD_CLIPPING" in self.opt:
-                if self.opt["fp16"]:
-                    from apex import amp
-
-                    torch.nn.utils.clip_grad_norm_(
-                        amp.master_params(self.optimizer), self.opt["GRAD_CLIPPING"]
-                    )
-                else:
-                    torch.nn.utils.clip_grad_norm_(
-                        self.network.parameters(), self.opt["GRAD_CLIPPING"]
-                    )
-            self.optim_steps += 1
-            self.optimizer.step()
-            self.optimizer.zero_grad()
-            self.lr_scheduler.step()
-
-        self.updates += 1
-        # self.log(f"step() done on rank {self.opt['rank']}")
-
-    def save_checkpoint(self, tag):
-        """
-        Save complete training states, including model weights, optimizer, lr_scheduler,
-        fp16 loss scaler, random state, batch generator, and updates count.
-        Also save a model with the save_pretrained API for model transfer.
-        """
-        self.log("Saving checkpoint...")
-        resume_epoch_idx = self.current_epoch_idx
-        resume_batch_idx = self.current_batch_idx + 1
-        if resume_batch_idx == self.updates_per_epoch:
-            resume_batch_idx = 0
-            resume_epoch_idx += 1
-
-        if self.opt["fp16"]:
-            from apex import amp
-        if self.opt["rank"] == 0:
-            save_dir = os.path.join(self.saveFolder, str(tag))
-            os.makedirs(save_dir)
-            save_path = os.path.join(save_dir, "training_states.pt")
-            state = {
-                "network": self.network.state_dict(),
-                "optimizer": self.optimizer.state_dict(),
-                "lr_scheduler": self.lr_scheduler.state_dict(),
-                "amp": amp.state_dict() if self.opt["fp16"] else None,
-                "optim_steps": self.optim_steps,
-                "updates": self.updates,
-                "updates_per_epoch": self.updates_per_epoch,
-                "start_epoch_idx": resume_epoch_idx,
-                "start_batch_idx": resume_batch_idx,
-            }
-
-            torch.save(state, save_path)
-        if self.opt["world_size"] > 1:
-            torch.distributed.barrier()
-        save_dir = os.path.join(self.saveFolder, str(tag))
-        assert os.path.isdir(save_dir)
-
-        random_state_path = os.path.join(
-            save_dir, "random_state_rank_{:04d}".format(self.opt["rank"])
-        )
-        random_state = {
-            "random": random.getstate(),
-            "numpy_random": np.random.get_state(),
-            "torch_random": torch.get_rng_state(),
-            "torch_cuda_random": torch.cuda.get_rng_state(device=self.opt["device"])
-            if self.use_cuda
-            else None,
-        }
-        torch.save(random_state, random_state_path)
-
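The per-rank random-state bookkeeping above (mirrored by `load_checkpoint` further down) is what lets a resumed run sample identically to an uninterrupted one. Reduced to its core, the round trip looks like the sketch below; `save_rng_states` and `load_rng_states` are hypothetical helper names for illustration, not part of this trainer:

import random

import numpy as np
import torch


def save_rng_states(path, device=None):
    # capture every RNG stream the training loop draws from
    torch.save({
        "random": random.getstate(),
        "numpy_random": np.random.get_state(),
        "torch_random": torch.get_rng_state(),
        "torch_cuda_random": torch.cuda.get_rng_state(device) if device is not None else None,
    }, path)


def load_rng_states(path, device=None):
    # exact mirror of save_rng_states: restore all streams on resume
    state = torch.load(path, map_location="cpu")
    random.setstate(state["random"])
    np.random.set_state(state["numpy_random"])
    torch.set_rng_state(state["torch_random"])
    if device is not None and state["torch_cuda_random"] is not None:
        torch.cuda.set_rng_state(state["torch_cuda_random"], device=device)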
-        if isinstance(self.train_batch_generator, iterators.CheckpointableIterator):
-            # save batch generators for all ranks
-            batch_generator_file_path = os.path.join(
-                save_dir,
-                "batch_generator_checkpoint_rank_{:04d}".format(self.opt["rank"]),
-            )
-            batch_generator_state = self.train_batch_generator.getstate()
-            torch.save(batch_generator_state, batch_generator_file_path)
-        else:
-            self.log(
-                "Batch generator is not checkpointable. Cannot save to checkpoint."
-            )
-
-        if self.opt["rank"] == 0:
-            self.module.save_pretrained(save_dir)
-
-        if self.opt["rank"] == 0:
-            # save the latest checkpoint location to a json file
-            checkpoint_location = {
-                "checkpoint_tag": str(tag),
-                "checkpoint_path": os.path.relpath(
-                    self.saveFolder, start=self.opt["datadir"]
-                ),
-            }
-            json.dump(
-                checkpoint_location,
-                open(
-                    os.path.join(
-                        self.opt["datadir"],
-                        self.opt["basename"] + "_resume_checkpoint.json",
-                    ),
-                    "w",
-                    encoding="utf-8",
-                ),
-            )
-        self.log(f"Finished saving checkpoint and model to {save_dir}.")
-
-    def load_model(self, model_path):
-        # Load the model only, without any training states, using the from_pretrained API
-        self.module = self.module.from_pretrained(model_path)
-        self.module.to(self.opt["device"])
-
-    def load_checkpoint(self):
-        """
-        Load complete training states, including model weights, optimizer, lr_scheduler,
-        fp16 loss scaler, random state, batch generator, and updates count
-        """
-        try:
-            # load the checkpoint location from the json file
-            checkpoint_location = json.load(
-                open(
-                    os.path.join(
-                        self.opt["datadir"],
-                        self.opt["basename"] + "_resume_checkpoint.json",
-                    ),
-                    encoding="utf-8",
-                )
-            )
-            checkpoint_path = os.path.join(
-                self.opt["datadir"],
-                checkpoint_location["checkpoint_path"],
-                checkpoint_location["checkpoint_tag"],
-            )
-            tag = checkpoint_location["checkpoint_tag"]
-            if not os.path.isdir(checkpoint_path):
-                if self.opt["rank"] == 0:
-                    self.log(
-                        "Checkpoint path {} does not exist. 
Continue without loading checkpoint".format( - checkpoint_path - ) - ) - return - except: - if self.opt["rank"] == 0: - self.log( - f"Cannot find checkpoint path from {self.opt['basename']+'_resume_checkpoint.json'}.\n" - f"Make sure {os.path.join(self.opt['datadir'], self.opt['basename']+'_resume_checkpoint.json')} exists.\n" - f"Continue without loading checkpoint" - ) - return - # save a copy of the resumed checkpoint location in the save folder of current run - if self.opt["rank"] == 0: - json.dump( - checkpoint_location, - open( - os.path.join(self.saveFolder, "resumed_checkpoint.json"), - "w", - encoding="utf-8", - ), - ) - - self.log(f"Loading checkpoint from {checkpoint_path}...") - load_path = os.path.join(checkpoint_path, "training_states.pt") - state = torch.load(load_path, map_location=self.opt["device"]) - self.network.load_state_dict(state["network"]) - self.optimizer.load_state_dict(state["optimizer"]) - self.lr_scheduler.load_state_dict(state["lr_scheduler"]) - if self.opt["fp16"]: - from apex import amp - - amp.load_state_dict(state["amp"]) - self.optim_steps = state["optim_steps"] - self.updates = state["updates"] - self.start_epoch_idx = state["start_epoch_idx"] - self.start_batch_idx = state["start_batch_idx"] - assert self.updates_per_epoch == state["updates_per_epoch"] - assert self.start_batch_idx < self.updates_per_epoch - - random_state_path = os.path.join( - checkpoint_path, "random_state_rank_{:04d}".format(self.opt["rank"]) - ) - random_state = torch.load(random_state_path, map_location="cpu") - random.setstate(random_state["random"]) - np.random.set_state(random_state["numpy_random"]) - torch.set_rng_state(random_state["torch_random"]) - if self.use_cuda: - torch.cuda.set_rng_state( - random_state["torch_cuda_random"], device=self.opt["device"] - ) - - if "RESET_DATA_LOADER" not in self.opt and isinstance( - self.train_batch_generator, iterators.CheckpointableIterator - ): - batch_generator_file_path = os.path.join( - checkpoint_path, - "batch_generator_checkpoint_rank_{:04d}".format(self.opt["rank"]), - ) - batch_generator_state = torch.load( - batch_generator_file_path, map_location="cpu" - ) - self.train_batch_generator.setstate(batch_generator_state) - else: - self.log( - "No need to resume batch generator or batch generator is not checkpointable. Didn't load from checkpoint." - ) - self.log(f"Finished loading checkpoint from {checkpoint_path}.") diff --git a/spaces/akhaliq/lama/models/ade20k/segm_lib/utils/data/distributed.py b/spaces/akhaliq/lama/models/ade20k/segm_lib/utils/data/distributed.py deleted file mode 100644 index c3d890e28fd2b9e044bdd9494de4a43ad2471eed..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/lama/models/ade20k/segm_lib/utils/data/distributed.py +++ /dev/null @@ -1,58 +0,0 @@ -import math -import torch -from .sampler import Sampler -from torch.distributed import get_world_size, get_rank - - -class DistributedSampler(Sampler): - """Sampler that restricts data loading to a subset of the dataset. - - It is especially useful in conjunction with - :class:`torch.nn.parallel.DistributedDataParallel`. In such case, each - process can pass a DistributedSampler instance as a DataLoader sampler, - and load a subset of the original dataset that is exclusive to it. - - .. note:: - Dataset is assumed to be of constant size. - - Arguments: - dataset: Dataset used for sampling. - num_replicas (optional): Number of processes participating in - distributed training. - rank (optional): Rank of the current process within num_replicas. 
- """ - - def __init__(self, dataset, num_replicas=None, rank=None): - if num_replicas is None: - num_replicas = get_world_size() - if rank is None: - rank = get_rank() - self.dataset = dataset - self.num_replicas = num_replicas - self.rank = rank - self.epoch = 0 - self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas)) - self.total_size = self.num_samples * self.num_replicas - - def __iter__(self): - # deterministically shuffle based on epoch - g = torch.Generator() - g.manual_seed(self.epoch) - indices = list(torch.randperm(len(self.dataset), generator=g)) - - # add extra samples to make it evenly divisible - indices += indices[:(self.total_size - len(indices))] - assert len(indices) == self.total_size - - # subsample - offset = self.num_samples * self.rank - indices = indices[offset:offset + self.num_samples] - assert len(indices) == self.num_samples - - return iter(indices) - - def __len__(self): - return self.num_samples - - def set_epoch(self, epoch): - self.epoch = epoch diff --git a/spaces/alamin655/websurfx/src/server/routes/mod.rs b/spaces/alamin655/websurfx/src/server/routes/mod.rs deleted file mode 100644 index 6bc5750121d2976ebb387f0600ae3bbb2a69c0fc..0000000000000000000000000000000000000000 --- a/spaces/alamin655/websurfx/src/server/routes/mod.rs +++ /dev/null @@ -1,3 +0,0 @@ -//! This module provides modules to handle various routes in the search engine website. - -pub mod search; diff --git a/spaces/alibidaran/Davinci_EYE/README.md b/spaces/alibidaran/Davinci_EYE/README.md deleted file mode 100644 index 8a7349c9774115fbcd83019b3f1ffb4afa988da9..0000000000000000000000000000000000000000 --- a/spaces/alibidaran/Davinci_EYE/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Davinci EYE -emoji: 👀 -colorFrom: indigo -colorTo: blue -sdk: gradio -sdk_version: 3.2.0 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/aliceoq/vozes-da-loirinha/lib/uvr5_pack/lib_v5/dataset.py b/spaces/aliceoq/vozes-da-loirinha/lib/uvr5_pack/lib_v5/dataset.py deleted file mode 100644 index cfd01a174978d97180a897e40cb59ecadec1d12e..0000000000000000000000000000000000000000 --- a/spaces/aliceoq/vozes-da-loirinha/lib/uvr5_pack/lib_v5/dataset.py +++ /dev/null @@ -1,183 +0,0 @@ -import os -import random - -import numpy as np -import torch -import torch.utils.data -from tqdm import tqdm - -from . 
import spec_utils - - -class VocalRemoverValidationSet(torch.utils.data.Dataset): - def __init__(self, patch_list): - self.patch_list = patch_list - - def __len__(self): - return len(self.patch_list) - - def __getitem__(self, idx): - path = self.patch_list[idx] - data = np.load(path) - - X, y = data["X"], data["y"] - - X_mag = np.abs(X) - y_mag = np.abs(y) - - return X_mag, y_mag - - -def make_pair(mix_dir, inst_dir): - input_exts = [".wav", ".m4a", ".mp3", ".mp4", ".flac"] - - X_list = sorted( - [ - os.path.join(mix_dir, fname) - for fname in os.listdir(mix_dir) - if os.path.splitext(fname)[1] in input_exts - ] - ) - y_list = sorted( - [ - os.path.join(inst_dir, fname) - for fname in os.listdir(inst_dir) - if os.path.splitext(fname)[1] in input_exts - ] - ) - - filelist = list(zip(X_list, y_list)) - - return filelist - - -def train_val_split(dataset_dir, split_mode, val_rate, val_filelist): - if split_mode == "random": - filelist = make_pair( - os.path.join(dataset_dir, "mixtures"), - os.path.join(dataset_dir, "instruments"), - ) - - random.shuffle(filelist) - - if len(val_filelist) == 0: - val_size = int(len(filelist) * val_rate) - train_filelist = filelist[:-val_size] - val_filelist = filelist[-val_size:] - else: - train_filelist = [ - pair for pair in filelist if list(pair) not in val_filelist - ] - elif split_mode == "subdirs": - if len(val_filelist) != 0: - raise ValueError( - "The `val_filelist` option is not available in `subdirs` mode" - ) - - train_filelist = make_pair( - os.path.join(dataset_dir, "training/mixtures"), - os.path.join(dataset_dir, "training/instruments"), - ) - - val_filelist = make_pair( - os.path.join(dataset_dir, "validation/mixtures"), - os.path.join(dataset_dir, "validation/instruments"), - ) - - return train_filelist, val_filelist - - -def augment(X, y, reduction_rate, reduction_mask, mixup_rate, mixup_alpha): - perm = np.random.permutation(len(X)) - for i, idx in enumerate(tqdm(perm)): - if np.random.uniform() < reduction_rate: - y[idx] = spec_utils.reduce_vocal_aggressively( - X[idx], y[idx], reduction_mask - ) - - if np.random.uniform() < 0.5: - # swap channel - X[idx] = X[idx, ::-1] - y[idx] = y[idx, ::-1] - if np.random.uniform() < 0.02: - # mono - X[idx] = X[idx].mean(axis=0, keepdims=True) - y[idx] = y[idx].mean(axis=0, keepdims=True) - if np.random.uniform() < 0.02: - # inst - X[idx] = y[idx] - - if np.random.uniform() < mixup_rate and i < len(perm) - 1: - lam = np.random.beta(mixup_alpha, mixup_alpha) - X[idx] = lam * X[idx] + (1 - lam) * X[perm[i + 1]] - y[idx] = lam * y[idx] + (1 - lam) * y[perm[i + 1]] - - return X, y - - -def make_padding(width, cropsize, offset): - left = offset - roi_size = cropsize - left * 2 - if roi_size == 0: - roi_size = cropsize - right = roi_size - (width % roi_size) + left - - return left, right, roi_size - - -def make_training_set(filelist, cropsize, patches, sr, hop_length, n_fft, offset): - len_dataset = patches * len(filelist) - - X_dataset = np.zeros((len_dataset, 2, n_fft // 2 + 1, cropsize), dtype=np.complex64) - y_dataset = np.zeros((len_dataset, 2, n_fft // 2 + 1, cropsize), dtype=np.complex64) - - for i, (X_path, y_path) in enumerate(tqdm(filelist)): - X, y = spec_utils.cache_or_load(X_path, y_path, sr, hop_length, n_fft) - coef = np.max([np.abs(X).max(), np.abs(y).max()]) - X, y = X / coef, y / coef - - l, r, roi_size = make_padding(X.shape[2], cropsize, offset) - X_pad = np.pad(X, ((0, 0), (0, 0), (l, r)), mode="constant") - y_pad = np.pad(y, ((0, 0), (0, 0), (l, r)), mode="constant") - - starts = 
np.random.randint(0, X_pad.shape[2] - cropsize, patches) - ends = starts + cropsize - for j in range(patches): - idx = i * patches + j - X_dataset[idx] = X_pad[:, :, starts[j] : ends[j]] - y_dataset[idx] = y_pad[:, :, starts[j] : ends[j]] - - return X_dataset, y_dataset - - -def make_validation_set(filelist, cropsize, sr, hop_length, n_fft, offset): - patch_list = [] - patch_dir = "cs{}_sr{}_hl{}_nf{}_of{}".format( - cropsize, sr, hop_length, n_fft, offset - ) - os.makedirs(patch_dir, exist_ok=True) - - for i, (X_path, y_path) in enumerate(tqdm(filelist)): - basename = os.path.splitext(os.path.basename(X_path))[0] - - X, y = spec_utils.cache_or_load(X_path, y_path, sr, hop_length, n_fft) - coef = np.max([np.abs(X).max(), np.abs(y).max()]) - X, y = X / coef, y / coef - - l, r, roi_size = make_padding(X.shape[2], cropsize, offset) - X_pad = np.pad(X, ((0, 0), (0, 0), (l, r)), mode="constant") - y_pad = np.pad(y, ((0, 0), (0, 0), (l, r)), mode="constant") - - len_dataset = int(np.ceil(X.shape[2] / roi_size)) - for j in range(len_dataset): - outpath = os.path.join(patch_dir, "{}_p{}.npz".format(basename, j)) - start = j * roi_size - if not os.path.exists(outpath): - np.savez( - outpath, - X=X_pad[:, :, start : start + cropsize], - y=y_pad[:, :, start : start + cropsize], - ) - patch_list.append(outpath) - - return VocalRemoverValidationSet(patch_list) diff --git a/spaces/alistairmcleay/cambridge-masters-project/src/crazyneuraluser/user_model_code/utils_generation.py b/spaces/alistairmcleay/cambridge-masters-project/src/crazyneuraluser/user_model_code/utils_generation.py deleted file mode 100644 index 1d509d06931620a2f218bc1f29c7f43f209530f0..0000000000000000000000000000000000000000 --- a/spaces/alistairmcleay/cambridge-masters-project/src/crazyneuraluser/user_model_code/utils_generation.py +++ /dev/null @@ -1,210 +0,0 @@ -import torch - -from crazyneuraluser.user_model_code.utils_sgd import add_str, bcolors, wrap_element - - -def find_segment(gen, tag): - assert isinstance(gen, str) - gen = gen.split() - try: - start = gen.index("<{}/>".format(tag)) + 1 - end = gen.index("".format(tag)) - segment = " ".join(gen[start:end]) - except Exception: - print("Missing {} tag in generated sequence".format(tag)) - segment = None - return segment - - -def segment_gen(gen, dial_id): - def _color(_segment): - if tag == "CTX": - _segment = _segment.replace(" ", f"{bcolors.ENDC}") - _segment = _segment.replace(" ", f"{bcolors.ENDC}") - _segment = _segment.replace(" ", f"USR: {bcolors.OKCYAN}") - _segment = _segment.replace(" ", f"SYS: {bcolors.OKBLUE}") - if tag == "SYS_UTT": - _segment = f"{bcolors.OKBLUE}" + _segment + f"{bcolors.ENDC}" - if tag == "USR_UTT": - _segment = f"{bcolors.OKCYAN}" + _segment + f"{bcolors.ENDC}" - if tag in ["SYS_ACT", "USR_ACT", "GOAL"]: - _segment = _segment.replace(" ", f"{bcolors.RED}") - _segment = _segment.replace(" ", f"{bcolors.ENDC}") - _segment = _segment.replace(" ", f"{bcolors.YELLOW}") - _segment = _segment.replace(" ", f"{bcolors.ENDC}") - _segment = _segment.replace(" ", f"{bcolors.GREEN}") - _segment = _segment.replace(" ", f"{bcolors.ENDC}") - if tag == "GOAL": - _segment = _segment.replace( - "", f"{bcolors.UNDERLINE}" - ) - _segment = _segment.replace("", f"{bcolors.ENDC}") - _segment = _segment.replace("", f"{bcolors.UNDERLINE}") - _segment = _segment.replace("", f"{bcolors.ENDC}") - # if tag in ["SNT", "GC"]: - # segment = segment.replace("<{}/> ".format(tag), "<{}/> *".format(tag)) - # segment = segment.replace(" ".format(tag), "* <{}/>".format(tag)) - return 
_segment - - assert isinstance(gen, str) - print("*** Dial_id: {} ***".format(dial_id)) - for tag in [ - "CTX", - "SYS_UTT", - "SYS_ACT", - "GOAL", - "SNT", - "RA", - "GC", - "USR_ACT", - "USR_UTT", - ]: - segment = find_segment(gen, tag) - if segment is not None: - print('{} -> "{}"'.format(tag, _color(segment))) - else: - print("Fail to find the segment...") - print("GEN:", gen) - print("---" * 30) - input("press any key to continue...") - - -def save_gen(gen, dial_id, container): - output = {"raw_generation": gen} - parsed_generation = {} - - assert isinstance(gen, str) - for tag in [ - "CTX", - "SYS_UTT", - "SYS_ACT", - "GOAL", - "SNT", - "RA", - "GC", - "USR_ACT", - "USR_UTT", - ]: - segment = find_segment(gen, tag) - if segment is not None: - parsed_generation[tag] = segment - else: - print("Fail to parse generation on example {}".format(dial_id)) - parsed_generation[tag] = None - - output["parsed_generation"] = parsed_generation - container[dial_id] = output - - -# def decode(args, batch, model, tokenizer): -# input_ids = batch['input_ids'] -# batch_size, ctx_len = input_ids.size() -# assert batch_size == 1 -# bos_id, eos_id, pad_id, sep_id = tokenizer.convert_tokens_to_ids(['', '', '', '']) -# -# # output size: (B, T) -# output = model.generate(input_ids, max_length=(ctx_len+args.dec_max_len), do_sample=False, -# temperature=args.temperature, use_cache=True, num_beams=args.num_beams, bos_token_id=bos_id, -# eos_token_id=eos_id, pad_token_id=pad_id, early_stopping=True) -# -# gen = tokenizer.decode(output[0]) # include context fed into model -# segment_gen(gen, batch["example_id"][0]) -# return [gen] - - -def prepare_input_ids( - args: object, tokenizer: object, data: object, start_token: object -) -> object: - assert start_token in ["", ""] - input_seq = "" - for key in [ - "CTX", - "SYS_UTT", - "SYS_ACT", - "SNT", - "RA", - "GC", - "GOAL", - ]: # fixed order, consistent between training and inference - if key not in data: - continue - wrap = wrap_element(key, data[key]) - input_seq = add_str(input_seq, wrap) - - input_seq = add_str(input_seq, start_token) - - input_ids = tokenizer(input_seq)["input_ids"] # convert to ids - input_ids = torch.tensor([input_ids]).long().to(args.device) - return input_ids - - -def decode_e2e( - args, batch, model, tokenizer, user_goal=None, prev_usr_act=None, collector=None -): - """decode with predicted sys act, goal can be random or from the corpus""" - assert len(batch["metadata"]) == 1 - context = batch["metadata"][0]["context"] - sys_utt = batch["metadata"][0]["utterances"]["sys"] - bos_id, _, pad_id, sep_id = tokenizer.convert_tokens_to_ids( - ["", "", "", ""] - ) - - # first forward pass - data = {"CTX": context, "SYS_UTT": sys_utt} - start_token, end_token = "", "" - input_ids = prepare_input_ids(args, tokenizer, data, start_token) - eos_id = tokenizer.convert_tokens_to_ids(end_token) - output = model.generate( - input_ids, - max_length=args.dec_max_len, - do_sample=False, - temperature=args.temperature, - use_cache=True, - num_beams=args.num_beams, - bos_token_id=bos_id, - eos_token_id=eos_id, - pad_token_id=pad_id, - early_stopping=True, - ) - gen = tokenizer.decode(output[0]) # include context fed into model - - # parse the first pass prediction - for key in ["SYS_ACT", "SNT", "GC", "RA"]: - value = find_segment(gen, key) - data[key] = value - # print("***** First run generation *****") - # print("SYS_ACT -> {}".format(data["SYS_ACT"])) - # print("FLAGS -> SNT: {}, GC: {}, RA: {} *****".format(data["SNT"], data["GC"], data["RA"])) - # 
print("********************************") - - # prepare goal - if user_goal is None: # use ground truth goal from corpus - data["GOAL"] = batch["metadata"][0]["goal"] - else: - goal = user_goal.prepare_turn_goal( - prev_usr_act, data["SYS_ACT"], data["SNT"], data["GC"], data["RA"] - ) - data["GOAL"] = goal - - # second forward pass - start_token, end_token = "", "" - input_ids = prepare_input_ids(args, tokenizer, data, start_token) - eos_id = tokenizer.convert_tokens_to_ids(end_token) - output = model.generate( - input_ids, - max_length=args.dec_max_len, - do_sample=False, - temperature=args.temperature, - use_cache=True, - num_beams=args.num_beams, - bos_token_id=bos_id, - eos_token_id=eos_id, - pad_token_id=pad_id, - early_stopping=True, - ) - gen = tokenizer.decode(output[0]) # include context fed into model - if args.eye_browse_output: - segment_gen(gen, batch["example_id"][0]) - else: - save_gen(gen, batch["example_id"][0], collector) - return [gen] diff --git a/spaces/allknowingroger/Image-Models-Test110/README.md b/spaces/allknowingroger/Image-Models-Test110/README.md deleted file mode 100644 index 133e39677134f550e4a1fc1666bff5e90bb4c6b3..0000000000000000000000000000000000000000 --- a/spaces/allknowingroger/Image-Models-Test110/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: More Image Models -emoji: 😻 -colorFrom: red -colorTo: gray -sdk: gradio -sdk_version: 3.23.0 -app_file: app.py -duplicated_from: allknowingroger/Image-Models-Test109 ---- - - \ No newline at end of file diff --git a/spaces/allknowingroger/Image-Models-Test26/app.py b/spaces/allknowingroger/Image-Models-Test26/app.py deleted file mode 100644 index 96faabce5821cf0dd9b9a432b74c4357af25e525..0000000000000000000000000000000000000000 --- a/spaces/allknowingroger/Image-Models-Test26/app.py +++ /dev/null @@ -1,143 +0,0 @@ -import gradio as gr -# import os -# import sys -# from pathlib import Path -import time - -models =[ - "digiplay/2K", - "rigewiof/my-pet-dog", - "digiplay/PrefixRealisticMix_v1", - "rktf/osage-v1-0", - "rktf/osage-v2-0", - "digiplay/SoapMix2.5D_v1", - "akmalinn/monument2", - "Yntec/animeTEN", -] - - -model_functions = {} -model_idx = 1 -for model_path in models: - try: - model_functions[model_idx] = gr.Interface.load(f"models/{model_path}", live=False, preprocess=True, postprocess=False) - except Exception as error: - def the_fn(txt): - return None - model_functions[model_idx] = gr.Interface(fn=the_fn, inputs=["text"], outputs=["image"]) - model_idx+=1 - - -def send_it_idx(idx): - def send_it_fn(prompt): - output = (model_functions.get(str(idx)) or model_functions.get(str(1)))(prompt) - return output - return send_it_fn - -def get_prompts(prompt_text): - return prompt_text - -def clear_it(val): - if int(val) != 0: - val = 0 - else: - val = 0 - pass - return val - -def all_task_end(cnt,t_stamp): - to = t_stamp + 60 - et = time.time() - if et > to and t_stamp != 0: - d = gr.update(value=0) - tog = gr.update(value=1) - #print(f'to: {to} et: {et}') - else: - if cnt != 0: - d = gr.update(value=et) - else: - d = gr.update(value=0) - tog = gr.update(value=0) - #print (f'passing: to: {to} et: {et}') - pass - return d, tog - -def all_task_start(): - print("\n\n\n\n\n\n\n") - t = time.gmtime() - t_stamp = time.time() - current_time = time.strftime("%H:%M:%S", t) - return gr.update(value=t_stamp), gr.update(value=t_stamp), gr.update(value=0) - -def clear_fn(): - nn = len(models) - return tuple([None, *[None for _ in range(nn)]]) - - - -with gr.Blocks(title="SD Models") as my_interface: - with 
gr.Column(scale=12): - # with gr.Row(): - # gr.Markdown("""- Primary prompt: 你想画的内容(英文单词,如 a cat, 加英文逗号效果更好;点 Improve 按钮进行完善)\n- Real prompt: 完善后的提示词,出现后再点右边的 Run 按钮开始运行""") - with gr.Row(): - with gr.Row(scale=6): - primary_prompt=gr.Textbox(label="Prompt", value="") - # real_prompt=gr.Textbox(label="Real prompt") - with gr.Row(scale=6): - # improve_prompts_btn=gr.Button("Improve") - with gr.Row(): - run=gr.Button("Run",variant="primary") - clear_btn=gr.Button("Clear") - with gr.Row(): - sd_outputs = {} - model_idx = 1 - for model_path in models: - with gr.Column(scale=3, min_width=320): - with gr.Box(): - sd_outputs[model_idx] = gr.Image(label=model_path) - pass - model_idx += 1 - pass - pass - - with gr.Row(visible=False): - start_box=gr.Number(interactive=False) - end_box=gr.Number(interactive=False) - tog_box=gr.Textbox(value=0,interactive=False) - - start_box.change( - all_task_end, - [start_box, end_box], - [start_box, tog_box], - every=1, - show_progress=False) - - primary_prompt.submit(all_task_start, None, [start_box, end_box, tog_box]) - run.click(all_task_start, None, [start_box, end_box, tog_box]) - runs_dict = {} - model_idx = 1 - for model_path in models: - runs_dict[model_idx] = run.click(model_functions[model_idx], inputs=[primary_prompt], outputs=[sd_outputs[model_idx]]) - model_idx += 1 - pass - pass - - # improve_prompts_btn_clicked=improve_prompts_btn.click( - # get_prompts, - # inputs=[primary_prompt], - # outputs=[primary_prompt], - # cancels=list(runs_dict.values())) - clear_btn.click( - clear_fn, - None, - [primary_prompt, *list(sd_outputs.values())], - cancels=[*list(runs_dict.values())]) - tog_box.change( - clear_it, - tog_box, - tog_box, - cancels=[*list(runs_dict.values())]) - -my_interface.queue(concurrency_count=600, status_update_rate=1) -my_interface.launch(inline=True, show_api=False) - \ No newline at end of file diff --git a/spaces/alphunt/diffdock-alphunt-demo/evaluate.py b/spaces/alphunt/diffdock-alphunt-demo/evaluate.py deleted file mode 100644 index 328394c6f6c4ce7057557abdbde70a135801d324..0000000000000000000000000000000000000000 --- a/spaces/alphunt/diffdock-alphunt-demo/evaluate.py +++ /dev/null @@ -1,534 +0,0 @@ -import copy -import os -import torch -import time -from argparse import ArgumentParser, Namespace, FileType -from datetime import datetime -from functools import partial -import numpy as np -import wandb -from biopandas.pdb import PandasPdb -from rdkit import RDLogger -from torch_geometric.loader import DataLoader - -from datasets.pdbbind import PDBBind, read_mol -from utils.diffusion_utils import t_to_sigma as t_to_sigma_compl, get_t_schedule -from utils.sampling import randomize_position, sampling -from utils.utils import get_model, get_symmetry_rmsd, remove_all_hs, read_strings_from_txt, ExponentialMovingAverage -from utils.visualise import PDBFile -from tqdm import tqdm - -RDLogger.DisableLog('rdApp.*') -import yaml - -cache_name = datetime.now().strftime('date%d-%m_time%H-%M-%S.%f') -parser = ArgumentParser() -parser.add_argument('--config', type=FileType(mode='r'), default=None) -parser.add_argument('--model_dir', type=str, default='workdir', help='Path to folder with trained score model and hyperparameters') -parser.add_argument('--ckpt', type=str, default='best_model.pt', help='Checkpoint to use inside the folder') -parser.add_argument('--confidence_model_dir', type=str, default=None, help='Path to folder with trained confidence model and hyperparameters') -parser.add_argument('--confidence_ckpt', type=str, 
default='best_model.pt', help='Checkpoint to use inside the folder') -parser.add_argument('--affinity_model_dir', type=str, default=None, help='Path to folder with trained affinity model and hyperparameters') -parser.add_argument('--affinity_ckpt', type=str, default='best_model.pt', help='Checkpoint to use inside the folder') -parser.add_argument('--num_cpu', type=int, default=None, help='if this is a number instead of none, the max number of cpus used by torch will be set to this.') -parser.add_argument('--run_name', type=str, default='test', help='') -parser.add_argument('--project', type=str, default='ligbind_inf', help='') -parser.add_argument('--out_dir', type=str, default=None, help='Where to save results to') -parser.add_argument('--batch_size', type=int, default=10, help='Number of poses to sample in parallel') -parser.add_argument('--cache_path', type=str, default='data/cacheNew', help='Folder from where to load/restore cached dataset') -parser.add_argument('--data_dir', type=str, default='data/PDBBind_processed/', help='Folder containing original structures') -parser.add_argument('--split_path', type=str, default='data/splits/timesplit_no_lig_overlap_val', help='Path of file defining the split') -parser.add_argument('--no_model', action='store_true', default=False, help='Whether to return seed conformer without running model') -parser.add_argument('--no_random', action='store_true', default=False, help='Whether to add randomness in diffusion steps') -parser.add_argument('--no_final_step_noise', action='store_true', default=False, help='Whether to add noise after the final step') -parser.add_argument('--ode', action='store_true', default=False, help='Whether to run the probability flow ODE') -parser.add_argument('--wandb', action='store_true', default=False, help='') -parser.add_argument('--inference_steps', type=int, default=20, help='Number of denoising steps') -parser.add_argument('--limit_complexes', type=int, default=0, help='Limit to the number of complexes') -parser.add_argument('--num_workers', type=int, default=1, help='Number of workers for dataset creation') -parser.add_argument('--tqdm', action='store_true', default=False, help='Whether to show progress bar') -parser.add_argument('--save_visualisation', action='store_true', default=False, help='Whether to save visualizations') -parser.add_argument('--samples_per_complex', type=int, default=1, help='Number of poses to sample for each complex') -parser.add_argument('--actual_steps', type=int, default=None, help='') -args = parser.parse_args() - -if args.config: - config_dict = yaml.load(args.config, Loader=yaml.FullLoader) - arg_dict = args.__dict__ - for key, value in config_dict.items(): - if isinstance(value, list): - for v in value: - arg_dict[key].append(v) - else: - arg_dict[key] = value - -if args.out_dir is None: args.out_dir = f'inference_out_dir_not_specified/{args.run_name}' -os.makedirs(args.out_dir, exist_ok=True) -with open(f'{args.model_dir}/model_parameters.yml') as f: - score_model_args = Namespace(**yaml.full_load(f)) - - -if args.confidence_model_dir is not None: - with open(f'{args.confidence_model_dir}/model_parameters.yml') as f: - confidence_args = Namespace(**yaml.full_load(f)) - if not os.path.exists(confidence_args.original_model_dir): - print("Path does not exist: ", confidence_args.original_model_dir) - confidence_args.original_model_dir = os.path.join(*confidence_args.original_model_dir.split('/')[-2:]) - print('instead trying path: ', confidence_args.original_model_dir) - -device = 
torch.device('cuda' if torch.cuda.is_available() else 'cpu') -test_dataset = PDBBind(transform=None, root=args.data_dir, limit_complexes=args.limit_complexes, - receptor_radius=score_model_args.receptor_radius, - cache_path=args.cache_path, split_path=args.split_path, - remove_hs=score_model_args.remove_hs, max_lig_size=None, - c_alpha_max_neighbors=score_model_args.c_alpha_max_neighbors, - matching=not score_model_args.no_torsion, keep_original=True, - popsize=score_model_args.matching_popsize, - maxiter=score_model_args.matching_maxiter, - all_atoms=score_model_args.all_atoms, - atom_radius=score_model_args.atom_radius, - atom_max_neighbors=score_model_args.atom_max_neighbors, - esm_embeddings_path=score_model_args.esm_embeddings_path, - require_ligand=True, - num_workers=args.num_workers) -test_loader = DataLoader(dataset=test_dataset, batch_size=1, shuffle=False) - -if args.confidence_model_dir is not None: - if not (confidence_args.use_original_model_cache or confidence_args.transfer_weights): - # if the confidence model uses the same type of data as the original model then we do not need this dataset and can just use the complexes - print('HAPPENING | confidence model uses different type of graphs than the score model. Loading (or creating if not existing) the data for the confidence model now.') - confidence_test_dataset = PDBBind(transform=None, root=args.data_dir, limit_complexes=args.limit_complexes, - receptor_radius=confidence_args.receptor_radius, - cache_path=args.cache_path, split_path=args.split_path, - remove_hs=confidence_args.remove_hs, max_lig_size=None, c_alpha_max_neighbors=confidence_args.c_alpha_max_neighbors, - matching=not confidence_args.no_torsion, keep_original=True, - popsize=confidence_args.matching_popsize, - maxiter=confidence_args.matching_maxiter, - all_atoms=confidence_args.all_atoms, - atom_radius=confidence_args.atom_radius, - atom_max_neighbors=confidence_args.atom_max_neighbors, - esm_embeddings_path= confidence_args.esm_embeddings_path, require_ligand=True, - num_workers=args.num_workers) - confidence_complex_dict = {d.name: d for d in confidence_test_dataset} - -t_to_sigma = partial(t_to_sigma_compl, args=score_model_args) - -if not args.no_model: - model = get_model(score_model_args, device, t_to_sigma=t_to_sigma, no_parallel=True) - state_dict = torch.load(f'{args.model_dir}/{args.ckpt}', map_location=torch.device('cpu')) - if args.ckpt == 'last_model.pt': - model_state_dict = state_dict['model'] - ema_weights_state = state_dict['ema_weights'] - model.load_state_dict(model_state_dict, strict=True) - ema_weights = ExponentialMovingAverage(model.parameters(), decay=score_model_args.ema_rate) - ema_weights.load_state_dict(ema_weights_state, device=device) - ema_weights.copy_to(model.parameters()) - else: - model.load_state_dict(state_dict, strict=True) - model = model.to(device) - model.eval() - if args.confidence_model_dir is not None: - if confidence_args.transfer_weights: - with open(f'{confidence_args.original_model_dir}/model_parameters.yml') as f: - confidence_model_args = Namespace(**yaml.full_load(f)) - else: - confidence_model_args = confidence_args - - confidence_model = get_model(confidence_model_args, device, t_to_sigma=t_to_sigma, no_parallel=True, - confidence_mode=True) - state_dict = torch.load(f'{args.confidence_model_dir}/{args.confidence_ckpt}', map_location=torch.device('cpu')) - confidence_model.load_state_dict(state_dict, strict=True) - confidence_model = confidence_model.to(device) - confidence_model.eval() - else: - 
confidence_model = None - confidence_args = None - confidence_model_args = None - - -if args.wandb: - run = wandb.init( - entity='entity', - settings=wandb.Settings(start_method="fork"), - project=args.project, - name=args.run_name, - config=args - ) - -tr_schedule = get_t_schedule(inference_steps=args.inference_steps) -rot_schedule = tr_schedule -tor_schedule = tr_schedule -print('t schedule', tr_schedule) - -rmsds_list, obrmsds, centroid_distances_list, failures, skipped, min_cross_distances_list, base_min_cross_distances_list, confidences_list, names_list = [], [], [], 0, 0, [], [], [], [] -true_affinities_list, pred_affinities_list, run_times, min_self_distances_list, without_rec_overlap_list = [], [], [], [], [] -N = args.samples_per_complex -names_no_rec_overlap = read_strings_from_txt(f'data/splits/timesplit_test_no_rec_overlap') -print('Size of test dataset: ', len(test_dataset)) - -for idx, orig_complex_graph in tqdm(enumerate(test_loader)): - if confidence_model is not None and not (confidence_args.use_original_model_cache or - confidence_args.transfer_weights) and orig_complex_graph.name[0] not in confidence_complex_dict.keys(): - skipped += 1 - print(f"HAPPENING | The confidence dataset did not contain {orig_complex_graph.name[0]}. We are skipping this complex.") - continue - - success = 0 - while not success: # keep trying in case of failure (sometimes stochastic) - try: - success = 1 - data_list = [copy.deepcopy(orig_complex_graph) for _ in range(N)] - randomize_position(data_list, score_model_args.no_torsion, args.no_random, score_model_args.tr_sigma_max) - - pdb = None - if args.save_visualisation: - visualization_list = [] - for idx, graph in enumerate(data_list): - lig = read_mol(args.data_dir, graph['name'][0], remove_hs=score_model_args.remove_hs) - pdb = PDBFile(lig) - pdb.add(lig, 0, 0) - pdb.add((orig_complex_graph['ligand'].pos + orig_complex_graph.original_center).detach().cpu(), 1, 0) - pdb.add((graph['ligand'].pos + graph.original_center).detach().cpu(), part=1, order=1) - visualization_list.append(pdb) - else: - visualization_list = None - - rec_path = os.path.join(args.data_dir, data_list[0]["name"][0], f'{data_list[0]["name"][0]}_protein_processed.pdb') - if not os.path.exists(rec_path): - rec_path = os.path.join(args.data_dir, data_list[0]["name"][0], f'{data_list[0]["name"][0]}_protein_obabel_reduce.pdb') - rec = PandasPdb().read_pdb(rec_path) - rec_df = rec.df['ATOM'] - receptor_pos = rec_df[['x_coord', 'y_coord', 'z_coord']].to_numpy().squeeze().astype( - np.float32) - orig_complex_graph.original_center.cpu().numpy() - receptor_pos = np.tile(receptor_pos, (N, 1, 1)) - start_time = time.time() - if not args.no_model: - if confidence_model is not None and not ( - confidence_args.use_original_model_cache or confidence_args.transfer_weights): - confidence_data_list = [copy.deepcopy(confidence_complex_dict[orig_complex_graph.name[0]]) for _ in - range(N)] - else: - confidence_data_list = None - - data_list, confidence = sampling(data_list=data_list, model=model, - inference_steps=args.actual_steps if args.actual_steps is not None else args.inference_steps, - tr_schedule=tr_schedule, rot_schedule=rot_schedule, - tor_schedule=tor_schedule, - device=device, t_to_sigma=t_to_sigma, model_args=score_model_args, - no_random=args.no_random, - ode=args.ode, visualization_list=visualization_list, - confidence_model=confidence_model, - confidence_data_list=confidence_data_list, - confidence_model_args=confidence_model_args, - batch_size=args.batch_size, - 
no_final_step_noise=args.no_final_step_noise) - - run_times.append(time.time() - start_time) - if score_model_args.no_torsion: orig_complex_graph['ligand'].orig_pos = (orig_complex_graph['ligand'].pos.cpu().numpy() + orig_complex_graph.original_center.cpu().numpy()) - - filterHs = torch.not_equal(data_list[0]['ligand'].x[:, 0], 0).cpu().numpy() - - if isinstance(orig_complex_graph['ligand'].orig_pos, list): - orig_complex_graph['ligand'].orig_pos = orig_complex_graph['ligand'].orig_pos[0] - - ligand_pos = np.asarray( - [complex_graph['ligand'].pos.cpu().numpy()[filterHs] for complex_graph in data_list]) - orig_ligand_pos = np.expand_dims( - orig_complex_graph['ligand'].orig_pos[filterHs] - orig_complex_graph.original_center.cpu().numpy(), - axis=0) - - try: - mol = remove_all_hs(orig_complex_graph.mol[0]) - rmsd = get_symmetry_rmsd(mol, orig_ligand_pos[0], [l for l in ligand_pos]) - except Exception as e: - print("Using non corrected RMSD because of the error", e) - rmsd = np.sqrt(((ligand_pos - orig_ligand_pos) ** 2).sum(axis=2).mean(axis=1)) - rmsds_list.append(rmsd) - centroid_distance = np.linalg.norm(ligand_pos.mean(axis=1) - orig_ligand_pos.mean(axis=1), axis=1) - if confidence is not None and isinstance(confidence_args.rmsd_classification_cutoff, list): - confidence = confidence[:, 0] - if confidence is not None: - confidence = confidence.cpu().numpy() - re_order = np.argsort(confidence)[::-1] - print(orig_complex_graph['name'], ' rmsd', np.around(rmsd, 1)[re_order], ' centroid distance', - np.around(centroid_distance, 1)[re_order], ' confidences ', np.around(confidence, 4)[re_order]) - confidences_list.append(confidence) - else: - print(orig_complex_graph['name'], ' rmsd', np.around(rmsd, 1), ' centroid distance', - np.around(centroid_distance, 1)) - centroid_distances_list.append(centroid_distance) - - cross_distances = np.linalg.norm(receptor_pos[:, :, None, :] - ligand_pos[:, None, :, :], axis=-1) - min_cross_distances_list.append(np.min(cross_distances, axis=(1, 2))) - self_distances = np.linalg.norm(ligand_pos[:, :, None, :] - ligand_pos[:, None, :, :], axis=-1) - self_distances = np.where(np.eye(self_distances.shape[2]), np.inf, self_distances) - min_self_distances_list.append(np.min(self_distances, axis=(1, 2))) - - base_cross_distances = np.linalg.norm(receptor_pos[:, :, None, :] - orig_ligand_pos[:, None, :, :], axis=-1) - base_min_cross_distances_list.append(np.min(base_cross_distances, axis=(1, 2))) - - if args.save_visualisation: - if confidence is not None: - for rank, batch_idx in enumerate(re_order): - visualization_list[batch_idx].write( - f'{args.out_dir}/{data_list[batch_idx]["name"][0]}_{rank + 1}_{rmsd[batch_idx]:.1f}_{(confidence)[batch_idx]:.1f}.pdb') - else: - for rank, batch_idx in enumerate(np.argsort(rmsd)): - visualization_list[batch_idx].write( - f'{args.out_dir}/{data_list[batch_idx]["name"][0]}_{rank + 1}_{rmsd[batch_idx]:.1f}.pdb') - without_rec_overlap_list.append(1 if orig_complex_graph.name[0] in names_no_rec_overlap else 0) - names_list.append(orig_complex_graph.name[0]) - except Exception as e: - print("Failed on", orig_complex_graph["name"], e) - failures += 1 - success = 0 - -print('Performance without hydrogens included in the loss') -print(failures, "failures due to exceptions") -print(skipped, ' skipped because complex was not in confidence dataset') - -performance_metrics = {} -for overlap in ['', 'no_overlap_']: - if 'no_overlap_' == overlap: - without_rec_overlap = np.array(without_rec_overlap_list, dtype=bool) - if 
without_rec_overlap.sum() == 0: continue - rmsds = np.array(rmsds_list)[without_rec_overlap] - min_self_distances = np.array(min_self_distances_list)[without_rec_overlap] - centroid_distances = np.array(centroid_distances_list)[without_rec_overlap] - confidences = np.array(confidences_list)[without_rec_overlap] - min_cross_distances = np.array(min_cross_distances_list)[without_rec_overlap] - base_min_cross_distances = np.array(base_min_cross_distances_list)[without_rec_overlap] - names = np.array(names_list)[without_rec_overlap] - else: - rmsds = np.array(rmsds_list) - min_self_distances = np.array(min_self_distances_list) - centroid_distances = np.array(centroid_distances_list) - confidences = np.array(confidences_list) - min_cross_distances = np.array(min_cross_distances_list) - base_min_cross_distances = np.array(base_min_cross_distances_list) - names = np.array(names_list) - - run_times = np.array(run_times) - np.save(f'{args.out_dir}/{overlap}min_cross_distances.npy', min_cross_distances) - np.save(f'{args.out_dir}/{overlap}min_self_distances.npy', min_self_distances) - np.save(f'{args.out_dir}/{overlap}base_min_cross_distances.npy', base_min_cross_distances) - np.save(f'{args.out_dir}/{overlap}rmsds.npy', rmsds) - np.save(f'{args.out_dir}/{overlap}centroid_distances.npy', centroid_distances) - np.save(f'{args.out_dir}/{overlap}confidences.npy', confidences) - np.save(f'{args.out_dir}/{overlap}run_times.npy', run_times) - np.save(f'{args.out_dir}/{overlap}complex_names.npy', np.array(names)) - - performance_metrics.update({ - f'{overlap}run_times_std': run_times.std().__round__(2), - f'{overlap}run_times_mean': run_times.mean().__round__(2), - f'{overlap}steric_clash_fraction': ( - 100 * (min_cross_distances < 0.4).sum() / len(min_cross_distances) / N).__round__(2), - f'{overlap}self_intersect_fraction': ( - 100 * (min_self_distances < 0.4).sum() / len(min_self_distances) / N).__round__(2), - f'{overlap}mean_rmsd': rmsds.mean(), - f'{overlap}rmsds_below_2': (100 * (rmsds < 2).sum() / len(rmsds) / N), - f'{overlap}rmsds_below_5': (100 * (rmsds < 5).sum() / len(rmsds) / N), - f'{overlap}rmsds_percentile_25': np.percentile(rmsds, 25).round(2), - f'{overlap}rmsds_percentile_50': np.percentile(rmsds, 50).round(2), - f'{overlap}rmsds_percentile_75': np.percentile(rmsds, 75).round(2), - - f'{overlap}mean_centroid': centroid_distances.mean().__round__(2), - f'{overlap}centroid_below_2': (100 * (centroid_distances < 2).sum() / len(centroid_distances) / N).__round__(2), - f'{overlap}centroid_below_5': (100 * (centroid_distances < 5).sum() / len(centroid_distances) / N).__round__(2), - f'{overlap}centroid_percentile_25': np.percentile(centroid_distances, 25).round(2), - f'{overlap}centroid_percentile_50': np.percentile(centroid_distances, 50).round(2), - f'{overlap}centroid_percentile_75': np.percentile(centroid_distances, 75).round(2), - }) - - if N >= 5: - top5_rmsds = np.min(rmsds[:, :5], axis=1) - top5_centroid_distances = centroid_distances[ - np.arange(rmsds.shape[0])[:, None], np.argsort(rmsds[:, :5], axis=1)][:, 0] - top5_min_cross_distances = min_cross_distances[ - np.arange(rmsds.shape[0])[:, None], np.argsort(rmsds[:, :5], axis=1)][:, 0] - top5_min_self_distances = min_self_distances[ - np.arange(rmsds.shape[0])[:, None], np.argsort(rmsds[:, :5], axis=1)][:, 0] - performance_metrics.update({ - f'{overlap}top5_steric_clash_fraction': ( - 100 * (top5_min_cross_distances < 0.4).sum() / len(top5_min_cross_distances)).__round__(2), - f'{overlap}top5_self_intersect_fraction': ( - 100 * 
(top5_min_self_distances < 0.4).sum() / len(top5_min_self_distances)).__round__(2), - f'{overlap}top5_rmsds_below_2': (100 * (top5_rmsds < 2).sum() / len(top5_rmsds)).__round__(2), - f'{overlap}top5_rmsds_below_5': (100 * (top5_rmsds < 5).sum() / len(top5_rmsds)).__round__(2), - f'{overlap}top5_rmsds_percentile_25': np.percentile(top5_rmsds, 25).round(2), - f'{overlap}top5_rmsds_percentile_50': np.percentile(top5_rmsds, 50).round(2), - f'{overlap}top5_rmsds_percentile_75': np.percentile(top5_rmsds, 75).round(2), - - f'{overlap}top5_centroid_below_2': ( - 100 * (top5_centroid_distances < 2).sum() / len(top5_centroid_distances)).__round__(2), - f'{overlap}top5_centroid_below_5': ( - 100 * (top5_centroid_distances < 5).sum() / len(top5_centroid_distances)).__round__(2), - f'{overlap}top5_centroid_percentile_25': np.percentile(top5_centroid_distances, 25).round(2), - f'{overlap}top5_centroid_percentile_50': np.percentile(top5_centroid_distances, 50).round(2), - f'{overlap}top5_centroid_percentile_75': np.percentile(top5_centroid_distances, 75).round(2), - }) - - if N >= 10: - top10_rmsds = np.min(rmsds[:, :10], axis=1) - top10_centroid_distances = centroid_distances[ - np.arange(rmsds.shape[0])[:, None], np.argsort(rmsds[:, :10], axis=1)][:, 0] - top10_min_cross_distances = min_cross_distances[ - np.arange(rmsds.shape[0])[:, None], np.argsort(rmsds[:, :10], axis=1)][:, 0] - top10_min_self_distances = min_self_distances[ - np.arange(rmsds.shape[0])[:, None], np.argsort(rmsds[:, :10], axis=1)][:, 0] - performance_metrics.update({ - f'{overlap}top10_steric_clash_fraction': ( - 100 * (top10_min_cross_distances < 0.4).sum() / len(top10_min_cross_distances)).__round__(2), - f'{overlap}top10_self_intersect_fraction': ( - 100 * (top10_min_self_distances < 0.4).sum() / len(top10_min_self_distances)).__round__(2), - f'{overlap}top10_rmsds_below_2': (100 * (top10_rmsds < 2).sum() / len(top10_rmsds)).__round__(2), - f'{overlap}top10_rmsds_below_5': (100 * (top10_rmsds < 5).sum() / len(top10_rmsds)).__round__(2), - f'{overlap}top10_rmsds_percentile_25': np.percentile(top10_rmsds, 25).round(2), - f'{overlap}top10_rmsds_percentile_50': np.percentile(top10_rmsds, 50).round(2), - f'{overlap}top10_rmsds_percentile_75': np.percentile(top10_rmsds, 75).round(2), - - f'{overlap}top10_centroid_below_2': ( - 100 * (top10_centroid_distances < 2).sum() / len(top10_centroid_distances)).__round__(2), - f'{overlap}top10_centroid_below_5': ( - 100 * (top10_centroid_distances < 5).sum() / len(top10_centroid_distances)).__round__(2), - f'{overlap}top10_centroid_percentile_25': np.percentile(top10_centroid_distances, 25).round(2), - f'{overlap}top10_centroid_percentile_50': np.percentile(top10_centroid_distances, 50).round(2), - f'{overlap}top10_centroid_percentile_75': np.percentile(top10_centroid_distances, 75).round(2), - }) - - if confidence_model is not None: - confidence_ordering = np.argsort(confidences, axis=1)[:, ::-1] - - filtered_rmsds = rmsds[np.arange(rmsds.shape[0])[:, None], confidence_ordering][:, 0] - filtered_centroid_distances = centroid_distances[np.arange(rmsds.shape[0])[:, None], confidence_ordering][:, 0] - filtered_min_cross_distances = min_cross_distances[np.arange(rmsds.shape[0])[:, None], confidence_ordering][:, - 0] - filtered_min_self_distances = min_self_distances[np.arange(rmsds.shape[0])[:, None], confidence_ordering][:, 0] - performance_metrics.update({ - f'{overlap}filtered_self_intersect_fraction': ( - 100 * (filtered_min_self_distances < 0.4).sum() / 
len(filtered_min_self_distances)).__round__( - 2), - f'{overlap}filtered_steric_clash_fraction': ( - 100 * (filtered_min_cross_distances < 0.4).sum() / len(filtered_min_cross_distances)).__round__( - 2), - f'{overlap}filtered_rmsds_below_2': (100 * (filtered_rmsds < 2).sum() / len(filtered_rmsds)).__round__(2), - f'{overlap}filtered_rmsds_below_5': (100 * (filtered_rmsds < 5).sum() / len(filtered_rmsds)).__round__(2), - f'{overlap}filtered_rmsds_percentile_25': np.percentile(filtered_rmsds, 25).round(2), - f'{overlap}filtered_rmsds_percentile_50': np.percentile(filtered_rmsds, 50).round(2), - f'{overlap}filtered_rmsds_percentile_75': np.percentile(filtered_rmsds, 75).round(2), - - f'{overlap}filtered_centroid_below_2': ( - 100 * (filtered_centroid_distances < 2).sum() / len(filtered_centroid_distances)).__round__(2), - f'{overlap}filtered_centroid_below_5': ( - 100 * (filtered_centroid_distances < 5).sum() / len(filtered_centroid_distances)).__round__(2), - f'{overlap}filtered_centroid_percentile_25': np.percentile(filtered_centroid_distances, 25).round(2), - f'{overlap}filtered_centroid_percentile_50': np.percentile(filtered_centroid_distances, 50).round(2), - f'{overlap}filtered_centroid_percentile_75': np.percentile(filtered_centroid_distances, 75).round(2), - }) - - if N >= 5: - top5_filtered_rmsds = np.min(rmsds[np.arange(rmsds.shape[0])[:, None], confidence_ordering][:, :5], axis=1) - top5_filtered_centroid_distances = \ - centroid_distances[np.arange(rmsds.shape[0])[:, None], confidence_ordering][:, :5][ - np.arange(rmsds.shape[0])[:, None], np.argsort( - rmsds[np.arange(rmsds.shape[0])[:, None], confidence_ordering][:, :5], axis=1)][:, 0] - top5_filtered_min_cross_distances = \ - min_cross_distances[np.arange(rmsds.shape[0])[:, None], confidence_ordering][:, :5][ - np.arange(rmsds.shape[0])[:, None], np.argsort( - rmsds[np.arange(rmsds.shape[0])[:, None], confidence_ordering][:, :5], axis=1)][:, 0] - top5_filtered_min_self_distances = \ - min_self_distances[np.arange(rmsds.shape[0])[:, None], confidence_ordering][:, :5][ - np.arange(rmsds.shape[0])[:, None], np.argsort( - rmsds[np.arange(rmsds.shape[0])[:, None], confidence_ordering][:, :5], axis=1)][:, 0] - performance_metrics.update({ - f'{overlap}top5_filtered_self_intersect_fraction': ( - 100 * (top5_filtered_min_cross_distances < 0.4).sum() / len( - top5_filtered_min_cross_distances)).__round__(2), - f'{overlap}top5_filtered_steric_clash_fraction': ( - 100 * (top5_filtered_min_cross_distances < 0.4).sum() / len( - top5_filtered_min_cross_distances)).__round__(2), - f'{overlap}top5_filtered_rmsds_below_2': ( - 100 * (top5_filtered_rmsds < 2).sum() / len(top5_filtered_rmsds)).__round__(2), - f'{overlap}top5_filtered_rmsds_below_5': ( - 100 * (top5_filtered_rmsds < 5).sum() / len(top5_filtered_rmsds)).__round__(2), - f'{overlap}top5_filtered_rmsds_percentile_25': np.percentile(top5_filtered_rmsds, 25).round(2), - f'{overlap}top5_filtered_rmsds_percentile_50': np.percentile(top5_filtered_rmsds, 50).round(2), - f'{overlap}top5_filtered_rmsds_percentile_75': np.percentile(top5_filtered_rmsds, 75).round(2), - - f'{overlap}top5_filtered_centroid_below_2': (100 * (top5_filtered_centroid_distances < 2).sum() / len( - top5_filtered_centroid_distances)).__round__(2), - f'{overlap}top5_filtered_centroid_below_5': (100 * (top5_filtered_centroid_distances < 5).sum() / len( - top5_filtered_centroid_distances)).__round__(2), - f'{overlap}top5_filtered_centroid_percentile_25': np.percentile(top5_filtered_centroid_distances, - 25).round(2), - 
f'{overlap}top5_filtered_centroid_percentile_50': np.percentile(top5_filtered_centroid_distances, - 50).round(2), - f'{overlap}top5_filtered_centroid_percentile_75': np.percentile(top5_filtered_centroid_distances, - 75).round(2), - }) - if N >= 10: - top10_filtered_rmsds = np.min(rmsds[np.arange(rmsds.shape[0])[:, None], confidence_ordering][:, :10], - axis=1) - top10_filtered_centroid_distances = \ - centroid_distances[np.arange(rmsds.shape[0])[:, None], confidence_ordering][:, :10][ - np.arange(rmsds.shape[0])[:, None], np.argsort( - rmsds[np.arange(rmsds.shape[0])[:, None], confidence_ordering][:, :10], axis=1)][:, 0] - top10_filtered_min_cross_distances = \ - min_cross_distances[np.arange(rmsds.shape[0])[:, None], confidence_ordering][:, :10][ - np.arange(rmsds.shape[0])[:, None], np.argsort( - rmsds[np.arange(rmsds.shape[0])[:, None], confidence_ordering][:, :10], axis=1)][:, 0] - top10_filtered_min_self_distances = \ - min_self_distances[np.arange(rmsds.shape[0])[:, None], confidence_ordering][:, :10][ - np.arange(rmsds.shape[0])[:, None], np.argsort( - rmsds[np.arange(rmsds.shape[0])[:, None], confidence_ordering][:, :10], axis=1)][:, 0] - performance_metrics.update({ - f'{overlap}top10_filtered_self_intersect_fraction': ( - 100 * (top10_filtered_min_cross_distances < 0.4).sum() / len( - top10_filtered_min_cross_distances)).__round__(2), - f'{overlap}top10_filtered_steric_clash_fraction': ( - 100 * (top10_filtered_min_cross_distances < 0.4).sum() / len( - top10_filtered_min_cross_distances)).__round__(2), - f'{overlap}top10_filtered_rmsds_below_2': ( - 100 * (top10_filtered_rmsds < 2).sum() / len(top10_filtered_rmsds)).__round__(2), - f'{overlap}top10_filtered_rmsds_below_5': ( - 100 * (top10_filtered_rmsds < 5).sum() / len(top10_filtered_rmsds)).__round__(2), - f'{overlap}top10_filtered_rmsds_percentile_25': np.percentile(top10_filtered_rmsds, 25).round(2), - f'{overlap}top10_filtered_rmsds_percentile_50': np.percentile(top10_filtered_rmsds, 50).round(2), - f'{overlap}top10_filtered_rmsds_percentile_75': np.percentile(top10_filtered_rmsds, 75).round(2), - - f'{overlap}top10_filtered_centroid_below_2': (100 * (top10_filtered_centroid_distances < 2).sum() / len( - top10_filtered_centroid_distances)).__round__(2), - f'{overlap}top10_filtered_centroid_below_5': (100 * (top10_filtered_centroid_distances < 5).sum() / len( - top10_filtered_centroid_distances)).__round__(2), - f'{overlap}top10_filtered_centroid_percentile_25': np.percentile(top10_filtered_centroid_distances, - 25).round(2), - f'{overlap}top10_filtered_centroid_percentile_50': np.percentile(top10_filtered_centroid_distances, - 50).round(2), - f'{overlap}top10_filtered_centroid_percentile_75': np.percentile(top10_filtered_centroid_distances, - 75).round(2), - }) - -for k in performance_metrics: - print(k, performance_metrics[k]) - -if args.wandb: - wandb.log(performance_metrics) - histogram_metrics_list = [('rmsd', rmsds[:, 0]), - ('centroid_distance', centroid_distances[:, 0]), - ('mean_rmsd', rmsds.mean(axis=1)), - ('mean_centroid_distance', centroid_distances.mean(axis=1))] - if N >= 5: - histogram_metrics_list.append(('top5_rmsds', top5_rmsds)) - histogram_metrics_list.append(('top5_centroid_distances', top5_centroid_distances)) - if N >= 10: - histogram_metrics_list.append(('top10_rmsds', top10_rmsds)) - histogram_metrics_list.append(('top10_centroid_distances', top10_centroid_distances)) - if confidence_model is not None: - histogram_metrics_list.append(('filtered_rmsd', filtered_rmsds)) - 
histogram_metrics_list.append(('filtered_centroid_distance', filtered_centroid_distances)) - if N >= 5: - histogram_metrics_list.append(('top5_filtered_rmsds', top5_filtered_rmsds)) - histogram_metrics_list.append(('top5_filtered_centroid_distances', top5_filtered_centroid_distances)) - if N >= 10: - histogram_metrics_list.append(('top10_filtered_rmsds', top10_filtered_rmsds)) - histogram_metrics_list.append(('top10_filtered_centroid_distances', top10_filtered_centroid_distances)) diff --git a/spaces/amarchheda/ChordDuplicate/portaudio/src/os/win/pa_x86_plain_converters.c b/spaces/amarchheda/ChordDuplicate/portaudio/src/os/win/pa_x86_plain_converters.c deleted file mode 100644 index 109699427237c58b0b13dc31050aca605276a153..0000000000000000000000000000000000000000 --- a/spaces/amarchheda/ChordDuplicate/portaudio/src/os/win/pa_x86_plain_converters.c +++ /dev/null @@ -1,1218 +0,0 @@ -/* - * Plain Intel IA32 assembly implementations of PortAudio sample converter functions. - * Copyright (c) 1999-2002 Ross Bencina, Phil Burk - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files - * (the "Software"), to deal in the Software without restriction, - * including without limitation the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * - * The above copyright notice and this permission notice shall be - * included in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR - * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF - * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -/* - * The text above constitutes the entire PortAudio license; however, - * the PortAudio community also makes the following non-binding requests: - * - * Any person wishing to distribute modifications to the Software is - * requested to send the modifications to the original developer so that - * they can be incorporated into the canonical version. It is also - * requested that these non-binding requests be included along with the - * license above. - */ - -/** @file - @ingroup win_src -*/ - -#include "pa_x86_plain_converters.h" - -#include "pa_converters.h" -#include "pa_dither.h" - -/* - the main reason these versions are faster than the equivalent C versions - is that float -> int casting is expensive in C on x86 because the rounding - mode needs to be changed for every cast. these versions only set - the rounding mode once outside the loop. - - small additional speed gains are made by the way that clamping is - implemented. - -TODO: - o- inline dither code - o- implement Dither only (no-clip) versions - o- implement int8 and uint8 versions - o- test thoroughly - - o- the packed 24 bit functions could benefit from unrolling and avoiding - byte and word sized register access. -*/ - -/* -------------------------------------------------------------------------- */ - -/* -#define PA_CLIP_( val, min, max )\ - { val = ((val) < (min)) ? (min) : (((val) > (max)) ? 
(max) : (val)); }
-*/
-
-/*
-    the following notes were used to determine whether a floating point
-    value should be saturated (ie >1 or <-1) by loading it into an integer
-    register. these should be rewritten so that they make sense.
-
-    an ieee floating point value
-
-    1.xxxxxxxxxxxxxxxxxxxx?
-
-
-    is less than or equal to 1 and greater than or equal to -1 either:
-
-    if the mantissa is 0 and the unbiased exponent is 0
-
-    OR
-
-    if the unbiased exponent < 0
-
-    this translates to:
-
-    if the mantissa is 0 and the biased exponent is 7F
-
-    or
-
-    if the biased exponent is less than 7F
-
-
-    therefore the value is greater than 1 or less than -1 if
-
-    the mantissa is not 0 and the biased exponent is 7F
-
-    or
-
-    if the biased exponent is greater than 7F
-
-
-    in other words, if we mask out the sign bit, the value is
-    greater than 1 or less than -1 if its integer representation is greater than:
-
-    0 01111111 0000 0000 0000 0000 0000 000
-
-    0011 1111 1000 0000 0000 0000 0000 0000 => 0x3F800000
-*/
-
-#if defined(_WIN64) || defined(_WIN32_WCE)
-
-/*
-    -EMT64/AMD64 uses different asm
-    -VC2005 doesn't allow _WIN64 with inline assembly either!
- */
-void PaUtil_InitializeX86PlainConverters( void )
-{
-}
-
-#else
-
-/* -------------------------------------------------------------------------- */
-
-static const short fpuControlWord_ = 0x033F; /*round to nearest, 64 bit precision, all exceptions masked*/
-static const double int32Scaler_ = 0x7FFFFFFF;
-static const double ditheredInt32Scaler_ = 0x7FFFFFFE;
-static const double int24Scaler_ = 0x7FFFFF;
-static const double ditheredInt24Scaler_ = 0x7FFFFE;
-static const double int16Scaler_ = 0x7FFF;
-static const double ditheredInt16Scaler_ = 0x7FFE;
-
-#define PA_DITHER_BITS_ (15)
-/* Multiply by PA_FLOAT_DITHER_SCALE_ to get a float between -2.0 and +1.99999 */
-#define PA_FLOAT_DITHER_SCALE_ (1.0F / ((1<<PA_DITHER_BITS_)-1))
-static const float const_float_dither_scale_ = PA_FLOAT_DITHER_SCALE_;
-#define PA_DITHER_SHIFT_ ((32 - PA_DITHER_BITS_) + 1)
-
-/* -------------------------------------------------------------------------- */
-
-static void Float32_To_Int32(
-    void *destinationBuffer, signed int destinationStride,
-    void *sourceBuffer, signed int sourceStride,
-    unsigned int count, PaUtilTriangularDitherGenerator *ditherGenerator )
-{
-/*
-    float *src = (float*)sourceBuffer;
-    signed long *dest = (signed long*)destinationBuffer;
-    (void) ditherGenerator; // unused parameter
-
-    while( count-- )
-    {
-        // REVIEW
-        double scaled = *src * 0x7FFFFFFF;
-        *dest = (signed long) scaled;
-
-        src += sourceStride;
-        dest += destinationStride;
-    }
-*/
-
-    short savedFpuControlWord;
-
-    (void) ditherGenerator; /* unused parameter */
-
-    __asm{
-        // esi -> source ptr
-        // eax -> source byte stride
-        // edi -> destination ptr
-        // ebx -> destination byte stride
-        // ecx -> source end ptr
-        // edx -> temp
-
-        mov esi, sourceBuffer
-
-        mov edx, 4 // sizeof float32 and int32
-        mov eax, sourceStride
-        imul eax, edx
-
-        mov ecx, count
-        imul ecx, eax
-        add ecx, esi
-
-        mov edi, destinationBuffer
-
-        mov ebx, destinationStride
-        imul ebx, edx
-
-        fwait
-        fstcw savedFpuControlWord
-        fldcw fpuControlWord_
-
-        fld int32Scaler_ // stack: (int)0x7FFFFFFF
-
-    Float32_To_Int32_loop:
-
-        // load unscaled value into st(0)
-        fld dword ptr [esi] // stack: value, (int)0x7FFFFFFF
-        add esi, eax // increment source ptr
-        //lea esi, [esi+eax]
-        fmul st(0), st(1) // st(0) *= st(1), stack: value*0x7FFFFFFF, (int)0x7FFFFFFF
-        /*
-            note: we could store to a temporary qword here which would cause
-            wraparound distortion instead of int indefinite 0x10. that would
-            be more work, and given that not enabling clipping is only advisable
-            when you know that your signal isn't going to clip it isn't worth it.
-        */
-        fistp dword ptr [edi] // pop st(0) into dest, stack: (int)0x7FFFFFFF
-
-        add edi, ebx // increment destination ptr
-        //lea edi, [edi+ebx]
-
-        cmp esi, ecx // has src ptr reached end?
- jne Float32_To_Int32_loop - - ffree st(0) - fincstp - - fwait - fnclex - fldcw savedFpuControlWord - } -} - -/* -------------------------------------------------------------------------- */ - -static void Float32_To_Int32_Clip( - void *destinationBuffer, signed int destinationStride, - void *sourceBuffer, signed int sourceStride, - unsigned int count, PaUtilTriangularDitherGenerator *ditherGenerator ) -{ -/* - float *src = (float*)sourceBuffer; - signed long *dest = (signed long*)destinationBuffer; - (void) ditherGenerator; // unused parameter - - while( count-- ) - { - // REVIEW - double scaled = *src * 0x7FFFFFFF; - PA_CLIP_( scaled, -2147483648., 2147483647. ); - *dest = (signed long) scaled; - - src += sourceStride; - dest += destinationStride; - } -*/ - - short savedFpuControlWord; - - (void) ditherGenerator; /* unused parameter */ - - __asm{ - // esi -> source ptr - // eax -> source byte stride - // edi -> destination ptr - // ebx -> destination byte stride - // ecx -> source end ptr - // edx -> temp - - mov esi, sourceBuffer - - mov edx, 4 // sizeof float32 and int32 - mov eax, sourceStride - imul eax, edx - - mov ecx, count - imul ecx, eax - add ecx, esi - - mov edi, destinationBuffer - - mov ebx, destinationStride - imul ebx, edx - - fwait - fstcw savedFpuControlWord - fldcw fpuControlWord_ - - fld int32Scaler_ // stack: (int)0x7FFFFFFF - - Float32_To_Int32_Clip_loop: - - mov edx, dword ptr [esi] // load floating point value into integer register - - and edx, 0x7FFFFFFF // mask off sign - cmp edx, 0x3F800000 // greater than 1.0 or less than -1.0 - - jg Float32_To_Int32_Clip_clamp - - // load unscaled value into st(0) - fld dword ptr [esi] // stack: value, (int)0x7FFFFFFF - add esi, eax // increment source ptr - //lea esi, [esi+eax] - fmul st(0), st(1) // st(0) *= st(1), stack: value*0x7FFFFFFF, (int)0x7FFFFFFF - fistp dword ptr [edi] // pop st(0) into dest, stack: (int)0x7FFFFFFF - jmp Float32_To_Int32_Clip_stored - - Float32_To_Int32_Clip_clamp: - mov edx, dword ptr [esi] // load floating point value into integer register - shr edx, 31 // move sign bit into bit 0 - add esi, eax // increment source ptr - //lea esi, [esi+eax] - add edx, 0x7FFFFFFF // convert to maximum range integers - mov dword ptr [edi], edx - - Float32_To_Int32_Clip_stored: - - //add edi, ebx // increment destination ptr - lea edi, [edi+ebx] - - cmp esi, ecx // has src ptr reached end? - jne Float32_To_Int32_Clip_loop - - ffree st(0) - fincstp - - fwait - fnclex - fldcw savedFpuControlWord - } -} - -/* -------------------------------------------------------------------------- */ - -static void Float32_To_Int32_DitherClip( - void *destinationBuffer, signed int destinationStride, - void *sourceBuffer, signed int sourceStride, - unsigned int count, PaUtilTriangularDitherGenerator *ditherGenerator ) -{ - /* - float *src = (float*)sourceBuffer; - signed long *dest = (signed long*)destinationBuffer; - - while( count-- ) - { - // REVIEW - double dither = PaUtil_GenerateFloatTriangularDither( ditherGenerator ); - // use smaller scaler to prevent overflow when we add the dither - double dithered = ((double)*src * (2147483646.0)) + dither; - PA_CLIP_( dithered, -2147483648., 2147483647. 
); - *dest = (signed long) dithered; - - - src += sourceStride; - dest += destinationStride; - } - */ - - short savedFpuControlWord; - - // spill storage: - signed long sourceByteStride; - signed long highpassedDither; - - // dither state: - unsigned long ditherPrevious = ditherGenerator->previous; - unsigned long ditherRandSeed1 = ditherGenerator->randSeed1; - unsigned long ditherRandSeed2 = ditherGenerator->randSeed2; - - __asm{ - // esi -> source ptr - // eax -> source byte stride - // edi -> destination ptr - // ebx -> destination byte stride - // ecx -> source end ptr - // edx -> temp - - mov esi, sourceBuffer - - mov edx, 4 // sizeof float32 and int32 - mov eax, sourceStride - imul eax, edx - - mov ecx, count - imul ecx, eax - add ecx, esi - - mov edi, destinationBuffer - - mov ebx, destinationStride - imul ebx, edx - - fwait - fstcw savedFpuControlWord - fldcw fpuControlWord_ - - fld ditheredInt32Scaler_ // stack: int scaler - - Float32_To_Int32_DitherClip_loop: - - mov edx, dword ptr [esi] // load floating point value into integer register - - and edx, 0x7FFFFFFF // mask off sign - cmp edx, 0x3F800000 // greater than 1.0 or less than -1.0 - - jg Float32_To_Int32_DitherClip_clamp - - // load unscaled value into st(0) - fld dword ptr [esi] // stack: value, int scaler - add esi, eax // increment source ptr - //lea esi, [esi+eax] - fmul st(0), st(1) // st(0) *= st(1), stack: value*(int scaler), int scaler - - /* - // call PaUtil_GenerateFloatTriangularDither with C calling convention - mov sourceByteStride, eax // save eax - mov sourceEnd, ecx // save ecx - push ditherGenerator // pass ditherGenerator parameter on stack - call PaUtil_GenerateFloatTriangularDither // stack: dither, value*(int scaler), int scaler - pop edx // clear parameter off stack - mov ecx, sourceEnd // restore ecx - mov eax, sourceByteStride // restore eax - */ - - // generate dither - mov sourceByteStride, eax // save eax - mov edx, 196314165 - mov eax, ditherRandSeed1 - mul edx // eax:edx = eax * 196314165 - //add eax, 907633515 - lea eax, [eax+907633515] - mov ditherRandSeed1, eax - mov edx, 196314165 - mov eax, ditherRandSeed2 - mul edx // eax:edx = eax * 196314165 - //add eax, 907633515 - lea eax, [eax+907633515] - mov edx, ditherRandSeed1 - shr edx, PA_DITHER_SHIFT_ - mov ditherRandSeed2, eax - shr eax, PA_DITHER_SHIFT_ - //add eax, edx // eax -> current - lea eax, [eax+edx] - mov edx, ditherPrevious - neg edx - lea edx, [eax+edx] // highpass = current - previous - mov highpassedDither, edx - mov ditherPrevious, eax // previous = current - mov eax, sourceByteStride // restore eax - fild highpassedDither - fmul const_float_dither_scale_ - // end generate dither, dither signal in st(0) - - faddp st(1), st(0) // stack: dither + value*(int scaler), int scaler - fistp dword ptr [edi] // pop st(0) into dest, stack: int scaler - jmp Float32_To_Int32_DitherClip_stored - - Float32_To_Int32_DitherClip_clamp: - mov edx, dword ptr [esi] // load floating point value into integer register - shr edx, 31 // move sign bit into bit 0 - add esi, eax // increment source ptr - //lea esi, [esi+eax] - add edx, 0x7FFFFFFF // convert to maximum range integers - mov dword ptr [edi], edx - - Float32_To_Int32_DitherClip_stored: - - //add edi, ebx // increment destination ptr - lea edi, [edi+ebx] - - cmp esi, ecx // has src ptr reached end? 
- jne Float32_To_Int32_DitherClip_loop - - ffree st(0) - fincstp - - fwait - fnclex - fldcw savedFpuControlWord - } - - ditherGenerator->previous = ditherPrevious; - ditherGenerator->randSeed1 = ditherRandSeed1; - ditherGenerator->randSeed2 = ditherRandSeed2; -} - -/* -------------------------------------------------------------------------- */ - -static void Float32_To_Int24( - void *destinationBuffer, signed int destinationStride, - void *sourceBuffer, signed int sourceStride, - unsigned int count, PaUtilTriangularDitherGenerator *ditherGenerator ) -{ -/* - float *src = (float*)sourceBuffer; - unsigned char *dest = (unsigned char*)destinationBuffer; - signed long temp; - - (void) ditherGenerator; // unused parameter - - while( count-- ) - { - // convert to 32 bit and drop the low 8 bits - double scaled = *src * 0x7FFFFFFF; - temp = (signed long) scaled; - - dest[0] = (unsigned char)(temp >> 8); - dest[1] = (unsigned char)(temp >> 16); - dest[2] = (unsigned char)(temp >> 24); - - src += sourceStride; - dest += destinationStride * 3; - } -*/ - - short savedFpuControlWord; - - signed long tempInt32; - - (void) ditherGenerator; /* unused parameter */ - - __asm{ - // esi -> source ptr - // eax -> source byte stride - // edi -> destination ptr - // ebx -> destination byte stride - // ecx -> source end ptr - // edx -> temp - - mov esi, sourceBuffer - - mov edx, 4 // sizeof float32 - mov eax, sourceStride - imul eax, edx - - mov ecx, count - imul ecx, eax - add ecx, esi - - mov edi, destinationBuffer - - mov edx, 3 // sizeof int24 - mov ebx, destinationStride - imul ebx, edx - - fwait - fstcw savedFpuControlWord - fldcw fpuControlWord_ - - fld int24Scaler_ // stack: (int)0x7FFFFF - - Float32_To_Int24_loop: - - // load unscaled value into st(0) - fld dword ptr [esi] // stack: value, (int)0x7FFFFF - add esi, eax // increment source ptr - //lea esi, [esi+eax] - fmul st(0), st(1) // st(0) *= st(1), stack: value*0x7FFFFF, (int)0x7FFFFF - fistp tempInt32 // pop st(0) into tempInt32, stack: (int)0x7FFFFF - mov edx, tempInt32 - - mov byte ptr [edi], DL - shr edx, 8 - //mov byte ptr [edi+1], DL - //mov byte ptr [edi+2], DH - mov word ptr [edi+1], DX - - //add edi, ebx // increment destination ptr - lea edi, [edi+ebx] - - cmp esi, ecx // has src ptr reached end? - jne Float32_To_Int24_loop - - ffree st(0) - fincstp - - fwait - fnclex - fldcw savedFpuControlWord - } -} - -/* -------------------------------------------------------------------------- */ - -static void Float32_To_Int24_Clip( - void *destinationBuffer, signed int destinationStride, - void *sourceBuffer, signed int sourceStride, - unsigned int count, PaUtilTriangularDitherGenerator *ditherGenerator ) -{ -/* - float *src = (float*)sourceBuffer; - unsigned char *dest = (unsigned char*)destinationBuffer; - signed long temp; - - (void) ditherGenerator; // unused parameter - - while( count-- ) - { - // convert to 32 bit and drop the low 8 bits - double scaled = *src * 0x7FFFFFFF; - PA_CLIP_( scaled, -2147483648., 2147483647. 
); - temp = (signed long) scaled; - - dest[0] = (unsigned char)(temp >> 8); - dest[1] = (unsigned char)(temp >> 16); - dest[2] = (unsigned char)(temp >> 24); - - src += sourceStride; - dest += destinationStride * 3; - } -*/ - - short savedFpuControlWord; - - signed long tempInt32; - - (void) ditherGenerator; /* unused parameter */ - - __asm{ - // esi -> source ptr - // eax -> source byte stride - // edi -> destination ptr - // ebx -> destination byte stride - // ecx -> source end ptr - // edx -> temp - - mov esi, sourceBuffer - - mov edx, 4 // sizeof float32 - mov eax, sourceStride - imul eax, edx - - mov ecx, count - imul ecx, eax - add ecx, esi - - mov edi, destinationBuffer - - mov edx, 3 // sizeof int24 - mov ebx, destinationStride - imul ebx, edx - - fwait - fstcw savedFpuControlWord - fldcw fpuControlWord_ - - fld int24Scaler_ // stack: (int)0x7FFFFF - - Float32_To_Int24_Clip_loop: - - mov edx, dword ptr [esi] // load floating point value into integer register - - and edx, 0x7FFFFFFF // mask off sign - cmp edx, 0x3F800000 // greater than 1.0 or less than -1.0 - - jg Float32_To_Int24_Clip_clamp - - // load unscaled value into st(0) - fld dword ptr [esi] // stack: value, (int)0x7FFFFF - add esi, eax // increment source ptr - //lea esi, [esi+eax] - fmul st(0), st(1) // st(0) *= st(1), stack: value*0x7FFFFF, (int)0x7FFFFF - fistp tempInt32 // pop st(0) into tempInt32, stack: (int)0x7FFFFF - mov edx, tempInt32 - jmp Float32_To_Int24_Clip_store - - Float32_To_Int24_Clip_clamp: - mov edx, dword ptr [esi] // load floating point value into integer register - shr edx, 31 // move sign bit into bit 0 - add esi, eax // increment source ptr - //lea esi, [esi+eax] - add edx, 0x7FFFFF // convert to maximum range integers - - Float32_To_Int24_Clip_store: - - mov byte ptr [edi], DL - shr edx, 8 - //mov byte ptr [edi+1], DL - //mov byte ptr [edi+2], DH - mov word ptr [edi+1], DX - - //add edi, ebx // increment destination ptr - lea edi, [edi+ebx] - - cmp esi, ecx // has src ptr reached end? - jne Float32_To_Int24_Clip_loop - - ffree st(0) - fincstp - - fwait - fnclex - fldcw savedFpuControlWord - } -} - -/* -------------------------------------------------------------------------- */ - -static void Float32_To_Int24_DitherClip( - void *destinationBuffer, signed int destinationStride, - void *sourceBuffer, signed int sourceStride, - unsigned int count, PaUtilTriangularDitherGenerator *ditherGenerator ) -{ -/* - float *src = (float*)sourceBuffer; - unsigned char *dest = (unsigned char*)destinationBuffer; - signed long temp; - - while( count-- ) - { - // convert to 32 bit and drop the low 8 bits - - // FIXME: the dither amplitude here appears to be too small by 8 bits - double dither = PaUtil_GenerateFloatTriangularDither( ditherGenerator ); - // use smaller scaler to prevent overflow when we add the dither - double dithered = ((double)*src * (2147483646.0)) + dither; - PA_CLIP_( dithered, -2147483648., 2147483647. 
); - - temp = (signed long) dithered; - - dest[0] = (unsigned char)(temp >> 8); - dest[1] = (unsigned char)(temp >> 16); - dest[2] = (unsigned char)(temp >> 24); - - src += sourceStride; - dest += destinationStride * 3; - } -*/ - - short savedFpuControlWord; - - // spill storage: - signed long sourceByteStride; - signed long highpassedDither; - - // dither state: - unsigned long ditherPrevious = ditherGenerator->previous; - unsigned long ditherRandSeed1 = ditherGenerator->randSeed1; - unsigned long ditherRandSeed2 = ditherGenerator->randSeed2; - - signed long tempInt32; - - __asm{ - // esi -> source ptr - // eax -> source byte stride - // edi -> destination ptr - // ebx -> destination byte stride - // ecx -> source end ptr - // edx -> temp - - mov esi, sourceBuffer - - mov edx, 4 // sizeof float32 - mov eax, sourceStride - imul eax, edx - - mov ecx, count - imul ecx, eax - add ecx, esi - - mov edi, destinationBuffer - - mov edx, 3 // sizeof int24 - mov ebx, destinationStride - imul ebx, edx - - fwait - fstcw savedFpuControlWord - fldcw fpuControlWord_ - - fld ditheredInt24Scaler_ // stack: int scaler - - Float32_To_Int24_DitherClip_loop: - - mov edx, dword ptr [esi] // load floating point value into integer register - - and edx, 0x7FFFFFFF // mask off sign - cmp edx, 0x3F800000 // greater than 1.0 or less than -1.0 - - jg Float32_To_Int24_DitherClip_clamp - - // load unscaled value into st(0) - fld dword ptr [esi] // stack: value, int scaler - add esi, eax // increment source ptr - //lea esi, [esi+eax] - fmul st(0), st(1) // st(0) *= st(1), stack: value*(int scaler), int scaler - - /* - // call PaUtil_GenerateFloatTriangularDither with C calling convention - mov sourceByteStride, eax // save eax - mov sourceEnd, ecx // save ecx - push ditherGenerator // pass ditherGenerator parameter on stack - call PaUtil_GenerateFloatTriangularDither // stack: dither, value*(int scaler), int scaler - pop edx // clear parameter off stack - mov ecx, sourceEnd // restore ecx - mov eax, sourceByteStride // restore eax - */ - - // generate dither - mov sourceByteStride, eax // save eax - mov edx, 196314165 - mov eax, ditherRandSeed1 - mul edx // eax:edx = eax * 196314165 - //add eax, 907633515 - lea eax, [eax+907633515] - mov ditherRandSeed1, eax - mov edx, 196314165 - mov eax, ditherRandSeed2 - mul edx // eax:edx = eax * 196314165 - //add eax, 907633515 - lea eax, [eax+907633515] - mov edx, ditherRandSeed1 - shr edx, PA_DITHER_SHIFT_ - mov ditherRandSeed2, eax - shr eax, PA_DITHER_SHIFT_ - //add eax, edx // eax -> current - lea eax, [eax+edx] - mov edx, ditherPrevious - neg edx - lea edx, [eax+edx] // highpass = current - previous - mov highpassedDither, edx - mov ditherPrevious, eax // previous = current - mov eax, sourceByteStride // restore eax - fild highpassedDither - fmul const_float_dither_scale_ - // end generate dither, dither signal in st(0) - - faddp st(1), st(0) // stack: dither * value*(int scaler), int scaler - fistp tempInt32 // pop st(0) into tempInt32, stack: int scaler - mov edx, tempInt32 - jmp Float32_To_Int24_DitherClip_store - - Float32_To_Int24_DitherClip_clamp: - mov edx, dword ptr [esi] // load floating point value into integer register - shr edx, 31 // move sign bit into bit 0 - add esi, eax // increment source ptr - //lea esi, [esi+eax] - add edx, 0x7FFFFF // convert to maximum range integers - - Float32_To_Int24_DitherClip_store: - - mov byte ptr [edi], DL - shr edx, 8 - //mov byte ptr [edi+1], DL - //mov byte ptr [edi+2], DH - mov word ptr [edi+1], DX - - //add edi, ebx // 
increment destination ptr - lea edi, [edi+ebx] - - cmp esi, ecx // has src ptr reached end? - jne Float32_To_Int24_DitherClip_loop - - ffree st(0) - fincstp - - fwait - fnclex - fldcw savedFpuControlWord - } - - ditherGenerator->previous = ditherPrevious; - ditherGenerator->randSeed1 = ditherRandSeed1; - ditherGenerator->randSeed2 = ditherRandSeed2; -} - -/* -------------------------------------------------------------------------- */ - -static void Float32_To_Int16( - void *destinationBuffer, signed int destinationStride, - void *sourceBuffer, signed int sourceStride, - unsigned int count, PaUtilTriangularDitherGenerator *ditherGenerator ) -{ -/* - float *src = (float*)sourceBuffer; - signed short *dest = (signed short*)destinationBuffer; - (void)ditherGenerator; // unused parameter - - while( count-- ) - { - - short samp = (short) (*src * (32767.0f)); - *dest = samp; - - src += sourceStride; - dest += destinationStride; - } -*/ - - short savedFpuControlWord; - - (void) ditherGenerator; /* unused parameter */ - - __asm{ - // esi -> source ptr - // eax -> source byte stride - // edi -> destination ptr - // ebx -> destination byte stride - // ecx -> source end ptr - // edx -> temp - - mov esi, sourceBuffer - - mov edx, 4 // sizeof float32 - mov eax, sourceStride - imul eax, edx // source byte stride - - mov ecx, count - imul ecx, eax - add ecx, esi // source end ptr = count * source byte stride + source ptr - - mov edi, destinationBuffer - - mov edx, 2 // sizeof int16 - mov ebx, destinationStride - imul ebx, edx // destination byte stride - - fwait - fstcw savedFpuControlWord - fldcw fpuControlWord_ - - fld int16Scaler_ // stack: (int)0x7FFF - - Float32_To_Int16_loop: - - // load unscaled value into st(0) - fld dword ptr [esi] // stack: value, (int)0x7FFF - add esi, eax // increment source ptr - //lea esi, [esi+eax] - fmul st(0), st(1) // st(0) *= st(1), stack: value*0x7FFF, (int)0x7FFF - fistp word ptr [edi] // store scaled int into dest, stack: (int)0x7FFF - - add edi, ebx // increment destination ptr - //lea edi, [edi+ebx] - - cmp esi, ecx // has src ptr reached end? 
- jne Float32_To_Int16_loop - - ffree st(0) - fincstp - - fwait - fnclex - fldcw savedFpuControlWord - } -} - -/* -------------------------------------------------------------------------- */ - -static void Float32_To_Int16_Clip( - void *destinationBuffer, signed int destinationStride, - void *sourceBuffer, signed int sourceStride, - unsigned int count, PaUtilTriangularDitherGenerator *ditherGenerator ) -{ -/* - float *src = (float*)sourceBuffer; - signed short *dest = (signed short*)destinationBuffer; - (void)ditherGenerator; // unused parameter - - while( count-- ) - { - long samp = (signed long) (*src * (32767.0f)); - PA_CLIP_( samp, -0x8000, 0x7FFF ); - *dest = (signed short) samp; - - src += sourceStride; - dest += destinationStride; - } -*/ - - short savedFpuControlWord; - - (void) ditherGenerator; /* unused parameter */ - - __asm{ - // esi -> source ptr - // eax -> source byte stride - // edi -> destination ptr - // ebx -> destination byte stride - // ecx -> source end ptr - // edx -> temp - - mov esi, sourceBuffer - - mov edx, 4 // sizeof float32 - mov eax, sourceStride - imul eax, edx // source byte stride - - mov ecx, count - imul ecx, eax - add ecx, esi // source end ptr = count * source byte stride + source ptr - - mov edi, destinationBuffer - - mov edx, 2 // sizeof int16 - mov ebx, destinationStride - imul ebx, edx // destination byte stride - - fwait - fstcw savedFpuControlWord - fldcw fpuControlWord_ - - fld int16Scaler_ // stack: (int)0x7FFF - - Float32_To_Int16_Clip_loop: - - mov edx, dword ptr [esi] // load floating point value into integer register - - and edx, 0x7FFFFFFF // mask off sign - cmp edx, 0x3F800000 // greater than 1.0 or less than -1.0 - - jg Float32_To_Int16_Clip_clamp - - // load unscaled value into st(0) - fld dword ptr [esi] // stack: value, (int)0x7FFF - add esi, eax // increment source ptr - //lea esi, [esi+eax] - fmul st(0), st(1) // st(0) *= st(1), stack: value*0x7FFF, (int)0x7FFF - fistp word ptr [edi] // store scaled int into dest, stack: (int)0x7FFF - jmp Float32_To_Int16_Clip_stored - - Float32_To_Int16_Clip_clamp: - mov edx, dword ptr [esi] // load floating point value into integer register - shr edx, 31 // move sign bit into bit 0 - add esi, eax // increment source ptr - //lea esi, [esi+eax] - add dx, 0x7FFF // convert to maximum range integers - mov word ptr [edi], dx // store clamped into into dest - - Float32_To_Int16_Clip_stored: - - add edi, ebx // increment destination ptr - //lea edi, [edi+ebx] - - cmp esi, ecx // has src ptr reached end? 
- jne Float32_To_Int16_Clip_loop - - ffree st(0) - fincstp - - fwait - fnclex - fldcw savedFpuControlWord - } -} - -/* -------------------------------------------------------------------------- */ - -static void Float32_To_Int16_DitherClip( - void *destinationBuffer, signed int destinationStride, - void *sourceBuffer, signed int sourceStride, - unsigned int count, PaUtilTriangularDitherGenerator *ditherGenerator ) -{ -/* - float *src = (float*)sourceBuffer; - signed short *dest = (signed short*)destinationBuffer; - (void)ditherGenerator; // unused parameter - - while( count-- ) - { - - float dither = PaUtil_GenerateFloatTriangularDither( ditherGenerator ); - // use smaller scaler to prevent overflow when we add the dither - float dithered = (*src * (32766.0f)) + dither; - signed long samp = (signed long) dithered; - PA_CLIP_( samp, -0x8000, 0x7FFF ); - *dest = (signed short) samp; - - src += sourceStride; - dest += destinationStride; - } -*/ - - short savedFpuControlWord; - - // spill storage: - signed long sourceByteStride; - signed long highpassedDither; - - // dither state: - unsigned long ditherPrevious = ditherGenerator->previous; - unsigned long ditherRandSeed1 = ditherGenerator->randSeed1; - unsigned long ditherRandSeed2 = ditherGenerator->randSeed2; - - __asm{ - // esi -> source ptr - // eax -> source byte stride - // edi -> destination ptr - // ebx -> destination byte stride - // ecx -> source end ptr - // edx -> temp - - mov esi, sourceBuffer - - mov edx, 4 // sizeof float32 - mov eax, sourceStride - imul eax, edx // source byte stride - - mov ecx, count - imul ecx, eax - add ecx, esi // source end ptr = count * source byte stride + source ptr - - mov edi, destinationBuffer - - mov edx, 2 // sizeof int16 - mov ebx, destinationStride - imul ebx, edx // destination byte stride - - fwait - fstcw savedFpuControlWord - fldcw fpuControlWord_ - - fld ditheredInt16Scaler_ // stack: int scaler - - Float32_To_Int16_DitherClip_loop: - - mov edx, dword ptr [esi] // load floating point value into integer register - - and edx, 0x7FFFFFFF // mask off sign - cmp edx, 0x3F800000 // greater than 1.0 or less than -1.0 - - jg Float32_To_Int16_DitherClip_clamp - - // load unscaled value into st(0) - fld dword ptr [esi] // stack: value, int scaler - add esi, eax // increment source ptr - //lea esi, [esi+eax] - fmul st(0), st(1) // st(0) *= st(1), stack: value*(int scaler), int scaler - - /* - // call PaUtil_GenerateFloatTriangularDither with C calling convention - mov sourceByteStride, eax // save eax - mov sourceEnd, ecx // save ecx - push ditherGenerator // pass ditherGenerator parameter on stack - call PaUtil_GenerateFloatTriangularDither // stack: dither, value*(int scaler), int scaler - pop edx // clear parameter off stack - mov ecx, sourceEnd // restore ecx - mov eax, sourceByteStride // restore eax - */ - - // generate dither - mov sourceByteStride, eax // save eax - mov edx, 196314165 - mov eax, ditherRandSeed1 - mul edx // eax:edx = eax * 196314165 - //add eax, 907633515 - lea eax, [eax+907633515] - mov ditherRandSeed1, eax - mov edx, 196314165 - mov eax, ditherRandSeed2 - mul edx // eax:edx = eax * 196314165 - //add eax, 907633515 - lea eax, [eax+907633515] - mov edx, ditherRandSeed1 - shr edx, PA_DITHER_SHIFT_ - mov ditherRandSeed2, eax - shr eax, PA_DITHER_SHIFT_ - //add eax, edx // eax -> current - lea eax, [eax+edx] // current = randSeed1>>x + randSeed2>>x - mov edx, ditherPrevious - neg edx - lea edx, [eax+edx] // highpass = current - previous - mov highpassedDither, edx - mov 
ditherPrevious, eax // previous = current - mov eax, sourceByteStride // restore eax - fild highpassedDither - fmul const_float_dither_scale_ - // end generate dither, dither signal in st(0) - - faddp st(1), st(0) // stack: dither * value*(int scaler), int scaler - fistp word ptr [edi] // store scaled int into dest, stack: int scaler - jmp Float32_To_Int16_DitherClip_stored - - Float32_To_Int16_DitherClip_clamp: - mov edx, dword ptr [esi] // load floating point value into integer register - shr edx, 31 // move sign bit into bit 0 - add esi, eax // increment source ptr - //lea esi, [esi+eax] - add dx, 0x7FFF // convert to maximum range integers - mov word ptr [edi], dx // store clamped into into dest - - Float32_To_Int16_DitherClip_stored: - - add edi, ebx // increment destination ptr - //lea edi, [edi+ebx] - - cmp esi, ecx // has src ptr reached end? - jne Float32_To_Int16_DitherClip_loop - - ffree st(0) - fincstp - - fwait - fnclex - fldcw savedFpuControlWord - } - - ditherGenerator->previous = ditherPrevious; - ditherGenerator->randSeed1 = ditherRandSeed1; - ditherGenerator->randSeed2 = ditherRandSeed2; -} - -/* -------------------------------------------------------------------------- */ - -void PaUtil_InitializeX86PlainConverters( void ) -{ - paConverters.Float32_To_Int32 = Float32_To_Int32; - paConverters.Float32_To_Int32_Clip = Float32_To_Int32_Clip; - paConverters.Float32_To_Int32_DitherClip = Float32_To_Int32_DitherClip; - - paConverters.Float32_To_Int24 = Float32_To_Int24; - paConverters.Float32_To_Int24_Clip = Float32_To_Int24_Clip; - paConverters.Float32_To_Int24_DitherClip = Float32_To_Int24_DitherClip; - - paConverters.Float32_To_Int16 = Float32_To_Int16; - paConverters.Float32_To_Int16_Clip = Float32_To_Int16_Clip; - paConverters.Float32_To_Int16_DitherClip = Float32_To_Int16_DitherClip; -} - -#endif - -/* -------------------------------------------------------------------------- */ diff --git a/spaces/amitjamadagni/qs-benchmarks/plot_scripts/plot_display_com_prec_all.py b/spaces/amitjamadagni/qs-benchmarks/plot_scripts/plot_display_com_prec_all.py deleted file mode 100644 index 592c30fefd7cdba101a8652b81f4a9e3349ad958..0000000000000000000000000000000000000000 --- a/spaces/amitjamadagni/qs-benchmarks/plot_scripts/plot_display_com_prec_all.py +++ /dev/null @@ -1,225 +0,0 @@ -import numpy as np -import h5py -import os - -import mercury as mr - -import sys -sys.path.append('/plot_scripts/') -from map_packages_colors_all import * -from plot_scripts_all import * - -package_str = ['qiskit' , 'cirq', 'qsimcirq', 'pennylane', 'pennylane_l', 'qibo', 'qibojit', 'yao', 'quest', 'qulacs', 'intel_qs_cpp', 'projectq', 'svsim', 'hybridq', 'hiq', 'qcgpu', 'qrack_sch', 'cuquantum_qiskit', 'cuquantum_qsimcirq', 'qpanda', 'qpp', 'myqlm', 'myqlm_cpp', 'braket'] - - -def _build_data_mat(task, cc, pr_1, pr_2, _n_arr): - - dir = os.getcwd() - - data_mat = np.log(np.zeros((len(package_str), len(_n_arr)))) - - for p_i, pack in enumerate(package_str): - - dat_pr1 = dir + '/data/{}/{}_{}_{}.h5'.format(task, pack, cc, pr_1) - dat_pr2 = dir + '/data/{}/{}_{}_{}.h5'.format(task, pack, cc, pr_2) - - if os.path.isfile(dat_pr1) and os.path.isfile(dat_pr2): - - - h5f_pr1 = h5py.File(dat_pr1, 'r') - dat_pr1 = h5f_pr1[storage_dict[pack]][:] - h5f_pr1.close() - - h5f_pr2 = h5py.File(dat_pr2, 'r') - dat_pr2 = h5f_pr2[storage_dict[pack]][:] - h5f_pr2.close() - - ratio_arr = [] - - if len(dat_pr1) == len(dat_pr2): - for i, elem in enumerate(dat_pr1): - ratio_arr.append(elem/float(dat_pr2[i])) - elif len(dat_pr1) > 
len(dat_pr2): - for i, elem in enumerate(dat_pr2): - ratio_arr.append(dat_pr1[i]/float(elem)) - elif len(dat_pr2) > len(dat_pr1): - for i, elem in enumerate(dat_pr1): - ratio_arr.append(elem/float(dat_pr2[i])) - - if len(_n_arr) > len(ratio_arr): - for r_i, rat in enumerate(ratio_arr): - data_mat[p_i, r_i] = rat - elif len(_n_arr) < len(ratio_arr): - for n_i in range(len(_n_arr)): - data_mat[p_i, n_i] = ratio_arr[n_i] - else: - for ri, rat_v in enumerate(ratio_arr): - data_mat[p_i, ri] = rat_v - - return data_mat - -def abs_time_pack(task, cc, N_end, pr_1, pr_2): - - if task == "Heisenberg dynamics": - task = "hdyn" - elif task == "Random Quantum Circuit": - task = "rqc" - elif task == "Quantum Fourier Transform": - task = "qft" - - if cc == "Singlethread": - cc = 'singlethread' - elif cc == "Multithread": - cc = 'multithread' - elif cc == "GPU": - cc = 'gpu' - - if pr_1 == "Single": - pr_1 = "sp" - elif pr_1 == "Double": - pr_1 = "dp" - - if pr_2 == "Single": - pr_2 = "sp" - elif pr_2 == "Double": - pr_2 = "dp" - - fig, ax = plt.subplots() - - dir = os.getcwd() - - if task == 'hdyn' or task == 'qft': - N_arr = np.arange(6, N_end, 2) - elif task == 'rqc': - N_arr = np.arange(12, N_end, 2) - - # if not os.path.isfile(dat_fst) and not os.path.isfile(dat_fmt) and not os.path.isfile(dat_fgpu): - # return mr.Md(f"Precision {pr} possibly not supported") - - data_mat = _build_data_mat(task, cc, pr_1, pr_2, N_arr) - - # params = {'figure.figsize': (10, 10)} - # plt.rcParams.update(params) - # plt.imshow(data_mat, cmap='OrRd')#, vmin=-16, vmax=0) - - plt.imshow(data_mat, cmap='gist_heat_r', vmin=-1., vmax=10) - - plt.yticks(range(len(pkg_str)), package_str) - locs, labels = plt.yticks() - - # plt.setp(labels, rotation=90) - plt.xticks(range(len(N_arr)), N_arr) - # locs, labels = plt.xticks() - - ax.xaxis.set_major_locator(ticker.AutoLocator()) - ax.xaxis.set_minor_locator(ticker.AutoMinorLocator()) - - plt.colorbar() - plt.tight_layout() - # plt.savefig(fn) - plt.show() - -# abs_time_pack("Heisenberg dynamics", "Singlethread", 36, "Double", "Single") -# abs_time_pack("Random Quantum Circuit", "Double", 36, "Singlethread", "Multithread") - -def comp_time_pack(task_1, task_2, cc, N_end, pr_1, pr_2): - - if task_1 == "Heisenberg dynamics": - task_1 = "hdyn" - elif task_1 == "Random Quantum Circuit": - task_1 = "rqc" - elif task_1 == "Quantum Fourier Transform": - task_1 = "qft" - - if task_2 == "Heisenberg dynamics": - task_2 = "hdyn" - elif task_2 == "Random Quantum Circuit": - task_2 = "rqc" - elif task_2 == "Quantum Fourier Transform": - task_2 = "qft" - - if cc == "Singlethread": - cc = 'singlethread' - elif cc == "Multithread": - cc = 'multithread' - elif cc == "GPU": - cc = 'gpu' - - if pr_1 == "Single": - pr_1 = "sp" - elif pr_1 == "Double": - pr_1 = "dp" - - if pr_2 == "Single": - pr_2 = "sp" - elif pr_2 == "Double": - pr_2 = "dp" - - - fig, ax = plt.subplots() - - - dir = os.getcwd() - - if task_1 == 'hdyn' or task_1 == 'qft': - N_arr_1 = np.arange(6, N_end, 2) - elif task_1 == 'rqc': - N_arr_1 = np.arange(12, N_end, 2) - - if task_2 == 'hdyn' or task_2 == 'qft': - N_arr_2 = np.arange(6, N_end, 2) - elif task_2 == 'rqc': - N_arr_2 = np.arange(12, N_end, 2) - - data_mat_1 = np.matrix(_build_data_mat(task_1, cc, pr_1, pr_2, N_arr_1)) - data_mat_2 = np.matrix(_build_data_mat(task_2, cc, pr_1, pr_2, N_arr_2)) - - if N_arr_1[0] > N_arr_2[0]: - data_mat_2 = data_mat_2[:,3:] - - elif N_arr_1[0] < N_arr_2[0]: - data_mat_1 = data_mat_1[:,3:] - - # print(data_mat_1.shape) - # print(data_mat_2.shape) - 
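-    # Descriptive note on the comparison below: data_mat_1 and data_mat_2 hold
-    # the pr_1/pr_2 ratios of the recorded benchmark data per package (rows)
-    # and system size (columns); missing entries stay at the -inf produced by
-    # the np.log(np.zeros(...)) initialisation. comp_data_mat then divides the
-    # two matrices elementwise, so each cell compares how strongly precision
-    # affects task_1 relative to task_2 for that package.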
- # plt.imshow(data_mat_1, cmap='OrRd')#, vmin=-16, vmax=0) - # plt.show() - # plt.imshow(data_mat_2, cmap='OrRd')#, vmin=-16, vmax=0) - # plt.show() - - comp_data_mat = np.zeros(data_mat_1.shape) - - for ri in range(comp_data_mat.shape[0]): - for ci in range(comp_data_mat.shape[1]): - comp_data_mat[ri, ci] = data_mat_1[ri, ci]/data_mat_2[ri, ci] - - # comp_data_mat = np.matrix(data_mat_1) - np.matrix(data_mat_2) - - # params = {'figure.figsize': (10, 10)} - # plt.rcParams.update(params) - - # plt.imshow(comp_data_mat, cmap='Spectral')#, vmin=-16, vmax=0) - plt.imshow(comp_data_mat, cmap='gist_heat_r', vmin=-0.5) - - plt.yticks(range(len(pkg_str)), package_str) - locs, labels = plt.yticks() - - # plt.setp(labels, rotation=90) - if N_arr_1[0] > N_arr_2[0]: - plt.xticks(range(len(N_arr_1)), N_arr_1) - elif N_arr_1[0] < N_arr_2[0]: - plt.xticks(range(len(N_arr_2)), N_arr_2) - else: - plt.xticks(range(len(N_arr_1)), N_arr_1) - # locs, labels = plt.xticks() - - ax.xaxis.set_major_locator(ticker.AutoLocator()) - ax.xaxis.set_minor_locator(ticker.AutoMinorLocator()) - - plt.colorbar() - plt.tight_layout() - # plt.savefig(fn) - plt.show() - -# comp_time_pack("Heisenberg dynamics", "Random Quantum Circuit", "Double", 36, "Singlethread", "Multithread") diff --git a/spaces/ammarnasr/Code-Generation-with-Language-Specific-LoRa-Models/error_analysis.py b/spaces/ammarnasr/Code-Generation-with-Language-Specific-LoRa-Models/error_analysis.py deleted file mode 100644 index 521e896ed3fe8082ad13772652c45d58e979ce0a..0000000000000000000000000000000000000000 --- a/spaces/ammarnasr/Code-Generation-with-Language-Specific-LoRa-Models/error_analysis.py +++ /dev/null @@ -1,184 +0,0 @@ -import os -import json -import numpy as np -import pandas as pd -import seaborn as sns -import streamlit as st -import matplotlib.pyplot as plt -sns.set(rc={'figure.figsize':(11.7,8.27)}) - - -def init_page(): - st.title('Error Analysis') - -def get_files_in_dir(dir_path, ext=None): - """Returns a list of files in a directory, optionally filtered by extension. - Args: - dir_path (str): Path to directory. - ext (str, optional): File extension to filter by. Defaults to None. - Returns: - list: List of file paths. - """ - files = [] - for file in os.listdir(dir_path): - if ext is None or file.endswith(ext): - files.append(os.path.join(dir_path, file)) - return files - -def load_json_file(file_path): - """Loads a JSON file. - Args: - file_path (str): Path to JSON file. - Returns: - dict: JSON file contents. 
- """ - with open(file_path, 'r') as f: - return json.load(f) - -def get_df_from_data(data): - propmpt = data['prompt'] - language = data['language'] - temperature = data['temperature'] - top_p = data['top_p'] - max_new_tokens = data['max_new_tokens'] - stop_tokens = data['stop_tokens'] - results = data['results'] - program = [] - timestamp = [] - stdout = [] - stderr = [] - exit_code = [] - status = [] - for result in results: - program.append(result['program']) - timestamp.append(result['timestamp']) - stdout.append(result['stdout']) - stderr.append(result['stderr']) - exit_code.append(result['exit_code']) - status.append(result['status']) - prompt = [propmpt] * len(program) - language = [language] * len(program) - temperature = [temperature] * len(program) - top_p = [top_p] * len(program) - max_new_tokens = [max_new_tokens] * len(program) - stop_tokens = [stop_tokens] * len(program) - - - df = pd.DataFrame({ - 'prompt': propmpt, - 'language': language, - 'temperature': temperature, - 'top_p': top_p, - 'max_new_tokens': max_new_tokens, - 'stop_tokens': stop_tokens, - 'program': program, - 'timestamp': timestamp, - 'stdout': stdout, - 'stderr': stderr, - 'exit_code': exit_code, - 'status': status - }) - return df - -def concat_two_df(df1, df2): - return pd.concat([df1, df2]) - -def get_df_from_files(files): - df = pd.DataFrame() - for file in files: - data = load_json_file(file) - df = concat_two_df(df, get_df_from_data(data)) - return df - -def select_columns(df, columns): - return df[columns] - -def get_value_counts(df, column): - return df[column].value_counts() - -def get_folders_in_dir(dir_path): - """Returns a list of folders in a directory. - Args: - dir_path (str): Path to directory. - Returns: - list: List of folder paths. - """ - folders = [] - for folder in os.listdir(dir_path): - if os.path.isdir(os.path.join(dir_path, folder)): - folders.append(os.path.join(dir_path, folder)) - return folders - -def find_strings_in_df(df, column, strings): - """Finds rows in a dataframe that contain a string in a column. - Args: - df (pandas.DataFrame): Dataframe. - column (str): Column to search. - strings (list): List of strings to search for. - Returns: - pandas.DataFrame: Dataframe with rows that contain a string in a column. 
- """ - return df[df[column].str.contains('|'.join(strings))] - -def main(): - init_page() - parent_dir = './temp' - all_strings = [ - "error: ';' expected", - " java.lang.AssertionError", - " ArrayList<" - ] - - folders = get_folders_in_dir(parent_dir) - java_folders = [folder for folder in folders if 'java' in folder] - - - - dirs = st.multiselect('Select a folder', java_folders, default=java_folders) - strings = st.multiselect('Select a string', all_strings, default=all_strings) - - counts_dict = { - 'folder': [], - 'string': [], - 'count': [] - } - - with st.spinner('Loading data...'): - - for dir in dirs: - ext = '.results.json' - files = get_files_in_dir(dir, ext) - df = get_df_from_files(files) - for string in strings: - s = [string] - string_df = find_strings_in_df(df, 'stderr', s) - counts_dict['folder'].append(dir) - counts_dict['string'].append(string) - counts_dict['count'].append(len(string_df)) - - counts_df = pd.DataFrame(counts_dict) - #Create figure with a reasonable size - fig, ax = plt.subplots(figsize=(8.7,5.27)) - sns.barplot(x='folder', y='count', hue='string', data=counts_df, ax=ax) - plt.xticks(rotation=45) - st.pyplot(fig) - # sns.barplot(x='folder', y='count', hue='string', data=counts_df) - # plt.xticks(rotation=45) - # st.pyplot() - - - target_dir = st.selectbox('Select a folder', dirs) - ext = '.results.json' - files = get_files_in_dir(target_dir, ext) - df = get_df_from_files(files) - target_strings = st.multiselect('Select a string', strings, key='target_strings') - target_df = find_strings_in_df(df, 'stderr', target_strings) - target_df = select_columns(target_df, ['program', 'stderr']) - target_index = st.number_input('Select an index', min_value=0, max_value=len(target_df)-1, value=0, step=1) - target_df = target_df.iloc[target_index] - target_program = target_df['program'] - st.code(target_program, language='java') - st.dataframe(target_df) - -if __name__ == '__main__': - main() \ No newline at end of file diff --git a/spaces/antonovmaxim/text-generation-webui-space/docs/DeepSpeed.md b/spaces/antonovmaxim/text-generation-webui-space/docs/DeepSpeed.md deleted file mode 100644 index 6170f6819ca072ff50fd1146b64d73f74ab00473..0000000000000000000000000000000000000000 --- a/spaces/antonovmaxim/text-generation-webui-space/docs/DeepSpeed.md +++ /dev/null @@ -1,24 +0,0 @@ -An alternative way of reducing the GPU memory usage of models is to use the `DeepSpeed ZeRO-3` optimization. - -With this, I have been able to load a 6b model (GPT-J 6B) with less than 6GB of VRAM. The speed of text generation is very decent and much better than what would be accomplished with `--auto-devices --gpu-memory 6`. - -As far as I know, DeepSpeed is only available for Linux at the moment. - -### How to use it - -1. Install DeepSpeed: - -``` -conda install -c conda-forge mpi4py mpich -pip install -U deepspeed -``` - -2. Start the web UI replacing `python` with `deepspeed --num_gpus=1` and adding the `--deepspeed` flag. Example: - -``` -deepspeed --num_gpus=1 server.py --deepspeed --chat --model gpt-j-6B -``` - -### Learn more - -For more information, check out [this comment](https://github.com/oobabooga/text-generation-webui/issues/40#issuecomment-1412038622) by 81300, who came up with the DeepSpeed support in this web UI. 
\ No newline at end of file diff --git a/spaces/anzorq/hf-spaces-semantic-search/pages/api/hf_space.js b/spaces/anzorq/hf-spaces-semantic-search/pages/api/hf_space.js deleted file mode 100644 index 9d92749b17c00b34b827f6d7db26488b2a9a37d3..0000000000000000000000000000000000000000 --- a/spaces/anzorq/hf-spaces-semantic-search/pages/api/hf_space.js +++ /dev/null @@ -1,45 +0,0 @@ -const get_space_info = async (space_id) => { - try { - const response = await fetch(`https://huggingface.co/api/spaces/${space_id}`) - const json = await response.json() - - if (json.error) { - return null - } - - const dayjs = require('dayjs') - const relativeTime = require('dayjs/plugin/relativeTime') - dayjs.extend(relativeTime) - const lastModified = dayjs(json.lastModified).fromNow() - - const author = json.author - const title = json.cardData?.title || json.id.split('/')[1].replace(/-/g, ' ') || 'Untitled' - const emoji = json.cardData?.emoji || '🤗' - let colorFrom = json.cardData?.colorFrom || 'pink' - let colorTo = json.cardData?.colorTo || 'purple' - const likes = json.likes - const sdk = json.sdk - const runtime_stage = json.runtime.stage - const current_hardware = json.runtime.hardware.current - - const colors = ['red', 'yellow', 'green', 'blue', 'indigo', 'purple', 'pink', 'gray'] - if (!colors.includes(colorFrom)) { - colorFrom = 'pink' - } - if (!colors.includes(colorTo)) { - colorTo = 'purple' - } - - const result = { space_id, author, title, emoji, lastModified, colorFrom, colorTo, likes, sdk, runtime_stage, current_hardware } - - // console.debug("API response: ", result) - - return result - - } catch (error) { - console.error(error) - throw error - } -} - -export { get_space_info } diff --git a/spaces/aodianyun/stable-diffusion-webui/html/licenses.html b/spaces/aodianyun/stable-diffusion-webui/html/licenses.html deleted file mode 100644 index f59c352510f95a5d57df7808459c5eb5b21367a9..0000000000000000000000000000000000000000 --- a/spaces/aodianyun/stable-diffusion-webui/html/licenses.html +++ /dev/null @@ -1,419 +0,0 @@ - - -

CodeFormer

-Parts of CodeFormer code had to be copied to be compatible with GFPGAN. -
-S-Lab License 1.0
-
-Copyright 2022 S-Lab
-
-Redistribution and use for non-commercial purpose in source and
-binary forms, with or without modification, are permitted provided
-that the following conditions are met:
-
-1. Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-
-2. Redistributions in binary form must reproduce the above copyright
-   notice, this list of conditions and the following disclaimer in
-   the documentation and/or other materials provided with the
-   distribution.
-
-3. Neither the name of the copyright holder nor the names of its
-   contributors may be used to endorse or promote products derived
-   from this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-In the event that redistribution and/or use for commercial purpose in
-source or binary forms, with or without modification is required,
-please contact the contributor(s) of the work.
-
- - -

ESRGAN

-Code for architecture and reading models copied. -
-MIT License
-
-Copyright (c) 2021 victorca25
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
- -

Real-ESRGAN

-Some code is copied to support ESRGAN models. -
-BSD 3-Clause License
-
-Copyright (c) 2021, Xintao Wang
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-1. Redistributions of source code must retain the above copyright notice, this
-   list of conditions and the following disclaimer.
-
-2. Redistributions in binary form must reproduce the above copyright notice,
-   this list of conditions and the following disclaimer in the documentation
-   and/or other materials provided with the distribution.
-
-3. Neither the name of the copyright holder nor the names of its
-   contributors may be used to endorse or promote products derived from
-   this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
- -

InvokeAI

-Some code for compatibility with OSX is taken from lstein's repository. -
-MIT License
-
-Copyright (c) 2022 InvokeAI Team
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
- -

LDSR

Code added by contributors, most likely copied from this repository.
-MIT License
-
-Copyright (c) 2022 Machine Vision and Learning Group, LMU Munich
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
- -

CLIP Interrogator

-Some small amounts of code borrowed and reworked. -
-MIT License
-
-Copyright (c) 2022 pharmapsychotic
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
- -

SwinIR

-Code added by contributors, most likely copied from this repository. - -
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "[]"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright [2021] [SwinIR Authors]
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-
- -

Memory Efficient Attention

-The sub-quadratic cross attention optimization uses modified code from the Memory Efficient Attention package that Alex Birch optimized for 3D tensors. This license is updated to reflect that. -
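For orientation, here is a minimal sketch of the chunked ("sub-quadratic") attention idea this note refers to, not the package's actual implementation; the function and parameter names are illustrative. Instead of materializing the full token-by-token attention matrix, query rows are processed in chunks, so peak memory scales with the chunk size rather than quadratically with sequence length.

    import torch

    def chunked_attention(q, k, v, chunk_size=1024):
        # q, k, v: (batch, tokens, channels), the 3D-tensor layout mentioned above
        scale = q.shape[-1] ** -0.5
        out = torch.empty_like(q)
        for i in range(0, q.shape[1], chunk_size):
            qc = q[:, i:i + chunk_size]  # one chunk of query rows
            # softmax runs over the full key dimension, so the result stays exact
            attn = torch.softmax((qc @ k.transpose(1, 2)) * scale, dim=-1)
            out[:, i:i + chunk_size] = attn @ v
        return out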
-MIT License
-
-Copyright (c) 2023 Alex Birch
-Copyright (c) 2023 Amin Rezaei
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
- diff --git a/spaces/aphenx/bingo/src/lib/isomorphic/browser.ts b/spaces/aphenx/bingo/src/lib/isomorphic/browser.ts deleted file mode 100644 index de125b1f1786d1618cb1ff47f403d76c6784f4ce..0000000000000000000000000000000000000000 --- a/spaces/aphenx/bingo/src/lib/isomorphic/browser.ts +++ /dev/null @@ -1,11 +0,0 @@ -'use client' - -const debug = console.info.bind(console) - -class WebSocketAlias extends WebSocket { - constructor(address: string | URL, ...args: any) { - super(address) - } -} - -export default { fetch, WebSocket: WebSocketAlias, debug } diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Cython/Compiler/Annotate.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Cython/Compiler/Annotate.py deleted file mode 100644 index 5feac02d87f4789c316b5efc5bd1f001bccda42d..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Cython/Compiler/Annotate.py +++ /dev/null @@ -1,317 +0,0 @@ -# Note: Work in progress - -from __future__ import absolute_import - -import os -import os.path -import re -import codecs -import textwrap -from datetime import datetime -from functools import partial -from collections import defaultdict -from xml.sax.saxutils import escape as html_escape -try: - from StringIO import StringIO -except ImportError: - from io import StringIO # does not support writing 'str' in Py2 - -from . import Version -from .Code import CCodeWriter -from .. import Utils - - -class AnnotationCCodeWriter(CCodeWriter): - - def __init__(self, create_from=None, buffer=None, copy_formatting=True): - CCodeWriter.__init__(self, create_from, buffer, copy_formatting=copy_formatting) - if create_from is None: - self.annotation_buffer = StringIO() - self.last_annotated_pos = None - # annotations[filename][line] -> [(column, AnnotationItem)*] - self.annotations = defaultdict(partial(defaultdict, list)) - # code[filename][line] -> str - self.code = defaultdict(partial(defaultdict, str)) - # scopes[filename][line] -> set(scopes) - self.scopes = defaultdict(partial(defaultdict, set)) - else: - # When creating an insertion point, keep references to the same database - self.annotation_buffer = create_from.annotation_buffer - self.annotations = create_from.annotations - self.code = create_from.code - self.scopes = create_from.scopes - self.last_annotated_pos = create_from.last_annotated_pos - - def create_new(self, create_from, buffer, copy_formatting): - return AnnotationCCodeWriter(create_from, buffer, copy_formatting) - - def write(self, s): - CCodeWriter.write(self, s) - self.annotation_buffer.write(s) - - def mark_pos(self, pos, trace=True): - if pos is not None: - CCodeWriter.mark_pos(self, pos, trace) - if self.funcstate and self.funcstate.scope: - # lambdas and genexprs can result in multiple scopes per line => keep them in a set - self.scopes[pos[0].filename][pos[1]].add(self.funcstate.scope) - if self.last_annotated_pos: - source_desc, line, _ = self.last_annotated_pos - pos_code = self.code[source_desc.filename] - pos_code[line] += self.annotation_buffer.getvalue() - self.annotation_buffer = StringIO() - self.last_annotated_pos = pos - - def annotate(self, pos, item): - self.annotations[pos[0].filename][pos[1]].append((pos[2], item)) - - def _css(self): - """css template will later allow to choose a colormap""" - css = [self._css_template] - for i in range(255): - color = u"FFFF%02x" % int(255/(1+i/10.0)) - css.append('.cython.score-%d {background-color: #%s;}' % (i, color)) - try: - from pygments.formatters import 
HtmlFormatter - except ImportError: - pass - else: - css.append(HtmlFormatter().get_style_defs('.cython')) - return '\n'.join(css) - - _css_template = textwrap.dedent(""" - body.cython { font-family: courier; font-size: 12; } - - .cython.tag { } - .cython.line { margin: 0em } - .cython.code { font-size: 9; color: #444444; display: none; margin: 0px 0px 0px 8px; border-left: 8px none; } - - .cython.line .run { background-color: #B0FFB0; } - .cython.line .mis { background-color: #FFB0B0; } - .cython.code.run { border-left: 8px solid #B0FFB0; } - .cython.code.mis { border-left: 8px solid #FFB0B0; } - - .cython.code .py_c_api { color: red; } - .cython.code .py_macro_api { color: #FF7000; } - .cython.code .pyx_c_api { color: #FF3000; } - .cython.code .pyx_macro_api { color: #FF7000; } - .cython.code .refnanny { color: #FFA000; } - .cython.code .trace { color: #FFA000; } - .cython.code .error_goto { color: #FFA000; } - - .cython.code .coerce { color: #008000; border: 1px dotted #008000 } - .cython.code .py_attr { color: #FF0000; font-weight: bold; } - .cython.code .c_attr { color: #0000FF; } - .cython.code .py_call { color: #FF0000; font-weight: bold; } - .cython.code .c_call { color: #0000FF; } - """) - - # on-click toggle function to show/hide C source code - _onclick_attr = ' onclick="{0}"'.format(( - "(function(s){" - " s.display = s.display === 'block' ? 'none' : 'block'" - "})(this.nextElementSibling.style)" - ).replace(' ', '') # poor dev's JS minification - ) - - def save_annotation(self, source_filename, target_filename, coverage_xml=None): - with Utils.open_source_file(source_filename) as f: - code = f.read() - generated_code = self.code.get(source_filename, {}) - c_file = Utils.decode_filename(os.path.basename(target_filename)) - html_filename = os.path.splitext(target_filename)[0] + ".html" - - with codecs.open(html_filename, "w", encoding="UTF-8") as out_buffer: - out_buffer.write(self._save_annotation(code, generated_code, c_file, source_filename, coverage_xml)) - - def _save_annotation_header(self, c_file, source_filename, coverage_timestamp=None): - coverage_info = '' - if coverage_timestamp: - coverage_info = u' with coverage data from {timestamp}'.format( - timestamp=datetime.fromtimestamp(int(coverage_timestamp) // 1000)) - - outlist = [ - textwrap.dedent(u'''\ - - - - - - Cython: {filename} - - - -

Generated by Cython {watermark}{more_info}
-
-                Yellow lines hint at Python interaction.
-                Click on a line that starts with a "+" to see the C code that Cython generated for it.
-

- ''').format(css=self._css(), watermark=Version.watermark, - filename=os.path.basename(source_filename) if source_filename else '', - more_info=coverage_info) - ] - if c_file: - outlist.append(u'

<p>Raw output: <a href="%s">%s</a></p>

\n' % (c_file, c_file)) - return outlist - - def _save_annotation_footer(self): - return (u'\n',) - - def _save_annotation(self, code, generated_code, c_file=None, source_filename=None, coverage_xml=None): - """ - lines : original cython source code split by lines - generated_code : generated c code keyed by line number in original file - target filename : name of the file in which to store the generated html - c_file : filename in which the c_code has been written - """ - if coverage_xml is not None and source_filename: - coverage_timestamp = coverage_xml.get('timestamp', '').strip() - covered_lines = self._get_line_coverage(coverage_xml, source_filename) - else: - coverage_timestamp = covered_lines = None - annotation_items = dict(self.annotations[source_filename]) - scopes = dict(self.scopes[source_filename]) - - outlist = [] - outlist.extend(self._save_annotation_header(c_file, source_filename, coverage_timestamp)) - outlist.extend(self._save_annotation_body(code, generated_code, annotation_items, scopes, covered_lines)) - outlist.extend(self._save_annotation_footer()) - return ''.join(outlist) - - def _get_line_coverage(self, coverage_xml, source_filename): - coverage_data = None - for entry in coverage_xml.iterfind('.//class'): - if not entry.get('filename'): - continue - if (entry.get('filename') == source_filename or - os.path.abspath(entry.get('filename')) == source_filename): - coverage_data = entry - break - elif source_filename.endswith(entry.get('filename')): - coverage_data = entry # but we might still find a better match... - if coverage_data is None: - return None - return dict( - (int(line.get('number')), int(line.get('hits'))) - for line in coverage_data.iterfind('lines/line') - ) - - def _htmlify_code(self, code): - try: - from pygments import highlight - from pygments.lexers import CythonLexer - from pygments.formatters import HtmlFormatter - except ImportError: - # no Pygments, just escape the code - return html_escape(code) - - html_code = highlight( - code, CythonLexer(stripnl=False, stripall=False), - HtmlFormatter(nowrap=True)) - return html_code - - def _save_annotation_body(self, cython_code, generated_code, annotation_items, scopes, covered_lines=None): - outlist = [u'
'] - pos_comment_marker = u'/* \N{HORIZONTAL ELLIPSIS} */\n' - new_calls_map = dict( - (name, 0) for name in - 'refnanny trace py_macro_api py_c_api pyx_macro_api pyx_c_api error_goto'.split() - ).copy - - self.mark_pos(None) - - def annotate(match): - group_name = match.lastgroup - calls[group_name] += 1 - return u"%s" % ( - group_name, match.group(group_name)) - - lines = self._htmlify_code(cython_code).splitlines() - lineno_width = len(str(len(lines))) - if not covered_lines: - covered_lines = None - - for k, line in enumerate(lines, 1): - try: - c_code = generated_code[k] - except KeyError: - c_code = '' - else: - c_code = _replace_pos_comment(pos_comment_marker, c_code) - if c_code.startswith(pos_comment_marker): - c_code = c_code[len(pos_comment_marker):] - c_code = html_escape(c_code) - - calls = new_calls_map() - c_code = _parse_code(annotate, c_code) - score = (5 * calls['py_c_api'] + 2 * calls['pyx_c_api'] + - calls['py_macro_api'] + calls['pyx_macro_api']) - - if c_code: - onclick = self._onclick_attr - expandsymbol = '+' - else: - onclick = '' - expandsymbol = ' ' - - covered = '' - if covered_lines is not None and k in covered_lines: - hits = covered_lines[k] - if hits is not None: - covered = 'run' if hits else 'mis' - - outlist.append( - u'
<pre class="cython line score-{score}"{onclick}>'
-                # generate the line number with the expand symbol in front,
-                # padded to the right number of digits
-                u'{expandsymbol}{line:0{lineno_width}d}: {code}</pre>
\n'.format( - score=score, - expandsymbol=expandsymbol, - covered=covered, - lineno_width=lineno_width, - line=k, - code=line.rstrip(), - onclick=onclick, - )) - if c_code: - outlist.append(u"
<pre class='cython code score-{score} {covered}'>{code}</pre>
".format( - score=score, covered=covered, code=c_code)) - outlist.append(u"
") - return outlist - - -_parse_code = re.compile(( - br'(?P__Pyx_X?(?:GOT|GIVE)REF|__Pyx_RefNanny[A-Za-z]+)|' - br'(?P__Pyx_Trace[A-Za-z]+)|' - br'(?:' - br'(?P__Pyx_[A-Z][A-Z_]+)|' - br'(?P(?:__Pyx_[A-Z][a-z_][A-Za-z_]*)|__pyx_convert_[A-Za-z_]*)|' - br'(?PPy[A-Z][a-z]+_[A-Z][A-Z_]+)|' - br'(?PPy[A-Z][a-z]+_[A-Z][a-z][A-Za-z_]*)' - br')(?=\()|' # look-ahead to exclude subsequent '(' from replacement - br'(?P(?:(?<=;) *if [^;]* +)?__PYX_ERR\([^)]+\))' -).decode('ascii')).sub - - -_replace_pos_comment = re.compile( - # this matches what Cython generates as code line marker comment - br'^\s*/\*(?:(?:[^*]|\*[^/])*\n)+\s*\*/\s*\n'.decode('ascii'), - re.M -).sub - - -class AnnotationItem(object): - - def __init__(self, style, text, tag="", size=0): - self.style = style - self.text = text - self.tag = tag - self.size = size - - def start(self): - return u"%s" % (self.style, self.text, self.tag) - - def end(self): - return self.size, u"" diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Cython/Compiler/Tests/TestBuffer.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Cython/Compiler/Tests/TestBuffer.py deleted file mode 100644 index 1f69d96524d0b50928c896c5c1f9fc9ef37e070b..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Cython/Compiler/Tests/TestBuffer.py +++ /dev/null @@ -1,105 +0,0 @@ -from Cython.TestUtils import CythonTest -import Cython.Compiler.Errors as Errors -from Cython.Compiler.Nodes import * -from Cython.Compiler.ParseTreeTransforms import * -from Cython.Compiler.Buffer import * - - -class TestBufferParsing(CythonTest): - # First, we only test the raw parser, i.e. - # the number and contents of arguments are NOT checked. - # However "dtype"/the first positional argument is special-cased - # to parse a type argument rather than an expression - - def parse(self, s): - return self.should_not_fail(lambda: self.fragment(s)).root - - def not_parseable(self, expected_error, s): - e = self.should_fail(lambda: self.fragment(s), Errors.CompileError) - self.assertEqual(expected_error, e.message_only) - - def test_basic(self): - t = self.parse(u"cdef object[float, 4, ndim=2, foo=foo] x") - bufnode = t.stats[0].base_type - self.assertTrue(isinstance(bufnode, TemplatedTypeNode)) - self.assertEqual(2, len(bufnode.positional_args)) -# print bufnode.dump() - # should put more here... 
- - def test_type_pos(self): - self.parse(u"cdef object[short unsigned int, 3] x") - - def test_type_keyword(self): - self.parse(u"cdef object[foo=foo, dtype=short unsigned int] x") - - def test_pos_after_key(self): - self.not_parseable("Non-keyword arg following keyword arg", - u"cdef object[foo=1, 2] x") - - -# See also tests/error/e_bufaccess.pyx and tets/run/bufaccess.pyx -# THESE TESTS ARE NOW DISABLED, the code they test was pretty much -# refactored away -class TestBufferOptions(CythonTest): - # Tests the full parsing of the options within the brackets - - def nonfatal_error(self, error): - # We're passing self as context to transform to trap this - self.error = error - self.assertTrue(self.expect_error) - - def parse_opts(self, opts, expect_error=False): - assert opts != "" - s = u"def f():\n cdef object[%s] x" % opts - self.expect_error = expect_error - root = self.fragment(s, pipeline=[NormalizeTree(self), PostParse(self)]).root - if not expect_error: - vardef = root.stats[0].body.stats[0] - assert isinstance(vardef, CVarDefNode) # use normal assert as this is to validate the test code - buftype = vardef.base_type - self.assertTrue(isinstance(buftype, TemplatedTypeNode)) - self.assertTrue(isinstance(buftype.base_type_node, CSimpleBaseTypeNode)) - self.assertEqual(u"object", buftype.base_type_node.name) - return buftype - else: - self.assertTrue(len(root.stats[0].body.stats) == 0) - - def non_parse(self, expected_err, opts): - self.parse_opts(opts, expect_error=True) -# e = self.should_fail(lambda: self.parse_opts(opts)) - self.assertEqual(expected_err, self.error.message_only) - - def __test_basic(self): - buf = self.parse_opts(u"unsigned short int, 3") - self.assertTrue(isinstance(buf.dtype_node, CSimpleBaseTypeNode)) - self.assertTrue(buf.dtype_node.signed == 0 and buf.dtype_node.longness == -1) - self.assertEqual(3, buf.ndim) - - def __test_dict(self): - buf = self.parse_opts(u"ndim=3, dtype=unsigned short int") - self.assertTrue(isinstance(buf.dtype_node, CSimpleBaseTypeNode)) - self.assertTrue(buf.dtype_node.signed == 0 and buf.dtype_node.longness == -1) - self.assertEqual(3, buf.ndim) - - def __test_ndim(self): - self.parse_opts(u"int, 2") - self.non_parse(ERR_BUF_NDIM, u"int, 'a'") - self.non_parse(ERR_BUF_NDIM, u"int, -34") - - def __test_use_DEF(self): - t = self.fragment(u""" - DEF ndim = 3 - def f(): - cdef object[int, ndim] x - cdef object[ndim=ndim, dtype=int] y - """, pipeline=[NormalizeTree(self), PostParse(self)]).root - stats = t.stats[0].body.stats - self.assertTrue(stats[0].base_type.ndim == 3) - self.assertTrue(stats[1].base_type.ndim == 3) - - # add exotic and impossible combinations as they come along... - -if __name__ == '__main__': - import unittest - unittest.main() - diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/antlr4/tree/ParseTreePatternMatcher.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/antlr4/tree/ParseTreePatternMatcher.py deleted file mode 100644 index 07b96408c3d47edc5052488bc7cd4f0a8122a5a3..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/antlr4/tree/ParseTreePatternMatcher.py +++ /dev/null @@ -1,373 +0,0 @@ -# -# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -# Use of this file is governed by the BSD 3-clause license that -# can be found in the LICENSE.txt file in the project root. -# - -# -# A tree pattern matching mechanism for ANTLR {@link ParseTree}s. -# -#

Patterns are strings of source input text with special tags representing
-# token or rule references such as:
-#
-#     {@code <ID> = <expr>;}
-#
-# Given a pattern start rule such as {@code statement}, this object constructs
-# a {@link ParseTree} with placeholders for the {@code ID} and {@code expr}
-# subtrees. Then the {@link #match} routines can compare an actual
-# {@link ParseTree} from a parse with this pattern. Tag {@code <ID>} matches
-# any {@code ID} token and tag {@code <expr>} references the result of the
-# {@code expr} rule (generally an instance of {@code ExprContext}).
-#
-# Pattern {@code x = 0;} is a similar pattern that matches the same input
-# except that it requires the identifier to be {@code x} and the expression to
-# be {@code 0}.
-#
-# The {@link #matches} routines return {@code true} or {@code false} based
-# upon a match for the tree rooted at the parameter sent in. The
-# {@link #match} routines return a {@link ParseTreeMatch} object that
-# contains the parse tree, the parse tree pattern, and a map from tag name to
-# matched nodes (more below). A subtree that fails to match returns with
-# {@link ParseTreeMatch#mismatchedNode} set to the first tree node that did
-# not match.
-#
-# For efficiency, you can compile a tree pattern in string form to a
-# {@link ParseTreePattern} object.
-#
-# See {@code TestParseTreeMatcher} for lots of examples.
-# {@link ParseTreePattern} has two static helper methods:
-# {@link ParseTreePattern#findAll} and {@link ParseTreePattern#match} that
-# are easy to use but not super efficient because they create new
-# {@link ParseTreePatternMatcher} objects each time and have to compile the
-# pattern in string form before using it.
-#
-# The lexer and parser that you pass into the {@link ParseTreePatternMatcher}
-# constructor are used to parse the pattern in string form. The lexer converts
-# {@code <ID> = <expr>;} into a sequence of four tokens (assuming the lexer
-# throws out whitespace or puts it on a hidden channel). Be aware that the
-# input stream is reset for the lexer (but not the parser; a
-# {@link ParserInterpreter} is created to parse the input). Any user-defined
-# fields you have put into the lexer might get changed when this mechanism asks
-# it to scan the pattern string.
-#
-# Normally a parser does not accept token {@code <expr>} as a valid
-# {@code expr}, but from the parser passed in we create a special version of
-# the underlying grammar representation (an {@link ATN}) that allows imaginary
-# tokens representing rules ({@code <expr>}) to match entire rules. We call
-# these bypass alternatives.
-#
-# Delimiters are {@code <} and {@code >}, with {@code \} as the escape string
-# by default, but you can set them to whatever you want using
-# {@link #setDelimiters}. You must escape both start and stop strings
-# {@code \<} and {@code \>}.
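For orientation, a hedged usage sketch of the matcher documented above. The ExprLexer/ExprParser classes and the "statement" rule are illustrative assumptions (any ANTLR-generated lexer/parser pair would do), and the sketch assumes ParseTreeMatch exposes succeeded() and get() as in the Java API; compileTreePattern and matchPattern are the methods defined in this file.

    # Hypothetical usage -- ExprLexer/ExprParser and the "statement" rule are assumptions.
    from antlr4 import CommonTokenStream, InputStream

    lexer = ExprLexer(InputStream("x = 0;"))
    parser = ExprParser(CommonTokenStream(lexer))
    tree = parser.statement()

    matcher = ParseTreePatternMatcher(lexer, parser)
    pattern = matcher.compileTreePattern("<ID> = <expr>;", parser.ruleNames.index("statement"))
    match = matcher.matchPattern(tree, pattern)
    if match.succeeded():
        print(match.get("ID").getText())  # the token matched by the <ID> tag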

-# -from antlr4.CommonTokenStream import CommonTokenStream -from antlr4.InputStream import InputStream -from antlr4.ParserRuleContext import ParserRuleContext -from antlr4.Lexer import Lexer -from antlr4.ListTokenSource import ListTokenSource -from antlr4.Token import Token -from antlr4.error.ErrorStrategy import BailErrorStrategy -from antlr4.error.Errors import RecognitionException, ParseCancellationException -from antlr4.tree.Chunk import TagChunk, TextChunk -from antlr4.tree.RuleTagToken import RuleTagToken -from antlr4.tree.TokenTagToken import TokenTagToken -from antlr4.tree.Tree import ParseTree, TerminalNode, RuleNode - -# need forward declaration -Parser = None -ParseTreePattern = None - -class CannotInvokeStartRule(Exception): - - def __init__(self, e:Exception): - super().__init__(e) - -class StartRuleDoesNotConsumeFullPattern(Exception): - - pass - - -class ParseTreePatternMatcher(object): - - # Constructs a {@link ParseTreePatternMatcher} or from a {@link Lexer} and - # {@link Parser} object. The lexer input stream is altered for tokenizing - # the tree patterns. The parser is used as a convenient mechanism to get - # the grammar name, plus token, rule names. - def __init__(self, lexer:Lexer, parser:Parser): - self.lexer = lexer - self.parser = parser - self.start = "<" - self.stop = ">" - self.escape = "\\" # e.g., \< and \> must escape BOTH! - - # Set the delimiters used for marking rule and token tags within concrete - # syntax used by the tree pattern parser. - # - # @param start The start delimiter. - # @param stop The stop delimiter. - # @param escapeLeft The escape sequence to use for escaping a start or stop delimiter. - # - # @exception IllegalArgumentException if {@code start} is {@code null} or empty. - # @exception IllegalArgumentException if {@code stop} is {@code null} or empty. - # - def setDelimiters(self, start:str, stop:str, escapeLeft:str): - if start is None or len(start)==0: - raise Exception("start cannot be null or empty") - if stop is None or len(stop)==0: - raise Exception("stop cannot be null or empty") - self.start = start - self.stop = stop - self.escape = escapeLeft - - # Does {@code pattern} matched as rule {@code patternRuleIndex} match {@code tree}?# - def matchesRuleIndex(self, tree:ParseTree, pattern:str, patternRuleIndex:int): - p = self.compileTreePattern(pattern, patternRuleIndex) - return self.matches(tree, p) - - # Does {@code pattern} matched as rule patternRuleIndex match tree? Pass in a - # compiled pattern instead of a string representation of a tree pattern. - # - def matchesPattern(self, tree:ParseTree, pattern:ParseTreePattern): - mismatchedNode = self.matchImpl(tree, pattern.patternTree, dict()) - return mismatchedNode is None - - # - # Compare {@code pattern} matched as rule {@code patternRuleIndex} against - # {@code tree} and return a {@link ParseTreeMatch} object that contains the - # matched elements, or the node at which the match failed. - # - def matchRuleIndex(self, tree:ParseTree, pattern:str, patternRuleIndex:int): - p = self.compileTreePattern(pattern, patternRuleIndex) - return self.matchPattern(tree, p) - - # - # Compare {@code pattern} matched against {@code tree} and return a - # {@link ParseTreeMatch} object that contains the matched elements, or the - # node at which the match failed. Pass in a compiled pattern instead of a - # string representation of a tree pattern. 
- # - def matchPattern(self, tree:ParseTree, pattern:ParseTreePattern): - labels = dict() - mismatchedNode = self.matchImpl(tree, pattern.patternTree, labels) - from antlr4.tree.ParseTreeMatch import ParseTreeMatch - return ParseTreeMatch(tree, pattern, labels, mismatchedNode) - - # - # For repeated use of a tree pattern, compile it to a - # {@link ParseTreePattern} using this method. - # - def compileTreePattern(self, pattern:str, patternRuleIndex:int): - tokenList = self.tokenize(pattern) - tokenSrc = ListTokenSource(tokenList) - tokens = CommonTokenStream(tokenSrc) - from antlr4.ParserInterpreter import ParserInterpreter - parserInterp = ParserInterpreter(self.parser.grammarFileName, self.parser.tokenNames, - self.parser.ruleNames, self.parser.getATNWithBypassAlts(),tokens) - tree = None - try: - parserInterp.setErrorHandler(BailErrorStrategy()) - tree = parserInterp.parse(patternRuleIndex) - except ParseCancellationException as e: - raise e.cause - except RecognitionException as e: - raise e - except Exception as e: - raise CannotInvokeStartRule(e) - - # Make sure tree pattern compilation checks for a complete parse - if tokens.LA(1)!=Token.EOF: - raise StartRuleDoesNotConsumeFullPattern() - - from antlr4.tree.ParseTreePattern import ParseTreePattern - return ParseTreePattern(self, pattern, patternRuleIndex, tree) - - # - # Recursively walk {@code tree} against {@code patternTree}, filling - # {@code match.}{@link ParseTreeMatch#labels labels}. - # - # @return the first node encountered in {@code tree} which does not match - # a corresponding node in {@code patternTree}, or {@code null} if the match - # was successful. The specific node returned depends on the matching - # algorithm used by the implementation, and may be overridden. - # - def matchImpl(self, tree:ParseTree, patternTree:ParseTree, labels:dict): - if tree is None: - raise Exception("tree cannot be null") - if patternTree is None: - raise Exception("patternTree cannot be null") - - # x and , x and y, or x and x; or could be mismatched types - if isinstance(tree, TerminalNode) and isinstance(patternTree, TerminalNode ): - mismatchedNode = None - # both are tokens and they have same type - if tree.symbol.type == patternTree.symbol.type: - if isinstance( patternTree.symbol, TokenTagToken ): # x and - tokenTagToken = patternTree.symbol - # track label->list-of-nodes for both token name and label (if any) - self.map(labels, tokenTagToken.tokenName, tree) - if tokenTagToken.label is not None: - self.map(labels, tokenTagToken.label, tree) - elif tree.getText()==patternTree.getText(): - # x and x - pass - else: - # x and y - if mismatchedNode is None: - mismatchedNode = tree - else: - if mismatchedNode is None: - mismatchedNode = tree - - return mismatchedNode - - if isinstance(tree, ParserRuleContext) and isinstance(patternTree, ParserRuleContext): - mismatchedNode = None - # (expr ...) and - ruleTagToken = self.getRuleTagToken(patternTree) - if ruleTagToken is not None: - m = None - if tree.ruleContext.ruleIndex == patternTree.ruleContext.ruleIndex: - # track label->list-of-nodes for both rule name and label (if any) - self.map(labels, ruleTagToken.ruleName, tree) - if ruleTagToken.label is not None: - self.map(labels, ruleTagToken.label, tree) - else: - if mismatchedNode is None: - mismatchedNode = tree - - return mismatchedNode - - # (expr ...) and (expr ...) 
- if tree.getChildCount()!=patternTree.getChildCount(): - if mismatchedNode is None: - mismatchedNode = tree - return mismatchedNode - - n = tree.getChildCount() - for i in range(0, n): - childMatch = self.matchImpl(tree.getChild(i), patternTree.getChild(i), labels) - if childMatch is not None: - return childMatch - - return mismatchedNode - - # if nodes aren't both tokens or both rule nodes, can't match - return tree - - def map(self, labels, label, tree): - v = labels.get(label, None) - if v is None: - v = list() - labels[label] = v - v.append(tree) - - # Is {@code t} {@code (expr )} subtree?# - def getRuleTagToken(self, tree:ParseTree): - if isinstance( tree, RuleNode ): - if tree.getChildCount()==1 and isinstance(tree.getChild(0), TerminalNode ): - c = tree.getChild(0) - if isinstance( c.symbol, RuleTagToken ): - return c.symbol - return None - - def tokenize(self, pattern:str): - # split pattern into chunks: sea (raw input) and islands (, ) - chunks = self.split(pattern) - - # create token stream from text and tags - tokens = list() - for chunk in chunks: - if isinstance( chunk, TagChunk ): - # add special rule token or conjure up new token from name - if chunk.tag[0].isupper(): - ttype = self.parser.getTokenType(chunk.tag) - if ttype==Token.INVALID_TYPE: - raise Exception("Unknown token " + str(chunk.tag) + " in pattern: " + pattern) - tokens.append(TokenTagToken(chunk.tag, ttype, chunk.label)) - elif chunk.tag[0].islower(): - ruleIndex = self.parser.getRuleIndex(chunk.tag) - if ruleIndex==-1: - raise Exception("Unknown rule " + str(chunk.tag) + " in pattern: " + pattern) - ruleImaginaryTokenType = self.parser.getATNWithBypassAlts().ruleToTokenType[ruleIndex] - tokens.append(RuleTagToken(chunk.tag, ruleImaginaryTokenType, chunk.label)) - else: - raise Exception("invalid tag: " + str(chunk.tag) + " in pattern: " + pattern) - else: - self.lexer.setInputStream(InputStream(chunk.text)) - t = self.lexer.nextToken() - while t.type!=Token.EOF: - tokens.append(t) - t = self.lexer.nextToken() - return tokens - - # Split {@code = ;} into 4 chunks for tokenizing by {@link #tokenize}.# - def split(self, pattern:str): - p = 0 - n = len(pattern) - chunks = list() - # find all start and stop indexes first, then collect - starts = list() - stops = list() - while p < n : - if p == pattern.find(self.escape + self.start, p): - p += len(self.escape) + len(self.start) - elif p == pattern.find(self.escape + self.stop, p): - p += len(self.escape) + len(self.stop) - elif p == pattern.find(self.start, p): - starts.append(p) - p += len(self.start) - elif p == pattern.find(self.stop, p): - stops.append(p) - p += len(self.stop) - else: - p += 1 - - nt = len(starts) - - if nt > len(stops): - raise Exception("unterminated tag in pattern: " + pattern) - if nt < len(stops): - raise Exception("missing start tag in pattern: " + pattern) - - for i in range(0, nt): - if starts[i] >= stops[i]: - raise Exception("tag delimiters out of order in pattern: " + pattern) - - # collect into chunks now - if nt==0: - chunks.append(TextChunk(pattern)) - - if nt>0 and starts[0]>0: # copy text up to first tag into chunks - text = pattern[0:starts[0]] - chunks.add(TextChunk(text)) - - for i in range(0, nt): - # copy inside of - tag = pattern[starts[i] + len(self.start) : stops[i]] - ruleOrToken = tag - label = None - colon = tag.find(':') - if colon >= 0: - label = tag[0:colon] - ruleOrToken = tag[colon+1 : len(tag)] - chunks.append(TagChunk(label, ruleOrToken)) - if i+1 < len(starts): - # copy from end of to start of next - text = 
pattern[stops[i] + len(self.stop) : starts[i + 1]] - chunks.append(TextChunk(text)) - - if nt > 0 : - afterLastTag = stops[nt - 1] + len(self.stop) - if afterLastTag < n : # copy text from end of last tag to end - text = pattern[afterLastTag : n] - chunks.append(TextChunk(text)) - - # strip out the escape sequences from text chunks but not tags - for i in range(0, len(chunks)): - c = chunks[i] - if isinstance( c, TextChunk ): - unescaped = c.text.replace(self.escape, "") - if len(unescaped) < len(c.text): - chunks[i] = TextChunk(unescaped) - return chunks diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/antlr4/xpath/XPath.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/antlr4/xpath/XPath.py deleted file mode 100644 index 3ac7d0c8208dbb2dcc90cec7dae4a29a7bd89fb9..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/antlr4/xpath/XPath.py +++ /dev/null @@ -1,352 +0,0 @@ -# -# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -# Use of this file is governed by the BSD 3-clause license that -# can be found in the LICENSE.txt file in the project root. -# - -# -# Represent a subset of XPath XML path syntax for use in identifying nodes in -# parse trees. -# -#

-# Split path into words and separators {@code /} and {@code //} via ANTLR
-# itself then walk path elements from left to right. At each separator-word
-# pair, find the set of nodes. The next stage uses those as the work list.
-#
-# The basic interface is
-# {@link XPath#findAll ParseTree.findAll}{@code (tree, pathString, parser)}.
-# But that is just shorthand for:
-#
-#     {@link XPath} p = new {@link XPath#XPath XPath}(parser, pathString);
-#     return p.{@link #evaluate evaluate}(tree);
-#
-# See {@code org.antlr.v4.test.TestXPath} for descriptions. In short, this
-# allows operators:
-#
-#     /     root
-#     //    anywhere
-#     !     invert; this must appear directly after root or anywhere operator
-#
-# and path elements:
-#
-#     ID          token name
-#     'string'    any string literal token from the grammar
-#     expr        rule name
-#     *           wildcard matching any node
-#

-# Whitespace is not allowed.
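A brief usage sketch of the findAll helper defined further down; the parse tree, parser, and its "func" rule are illustrative assumptions.

    # Hypothetical usage -- "tree", "parser" and the "func" rule are assumptions.
    for subtree in XPath.findAll(tree, "//func", parser):
        print(subtree.getText())  # prints every func subtree, wherever it occurs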

-# -from antlr4 import CommonTokenStream, DFA, PredictionContextCache, Lexer, LexerATNSimulator, ParserRuleContext, TerminalNode -from antlr4.InputStream import InputStream -from antlr4.Parser import Parser -from antlr4.RuleContext import RuleContext -from antlr4.Token import Token -from antlr4.atn.ATNDeserializer import ATNDeserializer -from antlr4.error.ErrorListener import ErrorListener -from antlr4.error.Errors import LexerNoViableAltException -from antlr4.tree.Tree import ParseTree -from antlr4.tree.Trees import Trees -from io import StringIO - - -def serializedATN(): - with StringIO() as buf: - buf.write("\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\2\n") - buf.write("\64\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t") - buf.write("\7\4\b\t\b\4\t\t\t\3\2\3\2\3\2\3\3\3\3\3\4\3\4\3\5\3\5") - buf.write("\3\6\3\6\7\6\37\n\6\f\6\16\6\"\13\6\3\6\3\6\3\7\3\7\5") - buf.write("\7(\n\7\3\b\3\b\3\t\3\t\7\t.\n\t\f\t\16\t\61\13\t\3\t") - buf.write("\3\t\3/\2\n\3\5\5\6\7\7\t\b\13\t\r\2\17\2\21\n\3\2\4\7") - buf.write("\2\62;aa\u00b9\u00b9\u0302\u0371\u2041\u2042\17\2C\\c") - buf.write("|\u00c2\u00d8\u00da\u00f8\u00fa\u0301\u0372\u037f\u0381") - buf.write("\u2001\u200e\u200f\u2072\u2191\u2c02\u2ff1\u3003\ud801") - buf.write("\uf902\ufdd1\ufdf2\uffff\64\2\3\3\2\2\2\2\5\3\2\2\2\2") - buf.write("\7\3\2\2\2\2\t\3\2\2\2\2\13\3\2\2\2\2\21\3\2\2\2\3\23") - buf.write("\3\2\2\2\5\26\3\2\2\2\7\30\3\2\2\2\t\32\3\2\2\2\13\34") - buf.write("\3\2\2\2\r\'\3\2\2\2\17)\3\2\2\2\21+\3\2\2\2\23\24\7\61") - buf.write("\2\2\24\25\7\61\2\2\25\4\3\2\2\2\26\27\7\61\2\2\27\6\3") - buf.write("\2\2\2\30\31\7,\2\2\31\b\3\2\2\2\32\33\7#\2\2\33\n\3\2") - buf.write("\2\2\34 \5\17\b\2\35\37\5\r\7\2\36\35\3\2\2\2\37\"\3\2") - buf.write("\2\2 \36\3\2\2\2 !\3\2\2\2!#\3\2\2\2\" \3\2\2\2#$\b\6") - buf.write("\2\2$\f\3\2\2\2%(\5\17\b\2&(\t\2\2\2\'%\3\2\2\2\'&\3\2") - buf.write("\2\2(\16\3\2\2\2)*\t\3\2\2*\20\3\2\2\2+/\7)\2\2,.\13\2") - buf.write("\2\2-,\3\2\2\2.\61\3\2\2\2/\60\3\2\2\2/-\3\2\2\2\60\62") - buf.write("\3\2\2\2\61/\3\2\2\2\62\63\7)\2\2\63\22\3\2\2\2\6\2 \'") - buf.write("/\3\3\6\2") - return buf.getvalue() - - -class XPathLexer(Lexer): - - atn = ATNDeserializer().deserialize(serializedATN()) - - decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ] - - - TOKEN_REF = 1 - RULE_REF = 2 - ANYWHERE = 3 - ROOT = 4 - WILDCARD = 5 - BANG = 6 - ID = 7 - STRING = 8 - - modeNames = [ "DEFAULT_MODE" ] - - literalNames = [ "", - "'//'", "'/'", "'*'", "'!'" ] - - symbolicNames = [ "", - "TOKEN_REF", "RULE_REF", "ANYWHERE", "ROOT", "WILDCARD", "BANG", - "ID", "STRING" ] - - ruleNames = [ "ANYWHERE", "ROOT", "WILDCARD", "BANG", "ID", "NameChar", - "NameStartChar", "STRING" ] - - grammarFileName = "XPathLexer.g4" - - def __init__(self, input=None): - super().__init__(input) - self.checkVersion("4.8") - self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache()) - self._actions = None - self._predicates = None - - - def action(self, localctx:RuleContext, ruleIndex:int, actionIndex:int): - if self._actions is None: - actions = dict() - actions[4] = self.ID_action - self._actions = actions - _action = self._actions.get(ruleIndex, None) - if _action is not None: - _action(localctx, actionIndex) - else: - raise Exception("No registered action for: %d" % ruleIndex) - - def ID_action(self, localctx:RuleContext , actionIndex:int): - if actionIndex == 0: - char = self.text[0] - if char.isupper(): - self.type = XPathLexer.TOKEN_REF - else: - self.type = XPathLexer.RULE_REF - -class 
XPath(object): - - WILDCARD = "*" # word not operator/separator - NOT = "!" # word for invert operator - - def __init__(self, parser:Parser, path:str): - self.parser = parser - self.path = path - self.elements = self.split(path) - - def split(self, path:str): - input = InputStream(path) - lexer = XPathLexer(input) - def recover(self, e): - raise e - lexer.recover = recover - lexer.removeErrorListeners() - lexer.addErrorListener(ErrorListener()) # XPathErrorListener does no more - tokenStream = CommonTokenStream(lexer) - try: - tokenStream.fill() - except LexerNoViableAltException as e: - pos = lexer.column - msg = "Invalid tokens or characters at index %d in path '%s'" % (pos, path) - raise Exception(msg, e) - - tokens = iter(tokenStream.tokens) - elements = list() - for el in tokens: - invert = False - anywhere = False - # Check for path separators, if none assume root - if el.type in [XPathLexer.ROOT, XPathLexer.ANYWHERE]: - anywhere = el.type == XPathLexer.ANYWHERE - next_el = next(tokens, None) - if not next_el: - raise Exception('Missing element after %s' % el.getText()) - else: - el = next_el - # Check for bangs - if el.type == XPathLexer.BANG: - invert = True - next_el = next(tokens, None) - if not next_el: - raise Exception('Missing element after %s' % el.getText()) - else: - el = next_el - # Add searched element - if el.type in [XPathLexer.TOKEN_REF, XPathLexer.RULE_REF, XPathLexer.WILDCARD, XPathLexer.STRING]: - element = self.getXPathElement(el, anywhere) - element.invert = invert - elements.append(element) - elif el.type==Token.EOF: - break - else: - raise Exception("Unknown path element %s" % lexer.symbolicNames[el.type]) - return elements - - # - # Convert word like {@code#} or {@code ID} or {@code expr} to a path - # element. {@code anywhere} is {@code true} if {@code //} precedes the - # word. - # - def getXPathElement(self, wordToken:Token, anywhere:bool): - if wordToken.type==Token.EOF: - raise Exception("Missing path element at end of path") - - word = wordToken.text - if wordToken.type==XPathLexer.WILDCARD : - return XPathWildcardAnywhereElement() if anywhere else XPathWildcardElement() - - elif wordToken.type in [XPathLexer.TOKEN_REF, XPathLexer.STRING]: - tsource = self.parser.getTokenStream().tokenSource - - ttype = Token.INVALID_TYPE - if wordToken.type == XPathLexer.TOKEN_REF: - if word in tsource.ruleNames: - ttype = tsource.ruleNames.index(word) + 1 - else: - if word in tsource.literalNames: - ttype = tsource.literalNames.index(word) - - if ttype == Token.INVALID_TYPE: - raise Exception("%s at index %d isn't a valid token name" % (word, wordToken.tokenIndex)) - return XPathTokenAnywhereElement(word, ttype) if anywhere else XPathTokenElement(word, ttype) - - else: - ruleIndex = self.parser.ruleNames.index(word) if word in self.parser.ruleNames else -1 - - if ruleIndex == -1: - raise Exception("%s at index %d isn't a valid rule name" % (word, wordToken.tokenIndex)) - return XPathRuleAnywhereElement(word, ruleIndex) if anywhere else XPathRuleElement(word, ruleIndex) - - - @staticmethod - def findAll(tree:ParseTree, xpath:str, parser:Parser): - p = XPath(parser, xpath) - return p.evaluate(tree) - - # - # Return a list of all nodes starting at {@code t} as root that satisfy the - # path. The root {@code /} is relative to the node passed to - # {@link #evaluate}. - # - def evaluate(self, t:ParseTree): - dummyRoot = ParserRuleContext() - dummyRoot.children = [t] # don't set t's parent. 
- - work = [dummyRoot] - for element in self.elements: - work_next = list() - for node in work: - if not isinstance(node, TerminalNode) and node.children: - # only try to match next element if it has children - # e.g., //func/*/stat might have a token node for which - # we can't go looking for stat nodes. - matching = element.evaluate(node) - - # See issue antlr#370 - Prevents XPath from returning the - # same node multiple times - matching = filter(lambda m: m not in work_next, matching) - - work_next.extend(matching) - work = work_next - - return work - - -class XPathElement(object): - - def __init__(self, nodeName:str): - self.nodeName = nodeName - self.invert = False - - def __str__(self): - return type(self).__name__ + "[" + ("!" if self.invert else "") + self.nodeName + "]" - - - -# -# Either {@code ID} at start of path or {@code ...//ID} in middle of path. -# -class XPathRuleAnywhereElement(XPathElement): - - def __init__(self, ruleName:str, ruleIndex:int): - super().__init__(ruleName) - self.ruleIndex = ruleIndex - - def evaluate(self, t:ParseTree): - # return all ParserRuleContext descendants of t that match ruleIndex (or do not match if inverted) - return filter(lambda c: isinstance(c, ParserRuleContext) and (self.invert ^ (c.getRuleIndex() == self.ruleIndex)), Trees.descendants(t)) - -class XPathRuleElement(XPathElement): - - def __init__(self, ruleName:str, ruleIndex:int): - super().__init__(ruleName) - self.ruleIndex = ruleIndex - - def evaluate(self, t:ParseTree): - # return all ParserRuleContext children of t that match ruleIndex (or do not match if inverted) - return filter(lambda c: isinstance(c, ParserRuleContext) and (self.invert ^ (c.getRuleIndex() == self.ruleIndex)), Trees.getChildren(t)) - -class XPathTokenAnywhereElement(XPathElement): - - def __init__(self, ruleName:str, tokenType:int): - super().__init__(ruleName) - self.tokenType = tokenType - - def evaluate(self, t:ParseTree): - # return all TerminalNode descendants of t that match tokenType (or do not match if inverted) - return filter(lambda c: isinstance(c, TerminalNode) and (self.invert ^ (c.symbol.type == self.tokenType)), Trees.descendants(t)) - -class XPathTokenElement(XPathElement): - - def __init__(self, ruleName:str, tokenType:int): - super().__init__(ruleName) - self.tokenType = tokenType - - def evaluate(self, t:ParseTree): - # return all TerminalNode children of t that match tokenType (or do not match if inverted) - return filter(lambda c: isinstance(c, TerminalNode) and (self.invert ^ (c.symbol.type == self.tokenType)), Trees.getChildren(t)) - - -class XPathWildcardAnywhereElement(XPathElement): - - def __init__(self): - super().__init__(XPath.WILDCARD) - - def evaluate(self, t:ParseTree): - if self.invert: - return list() # !* is weird but valid (empty) - else: - return Trees.descendants(t) - - -class XPathWildcardElement(XPathElement): - - def __init__(self): - super().__init__(XPath.WILDCARD) - - - def evaluate(self, t:ParseTree): - if self.invert: - return list() # !* is weird but valid (empty) - else: - return Trees.getChildren(t) diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/coloredlogs/tests.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/coloredlogs/tests.py deleted file mode 100644 index 650d3281a8584c1e863347643444f55db463edf9..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/coloredlogs/tests.py +++ /dev/null @@ -1,673 +0,0 @@ -# Automated tests for the `coloredlogs' package. 
-# -# Author: Peter Odding -# Last Change: June 11, 2021 -# URL: https://coloredlogs.readthedocs.io - -"""Automated tests for the `coloredlogs` package.""" - -# Standard library modules. -import contextlib -import logging -import logging.handlers -import os -import re -import subprocess -import sys -import tempfile - -# External dependencies. -from humanfriendly.compat import StringIO -from humanfriendly.terminal import ANSI_COLOR_CODES, ANSI_CSI, ansi_style, ansi_wrap -from humanfriendly.testing import PatchedAttribute, PatchedItem, TestCase, retry -from humanfriendly.text import format, random_string - -# The module we're testing. -import coloredlogs -import coloredlogs.cli -from coloredlogs import ( - CHROOT_FILES, - ColoredFormatter, - NameNormalizer, - decrease_verbosity, - find_defined_levels, - find_handler, - find_hostname, - find_program_name, - find_username, - get_level, - increase_verbosity, - install, - is_verbose, - level_to_number, - match_stream_handler, - parse_encoded_styles, - set_level, - walk_propagation_tree, -) -from coloredlogs.demo import demonstrate_colored_logging -from coloredlogs.syslog import SystemLogging, is_syslog_supported, match_syslog_handler -from coloredlogs.converter import ( - ColoredCronMailer, - EIGHT_COLOR_PALETTE, - capture, - convert, -) - -# External test dependencies. -from capturer import CaptureOutput -from verboselogs import VerboseLogger - -# Compiled regular expression that matches a single line of output produced by -# the default log format (does not include matching of ANSI escape sequences). -PLAIN_TEXT_PATTERN = re.compile(r''' - (?P \d{4}-\d{2}-\d{2} ) - \s (?P